// ---- file 0974323f2b846e10e41f6bbfdcc9852e2e799ed8 ----
use std::collections::HashMap;
use std::fmt;
use std::fs;
use std::net::IpAddr;
use log::trace;
use serde::{Deserialize, Serialize};
use crate::model::view::auction::AcceptedBid;
use crate::model::{BidId, NodeId};
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct Node<T> {
pub parent: Option<NodeId>,
pub children: Vec<NodeId>,
/// The actual data which will be stored within the tree
pub data: T,
}
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
pub struct NodeRecord {
/// IP address; only set in the case of the market node
pub ip: Option<IpAddr>,
pub port: Option<u16>,
pub accepted_bids: HashMap<BidId, AcceptedBid>,
}
#[derive(Debug)]
pub struct NodeIdList {
list: Vec<NodeId>,
}
impl fmt::Display for NodeIdList {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", self.list)
}
}
impl From<Vec<NodeId>> for NodeIdList {
fn from(list: Vec<NodeId>) -> Self {
NodeIdList { list }
}
}
#[derive(Debug, Clone)]
pub struct NodeDescription {
pub ip: IpAddr,
pub port: u16,
}
#[derive(Debug)]
pub enum NodeSituationData {
MarketConnected {
children: HashMap<NodeId, NodeDescription>,
market_ip: IpAddr,
market_port: u16,
my_id: NodeId,
},
NodeConnected {
children: HashMap<NodeId, NodeDescription>,
parent_id: NodeId,
parent_node_ip: IpAddr,
parent_node_port: u16,
my_id: NodeId,
},
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum NodeSituationDisk {
MarketConnected {
market_ip: IpAddr,
market_port: u16,
my_id: NodeId,
},
NodeConnected {
parent_id: NodeId,
parent_node_ip: IpAddr,
parent_node_port: u16,
my_id: NodeId,
},
}
/// Loads from a file objects of the form:
/// ```ron
/// MarketConnected (
/// market_ip: "127.0.0.1",
/// market_port: 8080,
/// my_id: "e13f2a63-2934-480a-a448-b1b01af7e170",
/// )
/// ```
/// or another example:
/// ```ron
/// NodeConnected (
/// parent_id: "e13f2a63-2934-480a-a448-b1b01af7e170",
/// parent_node_ip: "127.0.0.1",
/// parent_node_port: 8080,
/// my_id: "49aaea47-7af7-4c68-b29a-b445ef194d3a",
/// )
/// ```
impl NodeSituationDisk {
pub fn new(path: String) -> anyhow::Result<Self> {
let content = fs::read_to_string(path.clone())?;
let situation = ron::from_str::<NodeSituationDisk>(&content)?;
trace!("Loading nodes from disk, path: {}", path);
Ok(situation)
}
}
impl From<NodeSituationDisk> for NodeSituationData {
fn from(disk: NodeSituationDisk) -> Self {
match disk {
NodeSituationDisk::MarketConnected {
market_port,
market_ip,
my_id,
} => NodeSituationData::MarketConnected {
children: HashMap::new(),
market_ip,
market_port,
my_id,
},
NodeSituationDisk::NodeConnected {
parent_id,
parent_node_port,
parent_node_ip,
my_id,
} => NodeSituationData::NodeConnected {
children: HashMap::new(),
parent_id,
parent_node_ip,
parent_node_port,
my_id,
},
}
}
}
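// A minimal usage sketch (hypothetical; not part of the original module):
// load the on-disk description and convert it into the runtime
// representation, assuming a RON file at `path` matching one of the
// documented shapes above.
#[allow(dead_code)]
fn load_node_situation(path: String) -> anyhow::Result<NodeSituationData> {
    // Read and parse the RON file, then attach an empty `children` map.
    let disk = NodeSituationDisk::new(path)?;
    Ok(NodeSituationData::from(disk))
}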
// ---- file 282e9c0a4bdd2bbc44a0b2ad9efd4eeeae8391e9 ----
//! See `Semantics`.
mod source_to_def;
use std::{cell::RefCell, fmt, iter};
use base_db::{FileId, FileRange};
use hir_def::{
body, macro_id_to_def_id,
resolver::{self, HasResolver, Resolver, TypeNs},
type_ref::Mutability,
AsMacroCall, FunctionId, MacroId, TraitId, VariantId,
};
use hir_expand::{
db::AstDatabase,
name::{known, AsName},
ExpansionInfo, MacroCallId,
};
use hir_ty::Interner;
use itertools::Itertools;
use rustc_hash::{FxHashMap, FxHashSet};
use smallvec::{smallvec, SmallVec};
use syntax::{
algo::skip_trivia_token,
ast::{self, HasAttrs as _, HasGenericParams, HasLoopBody},
match_ast, AstNode, Direction, SyntaxNode, SyntaxNodePtr, SyntaxToken, TextSize,
};
use crate::{
db::HirDatabase,
semantics::source_to_def::{ChildContainer, SourceToDefCache, SourceToDefCtx},
source_analyzer::{resolve_hir_path, SourceAnalyzer},
Access, BindingMode, BuiltinAttr, Callable, ConstParam, Crate, Field, Function, HasSource,
HirFileId, Impl, InFile, Label, LifetimeParam, Local, Macro, Module, ModuleDef, Name, Path,
ScopeDef, ToolModule, Trait, Type, TypeAlias, TypeParam, VariantDef,
};
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PathResolution {
/// An item
Def(ModuleDef),
/// A local binding (only value namespace)
Local(Local),
/// A type parameter
TypeParam(TypeParam),
/// A const parameter
ConstParam(ConstParam),
SelfType(Impl),
BuiltinAttr(BuiltinAttr),
ToolModule(ToolModule),
}
impl PathResolution {
pub(crate) fn in_type_ns(&self) -> Option<TypeNs> {
match self {
PathResolution::Def(ModuleDef::Adt(adt)) => Some(TypeNs::AdtId((*adt).into())),
PathResolution::Def(ModuleDef::BuiltinType(builtin)) => {
Some(TypeNs::BuiltinType((*builtin).into()))
}
PathResolution::Def(
ModuleDef::Const(_)
| ModuleDef::Variant(_)
| ModuleDef::Macro(_)
| ModuleDef::Function(_)
| ModuleDef::Module(_)
| ModuleDef::Static(_)
| ModuleDef::Trait(_),
) => None,
PathResolution::Def(ModuleDef::TypeAlias(alias)) => {
Some(TypeNs::TypeAliasId((*alias).into()))
}
PathResolution::BuiltinAttr(_)
| PathResolution::ToolModule(_)
| PathResolution::Local(_)
| PathResolution::ConstParam(_) => None,
PathResolution::TypeParam(param) => Some(TypeNs::GenericParam((*param).into())),
PathResolution::SelfType(impl_def) => Some(TypeNs::SelfType((*impl_def).into())),
}
}
}
#[derive(Debug)]
pub struct TypeInfo {
/// The original type of the expression or pattern.
pub original: Type,
/// The adjusted type, if an adjustment happened.
pub adjusted: Option<Type>,
}
impl TypeInfo {
pub fn original(self) -> Type {
self.original
}
pub fn has_adjustment(&self) -> bool {
self.adjusted.is_some()
}
/// The adjusted type, or the original in case no adjustments occurred.
pub fn adjusted(self) -> Type {
self.adjusted.unwrap_or(self.original)
}
}
/// Primary API to get semantic information, like types, from syntax trees.
pub struct Semantics<'db, DB> {
pub db: &'db DB,
imp: SemanticsImpl<'db>,
}
pub struct SemanticsImpl<'db> {
pub db: &'db dyn HirDatabase,
s2d_cache: RefCell<SourceToDefCache>,
expansion_info_cache: RefCell<FxHashMap<HirFileId, Option<ExpansionInfo>>>,
// Root node to HirFileId cache
cache: RefCell<FxHashMap<SyntaxNode, HirFileId>>,
// MacroCall to its expansion's HirFileId cache
macro_call_cache: RefCell<FxHashMap<InFile<ast::MacroCall>, HirFileId>>,
}
impl<DB> fmt::Debug for Semantics<'_, DB> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Semantics {{ ... }}")
}
}
impl<'db, DB: HirDatabase> Semantics<'db, DB> {
pub fn new(db: &DB) -> Semantics<DB> {
let impl_ = SemanticsImpl::new(db);
Semantics { db, imp: impl_ }
}
pub fn parse(&self, file_id: FileId) -> ast::SourceFile {
self.imp.parse(file_id)
}
pub fn parse_or_expand(&self, file_id: HirFileId) -> Option<SyntaxNode> {
self.imp.parse_or_expand(file_id)
}
pub fn expand(&self, macro_call: &ast::MacroCall) -> Option<SyntaxNode> {
self.imp.expand(macro_call)
}
/// If `item` has an attribute macro attached to it, expands it.
pub fn expand_attr_macro(&self, item: &ast::Item) -> Option<SyntaxNode> {
self.imp.expand_attr_macro(item)
}
pub fn expand_derive_as_pseudo_attr_macro(&self, attr: &ast::Attr) -> Option<SyntaxNode> {
self.imp.expand_derive_as_pseudo_attr_macro(attr)
}
pub fn resolve_derive_macro(&self, derive: &ast::Attr) -> Option<Vec<Option<Macro>>> {
self.imp.resolve_derive_macro(derive)
}
pub fn expand_derive_macro(&self, derive: &ast::Attr) -> Option<Vec<SyntaxNode>> {
self.imp.expand_derive_macro(derive)
}
pub fn is_attr_macro_call(&self, item: &ast::Item) -> bool {
self.imp.is_attr_macro_call(item)
}
pub fn is_derive_annotated(&self, item: &ast::Adt) -> bool {
self.imp.is_derive_annotated(item)
}
pub fn speculative_expand(
&self,
actual_macro_call: &ast::MacroCall,
speculative_args: &ast::TokenTree,
token_to_map: SyntaxToken,
) -> Option<(SyntaxNode, SyntaxToken)> {
self.imp.speculative_expand(actual_macro_call, speculative_args, token_to_map)
}
pub fn speculative_expand_attr_macro(
&self,
actual_macro_call: &ast::Item,
speculative_args: &ast::Item,
token_to_map: SyntaxToken,
) -> Option<(SyntaxNode, SyntaxToken)> {
self.imp.speculative_expand_attr(actual_macro_call, speculative_args, token_to_map)
}
pub fn speculative_expand_derive_as_pseudo_attr_macro(
&self,
actual_macro_call: &ast::Attr,
speculative_args: &ast::Attr,
token_to_map: SyntaxToken,
) -> Option<(SyntaxNode, SyntaxToken)> {
self.imp.speculative_expand_derive_as_pseudo_attr_macro(
actual_macro_call,
speculative_args,
token_to_map,
)
}
/// Descend the token into macro calls to its first mapped counterpart.
pub fn descend_into_macros_single(&self, token: SyntaxToken) -> SyntaxToken {
self.imp.descend_into_macros_single(token)
}
/// Descend the token into macro calls to all its mapped counterparts.
pub fn descend_into_macros(&self, token: SyntaxToken) -> SmallVec<[SyntaxToken; 1]> {
self.imp.descend_into_macros(token)
}
/// Descend the token into macro calls to all its mapped counterparts that have the same text as the input token.
///
/// Returns the original non-descended token if none of the mapped counterparts have the same text.
pub fn descend_into_macros_with_same_text(
&self,
token: SyntaxToken,
) -> SmallVec<[SyntaxToken; 1]> {
self.imp.descend_into_macros_with_same_text(token)
}
/// Maps a node down by mapping its first and last token down.
pub fn descend_node_into_attributes<N: AstNode>(&self, node: N) -> SmallVec<[N; 1]> {
self.imp.descend_node_into_attributes(node)
}
/// Search for a definition's source and cache its syntax tree
pub fn source<Def: HasSource>(&self, def: Def) -> Option<InFile<Def::Ast>>
where
Def::Ast: AstNode,
{
self.imp.source(def)
}
pub fn hir_file_for(&self, syntax_node: &SyntaxNode) -> HirFileId {
self.imp.find_file(syntax_node).file_id
}
/// Attempts to map the node out of macro expanded files returning the original file range.
/// If upmapping is not possible, this will fall back to the range of the macro call of the
/// macro file the node resides in.
pub fn original_range(&self, node: &SyntaxNode) -> FileRange {
self.imp.original_range(node)
}
/// Attempts to map the node out of macro expanded files returning the original file range.
pub fn original_range_opt(&self, node: &SyntaxNode) -> Option<FileRange> {
self.imp.original_range_opt(node)
}
/// Attempts to map the node out of macro expanded files.
/// This only works for attribute expansions, as other expansions do not have nodes as input.
pub fn original_ast_node<N: AstNode>(&self, node: N) -> Option<N> {
self.imp.original_ast_node(node)
}
pub fn diagnostics_display_range(&self, diagnostics: InFile<SyntaxNodePtr>) -> FileRange {
self.imp.diagnostics_display_range(diagnostics)
}
pub fn token_ancestors_with_macros(
&self,
token: SyntaxToken,
) -> impl Iterator<Item = SyntaxNode> + '_ {
token.parent().into_iter().flat_map(move |it| self.ancestors_with_macros(it))
}
/// Iterates the ancestors of the given node, climbing up macro expansions while doing so.
pub fn ancestors_with_macros(&self, node: SyntaxNode) -> impl Iterator<Item = SyntaxNode> + '_ {
self.imp.ancestors_with_macros(node)
}
pub fn ancestors_at_offset_with_macros(
&self,
node: &SyntaxNode,
offset: TextSize,
) -> impl Iterator<Item = SyntaxNode> + '_ {
self.imp.ancestors_at_offset_with_macros(node, offset)
}
/// Find an AstNode by offset inside SyntaxNode; if it is inside a *MacroFile*,
/// search up until a node of the target AstNode type is found.
pub fn find_node_at_offset_with_macros<N: AstNode>(
&self,
node: &SyntaxNode,
offset: TextSize,
) -> Option<N> {
self.imp.ancestors_at_offset_with_macros(node, offset).find_map(N::cast)
}
/// Find an AstNode by offset inside SyntaxNode; if it is inside a *MacroCall*,
/// descend into it and search again.
pub fn find_node_at_offset_with_descend<N: AstNode>(
&self,
node: &SyntaxNode,
offset: TextSize,
) -> Option<N> {
self.imp.descend_node_at_offset(node, offset).flatten().find_map(N::cast)
}
/// Find an AstNode by offset inside SyntaxNode; if it is inside a *MacroCall*,
/// descend into it and search again.
pub fn find_nodes_at_offset_with_descend<'slf, N: AstNode + 'slf>(
&'slf self,
node: &SyntaxNode,
offset: TextSize,
) -> impl Iterator<Item = N> + 'slf {
self.imp.descend_node_at_offset(node, offset).filter_map(|mut it| it.find_map(N::cast))
}
pub fn resolve_lifetime_param(&self, lifetime: &ast::Lifetime) -> Option<LifetimeParam> {
self.imp.resolve_lifetime_param(lifetime)
}
pub fn resolve_label(&self, lifetime: &ast::Lifetime) -> Option<Label> {
self.imp.resolve_label(lifetime)
}
pub fn resolve_type(&self, ty: &ast::Type) -> Option<Type> {
self.imp.resolve_type(ty)
}
// FIXME: Figure out a nice interface to inspect adjustments
pub fn is_implicit_reborrow(&self, expr: &ast::Expr) -> Option<Mutability> {
self.imp.is_implicit_reborrow(expr)
}
pub fn type_of_expr(&self, expr: &ast::Expr) -> Option<TypeInfo> {
self.imp.type_of_expr(expr)
}
pub fn type_of_pat(&self, pat: &ast::Pat) -> Option<TypeInfo> {
self.imp.type_of_pat(pat)
}
pub fn type_of_self(&self, param: &ast::SelfParam) -> Option<Type> {
self.imp.type_of_self(param)
}
pub fn pattern_adjustments(&self, pat: &ast::Pat) -> SmallVec<[Type; 1]> {
self.imp.pattern_adjustments(pat)
}
pub fn binding_mode_of_pat(&self, pat: &ast::IdentPat) -> Option<BindingMode> {
self.imp.binding_mode_of_pat(pat)
}
pub fn resolve_method_call(&self, call: &ast::MethodCallExpr) -> Option<Function> {
self.imp.resolve_method_call(call).map(Function::from)
}
pub fn resolve_method_call_as_callable(&self, call: &ast::MethodCallExpr) -> Option<Callable> {
self.imp.resolve_method_call_as_callable(call)
}
pub fn resolve_field(&self, field: &ast::FieldExpr) -> Option<Field> {
self.imp.resolve_field(field)
}
pub fn resolve_record_field(
&self,
field: &ast::RecordExprField,
) -> Option<(Field, Option<Local>, Type)> {
self.imp.resolve_record_field(field)
}
pub fn resolve_record_pat_field(&self, field: &ast::RecordPatField) -> Option<Field> {
self.imp.resolve_record_pat_field(field)
}
pub fn resolve_macro_call(&self, macro_call: &ast::MacroCall) -> Option<Macro> {
self.imp.resolve_macro_call(macro_call)
}
pub fn is_unsafe_macro_call(&self, macro_call: &ast::MacroCall) -> bool {
self.imp.is_unsafe_macro_call(macro_call)
}
pub fn resolve_attr_macro_call(&self, item: &ast::Item) -> Option<Macro> {
self.imp.resolve_attr_macro_call(item)
}
pub fn resolve_path(&self, path: &ast::Path) -> Option<PathResolution> {
self.imp.resolve_path(path)
}
pub fn resolve_extern_crate(&self, extern_crate: &ast::ExternCrate) -> Option<Crate> {
self.imp.resolve_extern_crate(extern_crate)
}
pub fn resolve_variant(&self, record_lit: ast::RecordExpr) -> Option<VariantDef> {
self.imp.resolve_variant(record_lit).map(VariantDef::from)
}
pub fn resolve_bind_pat_to_const(&self, pat: &ast::IdentPat) -> Option<ModuleDef> {
self.imp.resolve_bind_pat_to_const(pat)
}
pub fn record_literal_missing_fields(&self, literal: &ast::RecordExpr) -> Vec<(Field, Type)> {
self.imp.record_literal_missing_fields(literal)
}
pub fn record_pattern_missing_fields(&self, pattern: &ast::RecordPat) -> Vec<(Field, Type)> {
self.imp.record_pattern_missing_fields(pattern)
}
pub fn to_def<T: ToDef>(&self, src: &T) -> Option<T::Def> {
let src = self.imp.find_file(src.syntax()).with_value(src).cloned();
T::to_def(&self.imp, src)
}
pub fn to_module_def(&self, file: FileId) -> Option<Module> {
self.imp.to_module_def(file).next()
}
pub fn to_module_defs(&self, file: FileId) -> impl Iterator<Item = Module> {
self.imp.to_module_def(file)
}
pub fn scope(&self, node: &SyntaxNode) -> Option<SemanticsScope<'db>> {
self.imp.scope(node)
}
pub fn scope_at_offset(
&self,
node: &SyntaxNode,
offset: TextSize,
) -> Option<SemanticsScope<'db>> {
self.imp.scope_at_offset(node, offset)
}
pub fn scope_for_def(&self, def: Trait) -> SemanticsScope<'db> {
self.imp.scope_for_def(def)
}
pub fn assert_contains_node(&self, node: &SyntaxNode) {
self.imp.assert_contains_node(node)
}
pub fn is_unsafe_method_call(&self, method_call_expr: &ast::MethodCallExpr) -> bool {
self.imp.is_unsafe_method_call(method_call_expr)
}
pub fn is_unsafe_ref_expr(&self, ref_expr: &ast::RefExpr) -> bool {
self.imp.is_unsafe_ref_expr(ref_expr)
}
pub fn is_unsafe_ident_pat(&self, ident_pat: &ast::IdentPat) -> bool {
self.imp.is_unsafe_ident_pat(ident_pat)
}
}
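// A hypothetical caller sketch (not part of the original file): the typical
// flow is to parse a file through `Semantics`, locate an AST node, and query
// semantic information about it. `db`, `file_id`, and `offset` are assumed
// inputs supplied by the surrounding IDE machinery.
#[allow(dead_code)]
fn type_at_offset<DB: HirDatabase>(
    db: &DB,
    file_id: FileId,
    offset: TextSize,
) -> Option<TypeInfo> {
    let sema = Semantics::new(db);
    let file = sema.parse(file_id);
    // Walks up from the token at `offset`, descending into macros as needed.
    let expr: ast::Expr = sema.find_node_at_offset_with_macros(file.syntax(), offset)?;
    sema.type_of_expr(&expr)
}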
impl<'db> SemanticsImpl<'db> {
fn new(db: &'db dyn HirDatabase) -> Self {
SemanticsImpl {
db,
s2d_cache: Default::default(),
cache: Default::default(),
expansion_info_cache: Default::default(),
macro_call_cache: Default::default(),
}
}
fn parse(&self, file_id: FileId) -> ast::SourceFile {
let tree = self.db.parse(file_id).tree();
self.cache(tree.syntax().clone(), file_id.into());
tree
}
fn parse_or_expand(&self, file_id: HirFileId) -> Option<SyntaxNode> {
let node = self.db.parse_or_expand(file_id)?;
self.cache(node.clone(), file_id);
Some(node)
}
fn expand(&self, macro_call: &ast::MacroCall) -> Option<SyntaxNode> {
let sa = self.analyze_no_infer(macro_call.syntax())?;
let file_id = sa.expand(self.db, InFile::new(sa.file_id, macro_call))?;
let node = self.parse_or_expand(file_id)?;
Some(node)
}
fn expand_attr_macro(&self, item: &ast::Item) -> Option<SyntaxNode> {
let src = self.wrap_node_infile(item.clone());
let macro_call_id = self.with_ctx(|ctx| ctx.item_to_macro_call(src))?;
self.parse_or_expand(macro_call_id.as_file())
}
fn expand_derive_as_pseudo_attr_macro(&self, attr: &ast::Attr) -> Option<SyntaxNode> {
let src = self.wrap_node_infile(attr.clone());
let adt = attr.syntax().parent().and_then(ast::Adt::cast)?;
let call_id = self.with_ctx(|ctx| {
ctx.attr_to_derive_macro_call(src.with_value(&adt), src).map(|(_, it, _)| it)
})?;
self.parse_or_expand(call_id.as_file())
}
fn resolve_derive_macro(&self, attr: &ast::Attr) -> Option<Vec<Option<Macro>>> {
let calls = self.derive_macro_calls(attr)?;
self.with_ctx(|ctx| {
Some(
calls
.into_iter()
.map(|call| {
macro_call_to_macro_id(ctx, self.db.upcast(), call?).map(|id| Macro { id })
})
.collect(),
)
})
}
fn expand_derive_macro(&self, attr: &ast::Attr) -> Option<Vec<SyntaxNode>> {
let res: Vec<_> = self
.derive_macro_calls(attr)?
.into_iter()
.flat_map(|call| {
let file_id = call?.as_file();
let node = self.db.parse_or_expand(file_id)?;
self.cache(node.clone(), file_id);
Some(node)
})
.collect();
Some(res)
}
fn derive_macro_calls(&self, attr: &ast::Attr) -> Option<Vec<Option<MacroCallId>>> {
let adt = attr.syntax().parent().and_then(ast::Adt::cast)?;
let file_id = self.find_file(adt.syntax()).file_id;
let adt = InFile::new(file_id, &adt);
let src = InFile::new(file_id, attr.clone());
self.with_ctx(|ctx| {
let (.., res) = ctx.attr_to_derive_macro_call(adt, src)?;
Some(res.to_vec())
})
}
fn is_derive_annotated(&self, adt: &ast::Adt) -> bool {
let file_id = self.find_file(adt.syntax()).file_id;
let adt = InFile::new(file_id, adt);
self.with_ctx(|ctx| ctx.has_derives(adt))
}
fn is_attr_macro_call(&self, item: &ast::Item) -> bool {
let file_id = self.find_file(item.syntax()).file_id;
let src = InFile::new(file_id, item.clone());
self.with_ctx(|ctx| ctx.item_to_macro_call(src).is_some())
}
fn speculative_expand(
&self,
actual_macro_call: &ast::MacroCall,
speculative_args: &ast::TokenTree,
token_to_map: SyntaxToken,
) -> Option<(SyntaxNode, SyntaxToken)> {
let SourceAnalyzer { file_id, resolver, .. } =
self.analyze_no_infer(actual_macro_call.syntax())?;
let macro_call = InFile::new(file_id, actual_macro_call);
let krate = resolver.krate();
let macro_call_id = macro_call.as_call_id(self.db.upcast(), krate, |path| {
resolver
.resolve_path_as_macro(self.db.upcast(), &path)
.map(|it| macro_id_to_def_id(self.db.upcast(), it))
})?;
hir_expand::db::expand_speculative(
self.db.upcast(),
macro_call_id,
speculative_args.syntax(),
token_to_map,
)
}
fn speculative_expand_attr(
&self,
actual_macro_call: &ast::Item,
speculative_args: &ast::Item,
token_to_map: SyntaxToken,
) -> Option<(SyntaxNode, SyntaxToken)> {
let macro_call = self.wrap_node_infile(actual_macro_call.clone());
let macro_call_id = self.with_ctx(|ctx| ctx.item_to_macro_call(macro_call))?;
hir_expand::db::expand_speculative(
self.db.upcast(),
macro_call_id,
speculative_args.syntax(),
token_to_map,
)
}
fn speculative_expand_derive_as_pseudo_attr_macro(
&self,
actual_macro_call: &ast::Attr,
speculative_args: &ast::Attr,
token_to_map: SyntaxToken,
) -> Option<(SyntaxNode, SyntaxToken)> {
let attr = self.wrap_node_infile(actual_macro_call.clone());
let adt = actual_macro_call.syntax().parent().and_then(ast::Adt::cast)?;
let macro_call_id = self.with_ctx(|ctx| {
ctx.attr_to_derive_macro_call(attr.with_value(&adt), attr).map(|(_, it, _)| it)
})?;
hir_expand::db::expand_speculative(
self.db.upcast(),
macro_call_id,
speculative_args.syntax(),
token_to_map,
)
}
// This might not be the correct way to do this, but it works for now
fn descend_node_into_attributes<N: AstNode>(&self, node: N) -> SmallVec<[N; 1]> {
let mut res = smallvec![];
let tokens = (|| {
let first = skip_trivia_token(node.syntax().first_token()?, Direction::Next)?;
let last = skip_trivia_token(node.syntax().last_token()?, Direction::Prev)?;
Some((first, last))
})();
let (first, last) = match tokens {
Some(it) => it,
None => return res,
};
if first == last {
self.descend_into_macros_impl(first, &mut |InFile { value, .. }| {
if let Some(node) = value.ancestors().find_map(N::cast) {
res.push(node)
}
false
});
} else {
// Descend first and last token, then zip them to look for the node they belong to
let mut scratch: SmallVec<[_; 1]> = smallvec![];
self.descend_into_macros_impl(first, &mut |token| {
scratch.push(token);
false
});
let mut scratch = scratch.into_iter();
self.descend_into_macros_impl(
last,
&mut |InFile { value: last, file_id: last_fid }| {
if let Some(InFile { value: first, file_id: first_fid }) = scratch.next() {
if first_fid == last_fid {
if let Some(p) = first.parent() {
let range = first.text_range().cover(last.text_range());
let node = find_root(&p)
.covering_element(range)
.ancestors()
.take_while(|it| it.text_range() == range)
.find_map(N::cast);
if let Some(node) = node {
res.push(node);
}
}
}
}
false
},
);
}
res
}
fn descend_into_macros(&self, token: SyntaxToken) -> SmallVec<[SyntaxToken; 1]> {
let mut res = smallvec![];
self.descend_into_macros_impl(token, &mut |InFile { value, .. }| {
res.push(value);
false
});
res
}
fn descend_into_macros_with_same_text(&self, token: SyntaxToken) -> SmallVec<[SyntaxToken; 1]> {
let text = token.text();
let mut res = smallvec![];
self.descend_into_macros_impl(token.clone(), &mut |InFile { value, .. }| {
if value.text() == text {
res.push(value);
}
false
});
if res.is_empty() {
res.push(token);
}
res
}
fn descend_into_macros_single(&self, token: SyntaxToken) -> SyntaxToken {
let mut res = token.clone();
self.descend_into_macros_impl(token, &mut |InFile { value, .. }| {
res = value;
true
});
res
}
fn descend_into_macros_impl(
&self,
token: SyntaxToken,
f: &mut dyn FnMut(InFile<SyntaxToken>) -> bool,
) {
let _p = profile::span("descend_into_macros");
let parent = match token.parent() {
Some(it) => it,
None => return,
};
let sa = match self.analyze_no_infer(&parent) {
Some(it) => it,
None => return,
};
let mut stack: SmallVec<[_; 4]> = smallvec![InFile::new(sa.file_id, token)];
let mut cache = self.expansion_info_cache.borrow_mut();
let mut mcache = self.macro_call_cache.borrow_mut();
let mut process_expansion_for_token =
|stack: &mut SmallVec<_>, macro_file, item, token: InFile<&_>| {
let expansion_info = cache
.entry(macro_file)
.or_insert_with(|| macro_file.expansion_info(self.db.upcast()))
.as_ref()?;
{
let InFile { file_id, value } = expansion_info.expanded();
self.cache(value, file_id);
}
let mapped_tokens = expansion_info.map_token_down(self.db.upcast(), item, token)?;
let len = stack.len();
// requeue the tokens we got from mapping our current token down
stack.extend(mapped_tokens);
// if the length changed we have found a mapping for the token
(stack.len() != len).then(|| ())
};
// Remap the next token in the queue into the macro call it's in. If it is not
// being remapped (either because it is not inside a macro call, or because it
// is unused), hand it to the callback as a final token; otherwise push the
// remapped tokens back onto the queue, as they can potentially be remapped
// again.
while let Some(token) = stack.pop() {
self.db.unwind_if_cancelled();
let was_not_remapped = (|| {
// are we inside an attribute macro call
let containing_attribute_macro_call = self.with_ctx(|ctx| {
token.value.ancestors().filter_map(ast::Item::cast).find_map(|item| {
if item.attrs().next().is_none() {
// Don't force populate the dyn cache for items that don't have an attribute anyways
return None;
}
Some((ctx.item_to_macro_call(token.with_value(item.clone()))?, item))
})
});
if let Some((call_id, item)) = containing_attribute_macro_call {
let file_id = call_id.as_file();
return process_expansion_for_token(
&mut stack,
file_id,
Some(item),
token.as_ref(),
);
}
// or are we inside a function-like macro call
if let Some(tt) =
// FIXME replace map.while_some with take_while once stable
token.value.ancestors().map(ast::TokenTree::cast).while_some().last()
{
let parent = tt.syntax().parent()?;
// check for derive attribute here
let macro_call = match_ast! {
match parent {
ast::MacroCall(mcall) => mcall,
// attribute we failed expansion for earlier, this might be a derive invocation
// so try downmapping the token into the pseudo derive expansion
// see [hir_expand::builtin_attr_macro] for how the pseudo derive expansion works
ast::Meta(meta) => {
let attr = meta.parent_attr()?;
let adt = attr.syntax().parent().and_then(ast::Adt::cast)?;
let call_id = self.with_ctx(|ctx| {
let (_, call_id, _) = ctx.attr_to_derive_macro_call(
token.with_value(&adt),
token.with_value(attr),
)?;
Some(call_id)
})?;
let file_id = call_id.as_file();
return process_expansion_for_token(
&mut stack,
file_id,
Some(adt.into()),
token.as_ref(),
);
},
_ => return None,
}
};
if tt.left_delimiter_token().map_or(false, |it| it == token.value) {
return None;
}
if tt.right_delimiter_token().map_or(false, |it| it == token.value) {
return None;
}
let mcall = token.with_value(macro_call);
let file_id = match mcache.get(&mcall) {
Some(&it) => it,
None => {
let it = sa.expand(self.db, mcall.as_ref())?;
mcache.insert(mcall, it);
it
}
};
return process_expansion_for_token(&mut stack, file_id, None, token.as_ref());
}
// outside of a macro invocation so this is a "final" token
None
})()
.is_none();
if was_not_remapped && f(token) {
break;
}
}
}
// Note this return type is deliberate as [`find_nodes_at_offset_with_descend`] wants to stop
// traversing the inner iterator when it finds a node.
// The outer iterator is over the token's descendants
// The inner iterator is the ancestors of a descendant
fn descend_node_at_offset(
&self,
node: &SyntaxNode,
offset: TextSize,
) -> impl Iterator<Item = impl Iterator<Item = SyntaxNode> + '_> + '_ {
node.token_at_offset(offset)
.map(move |token| self.descend_into_macros(token))
.map(|descendants| {
descendants.into_iter().map(move |it| self.token_ancestors_with_macros(it))
})
// re-order the tokens from token_at_offset so that ancestor chains with smaller first nodes come first
// See algo::ancestors_at_offset, which uses the same approach
.kmerge_by(|left, right| {
left.clone()
.map(|node| node.text_range().len())
.lt(right.clone().map(|node| node.text_range().len()))
})
}
fn original_range(&self, node: &SyntaxNode) -> FileRange {
let node = self.find_file(node);
node.original_file_range(self.db.upcast())
}
fn original_range_opt(&self, node: &SyntaxNode) -> Option<FileRange> {
let node = self.find_file(node);
node.original_file_range_opt(self.db.upcast())
}
fn original_ast_node<N: AstNode>(&self, node: N) -> Option<N> {
self.wrap_node_infile(node).original_ast_node(self.db.upcast()).map(|it| it.value)
}
fn diagnostics_display_range(&self, src: InFile<SyntaxNodePtr>) -> FileRange {
let root = self.parse_or_expand(src.file_id).unwrap();
let node = src.map(|it| it.to_node(&root));
node.as_ref().original_file_range(self.db.upcast())
}
fn token_ancestors_with_macros(
&self,
token: SyntaxToken,
) -> impl Iterator<Item = SyntaxNode> + Clone + '_ {
token.parent().into_iter().flat_map(move |parent| self.ancestors_with_macros(parent))
}
fn ancestors_with_macros(
&self,
node: SyntaxNode,
) -> impl Iterator<Item = SyntaxNode> + Clone + '_ {
let node = self.find_file(&node);
let db = self.db.upcast();
iter::successors(Some(node.cloned()), move |&InFile { file_id, ref value }| {
match value.parent() {
Some(parent) => Some(InFile::new(file_id, parent)),
None => {
self.cache(value.clone(), file_id);
file_id.call_node(db)
}
}
})
.map(|it| it.value)
}
fn ancestors_at_offset_with_macros(
&self,
node: &SyntaxNode,
offset: TextSize,
) -> impl Iterator<Item = SyntaxNode> + '_ {
node.token_at_offset(offset)
.map(|token| self.token_ancestors_with_macros(token))
.kmerge_by(|node1, node2| node1.text_range().len() < node2.text_range().len())
}
fn resolve_lifetime_param(&self, lifetime: &ast::Lifetime) -> Option<LifetimeParam> {
let text = lifetime.text();
let lifetime_param = lifetime.syntax().ancestors().find_map(|syn| {
let gpl = ast::AnyHasGenericParams::cast(syn)?.generic_param_list()?;
gpl.lifetime_params()
.find(|tp| tp.lifetime().as_ref().map(|lt| lt.text()).as_ref() == Some(&text))
})?;
let src = self.wrap_node_infile(lifetime_param);
ToDef::to_def(self, src)
}
fn resolve_label(&self, lifetime: &ast::Lifetime) -> Option<Label> {
let text = lifetime.text();
let label = lifetime.syntax().ancestors().find_map(|syn| {
let label = match_ast! {
match syn {
ast::ForExpr(it) => it.label(),
ast::WhileExpr(it) => it.label(),
ast::LoopExpr(it) => it.label(),
ast::BlockExpr(it) => it.label(),
_ => None,
}
};
label.filter(|l| {
l.lifetime()
.and_then(|lt| lt.lifetime_ident_token())
.map_or(false, |lt| lt.text() == text)
})
})?;
let src = self.wrap_node_infile(label);
ToDef::to_def(self, src)
}
fn resolve_type(&self, ty: &ast::Type) -> Option<Type> {
let analyze = self.analyze(ty.syntax())?;
let ctx = body::LowerCtx::new(self.db.upcast(), analyze.file_id);
let ty = hir_ty::TyLoweringContext::new(self.db, &analyze.resolver)
.lower_ty(&crate::TypeRef::from_ast(&ctx, ty.clone()));
Some(Type::new_with_resolver(self.db, &analyze.resolver, ty))
}
fn is_implicit_reborrow(&self, expr: &ast::Expr) -> Option<Mutability> {
self.analyze(expr.syntax())?.is_implicit_reborrow(self.db, expr)
}
fn type_of_expr(&self, expr: &ast::Expr) -> Option<TypeInfo> {
self.analyze(expr.syntax())?
.type_of_expr(self.db, expr)
.map(|(ty, coerced)| TypeInfo { original: ty, adjusted: coerced })
}
fn type_of_pat(&self, pat: &ast::Pat) -> Option<TypeInfo> {
self.analyze(pat.syntax())?
.type_of_pat(self.db, pat)
.map(|(ty, coerced)| TypeInfo { original: ty, adjusted: coerced })
}
fn type_of_self(&self, param: &ast::SelfParam) -> Option<Type> {
self.analyze(param.syntax())?.type_of_self(self.db, param)
}
fn pattern_adjustments(&self, pat: &ast::Pat) -> SmallVec<[Type; 1]> {
self.analyze(pat.syntax())
.and_then(|it| it.pattern_adjustments(self.db, pat))
.unwrap_or_default()
}
fn binding_mode_of_pat(&self, pat: &ast::IdentPat) -> Option<BindingMode> {
self.analyze(pat.syntax())?.binding_mode_of_pat(self.db, pat)
}
fn resolve_method_call(&self, call: &ast::MethodCallExpr) -> Option<FunctionId> {
self.analyze(call.syntax())?.resolve_method_call(self.db, call).map(|(id, _)| id)
}
fn resolve_method_call_as_callable(&self, call: &ast::MethodCallExpr) -> Option<Callable> {
let source_analyzer = self.analyze(call.syntax())?;
let (func, subst) = source_analyzer.resolve_method_call(self.db, call)?;
let ty = self.db.value_ty(func.into()).substitute(Interner, &subst);
let resolver = source_analyzer.resolver;
let ty = Type::new_with_resolver(self.db, &resolver, ty);
let mut res = ty.as_callable(self.db)?;
res.is_bound_method = true;
Some(res)
}
fn resolve_field(&self, field: &ast::FieldExpr) -> Option<Field> {
self.analyze(field.syntax())?.resolve_field(self.db, field)
}
fn resolve_record_field(
&self,
field: &ast::RecordExprField,
) -> Option<(Field, Option<Local>, Type)> {
self.analyze(field.syntax())?.resolve_record_field(self.db, field)
}
fn resolve_record_pat_field(&self, field: &ast::RecordPatField) -> Option<Field> {
self.analyze(field.syntax())?.resolve_record_pat_field(self.db, field)
}
fn resolve_macro_call(&self, macro_call: &ast::MacroCall) -> Option<Macro> {
let sa = self.analyze(macro_call.syntax())?;
let macro_call = self.find_file(macro_call.syntax()).with_value(macro_call);
sa.resolve_macro_call(self.db, macro_call)
}
fn is_unsafe_macro_call(&self, macro_call: &ast::MacroCall) -> bool {
let sa = match self.analyze(macro_call.syntax()) {
Some(it) => it,
None => return false,
};
let macro_call = self.find_file(macro_call.syntax()).with_value(macro_call);
sa.is_unsafe_macro_call(self.db, macro_call)
}
fn resolve_attr_macro_call(&self, item: &ast::Item) -> Option<Macro> {
let item_in_file = self.wrap_node_infile(item.clone());
let id = self.with_ctx(|ctx| {
let macro_call_id = ctx.item_to_macro_call(item_in_file)?;
macro_call_to_macro_id(ctx, self.db.upcast(), macro_call_id)
})?;
Some(Macro { id })
}
fn resolve_path(&self, path: &ast::Path) -> Option<PathResolution> {
self.analyze(path.syntax())?.resolve_path(self.db, path)
}
fn resolve_extern_crate(&self, extern_crate: &ast::ExternCrate) -> Option<Crate> {
let krate = self.scope(extern_crate.syntax())?.krate();
let name = extern_crate.name_ref()?.as_name();
if name == known::SELF_PARAM {
return Some(krate);
}
krate
.dependencies(self.db)
.into_iter()
.find_map(|dep| (dep.name == name).then(|| dep.krate))
}
fn resolve_variant(&self, record_lit: ast::RecordExpr) -> Option<VariantId> {
self.analyze(record_lit.syntax())?.resolve_variant(self.db, record_lit)
}
fn resolve_bind_pat_to_const(&self, pat: &ast::IdentPat) -> Option<ModuleDef> {
self.analyze(pat.syntax())?.resolve_bind_pat_to_const(self.db, pat)
}
fn record_literal_missing_fields(&self, literal: &ast::RecordExpr) -> Vec<(Field, Type)> {
self.analyze(literal.syntax())
.and_then(|it| it.record_literal_missing_fields(self.db, literal))
.unwrap_or_default()
}
fn record_pattern_missing_fields(&self, pattern: &ast::RecordPat) -> Vec<(Field, Type)> {
self.analyze(pattern.syntax())
.and_then(|it| it.record_pattern_missing_fields(self.db, pattern))
.unwrap_or_default()
}
fn with_ctx<F: FnOnce(&mut SourceToDefCtx) -> T, T>(&self, f: F) -> T {
let mut cache = self.s2d_cache.borrow_mut();
let mut ctx = SourceToDefCtx { db: self.db, cache: &mut *cache };
f(&mut ctx)
}
fn to_module_def(&self, file: FileId) -> impl Iterator<Item = Module> {
self.with_ctx(|ctx| ctx.file_to_def(file)).into_iter().map(Module::from)
}
fn scope(&self, node: &SyntaxNode) -> Option<SemanticsScope<'db>> {
self.analyze_no_infer(node).map(|SourceAnalyzer { file_id, resolver, .. }| SemanticsScope {
db: self.db,
file_id,
resolver,
})
}
fn scope_at_offset(&self, node: &SyntaxNode, offset: TextSize) -> Option<SemanticsScope<'db>> {
self.analyze_with_offset_no_infer(node, offset).map(
|SourceAnalyzer { file_id, resolver, .. }| SemanticsScope {
db: self.db,
file_id,
resolver,
},
)
}
fn scope_for_def(&self, def: Trait) -> SemanticsScope<'db> {
let file_id = self.db.lookup_intern_trait(def.id).id.file_id();
let resolver = def.id.resolver(self.db.upcast());
SemanticsScope { db: self.db, file_id, resolver }
}
fn source<Def: HasSource>(&self, def: Def) -> Option<InFile<Def::Ast>>
where
Def::Ast: AstNode,
{
let res = def.source(self.db)?;
self.cache(find_root(res.value.syntax()), res.file_id);
Some(res)
}
/// Returns `None` if the file of the node is not part of a crate.
fn analyze(&self, node: &SyntaxNode) -> Option<SourceAnalyzer> {
self.analyze_impl(node, None, true)
}
/// Returns `None` if the file of the node is not part of a crate.
fn analyze_no_infer(&self, node: &SyntaxNode) -> Option<SourceAnalyzer> {
self.analyze_impl(node, None, false)
}
fn analyze_with_offset_no_infer(
&self,
node: &SyntaxNode,
offset: TextSize,
) -> Option<SourceAnalyzer> {
self.analyze_impl(node, Some(offset), false)
}
fn analyze_impl(
&self,
node: &SyntaxNode,
offset: Option<TextSize>,
infer_body: bool,
) -> Option<SourceAnalyzer> {
let _p = profile::span("Semantics::analyze_impl");
let node = self.find_file(node);
let container = match self.with_ctx(|ctx| ctx.find_container(node)) {
Some(it) => it,
None => return None,
};
let resolver = match container {
ChildContainer::DefWithBodyId(def) => {
return Some(if infer_body {
SourceAnalyzer::new_for_body(self.db, def, node, offset)
} else {
SourceAnalyzer::new_for_body_no_infer(self.db, def, node, offset)
})
}
ChildContainer::TraitId(it) => it.resolver(self.db.upcast()),
ChildContainer::ImplId(it) => it.resolver(self.db.upcast()),
ChildContainer::ModuleId(it) => it.resolver(self.db.upcast()),
ChildContainer::EnumId(it) => it.resolver(self.db.upcast()),
ChildContainer::VariantId(it) => it.resolver(self.db.upcast()),
ChildContainer::TypeAliasId(it) => it.resolver(self.db.upcast()),
ChildContainer::GenericDefId(it) => it.resolver(self.db.upcast()),
};
Some(SourceAnalyzer::new_for_resolver(resolver, node))
}
fn cache(&self, root_node: SyntaxNode, file_id: HirFileId) {
assert!(root_node.parent().is_none());
let mut cache = self.cache.borrow_mut();
let prev = cache.insert(root_node, file_id);
assert!(prev == None || prev == Some(file_id))
}
fn assert_contains_node(&self, node: &SyntaxNode) {
self.find_file(node);
}
fn lookup(&self, root_node: &SyntaxNode) -> Option<HirFileId> {
let cache = self.cache.borrow();
cache.get(root_node).copied()
}
fn wrap_node_infile<N: AstNode>(&self, node: N) -> InFile<N> {
let InFile { file_id, .. } = self.find_file(node.syntax());
InFile::new(file_id, node)
}
/// Wraps the node in an [`InFile`] with the file id it belongs to.
fn find_file<'node>(&self, node: &'node SyntaxNode) -> InFile<&'node SyntaxNode> {
let root_node = find_root(node);
let file_id = self.lookup(&root_node).unwrap_or_else(|| {
panic!(
"\n\nFailed to lookup {:?} in this Semantics.\n\
Make sure to only query nodes derived from this instance of Semantics.\n\
root node: {:?}\n\
known nodes: {}\n\n",
node,
root_node,
self.cache
.borrow()
.keys()
.map(|it| format!("{:?}", it))
.collect::<Vec<_>>()
.join(", ")
)
});
InFile::new(file_id, node)
}
fn is_unsafe_method_call(&self, method_call_expr: &ast::MethodCallExpr) -> bool {
method_call_expr
.receiver()
.and_then(|expr| {
let field_expr = match expr {
ast::Expr::FieldExpr(field_expr) => field_expr,
_ => return None,
};
let ty = self.type_of_expr(&field_expr.expr()?)?.original;
if !ty.is_packed(self.db) {
return None;
}
let func = self.resolve_method_call(method_call_expr).map(Function::from)?;
let res = match func.self_param(self.db)?.access(self.db) {
Access::Shared | Access::Exclusive => true,
Access::Owned => false,
};
Some(res)
})
.unwrap_or(false)
}
fn is_unsafe_ref_expr(&self, ref_expr: &ast::RefExpr) -> bool {
ref_expr
.expr()
.and_then(|expr| {
let field_expr = match expr {
ast::Expr::FieldExpr(field_expr) => field_expr,
_ => return None,
};
let expr = field_expr.expr()?;
self.type_of_expr(&expr)
})
// Binding a reference to a packed type is possibly unsafe.
.map(|ty| ty.original.is_packed(self.db))
.unwrap_or(false)
// FIXME This needs layout computation to be correct. It will highlight
// more than it should with the current implementation.
}
fn is_unsafe_ident_pat(&self, ident_pat: &ast::IdentPat) -> bool {
if ident_pat.ref_token().is_none() {
return false;
}
ident_pat
.syntax()
.parent()
.and_then(|parent| {
// `IdentPat` can live under `RecordPat` directly under `RecordPatField` or
// `RecordPatFieldList`. `RecordPatField` also lives under `RecordPatFieldList`,
// so this tries to look up the `IdentPat` anywhere along that structure to the
// `RecordPat` so we can get the containing type.
let record_pat = ast::RecordPatField::cast(parent.clone())
.and_then(|record_pat| record_pat.syntax().parent())
.or_else(|| Some(parent.clone()))
.and_then(|parent| {
ast::RecordPatFieldList::cast(parent)?
.syntax()
.parent()
.and_then(ast::RecordPat::cast)
});
// If this doesn't match a `RecordPat`, fallback to a `LetStmt` to see if
// this is initialized from a `FieldExpr`.
if let Some(record_pat) = record_pat {
self.type_of_pat(&ast::Pat::RecordPat(record_pat))
} else if let Some(let_stmt) = ast::LetStmt::cast(parent) {
let field_expr = match let_stmt.initializer()? {
ast::Expr::FieldExpr(field_expr) => field_expr,
_ => return None,
};
self.type_of_expr(&field_expr.expr()?)
} else {
None
}
})
// Binding a reference to a packed type is possibly unsafe.
.map(|ty| ty.original.is_packed(self.db))
.unwrap_or(false)
}
}
fn macro_call_to_macro_id(
ctx: &mut SourceToDefCtx,
db: &dyn AstDatabase,
macro_call_id: MacroCallId,
) -> Option<MacroId> {
let loc = db.lookup_intern_macro_call(macro_call_id);
match loc.def.kind {
hir_expand::MacroDefKind::Declarative(it)
| hir_expand::MacroDefKind::BuiltIn(_, it)
| hir_expand::MacroDefKind::BuiltInAttr(_, it)
| hir_expand::MacroDefKind::BuiltInDerive(_, it)
| hir_expand::MacroDefKind::BuiltInEager(_, it) => {
ctx.macro_to_def(InFile::new(it.file_id, it.to_node(db)))
}
hir_expand::MacroDefKind::ProcMacro(_, _, it) => {
ctx.proc_macro_to_def(InFile::new(it.file_id, it.to_node(db)))
}
}
}
pub trait ToDef: AstNode + Clone {
type Def;
fn to_def(sema: &SemanticsImpl, src: InFile<Self>) -> Option<Self::Def>;
}
macro_rules! to_def_impls {
($(($def:path, $ast:path, $meth:ident)),* ,) => {$(
impl ToDef for $ast {
type Def = $def;
fn to_def(sema: &SemanticsImpl, src: InFile<Self>) -> Option<Self::Def> {
sema.with_ctx(|ctx| ctx.$meth(src)).map(<$def>::from)
}
}
)*}
}
to_def_impls![
(crate::Module, ast::Module, module_to_def),
(crate::Module, ast::SourceFile, source_file_to_def),
(crate::Struct, ast::Struct, struct_to_def),
(crate::Enum, ast::Enum, enum_to_def),
(crate::Union, ast::Union, union_to_def),
(crate::Trait, ast::Trait, trait_to_def),
(crate::Impl, ast::Impl, impl_to_def),
(crate::TypeAlias, ast::TypeAlias, type_alias_to_def),
(crate::Const, ast::Const, const_to_def),
(crate::Static, ast::Static, static_to_def),
(crate::Function, ast::Fn, fn_to_def),
(crate::Field, ast::RecordField, record_field_to_def),
(crate::Field, ast::TupleField, tuple_field_to_def),
(crate::Variant, ast::Variant, enum_variant_to_def),
(crate::TypeParam, ast::TypeParam, type_param_to_def),
(crate::LifetimeParam, ast::LifetimeParam, lifetime_param_to_def),
(crate::ConstParam, ast::ConstParam, const_param_to_def),
(crate::GenericParam, ast::GenericParam, generic_param_to_def),
(crate::Macro, ast::Macro, macro_to_def),
(crate::Local, ast::IdentPat, bind_pat_to_def),
(crate::Local, ast::SelfParam, self_param_to_def),
(crate::Label, ast::Label, label_to_def),
(crate::Adt, ast::Adt, adt_to_def),
];
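// A hypothetical sketch (not part of the original file): the table above is
// what makes `Semantics::to_def` work for each listed AST node, e.g. mapping
// an `ast::Fn` back to its `crate::Function` definition.
#[allow(dead_code)]
fn fn_def<DB: HirDatabase>(sema: &Semantics<'_, DB>, node: &ast::Fn) -> Option<Function> {
    sema.to_def(node)
}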
fn find_root(node: &SyntaxNode) -> SyntaxNode {
node.ancestors().last().unwrap()
}
/// `SemanticsScope` encapsulates the notion of a scope (the set of visible
/// names) at a particular program point.
///
/// It is a bit tricky, as scopes do not really exist inside the compiler.
/// Rather, the compiler directly computes for each reference the definition it
/// refers to. It might transiently compute the explicit scope map while doing
/// so, but, generally, this is not something left after the analysis.
///
/// However, we do very much need explicit scopes for IDE purposes --
/// completion, at its core, lists the contents of the current scope. The notion
/// of scope is also useful to answer questions like "what would be the meaning
/// of this piece of code if we inserted it into this position?".
///
/// So `SemanticsScope` is constructed from a specific program point (a syntax
/// node or just a raw offset) and provides access to the set of visible names
/// on a somewhat best-effort basis.
///
/// Note that if you are wondering "what does this specific existing name mean?",
/// you'd better use the `resolve_` family of methods.
#[derive(Debug)]
pub struct SemanticsScope<'a> {
pub db: &'a dyn HirDatabase,
file_id: HirFileId,
resolver: Resolver,
}
impl<'a> SemanticsScope<'a> {
pub fn module(&self) -> Module {
Module { id: self.resolver.module() }
}
pub fn krate(&self) -> Crate {
Crate { id: self.resolver.krate() }
}
pub(crate) fn resolver(&self) -> &Resolver {
&self.resolver
}
/// Note: `VisibleTraits` should be treated as an opaque type, passed into `Type`.
pub fn visible_traits(&self) -> VisibleTraits {
let resolver = &self.resolver;
VisibleTraits(resolver.traits_in_scope(self.db.upcast()))
}
pub fn process_all_names(&self, f: &mut dyn FnMut(Name, ScopeDef)) {
let scope = self.resolver.names_in_scope(self.db.upcast());
for (name, entries) in scope {
for entry in entries {
let def = match entry {
resolver::ScopeDef::ModuleDef(it) => ScopeDef::ModuleDef(it.into()),
resolver::ScopeDef::Unknown => ScopeDef::Unknown,
resolver::ScopeDef::ImplSelfType(it) => ScopeDef::ImplSelfType(it.into()),
resolver::ScopeDef::AdtSelfType(it) => ScopeDef::AdtSelfType(it.into()),
resolver::ScopeDef::GenericParam(id) => ScopeDef::GenericParam(id.into()),
resolver::ScopeDef::Local(pat_id) => match self.resolver.body_owner() {
Some(parent) => ScopeDef::Local(Local { parent, pat_id }),
None => continue,
},
resolver::ScopeDef::Label(label_id) => match self.resolver.body_owner() {
Some(parent) => ScopeDef::Label(Label { parent, label_id }),
None => continue,
},
};
f(name.clone(), def)
}
}
}
/// Resolve a path as if it were written at the given scope. This is
/// necessarily a heuristic, as it doesn't take hygiene into account.
pub fn speculative_resolve(&self, path: &ast::Path) -> Option<PathResolution> {
let ctx = body::LowerCtx::new(self.db.upcast(), self.file_id);
let path = Path::from_src(path.clone(), &ctx)?;
resolve_hir_path(self.db, &self.resolver, &path)
}
/// Iterates over associated types that may be specified after the given path (using
/// `Ty::Assoc` syntax).
pub fn assoc_type_shorthand_candidates<R>(
&self,
resolution: &PathResolution,
mut cb: impl FnMut(&Name, TypeAlias) -> Option<R>,
) -> Option<R> {
let def = self.resolver.generic_def()?;
hir_ty::associated_type_shorthand_candidates(
self.db,
def,
resolution.in_type_ns()?,
|name, _, id| cb(name, id.into()),
)
}
}
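// A hypothetical illustration (not part of the original file): collect every
// name visible at a syntax node by combining `Semantics::scope` with
// `SemanticsScope::process_all_names`.
#[allow(dead_code)]
fn visible_names<DB: HirDatabase>(sema: &Semantics<'_, DB>, node: &SyntaxNode) -> Vec<Name> {
    let mut names = Vec::new();
    if let Some(scope) = sema.scope(node) {
        scope.process_all_names(&mut |name, _def| names.push(name));
    }
    names
}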
pub struct VisibleTraits(pub FxHashSet<TraitId>);
// ---- file b9eebbe0f9f4f863a13fe24b8ef54b3b545cb810 ----
//! This crate contains the implementation of the logging interface.
#![feature(cell_update)]
#![deny(unconditional_recursion)]
#![warn(missing_copy_implementations)]
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
#![warn(trivial_casts)]
#![warn(trivial_numeric_casts)]
#![warn(unsafe_code)]
#![warn(unused_import_braces)]
pub mod disabled;
pub mod enabled;
use enso_prelude::*;
// ==============
// === Message ===
// ==============
/// Message that can be logged.
pub trait Message {
/// Turns the message into a `&str` and passes it to the given function.
fn with<T,F:FnOnce(&str)->T>(&self, f:F) -> T;
}
impl Message for &str {
fn with<T,F:FnOnce(&str)->T>(&self, f:F) -> T {
f(self)
}
}
impl<G:Fn()->S, S:AsRef<str>> Message for G {
fn with<T,F:FnOnce(&str)->T>(&self, f:F) -> T {
f(self().as_ref())
}
}
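// A hypothetical sketch (not part of the original file): both blanket impls
// above satisfy `Message`, so a consumer can accept either an eager `&str` or
// a closure that builds the string only when it is actually needed.
#[allow(dead_code)]
fn message_len(msg: impl Message) -> usize {
    msg.with(|s| s.len())
}
// Both `message_len("eager")` and `message_len(|| format!("lazy {}", 1 + 1))`
// compile and behave identically.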
// =================
// === AnyLogger ===
// =================
/// Interface common to all loggers.
pub trait AnyLogger {
/// Owned type of the logger.
type Owned;
/// Creates a new logger. Path should be a unique identifier for this logger.
fn new(path:impl Into<ImString>) -> Self::Owned;
/// Path that is used as a unique identifier of this logger.
fn path(&self) -> &str;
/// Creates a new logger with this logger as a parent.
fn sub(logger:impl AnyLogger, path:impl Into<ImString>) -> Self::Owned {
let path = path.into();
let super_path = logger.path();
if super_path.is_empty() { Self::new(path) }
else { Self::new(iformat!("{super_path}.{path}")) }
}
/// Creates a logger from AnyLogger.
fn from_logger(logger:impl AnyLogger) -> Self::Owned {
Self::new(logger.path())
}
/// Evaluates function `f` and visually groups all logs that occur during its execution.
fn group<T,F:FnOnce() -> T>(&self, msg:impl Message, f:F) -> T {
self.group_begin(msg);
let out = f();
self.group_end();
out
}
/// Log with stacktrace and info level verbosity.
fn trace(&self, _msg:impl Message) {}
/// Log with debug level verbosity
fn debug(&self, _msg:impl Message) {}
/// Log with info level verbosity.
fn info(&self, _msg:impl Message) {}
/// Log with warning level verbosity.
fn warning(&self, _msg:impl Message) {}
/// Log with error level verbosity.
fn error(&self, _msg:impl Message) {}
/// Visually groups all logs between group_begin and group_end.
fn group_begin(&self, _msg:impl Message) {}
/// Visually groups all logs between group_begin and group_end.
fn group_end(&self) {}
/// Start tracing all copies of this logger. See `TraceCopies` docs for details.
fn trace_copies(&self) {}
}
impl<T:AnyLogger> AnyLogger for &T {
type Owned = T::Owned;
fn path (&self) -> &str { T::path(self) }
fn new (path:impl Into<ImString>) -> Self::Owned { T::new(path) }
fn trace (&self, msg:impl Message) { T::trace (self,msg) }
fn debug (&self, msg:impl Message) { T::debug (self,msg) }
fn info (&self, msg:impl Message) { T::info (self,msg) }
fn warning (&self, msg:impl Message) { T::warning (self,msg) }
fn error (&self, msg:impl Message) { T::error (self,msg) }
fn group_begin (&self, msg:impl Message) { T::group_begin (self,msg) }
fn group_end (&self) { T::group_end (self) }
fn trace_copies(&self) { T::trace_copies(self) }
}
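// A hypothetical sketch (not part of the original file): deriving a child
// logger whose path extends the parent's, e.g. "app" becomes "app.network".
// Assumes `ImString: From<&str>` from `enso_prelude`.
#[allow(dead_code)]
fn network_logger<L: AnyLogger>(parent: &L) -> L::Owned {
    L::sub(parent, "network")
}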
// ==============
// === Macros ===
// ==============
/// Shortcut for `|| format!(..)`.
#[macro_export]
macro_rules! fmt {
($($arg:tt)*) => (||(format!($($arg)*)))
}
/// Evaluates an expression and visually groups all logs that occur during its execution.
#[macro_export]
macro_rules! group {
($logger:expr, $message:tt, {$($body:tt)*}) => {{
let __logger = $logger.clone();
__logger.group_begin(|| iformat!{$message});
let out = {$($body)*};
__logger.group_end();
out
}};
}
/// Logs a message at the given level.
#[macro_export]
macro_rules! log_template {
($method:ident $logger:expr, $message:tt $($rest:tt)*) => {
$crate::log_template_impl! {$method $logger, iformat!($message) $($rest)*}
};
}
/// Logs a message at the given level.
#[macro_export]
macro_rules! log_template_impl {
($method:ident $logger:expr, $expr:expr) => {{
$logger.$method(|| $expr);
}};
($method:ident $logger:expr, $expr:expr, $body:tt) => {{
let __logger = $logger.clone();
__logger.group_begin(|| $expr);
let out = $body;
__logger.group_end();
out
}};
}
/// Logs an internal error with descriptive message.
#[macro_export]
macro_rules! with_internal_bug_message { ($f:ident $($args:tt)*) => { $crate::$f! {
"This is a bug. Please report it and and provide us with as much information as \
possible at https://github.com/luna/enso/issues. Thank you!"
$($args)*
}};}
/// Logs an internal error.
#[macro_export]
macro_rules! log_internal_bug_template {
($($toks:tt)*) => {
$crate::with_internal_bug_message! { log_internal_bug_template_impl $($toks)* }
};
}
/// Logs an internal error.
#[macro_export]
macro_rules! log_internal_bug_template_impl {
($note:tt $method:ident $logger:expr, $message:tt $($rest:tt)*) => {
$crate::log_template_impl! {$method $logger,
format!("Internal Error. {}\n\n{}",iformat!($message),$note) $($rest)*
}
};
}
/// Log with stacktrace and level:info.
#[macro_export]
macro_rules! trace {
($($toks:tt)*) => {
$crate::log_template! {trace $($toks)*}
};
}
/// Log with level:debug
#[macro_export]
macro_rules! debug {
($($toks:tt)*) => {
$crate::log_template! {debug $($toks)*}
};
}
/// Log with level:info.
#[macro_export]
macro_rules! info {
($($toks:tt)*) => {
$crate::log_template! {info $($toks)*}
};
}
/// Log with level:warning.
#[macro_export]
macro_rules! warning {
($($toks:tt)*) => {
$crate::log_template! {warning $($toks)*}
};
}
/// Log with level:error.
#[macro_export]
macro_rules! error {
($($toks:tt)*) => {
$crate::log_template! {error $($toks)*}
};
}
/// Logs an internal warning.
#[macro_export]
macro_rules! internal_warning {
($($toks:tt)*) => {
$crate::log_internal_bug_template! {warning $($toks)*}
};
}
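// A hypothetical usage sketch (not part of the original file): the macros
// above expand to deferred `Message` closures, so formatting only happens if
// the logger actually consumes the message. The logger type must be `Clone`
// because `group!` clones it.
#[allow(dead_code)]
fn logging_demo(logger: &(impl AnyLogger + Clone)) {
    group!(logger, "startup", {
        info!(logger, "phase one");
        warning!(logger, "phase two skipped");
    });
}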
// ---- file 892536e215f4561ade6a504710dd7e6614bfd116 ----
pub use wrapped::WrappedRcRefCell;
pub mod arraydef;
pub mod arrayparser;
pub mod env;
pub mod error;
pub mod fsutils;
pub mod parser;
pub mod serverdir;
pub mod setup;
pub mod timeutils;
pub mod wrapped;
// ---- file de8dca97a098f5d0425fc4cce6a47b1e0d7ee4d7 ----
#![allow(unknown_lints)]
#![allow(unused_doc_comments)]
#![cfg_attr(feature = "clippy", feature(plugin))]
#![cfg_attr(
feature = "allocator",
feature(alloc_system, global_allocator, allocator_api)
)]
#[cfg(feature = "allocator")]
extern crate alloc_system;
#[cfg(feature = "allocator")]
use alloc_system::System;
#[cfg(feature = "allocator")]
#[global_allocator]
static A: System = System;
extern crate amy;
extern crate base32;
extern crate base64;
extern crate bincode;
extern crate byteorder;
extern crate chrono;
#[macro_use]
extern crate error_chain;
extern crate fnv;
extern crate fs_extra;
extern crate getopts;
extern crate http_range;
extern crate httparse;
#[macro_use]
extern crate lazy_static;
extern crate metrohash;
extern crate net2;
extern crate nix;
extern crate num_bigint;
extern crate openssl;
extern crate rand;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
extern crate shellexpand;
extern crate toml;
extern crate url;
extern crate adns;
extern crate synapse_bencode as bencode;
extern crate synapse_protocol as protocol;
extern crate synapse_rpc as rpc_lib;
extern crate synapse_session as session;
#[macro_use]
mod log;
mod args;
mod buffers;
mod config;
mod control;
mod disk;
mod handle;
mod init;
mod listener;
mod rpc;
mod socket;
mod stat;
mod throttle;
mod torrent;
mod tracker;
mod util;
// We need to do this for the log macros
use log::LogLevel;
use std::process;
use std::sync::atomic;
pub use protocol::DHT_EXT;
pub use protocol::EXT_PROTO;
pub use protocol::UT_META_ID;
pub use protocol::UT_PEX_ID;
/// Throttler max token amount
pub const THROT_TOKS: usize = 2 * 1024 * 1024;
pub static SHUTDOWN: atomic::AtomicBool = atomic::AtomicBool::new(false);
lazy_static! {
pub static ref CONFIG: config::Config = { config::Config::load() };
pub static ref PEER_ID: [u8; 20] = {
use rand::{self, Rng};
let mut pid = [0u8; 20];
let prefix = b"-SY0010-";
pid[..prefix.len()].clone_from_slice(&prefix[..]);
let mut rng = rand::thread_rng();
for i in prefix.len()..20 {
pid[i] = rng.gen::<u8>();
}
pid
};
pub static ref DL_TOKEN: String = { util::random_string(20) };
}
fn main() {
let args = args::args();
match init::init(args) {
Ok(()) => {}
Err(()) => {
error!("Failed to initialize synapse!");
process::exit(1);
}
}
info!("Initialized, starting!");
match init::run() {
Ok(()) => process::exit(0),
Err(()) => process::exit(1),
}
}
// ---- file 69fb0199711a87cce1956755cab453169bb38f5d ----
use rltk::prelude::*;
use std::sync::Mutex;
lazy_static! {
static ref RNG: Mutex<RandomNumberGenerator> = Mutex::new(RandomNumberGenerator::new());
}
pub fn reseed(seed: u64) {
*RNG.lock().unwrap() = RandomNumberGenerator::seeded(seed);
}
pub fn roll_dice(n: i32, die_type: i32) -> i32 {
RNG.lock().unwrap().roll_dice(n, die_type)
}
pub fn range(min: i32, max: i32) -> i32 {
RNG.lock().unwrap().range(min, max)
}
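// A hypothetical usage sketch (not part of the original file): the helpers
// above serialize access to a single global, reseedable RNG, so a fixed seed
// yields reproducible rolls across the whole program.
#[allow(dead_code)]
fn demo_rolls() -> (i32, i32) {
    reseed(42);
    let d6 = roll_dice(1, 6); // sum of one six-sided die
    let idx = range(0, 10); // a value from rltk's `range(min, max)` interval
    (d6, idx)
}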
// ---- file 672de59642db2426a375bf9414b0048e42135d69 ----
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
//
// A test that checks various simplifications
pub fn main() {
foo(true);
}
fn foo(b: bool) {
debug_assert!(b || !b);
debug_assert!(!b || b);
}
// ---- file e6546c9c699052e3f88a24bc7082d4a08a396213 ----
use smoked_meats::real_main;
use hotham::HothamResult;
fn main() -> HothamResult<()> {
real_main()
}
// ---- file 6743e54554f0a05cb31178408a151da3659d267c ----
use clap::{App, AppSettings, Arg, Shell, SubCommand};
pub fn build_cli() -> App<'static, 'static> {
let database_arg = Arg::with_name("DATABASE_URL")
.long("database-url")
.help(
"Specifies the database URL to connect to. Falls back to \
the DATABASE_URL environment variable if unspecified.",
)
.global(true)
.takes_value(true);
let migration_subcommand = SubCommand::with_name("migration")
.about(
"A group of commands for generating, running, and reverting \
migrations.",
)
.setting(AppSettings::VersionlessSubcommands)
.arg(migration_dir_arg())
.subcommand(SubCommand::with_name("run").about("Runs all pending migrations"))
.subcommand(SubCommand::with_name("revert").about("Reverts the latest run migration"))
.subcommand(SubCommand::with_name("redo").about(
"Reverts and re-runs the latest migration. Useful \
for testing that a migration can in fact be reverted.",
))
.subcommand(
SubCommand::with_name("list")
.about("Lists all available migrations, marking those that have been applied."),
)
.subcommand(
SubCommand::with_name("pending")
.about("Returns true if there are any pending migrations."),
)
.subcommand(
SubCommand::with_name("generate")
.about(
"Generate a new migration with the given name, and \
the current timestamp as the version",
)
.arg(
Arg::with_name("MIGRATION_NAME")
.help("The name of the migration to create")
.required(true),
)
.arg(
Arg::with_name("MIGRATION_VERSION")
.long("version")
.help(
"The version number to use when generating the migration. \
Defaults to the current timestamp, which should suffice \
for most use cases.",
)
.takes_value(true),
)
.arg(
Arg::with_name("MIGRATION_FORMAT")
.long("format")
.possible_values(&["sql", "barrel"])
.default_value("sql")
.takes_value(true)
.help("The format of the migration to be generated."),
),
)
.setting(AppSettings::SubcommandRequiredElseHelp);
let setup_subcommand = SubCommand::with_name("setup")
.arg(migration_dir_arg())
.about(
"Creates the migrations directory, creates the database \
specified in your DATABASE_URL, and runs existing migrations.",
);
let database_subcommand = SubCommand::with_name("database")
.alias("db")
.arg(migration_dir_arg())
.about("A group of commands for setting up and resetting your database.")
.setting(AppSettings::VersionlessSubcommands)
.subcommand(SubCommand::with_name("setup").about(
"Creates the database specified in your DATABASE_URL, \
and then runs any existing migrations.",
))
.subcommand(SubCommand::with_name("reset").about(
"Resets your database by dropping the database specified \
in your DATABASE_URL and then running `diesel database setup`.",
))
.subcommand(
SubCommand::with_name("drop")
.about("Drops the database specified in your DATABASE_URL.")
.setting(AppSettings::Hidden),
)
.setting(AppSettings::SubcommandRequiredElseHelp);
let generate_bash_completion_subcommand = SubCommand::with_name("bash-completion")
.about("DEPRECATED: Generate bash completion script for the diesel command.");
let generate_completions_subcommand = SubCommand::with_name("completions")
.about("Generate shell completion scripts for the diesel command.")
.arg(
Arg::with_name("SHELL")
.index(1)
.required(true)
.possible_values(&Shell::variants()),
);
let infer_schema_subcommand = SubCommand::with_name("print-schema")
.setting(AppSettings::VersionlessSubcommands)
.about("Print table definitions for database schema.")
.arg(
Arg::with_name("schema")
.long("schema")
.short("s")
.takes_value(true)
.help("The name of the schema."),
)
.arg(
Arg::with_name("table-name")
.index(1)
.takes_value(true)
.multiple(true)
.help("Table names to filter (default only-tables if not empty)"),
)
.arg(
Arg::with_name("only-tables")
.short("o")
.long("only-tables")
.help("Only include tables from table-name that matches regexp")
.conflicts_with("except-tables"),
)
.arg(
Arg::with_name("except-tables")
.short("e")
.long("except-tables")
.help("Exclude tables from table-name that matches regex")
.conflicts_with("only-tables"),
)
.arg(
Arg::with_name("with-docs")
.long("with-docs")
.help("Render documentation comments for tables and columns"),
)
.arg(
Arg::with_name("patch-file")
.long("patch-file")
.takes_value(true)
.help("A unified diff file to be applied to the final schema"),
)
.arg(
Arg::with_name("import-types")
.long("import-types")
.takes_value(true)
.multiple(true)
.number_of_values(1)
.help("A list of types to import for every table, separated by commas"),
);
let config_arg = Arg::with_name("CONFIG_FILE")
.long("config-file")
.help(
"The location of the configuration file to use. Falls back to the \
`DIESEL_CONFIG_FILE` environment variable if unspecified. Defaults \
to `diesel.toml` in your project root. See \
diesel.rs/guides/configuring-diesel-cli for documentation on this file.",
)
.global(true)
.takes_value(true);
let locked_schema_arg = Arg::with_name("LOCKED_SCHEMA")
.long("locked-schema")
.help("Require that the schema file is up to date")
.long_help(
"When `print_schema.file` is specified in your config file, this \
flag will cause Diesel CLI to error if any command would result in \
changes to that file. It is recommended that you use this flag when \
running migrations in CI or production.",
)
.global(true);
App::new("diesel")
.version(env!("CARGO_PKG_VERSION"))
.setting(AppSettings::VersionlessSubcommands)
.after_help(
"You can also run `diesel SUBCOMMAND -h` to get more information about that subcommand.",
)
.arg(database_arg)
.arg(config_arg)
.arg(locked_schema_arg)
.subcommand(migration_subcommand)
.subcommand(setup_subcommand)
.subcommand(database_subcommand)
.subcommand(generate_bash_completion_subcommand)
.subcommand(generate_completions_subcommand)
.subcommand(infer_schema_subcommand)
.setting(AppSettings::SubcommandRequiredElseHelp)
}
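/// The shared `--migration-dir` flag; marked `.global(true)` below so every
/// subcommand accepts it.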
fn migration_dir_arg<'a, 'b>() -> Arg<'a, 'b> {
Arg::with_name("MIGRATION_DIRECTORY")
.long("migration-dir")
.help(
"The location of your migration directory. By default this \
will look for a directory called `migrations` in the \
current directory and its parents.",
)
.takes_value(true)
.global(true)
}
| 40.115942 | 101 | 0.552987 |
dd55dbeeaab184e8441a8a511c7858eb267dd343 | 1,838 | use crate::{
bindings,
bsonc::Bsonc,
error::{BsoncError, Result},
};
use std::ptr;
#[derive(Debug)]
pub struct ChangeStreamc {
inner: *mut bindings::mongoc_change_stream_t,
}
pub trait ChangeStream {}
impl ChangeStreamc {
pub fn from_ptr(inner: *mut bindings::mongoc_change_stream_t) -> Self {
ChangeStreamc { inner }
}
pub fn get_error(&self) -> Option<BsoncError> {
assert!(!self.inner.is_null(), "change stream ptr null");
let mut error = BsoncError::empty();
let reply = Bsonc::empty();
let has_c_error = unsafe {
bindings::mongoc_change_stream_error_document(
self.inner,
error.as_mut_ptr(),
&mut reply.as_ptr(),
)
};
if !has_c_error {
return None;
}
if error.is_empty() {
None
} else {
Some(error)
}
}
}
impl ChangeStream for ChangeStreamc {}
impl Iterator for ChangeStreamc {
type Item = Result<bson::Document>;
fn next(&mut self) -> Option<Self::Item> {
let mut bson_ptr: *const bindings::bson_t = ptr::null_mut();
let success = unsafe { bindings::mongoc_change_stream_next(self.inner, &mut bson_ptr) };
if let Some(err) = self.get_error() {
Some(Err(err.into()))
} else if success {
dbg!("success?");
let bsonc = Bsonc::from_ptr(bson_ptr);
Some(bsonc.as_document())
} else {
dbg!("none?");
None
}
}
}
impl Drop for ChangeStreamc {
fn drop(&mut self) {
if !self.inner.is_null() {
unsafe {
bindings::mongoc_change_stream_destroy(self.inner);
};
self.inner = ptr::null_mut();
}
}
}
| 23.265823 | 96 | 0.538629 |
eb4401e1835a25230b394cfc918eda5270369e1c | 1,410 | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// The rust-book JavaScript in string form.
pub static JAVASCRIPT: &'static str = r#"
<script type="text/javascript">
document.addEventListener("DOMContentLoaded", function(event) {
document.getElementById("toggle-nav").onclick = toggleNav;
function toggleNav() {
var toc = document.getElementById("toc");
var pagewrapper = document.getElementById("page-wrapper");
toggleClass(toc, "mobile-hidden");
toggleClass(pagewrapper, "mobile-hidden");
};
function toggleClass(el, className) {
// from http://youmightnotneedjquery.com/
if (el.classList) {
el.classList.toggle(className);
} else {
var classes = el.className.split(' ');
var existingIndex = classes.indexOf(className);
if (existingIndex >= 0) {
classes.splice(existingIndex, 1);
} else {
classes.push(className);
}
el.className = classes.join(' ');
}
}
});
</script>
"#;
| 32.045455 | 68 | 0.676596 |
16ff9816a7ad66886c896c540ba89c7719e53a6e | 5,571 | use std::usize;
use text_block_layout::Block;
struct Item {
description: String,
unit_price: f64,
quantity: u32,
}
impl Item {
fn new(description: &str, unit_price: f64, quantity: u32) -> Item {
Item {
description: description.into(),
unit_price,
quantity,
}
}
    fn amount(&self) -> f64 {
self.unit_price * self.quantity as f64
}
}
struct Invoice {
date: String,
invoice_no: String,
company_name: String,
company_slogan: String,
company_addres: Vec<String>,
bill_to: Vec<String>,
ship_to: Vec<String>,
items: Vec<Item>,
tax_rate: f64,
}
impl Invoice {
fn subtotal(&self) -> f64 {
self.items
.iter()
            .fold(0.0, |acc, item| acc + item.amount())
}
fn sales_tax(&self) -> f64 {
self.tax_rate * self.subtotal()
}
fn total(&self) -> f64 {
self.sales_tax() + self.subtotal()
}
}
fn info_single(left_column: usize, title: &str, content_line: &str) -> Block {
info(left_column, title, &[content_line.to_string()])
}
fn info(left_column: usize, title: &str, content_lines: &[String]) -> Block {
let left = Block::of(title).pad_to_width_left(left_column);
let right = Block::empty().add_multiple_texts(content_lines);
left.pad_right(1).beside_top(&right)
}
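// A sketch of the resulting layout (assuming `pad_to_width_left`
// right-aligns): `info_single(10, "DATE", "2020/01/01")` renders as
// `      DATE 2020/01/01`, i.e. the title right-aligned in a 10-column
// gutter, one space, then the content lines stacked to the right.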
fn money(value: f64, width: usize) -> Block {
Block::of(format!["$ {:.2}", value]).pad_to_width_left(width)
}
/// Create item specification with columns description (36), unit price (12),
/// quantity (10) and amount (12)
fn item_line(item: &Item) -> Block {
let desc = Block::of(&item.description).pad_to_width_right(36);
let unit = money(item.unit_price, 12);
let quant = Block::of(item.quantity).pad_to_width_left(10);
    let amnt = money(item.amount(), 12);
desc.beside_top(&unit).beside_top(&quant).beside_top(&amnt)
}
fn create_text_invoice(i: &Invoice) -> Block {
let page_width: usize = 70;
let left_margin = 2;
let info_left_margin = 10;
let right_column = page_width - 32;
// Invoice top -------------------------------------------------------------
let company_info = Block::of(&i.company_name)
.add_text(&i.company_slogan)
.pad_bottom(1)
.add_multiple_texts(&i.company_addres);
let invoice_info = Block::of("INVOICE")
.pad_to_width_left(10)
.pad_bottom(1)
.stack_left(&info_single(info_left_margin, "DATE", &i.date))
.stack_left(&info_single(info_left_margin, "INVOICE #", &i.invoice_no));
let top = company_info
.pad_top(2)
.in_front_of(&invoice_info.pad_left(right_column));
// Customer addresses ------------------------------------------------------
let ship_address = info(info_left_margin, "BILL TO", &i.ship_to);
let bill_address = info(info_left_margin, "SHIP TO", &i.bill_to);
let addresses = bill_address.in_front_of(&ship_address.pad_left(right_column));
// Specification -----------------------------------------------------------
let hline = Block::of_height(1).fill_right(page_width, '─');
let item_header = Block::of("DESCRIPTION")
.pad_to_width_right(36)
.beside_top(&Block::of("UNIT PRICE").pad_to_width_left(12))
.beside_top(&Block::of("QUANTITY").pad_to_width_left(10))
.beside_top(&Block::of("AMMOUNT").pad_to_width_left(12));
let items = i
.items
.iter()
.fold(Block::empty(), |acc, item| acc.stack_left(&item_line(item)));
let spec = item_header
.stack_left(&hline)
.stack_left(&items)
.stack_left(&hline);
// Totals ------------------------------------------------------------------
let totals_width: usize = 22;
let totals_hline = Block::of_height(1).fill_right(totals_width, '─');
let totals_hline_thick = Block::of_height(1).fill_right(totals_width, '═');
let subtotals = Block::of("SUBTOTAL").beside_top(&money(i.subtotal(), 12));
let tax_rate = Block::of("TAX RATE")
.beside_top(&Block::of(format!("{:.00} %", i.tax_rate * 100.0)).pad_to_width_left(12));
let sales_tax = Block::of("SALES TAX").beside_top(&money(i.sales_tax(), 12));
let totals = Block::of("TOTAL").beside_top(&money(i.total(), 12));
let totals = subtotals
.stack_right(&totals_hline)
.stack_right(&tax_rate)
.stack_right(&totals_hline)
.stack_right(&sales_tax)
.stack_right(&totals_hline)
.stack_right(&totals)
.stack_right(&totals_hline_thick)
.pad_to_width_left(page_width);
// Composition -------------------------------------------------------------
top.pad_bottom(3)
.stack_left(&addresses)
.pad_bottom(3)
.stack_left(&spec)
.stack_left(&totals)
.pad_left(left_margin)
}
fn main() {
let invoice = Invoice {
date: "2020/01/01".into(),
invoice_no: "12345678".into(),
company_name: "Acme".into(),
company_slogan: "Where customers are billed".into(),
company_addres: vec!["Address".into(), "City, State ZIP".into()],
bill_to: vec!["Name".into(), "Address".into(), "City, State ZIP".into()],
ship_to: vec!["Name".into(), "Address".into(), "City, State ZIP".into()],
items: vec![
Item::new("Toilet paper, 13-pack", 3.95, 2_00),
Item::new("Coffee, medium ground, 3 lbs", 6.95, 4),
],
tax_rate: 0.08,
};
println!("{}", create_text_invoice(&invoice).to_string());
}
| 32.770588 | 95 | 0.583558 |
f40edb274073bf3723699f3700ce27d05cc5750f | 4,106 | use std::cell::Cell;
use flagset::FlagSet;
use crate::{
component::Component,
component_dirt::ComponentDirt,
core::{Core, Object, ObjectRef, OnAdded},
drawable::Drawable,
dyn_vec::DynVec,
math::Mat,
option_cell::OptionCell,
shapes::{Path, PathComposer, PathSpace, ShapePaintContainer},
transform_component::TransformComponent,
Renderer,
};
#[derive(Debug, Default)]
pub struct Shape {
drawable: Drawable,
shape_paint_container: ShapePaintContainer,
path_composer: OptionCell<Object<PathComposer>>,
paths: DynVec<Object<Path>>,
want_difference_path: Cell<bool>,
}
impl ObjectRef<'_, Shape> {
pub fn paths(&self) -> impl Iterator<Item = Object<Path>> + '_ {
self.paths.iter()
}
pub fn want_difference_path(&self) -> bool {
self.want_difference_path.get()
}
pub fn push_path(&self, path: Object<Path>) {
self.paths.push(path);
}
pub fn path_space(&self) -> FlagSet<PathSpace> {
self.cast::<ShapePaintContainer>().path_space()
}
pub fn path_changed(&self) {
self.path_composer
.get()
.expect("path_composer shoudl already be set on Shape")
.as_ref()
.cast::<Component>()
.add_dirt(ComponentDirt::Path, true);
self.cast::<ShapePaintContainer>()
.invalidate_stroke_effects();
}
pub fn path_composer(&self) -> Option<Object<PathComposer>> {
self.path_composer.get()
}
pub fn set_path_composer(&self, path_composer: Object<PathComposer>) {
self.path_composer.set(Some(path_composer));
}
pub fn draw(&self, renderer: &mut impl Renderer, transform: Mat) {
// todo!("clip");
let path_composer = self
.path_composer()
.expect("path_composer should already be set on Shape");
for shape_paint in self.cast::<ShapePaintContainer>().shape_paints() {
let shape_paint = shape_paint.as_ref();
if !shape_paint.is_visible() {
continue;
}
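            // Paints flagged as Local are rendered in the shape's own
            // coordinate space, so the shape's world transform is folded
            // into the render transform before drawing the local path.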
if shape_paint.path_space() & PathSpace::Local == PathSpace::Local {
let transform = transform * self.cast::<TransformComponent>().world_transform();
path_composer.as_ref().with_local_path(|path| {
shape_paint.draw(
renderer,
path.expect("local_path should already be set on PathComposer"),
transform,
);
});
} else {
path_composer.as_ref().with_world_path(|path| {
shape_paint.draw(
renderer,
path.expect("world_path should already be set on PathComposer"),
transform,
);
});
}
}
}
pub fn build_dependencies(&self) {
self.cast::<TransformComponent>().build_dependencies();
// Set the blend mode on all the shape paints. If we ever animate this
// property, we'll need to update it in the update cycle/mark dirty when the
// blend mode changes.
for paint in self.cast::<ShapePaintContainer>().shape_paints() {
paint
.as_ref()
.set_blend_mode(self.cast::<Drawable>().blend_mode());
}
}
pub fn update(&self, value: FlagSet<ComponentDirt>) {
self.cast::<TransformComponent>().update(value);
if Component::value_has_dirt(value, ComponentDirt::RenderOpacity) {
for paint in self.cast::<ShapePaintContainer>().shape_paints() {
paint
.as_ref()
.set_render_opacity(self.cast::<TransformComponent>().render_opacity());
}
}
}
}
impl Core for Shape {
parent_types![
(drawable, Drawable),
(shape_paint_container, ShapePaintContainer),
];
properties!(drawable);
}
impl OnAdded for ObjectRef<'_, Shape> {
on_added!(Drawable);
}
| 29.970803 | 96 | 0.572333 |
fc8e666b63eb37ab68024e97a0f25f59a8488c40 | 1,839 |
pub struct IconBedroomBaby {
props: crate::Props,
}
impl yew::Component for IconBedroomBaby {
type Properties = crate::Props;
type Message = ();
fn create(props: Self::Properties, _: yew::prelude::ComponentLink<Self>) -> Self
{
Self { props }
}
fn update(&mut self, _: Self::Message) -> yew::prelude::ShouldRender
{
true
}
fn change(&mut self, _: Self::Properties) -> yew::prelude::ShouldRender
{
false
}
fn view(&self) -> yew::prelude::Html
{
yew::prelude::html! {
<svg
class=self.props.class.unwrap_or("")
width=self.props.size.unwrap_or(24).to_string()
height=self.props.size.unwrap_or(24).to_string()
viewBox="0 0 24 24"
fill=self.props.fill.unwrap_or("none")
stroke=self.props.color.unwrap_or("currentColor")
stroke-width=self.props.stroke_width.unwrap_or(2).to_string()
stroke-linecap=self.props.stroke_linecap.unwrap_or("round")
stroke-linejoin=self.props.stroke_linejoin.unwrap_or("round")
>
<svg xmlns="http://www.w3.org/2000/svg" enable-background="new 0 0 24 24" height="24" viewBox="0 0 24 24" width="24"><g><path d="M0,0h24v24H0V0z" fill="none"/></g><g><path d="M17.94,14.04c-0.34,0.34-0.71,0.64-1.1,0.92L16,13.5V11h1v-1h-5.62L9.65,7H6l1,0.76L5.5,9.5l0.95,1L8,9.51v3.99l-0.84,1.46 c-0.39-0.27-0.76-0.58-1.1-0.92L5,15.1c1.87,1.87,4.36,2.9,7,2.9s5.13-1.03,7-2.9L17.94,14.04z M8.45,15.71l0.03-0.06l0.81-1.41 c1.74,0.65,3.66,0.65,5.4,0l0.81,1.41l0.03,0.06c-1.1,0.51-2.3,0.79-3.55,0.79S9.55,16.23,8.45,15.71z M20,4v16H4V4H20 M20,2H4 C2.9,2,2,2.9,2,4v16c0,1.1,0.9,2,2,2h16c1.1,0,2-0.9,2-2V4C22,2.9,21.1,2,20,2z"/></g></svg>
</svg>
}
}
}
| 39.978261 | 642 | 0.589451 |
2f1504b7a8b719c5c364cc0d4036f645098cf9a1 | 50,693 | use divrem::*;
use crate::arena::*;
use crate::arithmetic::*;
use crate::atom_table::*;
use crate::forms::*;
use crate::heap_iter::*;
use crate::machine::machine_errors::*;
use crate::machine::machine_state::*;
use crate::parser::ast::*;
use crate::parser::rug::{Integer, Rational};
use crate::types::*;
use crate::fixnum;
use ordered_float::*;
use std::cmp;
use std::convert::TryFrom;
use std::f64;
use std::mem;
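// Lifts an `EvalError` into a boxed closure that builds the full machine
// error term lazily, so the functor stub is only constructed on the
// failure path.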
#[macro_export]
macro_rules! try_numeric_result {
($e: expr, $stub_gen: expr) => {
match $e {
Ok(val) => Ok(val),
Err(e) => Err(Box::new(move |machine_st: &mut MachineState| {
let stub = $stub_gen();
let evaluation_error = machine_st.evaluation_error(e);
machine_st.error_form(evaluation_error, stub)
}) as Box<dyn Fn(&mut MachineState) -> MachineStub>),
}
};
}
macro_rules! drop_iter_on_err {
($self:expr, $iter: expr, $result: expr) => {
match $result {
Ok(val) => val,
Err(stub_gen) => {
std::mem::drop($iter);
return Err(stub_gen($self));
}
}
};
}
fn zero_divisor_eval_error(
stub_gen: impl Fn() -> FunctorStub + 'static,
) -> MachineStubGen {
Box::new(move |machine_st| {
let eval_error = machine_st.evaluation_error(EvalError::ZeroDivisor);
let stub = stub_gen();
machine_st.error_form(eval_error, stub)
})
}
fn undefined_eval_error(
stub_gen: impl Fn() -> FunctorStub + 'static,
) -> MachineStubGen {
Box::new(move |machine_st| {
let eval_error = machine_st.evaluation_error(EvalError::Undefined);
let stub = stub_gen();
machine_st.error_form(eval_error, stub)
})
}
fn numerical_type_error(
valid_type: ValidType,
n: Number,
stub_gen: impl Fn() -> FunctorStub + 'static,
) -> MachineStubGen {
Box::new(move |machine_st| {
let type_error = machine_st.type_error(valid_type, n);
let stub = stub_gen();
machine_st.error_form(type_error, stub)
})
}
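// Binary (Stein's) GCD on machine integers. For example, isize_gcd(12, 18)
// strips the shared factor of two (shift = 1), reduces the odd cofactors
// 3 and 9 to 3 by subtraction, and returns 3 << 1 = 6. Returns None when
// checked_abs overflows (i.e. for isize::MIN).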
fn isize_gcd(n1: isize, n2: isize) -> Option<isize> {
    if n1 == 0 {
        return n2.checked_abs();
    }
    if n2 == 0 {
        return n1.checked_abs();
    }
    let mut n1 = n1.checked_abs()?;
    let mut n2 = n2.checked_abs()?;
let mut shift = 0;
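    // Factor out the powers of two common to both operands; they
    // contribute a factor of 1 << shift to the final result.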
while ((n1 | n2) & 1) == 0 {
shift += 1;
n1 >>= 1;
n2 >>= 1;
}
while (n1 & 1) == 0 {
n1 >>= 1;
}
loop {
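        // n1 is odd here; strip factors of two from n2, then subtract the
        // smaller from the larger until n2 reaches zero.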
while (n2 & 1) == 0 {
n2 >>= 1;
}
if n1 > n2 {
let t = n2;
n2 = n1;
n1 = t;
}
n2 -= n1;
if n2 == 0 {
break;
}
}
Some(n1 << shift as isize)
}
pub(crate) fn add(lhs: Number, rhs: Number, arena: &mut Arena) -> Result<Number, EvalError> {
match (lhs, rhs) {
(Number::Fixnum(n1), Number::Fixnum(n2)) => Ok(
if let Some(result) = n1.get_num().checked_add(n2.get_num()) {
fixnum!(Number, result, arena)
} else {
Number::arena_from(
Integer::from(n1.get_num()) + Integer::from(n2.get_num()),
arena,
)
},
),
(Number::Fixnum(n1), Number::Integer(n2)) | (Number::Integer(n2), Number::Fixnum(n1)) => {
Ok(Number::arena_from(
Integer::from(n1.get_num()) + &*n2,
arena,
))
}
(Number::Fixnum(n1), Number::Rational(n2)) | (Number::Rational(n2), Number::Fixnum(n1)) => {
Ok(Number::arena_from(
Rational::from(n1.get_num()) + &*n2,
arena,
))
}
(Number::Fixnum(n1), Number::Float(OrderedFloat(n2)))
| (Number::Float(OrderedFloat(n2)), Number::Fixnum(n1)) => {
Ok(Number::Float(add_f(float_fn_to_f(n1.get_num())?, n2)?))
}
(Number::Integer(n1), Number::Integer(n2)) => {
Ok(Number::arena_from(Integer::from(&*n1) + &*n2, arena)) // add_i
}
(Number::Integer(n1), Number::Float(OrderedFloat(n2)))
| (Number::Float(OrderedFloat(n2)), Number::Integer(n1)) => {
Ok(Number::Float(add_f(float_i_to_f(&n1)?, n2)?))
}
(Number::Integer(n1), Number::Rational(n2))
| (Number::Rational(n2), Number::Integer(n1)) => {
Ok(Number::arena_from(Rational::from(&*n1) + &*n2, arena))
}
(Number::Rational(n1), Number::Float(OrderedFloat(n2)))
| (Number::Float(OrderedFloat(n2)), Number::Rational(n1)) => {
Ok(Number::Float(add_f(float_r_to_f(&n1)?, n2)?))
}
(Number::Float(OrderedFloat(f1)), Number::Float(OrderedFloat(f2))) => {
Ok(Number::Float(add_f(f1, f2)?))
}
(Number::Rational(r1), Number::Rational(r2)) => {
Ok(Number::arena_from(Rational::from(&*r1) + &*r2, arena))
}
}
}
pub(crate) fn neg(n: Number, arena: &mut Arena) -> Number {
match n {
Number::Fixnum(n) => {
if let Some(n) = n.get_num().checked_neg() {
fixnum!(Number, n, arena)
} else {
Number::arena_from(-Integer::from(n.get_num()), arena)
}
}
Number::Integer(n) => Number::arena_from(-Integer::from(&*n), arena),
Number::Float(OrderedFloat(f)) => Number::Float(OrderedFloat(-f)),
Number::Rational(r) => Number::arena_from(-Rational::from(&*r), arena),
}
}
pub(crate) fn abs(n: Number, arena: &mut Arena) -> Number {
match n {
Number::Fixnum(n) => {
if let Some(n) = n.get_num().checked_abs() {
fixnum!(Number, n, arena)
} else {
Number::arena_from(Integer::from(n.get_num()).abs(), arena)
}
}
Number::Integer(n) => Number::arena_from(Integer::from(n.abs_ref()), arena),
Number::Float(f) => Number::Float(f.abs()),
Number::Rational(r) => Number::arena_from(Rational::from(r.abs_ref()), arena),
}
}
#[inline]
pub(crate) fn sub(lhs: Number, rhs: Number, arena: &mut Arena) -> Result<Number, EvalError> {
let neg_result = neg(rhs, arena);
add(lhs, neg_result, arena)
}
pub(crate) fn mul(lhs: Number, rhs: Number, arena: &mut Arena) -> Result<Number, EvalError> {
match (lhs, rhs) {
(Number::Fixnum(n1), Number::Fixnum(n2)) => Ok(
if let Some(result) = n1.get_num().checked_mul(n2.get_num()) {
fixnum!(Number, result, arena)
} else {
Number::arena_from(
Integer::from(n1.get_num()) * Integer::from(n2.get_num()),
arena,
)
},
),
(Number::Fixnum(n1), Number::Integer(n2)) | (Number::Integer(n2), Number::Fixnum(n1)) => {
Ok(Number::arena_from(
Integer::from(n1.get_num()) * &*n2,
arena,
))
}
(Number::Fixnum(n1), Number::Rational(n2)) | (Number::Rational(n2), Number::Fixnum(n1)) => {
Ok(Number::arena_from(
Rational::from(n1.get_num()) * &*n2,
arena,
))
}
(Number::Fixnum(n1), Number::Float(OrderedFloat(n2)))
| (Number::Float(OrderedFloat(n2)), Number::Fixnum(n1)) => {
Ok(Number::Float(mul_f(float_fn_to_f(n1.get_num())?, n2)?))
}
(Number::Integer(n1), Number::Integer(n2)) => {
Ok(Number::arena_from(Integer::from(&*n1) * &*n2, arena)) // mul_i
}
(Number::Integer(n1), Number::Float(OrderedFloat(n2)))
| (Number::Float(OrderedFloat(n2)), Number::Integer(n1)) => {
Ok(Number::Float(mul_f(float_i_to_f(&n1)?, n2)?))
}
(Number::Integer(n1), Number::Rational(n2))
| (Number::Rational(n2), Number::Integer(n1)) => {
Ok(Number::arena_from(Rational::from(&*n1) * &*n2, arena))
}
(Number::Rational(n1), Number::Float(OrderedFloat(n2)))
| (Number::Float(OrderedFloat(n2)), Number::Rational(n1)) => {
Ok(Number::Float(mul_f(float_r_to_f(&n1)?, n2)?))
}
(Number::Float(OrderedFloat(f1)), Number::Float(OrderedFloat(f2))) => {
Ok(Number::Float(mul_f(f1, f2)?))
}
(Number::Rational(r1), Number::Rational(r2)) => {
Ok(Number::arena_from(Rational::from(&*r1) * &*r2, arena))
}
}
}
pub(crate) fn div(n1: Number, n2: Number) -> Result<Number, MachineStubGen> {
let stub_gen = || functor_stub(atom!("/"), 2);
if n2.is_zero() {
Err(zero_divisor_eval_error(stub_gen))
} else {
try_numeric_result!(n1 / n2, stub_gen)
}
}
pub(crate) fn float_pow(n1: Number, n2: Number) -> Result<Number, MachineStubGen> {
let f1 = result_f(&n1);
let f2 = result_f(&n2);
let stub_gen = || {
let pow_atom = atom!("**");
functor_stub(pow_atom, 2)
};
let f1 = try_numeric_result!(f1, stub_gen)?;
let f2 = try_numeric_result!(f2, stub_gen)?;
let result = result_f(&Number::Float(OrderedFloat(f1.powf(f2))));
Ok(Number::Float(OrderedFloat(try_numeric_result!(
result, stub_gen
)?)))
}
pub(crate) fn int_pow(n1: Number, n2: Number, arena: &mut Arena) -> Result<Number, MachineStubGen> {
if n1.is_zero() && n2.is_negative() {
let stub_gen = || {
let is_atom = atom!("is");
functor_stub(is_atom, 2)
};
return Err(undefined_eval_error(stub_gen));
}
let stub_gen = || {
let caret_atom = atom!("^");
functor_stub(caret_atom, 2)
};
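    // A negative integer exponent only yields an integer result when the
    // base is -1, 0 or 1; any other integer base is rejected with a float
    // type error rather than silently producing a non-integer.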
match (n1, n2) {
(Number::Fixnum(n1), Number::Fixnum(n2)) => {
let n1_i = n1.get_num();
let n2_i = n2.get_num();
if !(n1_i == 1 || n1_i == 0 || n1_i == -1) && n2_i < 0 {
let n = Number::Fixnum(n1);
Err(numerical_type_error(ValidType::Float, n, stub_gen))
} else {
if let Ok(n2_u) = u32::try_from(n2_i) {
if let Some(result) = n1_i.checked_pow(n2_u) {
return Ok(Number::arena_from(result, arena));
}
}
let n1 = Integer::from(n1_i);
let n2 = Integer::from(n2_i);
Ok(Number::arena_from(binary_pow(n1, &n2), arena))
}
}
(Number::Fixnum(n1), Number::Integer(n2)) => {
let n1_i = n1.get_num();
if !(n1_i == 1 || n1_i == 0 || n1_i == -1) && &*n2 < &0 {
let n = Number::Fixnum(n1);
Err(numerical_type_error(ValidType::Float, n, stub_gen))
} else {
let n1 = Integer::from(n1_i);
Ok(Number::arena_from(binary_pow(n1, &*n2), arena))
}
}
(Number::Integer(n1), Number::Fixnum(n2)) => {
let n2_i = n2.get_num();
if !(&*n1 == &1 || &*n1 == &0 || &*n1 == &-1) && n2_i < 0 {
let n = Number::Integer(n1);
Err(numerical_type_error(ValidType::Float, n, stub_gen))
} else {
let n2 = Integer::from(n2_i);
Ok(Number::arena_from(binary_pow((*n1).clone(), &n2), arena))
}
}
(Number::Integer(n1), Number::Integer(n2)) => {
if !(&*n1 == &1 || &*n1 == &0 || &*n1 == &-1) && &*n2 < &0 {
let n = Number::Integer(n1);
Err(numerical_type_error(ValidType::Float, n, stub_gen))
} else {
Ok(Number::arena_from(binary_pow((*n1).clone(), &*n2), arena))
}
}
(n1, Number::Integer(n2)) => {
let f1 = float(n1)?;
let f2 = float(Number::Integer(n2))?;
unary_float_fn_template(Number::Float(OrderedFloat(f1)), |f| f.powf(f2))
.map(|f| Number::Float(OrderedFloat(f)))
}
(n1, n2) => {
let f2 = float(n2)?;
if n1.is_negative() && f2 != f2.floor() {
return Err(undefined_eval_error(stub_gen));
}
let f1 = float(n1)?;
unary_float_fn_template(Number::Float(OrderedFloat(f1)), |f| f.powf(f2))
.map(|f| Number::Float(OrderedFloat(f)))
}
}
}
pub(crate) fn pow(n1: Number, n2: Number, culprit: Atom) -> Result<Number, MachineStubGen> {
if n2.is_negative() && n1.is_zero() {
let stub_gen = move || functor_stub(culprit, 2);
return Err(undefined_eval_error(stub_gen));
}
float_pow(n1, n2)
}
#[inline]
pub(crate) fn float(n: Number) -> Result<f64, MachineStubGen> {
let stub_gen = || {
let is_atom = atom!("is");
functor_stub(is_atom, 2)
};
try_numeric_result!(result_f(&n), stub_gen)
}
#[inline]
pub(crate) fn unary_float_fn_template<FloatFn>(
n1: Number,
f: FloatFn,
) -> Result<f64, MachineStubGen>
where
FloatFn: Fn(f64) -> f64,
{
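    // Evaluate the operand as a float, apply `f`, then round-trip the
    // result through result_f so invalid outputs (e.g. NaN or infinity)
    // can surface as evaluation errors instead of leaking into the
    // computation.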
let stub_gen = || {
let is_atom = atom!("is");
functor_stub(is_atom, 2)
};
let f1 = try_numeric_result!(result_f(&n1), stub_gen)?;
let f1 = result_f(&Number::Float(OrderedFloat(f(f1))));
try_numeric_result!(f1, stub_gen)
}
pub(crate) fn max(n1: Number, n2: Number) -> Result<Number, MachineStubGen> {
match (n1, n2) {
(Number::Fixnum(n1), Number::Fixnum(n2)) => {
if n1.get_num() > n2.get_num() {
Ok(Number::Fixnum(n1))
} else {
Ok(Number::Fixnum(n2))
}
}
(Number::Fixnum(n1), Number::Integer(n2)) => {
if &*n2 > &n1.get_num() {
Ok(Number::Integer(n2))
} else {
Ok(Number::Fixnum(n1))
}
}
(Number::Integer(n1), Number::Fixnum(n2)) => {
if &*n1 > &n2.get_num() {
Ok(Number::Integer(n1))
} else {
Ok(Number::Fixnum(n2))
}
}
(Number::Integer(n1), Number::Integer(n2)) => {
if n1 > n2 {
Ok(Number::Integer(n1))
} else {
Ok(Number::Integer(n2))
}
}
(n1, n2) => {
let stub_gen = || {
let max_atom = atom!("max");
functor_stub(max_atom, 2)
};
let f1 = try_numeric_result!(result_f(&n1), stub_gen)?;
let f2 = try_numeric_result!(result_f(&n2), stub_gen)?;
Ok(Number::Float(cmp::max(OrderedFloat(f1), OrderedFloat(f2))))
}
}
}
pub(crate) fn min(n1: Number, n2: Number) -> Result<Number, MachineStubGen> {
match (n1, n2) {
(Number::Fixnum(n1), Number::Fixnum(n2)) => {
if n1.get_num() < n2.get_num() {
Ok(Number::Fixnum(n1))
} else {
Ok(Number::Fixnum(n2))
}
}
(Number::Fixnum(n1), Number::Integer(n2)) => {
if &*n2 < &n1.get_num() {
Ok(Number::Integer(n2))
} else {
Ok(Number::Fixnum(n1))
}
}
(Number::Integer(n1), Number::Fixnum(n2)) => {
if &*n1 < &n2.get_num() {
Ok(Number::Integer(n1))
} else {
Ok(Number::Fixnum(n2))
}
}
(Number::Integer(n1), Number::Integer(n2)) => {
if n1 < n2 {
Ok(Number::Integer(n1))
} else {
Ok(Number::Integer(n2))
}
}
(n1, n2) => {
let stub_gen = || {
let min_atom = atom!("min");
functor_stub(min_atom, 2)
};
let f1 = try_numeric_result!(result_f(&n1), stub_gen)?;
let f2 = try_numeric_result!(result_f(&n2), stub_gen)?;
Ok(Number::Float(cmp::min(OrderedFloat(f1), OrderedFloat(f2))))
}
}
}
pub fn rational_from_number(
n: Number,
stub_gen: impl Fn() -> FunctorStub + 'static,
arena: &mut Arena,
) -> Result<TypedArenaPtr<Rational>, MachineStubGen> {
match n {
Number::Fixnum(n) => Ok(arena_alloc!(Rational::from(n.get_num()), arena)),
Number::Rational(r) => Ok(r),
Number::Float(OrderedFloat(f)) => match Rational::from_f64(f) {
Some(r) => Ok(arena_alloc!(r, arena)),
None => Err(Box::new(move |machine_st| {
let instantiation_error = machine_st.instantiation_error();
let stub = stub_gen();
machine_st.error_form(instantiation_error, stub)
})),
},
Number::Integer(n) => Ok(arena_alloc!(Rational::from(&*n), arena)),
}
}
pub(crate) fn rdiv(
r1: TypedArenaPtr<Rational>,
r2: TypedArenaPtr<Rational>,
) -> Result<Rational, MachineStubGen> {
if &*r2 == &0 {
let stub_gen = || {
let rdiv_atom = atom!("rdiv");
functor_stub(rdiv_atom, 2)
};
Err(zero_divisor_eval_error(stub_gen))
} else {
Ok(Rational::from(&*r1 / &*r2))
}
}
pub(crate) fn idiv(n1: Number, n2: Number, arena: &mut Arena) -> Result<Number, MachineStubGen> {
let stub_gen = || {
let idiv_atom = atom!("//");
functor_stub(idiv_atom, 2)
};
match (n1, n2) {
(Number::Fixnum(n1), Number::Fixnum(n2)) => {
if n2.get_num() == 0 {
Err(zero_divisor_eval_error(stub_gen))
} else {
if let Some(result) = n1.get_num().checked_div(n2.get_num()) {
Ok(Number::arena_from(result, arena))
} else {
let n1 = Integer::from(n1.get_num());
let n2 = Integer::from(n2.get_num());
Ok(Number::arena_from(n1 / n2, arena))
}
}
}
(Number::Fixnum(n1), Number::Integer(n2)) => {
if &*n2 == &0 {
Err(zero_divisor_eval_error(stub_gen))
} else {
Ok(Number::arena_from(Integer::from(n1) / &*n2, arena))
}
}
(Number::Integer(n2), Number::Fixnum(n1)) => {
if n1.get_num() == 0 {
Err(zero_divisor_eval_error(stub_gen))
} else {
Ok(Number::arena_from(&*n2 / Integer::from(n1), arena))
}
}
(Number::Integer(n1), Number::Integer(n2)) => {
if &*n2 == &0 {
Err(zero_divisor_eval_error(stub_gen))
} else {
Ok(Number::arena_from(
<(Integer, Integer)>::from(n1.div_rem_ref(&*n2)).0,
arena,
))
}
}
(Number::Fixnum(_), n2) | (Number::Integer(_), n2) => {
Err(numerical_type_error(ValidType::Integer, n2, stub_gen))
}
(n1, _) => Err(numerical_type_error(ValidType::Integer, n1, stub_gen)),
}
}
pub(crate) fn int_floor_div(
n1: Number,
n2: Number,
arena: &mut Arena,
) -> Result<Number, MachineStubGen> {
let stub_gen = || {
let div_atom = atom!("div");
functor_stub(div_atom, 2)
};
let modulus = modulus(n1, n2, arena)?;
let n1 = try_numeric_result!(sub(n1, modulus, arena), stub_gen)?;
idiv(n1, n2, arena)
}
pub(crate) fn shr(n1: Number, n2: Number, arena: &mut Arena) -> Result<Number, MachineStubGen> {
let stub_gen = || {
let shr_atom = atom!(">>");
functor_stub(shr_atom, 2)
};
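    // Shift counts that do not fit in a u32 are clamped to u32::MAX, which
    // for these arbitrary-precision integers shifts the value "all the
    // way" (to 0 or -1 for a right shift).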
match (n1, n2) {
(Number::Fixnum(n1), Number::Fixnum(n2)) => {
let n1_i = n1.get_num();
let n2_i = n2.get_num();
let n1 = Integer::from(n1_i);
if let Ok(n2) = u32::try_from(n2_i) {
return Ok(Number::arena_from(n1 >> n2, arena));
} else {
return Ok(Number::arena_from(n1 >> u32::max_value(), arena));
}
}
(Number::Fixnum(n1), Number::Integer(n2)) => {
let n1 = Integer::from(n1.get_num());
match n2.to_u32() {
Some(n2) => Ok(Number::arena_from(n1 >> n2, arena)),
_ => Ok(Number::arena_from(n1 >> u32::max_value(), arena)),
}
}
(Number::Integer(n1), Number::Fixnum(n2)) => match u32::try_from(n2.get_num()) {
Ok(n2) => Ok(Number::arena_from(Integer::from(&*n1 >> n2), arena)),
_ => Ok(Number::arena_from(
Integer::from(&*n1 >> u32::max_value()),
arena,
)),
},
(Number::Integer(n1), Number::Integer(n2)) => match n2.to_u32() {
Some(n2) => Ok(Number::arena_from(Integer::from(&*n1 >> n2), arena)),
_ => Ok(Number::arena_from(
Integer::from(&*n1 >> u32::max_value()),
arena,
)),
},
(Number::Integer(_), n2) => Err(numerical_type_error(ValidType::Integer, n2, stub_gen)),
(Number::Fixnum(_), n2) => Err(numerical_type_error(ValidType::Integer, n2, stub_gen)),
(n1, _) => Err(numerical_type_error(ValidType::Integer, n1, stub_gen)),
}
}
pub(crate) fn shl(n1: Number, n2: Number, arena: &mut Arena) -> Result<Number, MachineStubGen> {
let stub_gen = || {
        let shl_atom = atom!("<<");
functor_stub(shl_atom, 2)
};
match (n1, n2) {
(Number::Fixnum(n1), Number::Fixnum(n2)) => {
let n1_i = n1.get_num();
let n2_i = n2.get_num();
let n1 = Integer::from(n1_i);
if let Ok(n2) = u32::try_from(n2_i) {
return Ok(Number::arena_from(n1 << n2, arena));
} else {
return Ok(Number::arena_from(n1 << u32::max_value(), arena));
}
}
(Number::Fixnum(n1), Number::Integer(n2)) => {
let n1 = Integer::from(n1.get_num());
match n2.to_u32() {
Some(n2) => Ok(Number::arena_from(n1 << n2, arena)),
_ => Ok(Number::arena_from(n1 << u32::max_value(), arena)),
}
}
(Number::Integer(n1), Number::Fixnum(n2)) => match u32::try_from(n2.get_num()) {
Ok(n2) => Ok(Number::arena_from(Integer::from(&*n1 << n2), arena)),
_ => Ok(Number::arena_from(
Integer::from(&*n1 << u32::max_value()),
arena,
)),
},
(Number::Integer(n1), Number::Integer(n2)) => match n2.to_u32() {
Some(n2) => Ok(Number::arena_from(Integer::from(&*n1 << n2), arena)),
_ => Ok(Number::arena_from(
Integer::from(&*n1 << u32::max_value()),
arena,
)),
},
(Number::Integer(_), n2) => Err(numerical_type_error(ValidType::Integer, n2, stub_gen)),
(Number::Fixnum(_), n2) => Err(numerical_type_error(ValidType::Integer, n2, stub_gen)),
(n1, _) => Err(numerical_type_error(ValidType::Integer, n1, stub_gen)),
}
}
pub(crate) fn and(n1: Number, n2: Number, arena: &mut Arena) -> Result<Number, MachineStubGen> {
let stub_gen = || {
let and_atom = atom!("/\\");
functor_stub(and_atom, 2)
};
match (n1, n2) {
(Number::Fixnum(n1), Number::Fixnum(n2)) => {
Ok(Number::arena_from(n1.get_num() & n2.get_num(), arena))
}
(Number::Fixnum(n1), Number::Integer(n2)) => {
let n1 = Integer::from(n1.get_num());
Ok(Number::arena_from(n1 & &*n2, arena))
}
(Number::Integer(n1), Number::Fixnum(n2)) => Ok(Number::arena_from(
&*n1 & Integer::from(n2.get_num()),
arena,
)),
(Number::Integer(n1), Number::Integer(n2)) => {
Ok(Number::arena_from(Integer::from(&*n1 & &*n2), arena))
}
(Number::Integer(_), n2) | (Number::Fixnum(_), n2) => {
Err(numerical_type_error(ValidType::Integer, n2, stub_gen))
}
(n1, _) => Err(numerical_type_error(ValidType::Integer, n1, stub_gen)),
}
}
pub(crate) fn or(n1: Number, n2: Number, arena: &mut Arena) -> Result<Number, MachineStubGen> {
let stub_gen = || {
let or_atom = atom!("\\/");
functor_stub(or_atom, 2)
};
match (n1, n2) {
(Number::Fixnum(n1), Number::Fixnum(n2)) => {
Ok(Number::arena_from(n1.get_num() | n2.get_num(), arena))
}
(Number::Fixnum(n1), Number::Integer(n2)) => {
let n1 = Integer::from(n1.get_num());
Ok(Number::arena_from(n1 | &*n2, arena))
}
(Number::Integer(n1), Number::Fixnum(n2)) => Ok(Number::arena_from(
&*n1 | Integer::from(n2.get_num()),
arena,
)),
(Number::Integer(n1), Number::Integer(n2)) => {
Ok(Number::arena_from(Integer::from(&*n1 | &*n2), arena))
}
(Number::Integer(_), n2) | (Number::Fixnum(_), n2) => {
Err(numerical_type_error(ValidType::Integer, n2, stub_gen))
}
(n1, _) => Err(numerical_type_error(ValidType::Integer, n1, stub_gen)),
}
}
pub(crate) fn xor(n1: Number, n2: Number, arena: &mut Arena) -> Result<Number, MachineStubGen> {
let stub_gen = || {
let xor_atom = atom!("xor");
functor_stub(xor_atom, 2)
};
match (n1, n2) {
(Number::Fixnum(n1), Number::Fixnum(n2)) => {
Ok(Number::arena_from(n1.get_num() ^ n2.get_num(), arena))
}
(Number::Fixnum(n1), Number::Integer(n2)) => {
let n1 = Integer::from(n1.get_num());
Ok(Number::arena_from(n1 ^ &*n2, arena))
}
(Number::Integer(n1), Number::Fixnum(n2)) => Ok(Number::arena_from(
&*n1 ^ Integer::from(n2.get_num()),
arena,
)),
(Number::Integer(n1), Number::Integer(n2)) => {
Ok(Number::arena_from(Integer::from(&*n1 ^ &*n2), arena))
}
(Number::Integer(_), n2) | (Number::Fixnum(_), n2) => {
Err(numerical_type_error(ValidType::Integer, n2, stub_gen))
}
        (n1, _) => Err(numerical_type_error(ValidType::Integer, n1, stub_gen)),
}
}
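// `mod` is the floored modulus (the result takes the sign of the divisor,
// via div_rem_floor), whereas `rem` below is the truncated remainder (the
// result takes the sign of the dividend, via `%`).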
pub(crate) fn modulus(x: Number, y: Number, arena: &mut Arena) -> Result<Number, MachineStubGen> {
let stub_gen = || {
let mod_atom = atom!("mod");
functor_stub(mod_atom, 2)
};
match (x, y) {
(Number::Fixnum(n1), Number::Fixnum(n2)) => {
let n2_i = n2.get_num();
if n2_i == 0 {
Err(zero_divisor_eval_error(stub_gen))
} else {
let n1_i = n1.get_num();
Ok(Number::arena_from(n1_i.rem_floor(n2_i), arena))
}
}
(Number::Fixnum(n1), Number::Integer(n2)) => {
if &*n2 == &0 {
Err(zero_divisor_eval_error(stub_gen))
} else {
let n1 = Integer::from(n1.get_num());
Ok(Number::arena_from(
<(Integer, Integer)>::from(n1.div_rem_floor_ref(&*n2)).1,
arena,
))
}
}
(Number::Integer(n1), Number::Fixnum(n2)) => {
let n2_i = n2.get_num();
if n2_i == 0 {
Err(zero_divisor_eval_error(stub_gen))
} else {
let n2 = Integer::from(n2_i);
Ok(Number::arena_from(
<(Integer, Integer)>::from(n1.div_rem_floor_ref(&n2)).1,
arena,
))
}
}
(Number::Integer(x), Number::Integer(y)) => {
if &*y == &0 {
Err(zero_divisor_eval_error(stub_gen))
} else {
Ok(Number::arena_from(
<(Integer, Integer)>::from(x.div_rem_floor_ref(&*y)).1,
arena,
))
}
}
(Number::Integer(_), n2) | (Number::Fixnum(_), n2) => {
Err(numerical_type_error(ValidType::Integer, n2, stub_gen))
}
(n1, _) => Err(numerical_type_error(ValidType::Integer, n1, stub_gen)),
}
}
pub(crate) fn remainder(x: Number, y: Number, arena: &mut Arena) -> Result<Number, MachineStubGen> {
let stub_gen = || {
let rem_atom = atom!("rem");
functor_stub(rem_atom, 2)
};
match (x, y) {
(Number::Fixnum(n1), Number::Fixnum(n2)) => {
let n2_i = n2.get_num();
if n2_i == 0 {
Err(zero_divisor_eval_error(stub_gen))
} else {
let n1_i = n1.get_num();
Ok(Number::arena_from(n1_i % n2_i, arena))
}
}
(Number::Fixnum(n1), Number::Integer(n2)) => {
if &*n2 == &0 {
Err(zero_divisor_eval_error(stub_gen))
} else {
let n1 = Integer::from(n1.get_num());
Ok(Number::arena_from(n1 % &*n2, arena))
}
}
(Number::Integer(n1), Number::Fixnum(n2)) => {
let n2_i = n2.get_num();
if n2_i == 0 {
Err(zero_divisor_eval_error(stub_gen))
} else {
let n2 = Integer::from(n2_i);
Ok(Number::arena_from(&*n1 % n2, arena))
}
}
(Number::Integer(n1), Number::Integer(n2)) => {
if &*n2 == &0 {
Err(zero_divisor_eval_error(stub_gen))
} else {
Ok(Number::arena_from(Integer::from(&*n1 % &*n2), arena))
}
}
(Number::Integer(_), n2) | (Number::Fixnum(_), n2) => {
Err(numerical_type_error(ValidType::Integer, n2, stub_gen))
}
(n1, _) => Err(numerical_type_error(ValidType::Integer, n1, stub_gen)),
}
}
pub(crate) fn gcd(n1: Number, n2: Number, arena: &mut Arena) -> Result<Number, MachineStubGen> {
let stub_gen = || {
let gcd_atom = atom!("gcd");
functor_stub(gcd_atom, 2)
};
match (n1, n2) {
(Number::Fixnum(n1), Number::Fixnum(n2)) => {
let n1_i = n1.get_num() as isize;
let n2_i = n2.get_num() as isize;
if let Some(result) = isize_gcd(n1_i, n2_i) {
Ok(Number::arena_from(result, arena))
} else {
Ok(Number::arena_from(
Integer::from(n1_i).gcd(&Integer::from(n2_i)),
arena,
))
}
}
(Number::Fixnum(n1), Number::Integer(n2)) | (Number::Integer(n2), Number::Fixnum(n1)) => {
let n1 = Integer::from(n1.get_num());
Ok(Number::arena_from(Integer::from(n2.gcd_ref(&n1)), arena))
}
(Number::Integer(n1), Number::Integer(n2)) => {
Ok(Number::arena_from(Integer::from(n1.gcd_ref(&n2)), arena))
}
(Number::Float(f), _) | (_, Number::Float(f)) => {
let n = Number::Float(f);
Err(numerical_type_error(ValidType::Integer, n, stub_gen))
}
(Number::Rational(r), _) | (_, Number::Rational(r)) => {
let n = Number::Rational(r);
Err(numerical_type_error(ValidType::Integer, n, stub_gen))
}
}
}
pub(crate) fn atan2(n1: Number, n2: Number) -> Result<f64, MachineStubGen> {
if n1.is_zero() && n2.is_zero() {
let stub_gen = || {
let is_atom = atom!("is");
functor_stub(is_atom, 2)
};
Err(undefined_eval_error(stub_gen))
} else {
let f1 = float(n1)?;
let f2 = float(n2)?;
unary_float_fn_template(Number::Float(OrderedFloat(f1)), |f| f.atan2(f2))
}
}
#[inline]
pub(crate) fn sin(n1: Number) -> Result<f64, MachineStubGen> {
unary_float_fn_template(n1, |f| f.sin())
}
#[inline]
pub(crate) fn cos(n1: Number) -> Result<f64, MachineStubGen> {
unary_float_fn_template(n1, |f| f.cos())
}
#[inline]
pub(crate) fn tan(n1: Number) -> Result<f64, MachineStubGen> {
unary_float_fn_template(n1, |f| f.tan())
}
#[inline]
pub(crate) fn log(n1: Number) -> Result<f64, MachineStubGen> {
unary_float_fn_template(n1, |f| f.log(f64::consts::E))
}
#[inline]
pub(crate) fn exp(n1: Number) -> Result<f64, MachineStubGen> {
unary_float_fn_template(n1, |f| f.exp())
}
#[inline]
pub(crate) fn asin(n1: Number) -> Result<f64, MachineStubGen> {
unary_float_fn_template(n1, |f| f.asin())
}
#[inline]
pub(crate) fn acos(n1: Number) -> Result<f64, MachineStubGen> {
unary_float_fn_template(n1, |f| f.acos())
}
#[inline]
pub(crate) fn atan(n1: Number) -> Result<f64, MachineStubGen> {
unary_float_fn_template(n1, |f| f.atan())
}
#[inline]
pub(crate) fn sqrt(n1: Number) -> Result<f64, MachineStubGen> {
if n1.is_negative() {
let stub_gen = || {
let is_atom = atom!("is");
functor_stub(is_atom, 2)
};
return Err(undefined_eval_error(stub_gen));
}
unary_float_fn_template(n1, |f| f.sqrt())
}
#[inline]
pub(crate) fn floor(n1: Number, arena: &mut Arena) -> Number {
rnd_i(&n1, arena)
}
#[inline]
pub(crate) fn ceiling(n1: Number, arena: &mut Arena) -> Number {
let n1 = neg(n1, arena);
let n1 = floor(n1, arena);
neg(n1, arena)
}
#[inline]
pub(crate) fn truncate(n: Number, arena: &mut Arena) -> Number {
if n.is_negative() {
let n = abs(n, arena);
let n = floor(n, arena);
neg(n, arena)
} else {
floor(n, arena)
}
}
pub(crate) fn round(n: Number, arena: &mut Arena) -> Result<Number, MachineStubGen> {
let stub_gen = || {
let is_atom = atom!("is");
functor_stub(is_atom, 2)
};
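    // Rounding is implemented as floor(x + 0.5), so halfway cases round
    // toward positive infinity (e.g. round(-2.5) == -2).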
let result = add(n, Number::Float(OrderedFloat(0.5f64)), arena);
let result = try_numeric_result!(result, stub_gen)?;
Ok(floor(result, arena))
}
pub(crate) fn bitwise_complement(n1: Number, arena: &mut Arena) -> Result<Number, MachineStubGen> {
match n1 {
Number::Fixnum(n) => Ok(Number::Fixnum(Fixnum::build_with(!n.get_num()))),
Number::Integer(n1) => Ok(Number::arena_from(Integer::from(!&*n1), arena)),
_ => {
let stub_gen = || {
let bitwise_atom = atom!("\\");
                functor_stub(bitwise_atom, 1)
};
Err(numerical_type_error(ValidType::Integer, n1, stub_gen))
}
}
}
impl MachineState {
#[inline]
pub fn get_number(&mut self, at: &ArithmeticTerm) -> Result<Number, MachineStub> {
match at {
&ArithmeticTerm::Reg(r) => {
let value = self.store(self.deref(self[r]));
match Number::try_from(value) {
Ok(n) => Ok(n),
Err(_) => self.arith_eval_by_metacall(value),
}
}
&ArithmeticTerm::Interm(i) => {
Ok(mem::replace(&mut self.interms[i - 1], Number::Fixnum(Fixnum::build_with(0))))
}
&ArithmeticTerm::Number(n) => Ok(n),
}
}
pub fn get_rational(
&mut self,
at: &ArithmeticTerm,
caller: impl Fn() -> FunctorStub + 'static,
) -> Result<TypedArenaPtr<Rational>, MachineStub> {
let n = self.get_number(at)?;
match rational_from_number(n, caller, &mut self.arena) {
Ok(r) => Ok(r),
Err(e_gen) => Err(e_gen(self))
}
}
pub(crate) fn arith_eval_by_metacall(&mut self, value: HeapCellValue) -> Result<Number, MachineStub> {
let stub_gen = || functor_stub(atom!("is"), 2);
let mut iter = stackful_post_order_iter(&mut self.heap, value);
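        // The heap term is visited bottom-up: operands are pushed onto
        // self.interms as they are evaluated, so when an operator atom is
        // reached its arguments sit on top of the interms stack.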
while let Some(value) = iter.next() {
if value.get_forwarding_bit() {
std::mem::drop(iter);
let (name, arity) = read_heap_cell!(value,
(HeapCellValueTag::Atom, (name, arity)) => {
(name, arity)
}
(HeapCellValueTag::Str, s) => {
cell_as_atom_cell!(self.heap[s]).get_name_and_arity()
}
(HeapCellValueTag::Lis | HeapCellValueTag::PStr | HeapCellValueTag::PStrOffset |
HeapCellValueTag::PStrLoc) => {
(atom!("."), 2)
}
(HeapCellValueTag::AttrVar | HeapCellValueTag::Var) => {
let err = self.instantiation_error();
return Err(self.error_form(err, stub_gen()));
}
_ => {
unreachable!()
}
);
let evaluable_error = self.evaluable_error(name, arity);
return Err(self.error_form(evaluable_error, stub_gen()));
}
read_heap_cell!(value,
(HeapCellValueTag::Atom, (name, arity)) => {
if arity == 2 {
let a2 = self.interms.pop().unwrap();
let a1 = self.interms.pop().unwrap();
match name {
atom!("+") => self.interms.push(drop_iter_on_err!(
self,
iter,
try_numeric_result!(add(a1, a2, &mut self.arena), stub_gen)
)),
atom!("-") => self.interms.push(drop_iter_on_err!(
self,
iter,
try_numeric_result!(sub(a1, a2, &mut self.arena), stub_gen)
)),
atom!("*") => self.interms.push(drop_iter_on_err!(
self,
iter,
try_numeric_result!(mul(a1, a2, &mut self.arena), stub_gen)
)),
atom!("/") => self.interms.push(
drop_iter_on_err!(self, iter, div(a1, a2))
),
atom!("**") => self.interms.push(
drop_iter_on_err!(self, iter, pow(a1, a2, atom!("is")))
),
atom!("^") => self.interms.push(
drop_iter_on_err!(self, iter, int_pow(a1, a2, &mut self.arena))
),
atom!("max") => self.interms.push(
drop_iter_on_err!(self, iter, max(a1, a2))
),
atom!("min") => self.interms.push(
drop_iter_on_err!(self, iter, min(a1, a2))
),
atom!("rdiv") => {
let r1 = drop_iter_on_err!(
self,
iter,
rational_from_number(a1, stub_gen, &mut self.arena)
);
let r2 = drop_iter_on_err!(
self,
iter,
rational_from_number(a2, stub_gen, &mut self.arena)
);
let result = arena_alloc!(
drop_iter_on_err!(self, iter, rdiv(r1, r2)),
self.arena
);
self.interms.push(Number::Rational(result));
}
atom!("//") => self.interms.push(
drop_iter_on_err!(self, iter, idiv(a1, a2, &mut self.arena))
),
atom!("div") => self.interms.push(
drop_iter_on_err!(self, iter, int_floor_div(a1, a2, &mut self.arena))
),
atom!(">>") => self.interms.push(
drop_iter_on_err!(self, iter, shr(a1, a2, &mut self.arena))
),
atom!("<<") => self.interms.push(
drop_iter_on_err!(self, iter, shl(a1, a2, &mut self.arena))
),
atom!("/\\") => self.interms.push(
drop_iter_on_err!(self, iter, and(a1, a2, &mut self.arena))
),
atom!("\\/") => self.interms.push(
drop_iter_on_err!(self, iter, or(a1, a2, &mut self.arena))
),
atom!("xor") => self.interms.push(
drop_iter_on_err!(self, iter, xor(a1, a2, &mut self.arena))
),
atom!("mod") => self.interms.push(
drop_iter_on_err!(self, iter, modulus(a1, a2, &mut self.arena))
),
atom!("rem") => self.interms.push(
drop_iter_on_err!(self, iter, remainder(a1, a2, &mut self.arena))
),
atom!("atan2") => self.interms.push(Number::Float(OrderedFloat(
drop_iter_on_err!(self, iter, atan2(a1, a2))
))),
atom!("gcd") => self.interms.push(
drop_iter_on_err!(self, iter, gcd(a1, a2, &mut self.arena))
),
_ => {
let evaluable_stub = functor_stub(name, 2);
let stub = stub_gen();
std::mem::drop(iter);
let type_error = self.type_error(ValidType::Evaluable, evaluable_stub);
return Err(self.error_form(type_error, stub));
}
}
continue;
} else if arity == 1 {
let a1 = self.interms.pop().unwrap();
match name {
atom!("-") => self.interms.push(neg(a1, &mut self.arena)),
atom!("+") => self.interms.push(a1),
atom!("cos") => self.interms.push(Number::Float(OrderedFloat(
drop_iter_on_err!(self, iter, cos(a1))
))),
atom!("sin") => self.interms.push(Number::Float(OrderedFloat(
drop_iter_on_err!(self, iter, sin(a1))
))),
atom!("tan") => self.interms.push(Number::Float(OrderedFloat(
drop_iter_on_err!(self, iter, tan(a1))
))),
atom!("sqrt") => self.interms.push(Number::Float(OrderedFloat(
drop_iter_on_err!(self, iter, sqrt(a1))
))),
atom!("log") => self.interms.push(Number::Float(OrderedFloat(
drop_iter_on_err!(self, iter, log(a1))
))),
atom!("exp") => self.interms.push(Number::Float(OrderedFloat(
drop_iter_on_err!(self, iter, exp(a1))
))),
atom!("acos") => self.interms.push(Number::Float(OrderedFloat(
drop_iter_on_err!(self, iter, acos(a1))
))),
atom!("asin") => self.interms.push(Number::Float(OrderedFloat(
drop_iter_on_err!(self, iter, asin(a1))
))),
atom!("atan") => self.interms.push(Number::Float(OrderedFloat(
drop_iter_on_err!(self, iter, atan(a1))
))),
atom!("abs") => self.interms.push(abs(a1, &mut self.arena)),
atom!("float") => self.interms.push(Number::Float(OrderedFloat(
drop_iter_on_err!(self, iter, float(a1))
))),
atom!("truncate") => self.interms.push(truncate(a1, &mut self.arena)),
atom!("round") => self.interms.push(drop_iter_on_err!(self, iter, round(a1, &mut self.arena))),
atom!("ceiling") => self.interms.push(ceiling(a1, &mut self.arena)),
atom!("floor") => self.interms.push(floor(a1, &mut self.arena)),
atom!("\\") => self.interms.push(
drop_iter_on_err!(self, iter, bitwise_complement(a1, &mut self.arena))
),
atom!("sign") => self.interms.push(a1.sign()),
_ => {
let evaluable_stub = functor_stub(name, 1);
std::mem::drop(iter);
let type_error = self.type_error(
ValidType::Evaluable,
evaluable_stub,
);
let stub = stub_gen();
return Err(self.error_form(type_error, stub));
}
}
continue;
} else if arity == 0 {
match name {
atom!("pi") => {
self.interms.push(Number::Float(OrderedFloat(f64::consts::PI)));
continue;
}
atom!("e") => {
self.interms.push(Number::Float(OrderedFloat(f64::consts::E)));
continue;
}
atom!("epsilon") => {
self.interms.push(Number::Float(OrderedFloat(f64::EPSILON)));
continue;
}
_ => {
}
}
}
std::mem::drop(iter);
let evaluable_error = self.evaluable_error(name, arity);
let stub = stub_gen();
return Err(self.error_form(evaluable_error, stub));
}
(HeapCellValueTag::Fixnum, n) => {
self.interms.push(Number::Fixnum(n));
}
(HeapCellValueTag::F64, fl) => {
self.interms.push(Number::Float(*fl));
}
(HeapCellValueTag::Cons, ptr) => {
match_untyped_arena_ptr!(ptr,
(ArenaHeaderTag::Integer, n) => {
self.interms.push(Number::Integer(n));
}
(ArenaHeaderTag::Rational, r) => {
self.interms.push(Number::Rational(r));
}
_ => {
std::mem::drop(iter);
let type_error = self.type_error(ValidType::Evaluable, value);
let stub = stub_gen();
return Err(self.error_form(type_error, stub));
}
)
}
(HeapCellValueTag::Var | HeapCellValueTag::AttrVar) => {
std::mem::drop(iter);
let instantiation_error = self.instantiation_error();
let stub = stub_gen();
return Err(self.error_form(instantiation_error, stub));
}
_ => {
std::mem::drop(iter);
let type_error = self.type_error(ValidType::Evaluable, value);
let stub = stub_gen();
return Err(self.error_form(type_error, stub));
}
)
}
Ok(self.interms.pop().unwrap())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::machine::mock_wam::*;
#[test]
fn arith_eval_by_metacall_tests() {
let mut wam = MachineState::new();
let mut op_dir = default_op_dir();
op_dir.insert(
(atom!("+"), Fixity::In),
OpDesc::build_with(500, YFX as u8),
);
op_dir.insert(
(atom!("-"), Fixity::In),
OpDesc::build_with(500, YFX as u8),
);
op_dir.insert(
(atom!("-"), Fixity::Pre),
OpDesc::build_with(200, FY as u8),
);
op_dir.insert(
(atom!("*"), Fixity::In),
OpDesc::build_with(400, YFX as u8),
);
op_dir.insert(
(atom!("/"), Fixity::In),
OpDesc::build_with(400, YFX as u8),
);
let term_write_result =
parse_and_write_parsed_term_to_heap(&mut wam, "3 + 4 - 1 + 2.", &op_dir).unwrap();
assert_eq!(
wam.arith_eval_by_metacall(heap_loc_as_cell!(term_write_result.heap_loc)),
Ok(Number::Fixnum(Fixnum::build_with(8))),
);
wam.heap.clear();
let term_write_result =
parse_and_write_parsed_term_to_heap(&mut wam, "5 * 4 - 1.", &op_dir).unwrap();
assert_eq!(
wam.arith_eval_by_metacall(heap_loc_as_cell!(term_write_result.heap_loc)),
Ok(Number::Fixnum(Fixnum::build_with(19))),
);
wam.heap.clear();
let term_write_result =
parse_and_write_parsed_term_to_heap(&mut wam, "sign(-1).", &op_dir).unwrap();
assert_eq!(
wam.arith_eval_by_metacall(heap_loc_as_cell!(term_write_result.heap_loc)),
Ok(Number::Fixnum(Fixnum::build_with(-1)))
);
}
}
| 35.549088 | 123 | 0.468329 |
cc125298e47f1d0d477c1f605151d7167d3dc6df | 32,204 | // Copyright (c) Microsoft. All rights reserved.
#[cfg(feature = "tokio1")]
use std::sync::atomic;
#[cfg(feature = "tokio1")]
use futures_util::future;
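// systemd passes socket-activated fds to the service starting at fd 3
// (see sd_listen_fds(3)).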
const SD_LISTEN_FDS_START: std::os::unix::io::RawFd = 3;
#[derive(Clone, Debug, PartialEq)]
pub enum Connector {
Tcp {
host: std::sync::Arc<str>,
port: u16,
},
Unix {
socket_path: std::sync::Arc<std::path::Path>,
},
Fd {
fd: std::os::unix::io::RawFd,
},
}
#[derive(Debug)]
pub enum Stream {
Tcp(std::net::TcpStream),
Unix(std::os::unix::net::UnixStream),
}
#[cfg(feature = "tokio1")]
#[derive(Debug)]
pub enum AsyncStream {
Tcp(tokio::net::TcpStream),
Unix(tokio::net::UnixStream),
}
#[cfg(feature = "tokio1")]
#[derive(Debug)]
pub enum Incoming {
Tcp {
listener: tokio::net::TcpListener,
},
Unix {
listener: tokio::net::UnixListener,
user_state: std::collections::BTreeMap<libc::uid_t, std::sync::Arc<atomic::AtomicUsize>>,
},
}
#[cfg(feature = "tokio1")]
impl Incoming {
pub async fn serve<H>(
&mut self,
server: H,
shutdown: tokio::sync::oneshot::Receiver<()>,
) -> std::io::Result<()>
where
H: hyper::service::Service<
hyper::Request<hyper::Body>,
Response = hyper::Response<hyper::Body>,
Error = std::convert::Infallible,
> + Clone
+ Send
+ 'static,
<H as hyper::service::Service<hyper::Request<hyper::Body>>>::Future: Send,
{
const MAX_REQUESTS_PER_USER: usize = 10;
// Keep track of the number of running tasks.
let tasks = atomic::AtomicUsize::new(0);
let tasks = std::sync::Arc::new(tasks);
let shutdown_loop = shutdown;
futures_util::pin_mut!(shutdown_loop);
match self {
Incoming::Tcp { listener } => loop {
let accept = listener.accept();
futures_util::pin_mut!(accept);
match future::select(shutdown_loop, accept).await {
future::Either::Left((_, _)) => break,
future::Either::Right((tcp_stream, shutdown)) => {
let tcp_stream = tcp_stream?.0;
let server = crate::uid::UidService::new(None, 0, server.clone());
tasks.fetch_add(1, atomic::Ordering::AcqRel);
let server_tasks = tasks.clone();
tokio::spawn(async move {
if let Err(http_err) = hyper::server::conn::Http::new()
.serve_connection(tcp_stream, server)
.await
{
log::info!("Error while serving HTTP connection: {}", http_err);
}
server_tasks.fetch_sub(1, atomic::Ordering::AcqRel);
});
shutdown_loop = shutdown;
}
}
},
Incoming::Unix {
listener,
user_state,
} => loop {
let accept = listener.accept();
futures_util::pin_mut!(accept);
// Await either the next established connection or the shutdown signal.
match future::select(shutdown_loop, accept).await {
future::Either::Left((_, _)) => break,
future::Either::Right((unix_stream, shutdown)) => {
let unix_stream = unix_stream?.0;
let ucred = unix_stream.peer_cred()?;
let servers_available = user_state
.entry(ucred.uid())
.or_insert_with(|| {
std::sync::Arc::new(atomic::AtomicUsize::new(MAX_REQUESTS_PER_USER))
})
.clone();
let server =
crate::uid::UidService::new(ucred.pid(), ucred.uid(), server.clone());
tasks.fetch_add(1, atomic::Ordering::AcqRel);
let server_tasks = tasks.clone();
tokio::spawn(async move {
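                        // Try to acquire one of the user's connection
                        // slots: atomically decrement the counter unless it
                        // is already zero (checked_sub returns None and the
                        // update fails).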
let available = servers_available
.fetch_update(
atomic::Ordering::AcqRel,
atomic::Ordering::Acquire,
|current| current.checked_sub(1),
)
.is_ok();
if available {
if let Err(http_err) = hyper::server::conn::Http::new()
.serve_connection(unix_stream, server)
.await
{
log::info!("Error while serving HTTP connection: {}", http_err);
}
servers_available.fetch_add(1, atomic::Ordering::AcqRel);
} else {
log::info!(
"Max simultaneous connections reached for user {}",
ucred.uid()
);
}
server_tasks.fetch_sub(1, atomic::Ordering::AcqRel);
});
shutdown_loop = shutdown;
}
};
},
}
// Wait for all running server tasks to finish before returning.
let poll_ms = std::time::Duration::from_millis(100);
while tasks.load(atomic::Ordering::Acquire) != 0 {
tokio::time::sleep(poll_ms).await;
}
Ok(())
}
}
impl Connector {
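    /// Parses an endpoint URI into a `Connector`. The recognized schemes are
    /// the ones matched below: `http://host[:port]` (port defaults to 80),
    /// `unix:///path/to.sock`, and `fd://<number-or-name>` for
    /// socket-activated fds. A small sketch:
    ///
    /// ```ignore
    /// let uri: url::Url = "unix:///run/aziot/keyd.sock".parse().unwrap();
    /// let connector = Connector::new(&uri).unwrap();
    /// ```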
pub fn new(uri: &url::Url) -> Result<Self, ConnectorError> {
match uri.scheme() {
"http" => {
let host = uri
.host_str()
.ok_or_else(|| ConnectorError {
uri: uri.clone(),
inner: "http URI does not have a host".into(),
})?
.into();
let port = uri.port().unwrap_or(80);
Ok(Connector::Tcp { host, port })
}
"unix" => {
let socket_path = uri
.to_file_path()
.map_err(|()| ConnectorError {
uri: uri.clone(),
inner: "unix URI could not be converted to a file path".into(),
})?
.into();
Ok(Connector::Unix { socket_path })
}
"fd" => {
let host = uri.host_str().ok_or_else(|| ConnectorError {
uri: uri.clone(),
inner: "fd URI does not have a host".into(),
})?;
// Try to parse the host as an fd number.
let fd = match host.parse::<std::os::unix::io::RawFd>() {
Ok(fd) => {
// Host is an fd number.
fd
}
Err(_) => {
// Host is not an fd number. Parse it as an fd name.
socket_name_to_fd(host).map_err(|message| ConnectorError {
uri: uri.clone(),
inner: message.into(),
})?
}
};
Ok(Connector::Fd { fd })
}
scheme => Err(ConnectorError {
uri: uri.clone(),
inner: format!("unrecognized scheme {:?}", scheme).into(),
}),
}
}
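    /// Opens a synchronous connection matching this connector. For
    /// `Connector::Fd` the fd is adopted via `FromRawFd`, so the caller must
    /// ensure it is a valid socket fd that nothing else owns.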
pub fn connect(&self) -> std::io::Result<Stream> {
match self {
Connector::Tcp { host, port } => {
let inner = std::net::TcpStream::connect((&**host, *port))?;
Ok(Stream::Tcp(inner))
}
Connector::Unix { socket_path } => {
let inner = std::os::unix::net::UnixStream::connect(socket_path)?;
Ok(Stream::Unix(inner))
}
Connector::Fd { fd } => {
let inner = if is_unix_fd(*fd)? {
let inner: std::os::unix::net::UnixStream =
unsafe { std::os::unix::io::FromRawFd::from_raw_fd(*fd) };
Stream::Unix(inner)
} else {
let inner: std::net::TcpStream =
unsafe { std::os::unix::io::FromRawFd::from_raw_fd(*fd) };
Stream::Tcp(inner)
};
Ok(inner)
}
}
}
#[cfg(feature = "tokio1")]
pub async fn incoming(
self,
unix_socket_permission: u32,
socket_name: Option<String>,
) -> std::io::Result<Incoming> {
// Check for systemd sockets.
let systemd_socket = get_systemd_socket(socket_name)
.map_err(|err| std::io::Error::new(std::io::ErrorKind::Other, err))?;
match (systemd_socket, self) {
// Prefer use of systemd sockets.
            (Some(fd), _) | (_, Connector::Fd { fd }) => fd_to_listener(fd),
(None, Connector::Unix { socket_path }) => {
match std::fs::remove_file(&*socket_path) {
Ok(()) => (),
Err(err) if err.kind() == std::io::ErrorKind::NotFound => (),
Err(err) => return Err(err),
}
let listener = tokio::net::UnixListener::bind(socket_path.clone())?;
std::fs::set_permissions(
socket_path.as_ref(),
<std::fs::Permissions as std::os::unix::prelude::PermissionsExt>::from_mode(
unix_socket_permission,
),
)?;
Ok(Incoming::Unix {
listener,
user_state: Default::default(),
})
}
(None, Connector::Tcp { host, port }) => {
if cfg!(debug_assertions) {
let listener = tokio::net::TcpListener::bind((&*host, port)).await?;
Ok(Incoming::Tcp { listener })
} else {
Err(std::io::Error::new(
std::io::ErrorKind::Other,
"servers can only use `unix://` connectors, not `http://` connectors",
))
}
}
}
}
fn to_url(&self) -> Result<url::Url, String> {
match self {
Connector::Tcp { host, port } => {
let url = format!("http://{}:{}", host, port);
let mut url: url::Url = url.parse().expect("hard-coded URL parses successfully");
url.set_host(Some(host))
.map_err(|err| format!("could not set host {:?}: {:?}", host, err))?;
if *port != 80 {
url.set_port(Some(*port))
.map_err(|()| format!("could not set port {:?}", port))?;
}
Ok(url)
}
Connector::Unix { socket_path } => {
let socket_path = socket_path.to_str().ok_or_else(|| {
format!(
"socket path {} cannot be serialized as a utf-8 string",
socket_path.display()
)
})?;
let mut url: url::Url = "unix:///unix-socket"
.parse()
.expect("hard-coded URL parses successfully");
url.set_path(socket_path);
Ok(url)
}
Connector::Fd { fd } => {
let fd_path = format!("fd://{}", fd);
let url = url::Url::parse(&fd_path).expect("hard-coded URL parses successfully");
Ok(url)
}
}
}
}
impl std::fmt::Display for Connector {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let url = self.to_url().map_err(|_| std::fmt::Error)?;
url.fmt(f)
}
}
impl std::str::FromStr for Connector {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let uri = s.parse::<url::Url>().map_err(|err| err.to_string())?;
let connector = Connector::new(&uri).map_err(|err| err.to_string())?;
Ok(connector)
}
}
#[cfg(feature = "tokio1")]
impl hyper::service::Service<hyper::Uri> for Connector {
type Response = AsyncStream;
type Error = std::io::Error;
type Future = std::pin::Pin<
Box<dyn std::future::Future<Output = Result<Self::Response, Self::Error>> + Send>,
>;
fn poll_ready(
&mut self,
_cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Result<(), Self::Error>> {
std::task::Poll::Ready(Ok(()))
}
fn call(&mut self, _req: hyper::Uri) -> Self::Future {
match self {
Connector::Tcp { host, port } => {
let (host, port) = (host.clone(), *port);
let f = async move {
let inner = tokio::net::TcpStream::connect((&*host, port)).await?;
Ok(AsyncStream::Tcp(inner))
};
Box::pin(f)
}
Connector::Unix { socket_path } => {
let socket_path = socket_path.clone();
let f = async move {
let inner = tokio::net::UnixStream::connect(&*socket_path).await?;
Ok(AsyncStream::Unix(inner))
};
Box::pin(f)
}
Connector::Fd { fd } => {
let fd = *fd;
let f = async move {
if is_unix_fd(fd)? {
let stream: std::os::unix::net::UnixStream =
unsafe { std::os::unix::io::FromRawFd::from_raw_fd(fd) };
stream.set_nonblocking(true)?;
let stream = tokio::net::UnixStream::from_std(stream)?;
Ok(AsyncStream::Unix(stream))
} else {
let stream: std::net::TcpStream =
unsafe { std::os::unix::io::FromRawFd::from_raw_fd(fd) };
stream.set_nonblocking(true)?;
let stream = tokio::net::TcpStream::from_std(stream)?;
Ok(AsyncStream::Tcp(stream))
}
};
Box::pin(f)
}
}
}
}
impl<'de> serde::Deserialize<'de> for Connector {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::de::Deserializer<'de>,
{
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = Connector;
fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
formatter.write_str("an endpoint URI")
}
fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
s.parse().map_err(serde::de::Error::custom)
}
}
deserializer.deserialize_str(Visitor)
}
}
impl serde::Serialize for Connector {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::ser::Serializer,
{
let url = self.to_url().map_err(serde::ser::Error::custom)?;
url.to_string().serialize(serializer)
}
}
impl std::io::Read for Stream {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
match self {
Stream::Tcp(inner) => inner.read(buf),
Stream::Unix(inner) => inner.read(buf),
}
}
fn read_vectored(&mut self, bufs: &mut [std::io::IoSliceMut<'_>]) -> std::io::Result<usize> {
match self {
Stream::Tcp(inner) => inner.read_vectored(bufs),
Stream::Unix(inner) => inner.read_vectored(bufs),
}
}
}
impl std::io::Write for Stream {
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
match self {
Stream::Tcp(inner) => inner.write(buf),
Stream::Unix(inner) => inner.write(buf),
}
}
fn flush(&mut self) -> std::io::Result<()> {
match self {
Stream::Tcp(inner) => inner.flush(),
Stream::Unix(inner) => inner.flush(),
}
}
fn write_vectored(&mut self, bufs: &[std::io::IoSlice<'_>]) -> std::io::Result<usize> {
match self {
Stream::Tcp(inner) => inner.write_vectored(bufs),
Stream::Unix(inner) => inner.write_vectored(bufs),
}
}
}
#[cfg(feature = "tokio1")]
impl tokio::io::AsyncRead for AsyncStream {
fn poll_read(
mut self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
buf: &mut tokio::io::ReadBuf<'_>,
) -> std::task::Poll<std::io::Result<()>> {
match &mut *self {
AsyncStream::Tcp(inner) => std::pin::Pin::new(inner).poll_read(cx, buf),
AsyncStream::Unix(inner) => std::pin::Pin::new(inner).poll_read(cx, buf),
}
}
}
#[cfg(feature = "tokio1")]
impl tokio::io::AsyncWrite for AsyncStream {
fn poll_write(
mut self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
buf: &[u8],
) -> std::task::Poll<std::io::Result<usize>> {
match &mut *self {
AsyncStream::Tcp(inner) => std::pin::Pin::new(inner).poll_write(cx, buf),
AsyncStream::Unix(inner) => std::pin::Pin::new(inner).poll_write(cx, buf),
}
}
fn poll_flush(
mut self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<std::io::Result<()>> {
match &mut *self {
AsyncStream::Tcp(inner) => std::pin::Pin::new(inner).poll_flush(cx),
AsyncStream::Unix(inner) => std::pin::Pin::new(inner).poll_flush(cx),
}
}
fn poll_shutdown(
mut self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<std::io::Result<()>> {
match &mut *self {
AsyncStream::Tcp(inner) => std::pin::Pin::new(inner).poll_shutdown(cx),
AsyncStream::Unix(inner) => std::pin::Pin::new(inner).poll_shutdown(cx),
}
}
fn poll_write_vectored(
mut self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
bufs: &[std::io::IoSlice<'_>],
) -> std::task::Poll<std::io::Result<usize>> {
match &mut *self {
AsyncStream::Tcp(inner) => std::pin::Pin::new(inner).poll_write_vectored(cx, bufs),
AsyncStream::Unix(inner) => std::pin::Pin::new(inner).poll_write_vectored(cx, bufs),
}
}
fn is_write_vectored(&self) -> bool {
match self {
AsyncStream::Tcp(inner) => inner.is_write_vectored(),
AsyncStream::Unix(inner) => inner.is_write_vectored(),
}
}
}
#[cfg(feature = "tokio1")]
impl hyper::client::connect::Connection for AsyncStream {
fn connected(&self) -> hyper::client::connect::Connected {
match self {
AsyncStream::Tcp(inner) => inner.connected(),
AsyncStream::Unix(_) => hyper::client::connect::Connected::new(),
}
}
}
#[derive(Debug)]
pub struct ConnectorError {
uri: url::Url,
inner: Box<dyn std::error::Error>,
}
impl std::fmt::Display for ConnectorError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "malformed URI {:?}", self.uri)
}
}
impl std::error::Error for ConnectorError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
Some(&*self.inner)
}
}
/// Returns `true` if the given fd is a Unix socket; `false` if the given fd is a TCP socket.
///
/// Returns an Err if the socket type is invalid. TCP sockets are only valid for debug builds,
/// so this function returns an Err for release builds using a TCP socket.
fn is_unix_fd(fd: std::os::unix::io::RawFd) -> std::io::Result<bool> {
let sock_addr = nix::sys::socket::getsockname(fd)
.map_err(|err| std::io::Error::new(std::io::ErrorKind::Other, err))?;
match sock_addr {
nix::sys::socket::SockAddr::Unix(_) => Ok(true),
// Only debug builds can set up HTTP servers. Release builds must use unix sockets.
nix::sys::socket::SockAddr::Inet(_) if cfg!(debug_assertions) => Ok(false),
sock_addr => Err(std::io::Error::new(
std::io::ErrorKind::Other,
format!(
"systemd socket has unsupported address family {:?}",
sock_addr.family()
),
)),
}
}
/// Get the value of the `LISTEN_FDS` or `LISTEN_FDNAMES` environment variable.
///
/// Checks the `LISTEN_PID` variable to ensure that the requested environment variable is for this process.
fn get_env(env: &str) -> Result<Option<String>, String> {
// Check that the LISTEN_* environment variable is for this process.
let listen_pid = {
let listen_pid = match std::env::var("LISTEN_PID") {
Ok(listen_pid) => listen_pid,
Err(std::env::VarError::NotPresent) => return Ok(None),
Err(err @ std::env::VarError::NotUnicode(_)) => {
return Err(format!("could not read LISTEN_PID env var: {}", err))
}
};
let listen_pid = listen_pid
.parse()
.map_err(|err| format!("could not read LISTEN_PID env var: {}", err))?;
nix::unistd::Pid::from_raw(listen_pid)
};
let current_pid = nix::unistd::Pid::this();
if listen_pid != current_pid {
// The env vars are not for us. Perhaps we're being spawned by another socket-activated service and we inherited these env vars from it.
//
// Either way, this is the same as if the env var wasn't set at all. That is, the caller wants us to find a socket-activated fd,
// but we weren't started via socket activation.
return Ok(None);
}
// Get the requested environment variable.
match std::env::var(env) {
Ok(value) => Ok(Some(value)),
Err(std::env::VarError::NotPresent) => Ok(None),
Err(err @ std::env::VarError::NotUnicode(_)) => {
Err(format!("could not read {} env var: {}", env, err))
}
}
}
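/// Maps a systemd socket name to its fd using `LISTEN_FDNAMES`.
///
/// Worked example (illustrative names): with `LISTEN_FDNAMES=mgmt:workload`,
/// looking up "workload" yields index 1, i.e. fd `SD_LISTEN_FDS_START + 1` = 4.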
fn socket_name_to_fd(name: &str) -> Result<std::os::unix::io::RawFd, String> {
let listen_fdnames = match get_env("LISTEN_FDNAMES")? {
Some(listen_fdnames) => listen_fdnames,
None => return Err("LISTEN_FDNAMES not found".to_string()),
};
let listen_fdnames: Vec<&str> = listen_fdnames.split(':').collect();
let index: std::os::unix::io::RawFd =
match listen_fdnames.iter().position(|&fdname| fdname == name) {
Some(index) => match std::convert::TryInto::try_into(index) {
Ok(index) => index,
Err(_) => return Err("couldn't convert LISTEN_FDNAMES index to fd".to_string()),
},
None => return Err(format!("socket {} not found", name)),
};
    // The index in LISTEN_FDNAMES is the fd's offset from SD_LISTEN_FDS_START.
    let fd = SD_LISTEN_FDS_START + index;
Ok(fd)
}
#[cfg(feature = "tokio1")]
fn fd_to_listener(fd: std::os::unix::io::RawFd) -> std::io::Result<Incoming> {
if is_unix_fd(fd)? {
let listener: std::os::unix::net::UnixListener =
unsafe { std::os::unix::io::FromRawFd::from_raw_fd(fd) };
listener.set_nonblocking(true)?;
let listener = tokio::net::UnixListener::from_std(listener)?;
Ok(Incoming::Unix {
listener,
user_state: Default::default(),
})
} else {
let listener: std::net::TcpListener =
unsafe { std::os::unix::io::FromRawFd::from_raw_fd(fd) };
listener.set_nonblocking(true)?;
let listener = tokio::net::TcpListener::from_std(listener)?;
Ok(Incoming::Tcp { listener })
}
}
/// Return a matching systemd socket. Checks if this process has been socket-activated.
///
/// This mimics `sd_listen_fds` from libsystemd, then returns the fd of systemd socket.
#[cfg(feature = "tokio1")]
fn get_systemd_socket(
socket_name: Option<String>,
) -> Result<Option<std::os::unix::io::RawFd>, String> {
// Ref: <https://www.freedesktop.org/software/systemd/man/sd_listen_fds.html>
//
    // Try to find a systemd socket to match when a non-"fd" URI has been provided.
    // We consider 4 cases:
    // 1. There is only 1 socket. In this case we can ignore the socket name: the call
    //    is made by the identity service, which uses only one systemd socket, so matching is simple.
    // 2. There are > 1 systemd sockets and a socket name is provided. edged is telling us to match an fd with the provided socket name.
    // 3. There are > 1 systemd sockets and a socket name is provided, but there is no LISTEN_FDNAMES. We can't match.
    // 4. There are > 1 systemd sockets but no socket name is provided. In this case there is no corresponding systemd socket we should match.
//
// >sd_listen_fds parses the number passed in the $LISTEN_FDS environment variable, then sets the FD_CLOEXEC flag
// >for the parsed number of file descriptors starting from SD_LISTEN_FDS_START. Finally, it returns the parsed number.
//
// Note that it's not possible to distinguish between fd numbers if a process requires more than one socket.
// That is why in edged's case we use the systemd socket name to know which fd the function should return
// CS/IS/KS currently only expect one socket, so this is fine; but it is not the case for iotedged (mgmt and workload sockets)
// for example.
//
// The complication with LISTEN_FDNAMES is that CentOS 7's systemd is too old and doesn't support it, which
// would mean CS/IS/KS would have to stop using systemd socket activation on CentOS 7 (just like iotedged). This creates more complications,
// because now the sockets either have to be placed in /var/lib/aziot (just like iotedged does) which means host modules need to try
// both /run/aziot and /var/lib/aziot to connect to a service, or the services continue to bind sockets under /run/aziot but have to create
// /run/aziot themselves on startup with ACLs for all three users and all three groups.
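    // Concretely (illustrative values): with LISTEN_PID=<this pid>, LISTEN_FDS=2,
    // LISTEN_FDNAMES=mgmt:workload, socket_name "mgmt" matches fd 3 and
    // "workload" matches fd 4; with LISTEN_FDS=1 the name is ignored and fd 3
    // is returned directly.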
let listen_fds: std::os::unix::io::RawFd = match get_env("LISTEN_FDS")? {
Some(listen_fds) => listen_fds
.parse()
.map_err(|err| format!("could not read LISTEN_FDS env var: {}", err))?,
None => return Ok(None),
};
// If there is no socket available, no match is possible.
if listen_fds == 0 {
return Ok(None);
}
// fcntl(CLOEXEC) all the fds so that they aren't inherited by the child processes.
// Note that we want to do this for all the fds, not just the one we're looking for.
for fd in SD_LISTEN_FDS_START..(SD_LISTEN_FDS_START + listen_fds) {
if let Err(err) = nix::fcntl::fcntl(
fd,
nix::fcntl::FcntlArg::F_SETFD(nix::fcntl::FdFlag::FD_CLOEXEC),
) {
return Err(format!(
"could not fcntl({}, F_SETFD, FD_CLOEXEC): {}",
fd, err
));
}
}
// If there is only one socket, we know this is the identity service which uses only one socket, so we have a match:
if listen_fds == 1 {
return Ok(Some(SD_LISTEN_FDS_START));
}
// If there is more than 1 socket and we don't have a socket name to match, this is edged telling us that there is no systemd socket we can match.
let socket_name = match socket_name {
Some(socket_name) => socket_name,
None => return Ok(None),
};
// If there is more than one socket, this is edged. We can attempt to match the socket name to systemd.
// This happens when a unix Uri is provided in the config.toml. Systemd sockets get created nonetheless, so we still prefer to use them.
// If a socket name is provided but we don't see the env variable LISTEN_FDNAMES, it means we are probably on an older OS, and we can't match either.
let listen_fdnames = match get_env("LISTEN_FDNAMES")? {
Some(listen_fdnames) => listen_fdnames,
None => return Ok(None),
};
let listen_fdnames: Vec<&str> = listen_fdnames.split(':').collect();
let len: std::os::unix::io::RawFd = std::convert::TryInto::try_into(listen_fdnames.len())
.map_err(|_| "invalid number of sockets".to_string())?;
if listen_fds != len {
return Err(format!(
"Mismatch, there are {} fds, and {} names",
listen_fds,
listen_fdnames.len()
));
}
if let Some(index) = listen_fdnames
.iter()
.position(|fdname| (*fdname).eq(&socket_name))
{
let index: std::os::unix::io::RawFd = std::convert::TryInto::try_into(index)
.map_err(|_| "invalid number of sockets".to_string())?;
Ok(Some(SD_LISTEN_FDS_START + index))
} else {
Err(format!(
"Could not find a match for {} in the fd list",
socket_name
))
}
}
#[cfg(test)]
mod tests {
#[test]
fn create_connector() {
for (input, expected) in &[
(
"http://127.0.0.1",
super::Connector::Tcp {
host: "127.0.0.1".into(),
port: 80,
},
),
(
"http://127.0.0.1:8888",
super::Connector::Tcp {
host: "127.0.0.1".into(),
port: 8888,
},
),
(
"http://[::1]",
super::Connector::Tcp {
host: "[::1]".into(),
port: 80,
},
),
(
"http://[::1]:8888",
super::Connector::Tcp {
host: "[::1]".into(),
port: 8888,
},
),
(
"http://localhost",
super::Connector::Tcp {
host: "localhost".into(),
port: 80,
},
),
(
"http://localhost:8888",
super::Connector::Tcp {
host: "localhost".into(),
port: 8888,
},
),
(
"unix:///run/aziot/keyd.sock",
super::Connector::Unix {
socket_path: std::path::Path::new("/run/aziot/keyd.sock").into(),
},
),
] {
let actual: super::Connector = input.parse().unwrap();
assert_eq!(*expected, actual);
let serialized_input = {
let input: url::Url = input.parse().unwrap();
serde_json::to_string(&input).unwrap()
};
let serialized_connector = serde_json::to_string(&actual).unwrap();
assert_eq!(serialized_input, serialized_connector);
let deserialized_connector: super::Connector =
serde_json::from_str(&serialized_connector).unwrap();
assert_eq!(*expected, deserialized_connector);
}
for input in &[
// unsupported scheme
"ftp://127.0.0.1",
] {
let input = input.parse().unwrap();
let _ = super::Connector::new(&input).unwrap_err();
}
}
}
| 36.143659 | 153 | 0.503447 |
e24a7ad1c569c641855cd432570a616cba770378 | 118 | #[cfg(feature = "bn_382")]
mod curves;
mod fields;
#[cfg(feature = "bn_382")]
pub use curves::*;
pub use fields::*;
| 13.111111 | 26 | 0.627119 |
ac24100c470d69cbc34cb9a6cafa12b2578fce18 | 4,530 | use super::*;
pub struct RandomQsc {
desired_quorum_set_size: usize,
desired_threshold: Option<usize>,
weights: Vec<usize>,
}
impl RandomQsc {
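    /// Builds a configurator that fills each node's quorum set up to
    /// `desired_quorum_set_size` validators. Sampling is weighted: a node's
    /// weight defaults to 1 if `weights` has no entry for it, and weight 0
    /// means it is never picked. A sketch mirroring the tests below:
    ///
    /// ```ignore
    /// let qsc = RandomQsc::new(5, Some(3), Some(vec![0, 0, 0, 0, 0, 1, 1, 1, 1, 1]));
    /// ```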
pub fn new(
desired_quorum_set_size: usize,
desired_threshold: Option<usize>,
weights: Option<Vec<usize>>,
) -> Self {
RandomQsc {
desired_quorum_set_size,
desired_threshold,
weights: weights.unwrap_or_else(Vec::new),
}
}
pub fn new_simple(desired_quorum_set_size: usize) -> Self {
Self::new(desired_quorum_set_size, None, None)
}
}
impl QuorumSetConfigurator for RandomQsc {
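    /// Grows `node_id`'s quorum set up to the desired size by weighted
    /// sampling without replacement, then sets the threshold (defaulting to
    /// a ~67% threshold via `calculate_67p_threshold`). E.g. with a desired
    /// size of 4, a fresh node ends up with itself plus 3 sampled validators.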
fn configure(&self, node_id: NodeId, fbas: &mut Fbas) -> ChangeEffect {
let n = fbas.nodes.len();
let existing_quorum_set = &mut fbas.nodes[node_id].quorum_set;
// we add nodes to their own quorum sets, for better comparability with other Qsc
if existing_quorum_set.validators.is_empty() {
existing_quorum_set.validators = vec![node_id];
}
let current_quorum_set_size = existing_quorum_set.validators.len();
if current_quorum_set_size < self.desired_quorum_set_size {
let target_quorum_set_size = cmp::min(self.desired_quorum_set_size, n);
let threshold = self
.desired_threshold
.unwrap_or_else(|| calculate_67p_threshold(target_quorum_set_size));
let used_nodes: BitSet<NodeId> =
existing_quorum_set.validators.iter().copied().collect();
let mut available_nodes: Vec<NodeId> =
(0..n).filter(|&x| !used_nodes.contains(x)).collect();
let mut rng = thread_rng();
for _ in current_quorum_set_size..target_quorum_set_size {
let &chosen_node = available_nodes
.choose_weighted(&mut rng, |&node_id| {
*self.weights.get(node_id).unwrap_or(&1)
})
.unwrap();
let chosen_idx = available_nodes.binary_search(&chosen_node).unwrap();
available_nodes.remove(chosen_idx);
existing_quorum_set.validators.push(chosen_node);
}
existing_quorum_set.threshold = threshold;
Change
} else {
NoChange
}
}
}
#[cfg(test)]
mod tests {
use super::monitors::*;
use super::*;
#[test]
fn simple_random_qsc_makes_a_quorum() {
let mut simulator = Simulator::new(
Fbas::new(),
Rc::new(RandomQsc::new_simple(4)),
Rc::new(DummyMonitor),
);
simulator.simulate_growth(4);
assert!(simulator.fbas.is_quorum(&bitset![0, 1, 2, 3]));
}
#[test]
fn simple_random_qsc_adapts_until_satisfied() {
let mut simulator_random = Simulator::new(
Fbas::new(),
Rc::new(RandomQsc::new_simple(5)),
Rc::new(DummyMonitor),
);
let mut simulator_safe = Simulator::new(
Fbas::new(),
Rc::new(SuperSafeQsc::new()),
Rc::new(DummyMonitor),
);
simulator_random.simulate_growth(2);
simulator_safe.simulate_growth(2);
assert!(simulator_random.fbas.is_quorum(&bitset![0, 1]));
simulator_random.simulate_growth(10);
simulator_safe.simulate_growth(10);
assert_ne!(simulator_safe.fbas, simulator_random.fbas);
assert!(!simulator_random.fbas.is_quorum(&bitset![0, 1]));
}
#[test]
fn simple_random_qsc_is_random() {
let mut simulator_random_1 = Simulator::new(
Fbas::new(),
Rc::new(RandomQsc::new_simple(5)),
Rc::new(DummyMonitor),
);
let mut simulator_random_2 = simulator_random_1.clone();
simulator_random_1.simulate_growth(23);
simulator_random_2.simulate_growth(23);
assert_ne!(simulator_random_1.fbas, simulator_random_2.fbas);
}
#[test]
fn random_qsc_honors_weights() {
let mut simulator = Simulator::new(
Fbas::new_generic_unconfigured(10),
Rc::new(RandomQsc::new(
5,
Some(3),
Some(vec![0, 0, 0, 0, 0, 1, 1, 1, 1, 1]),
)),
Rc::new(DummyMonitor),
);
simulator.simulate_global_reevaluation(2);
assert!(!simulator.fbas.is_quorum(&bitset![0, 1, 2, 3, 4, 5, 6]));
assert!(simulator.fbas.is_quorum(&bitset![7, 8, 9]));
}
}
| 32.826087 | 89 | 0.583223 |
fc7a06b0c70de1ecf3e75dc8e34aa748eacbeed0 | 2,002 | #[doc = "Reader of register B0_24"]
pub type R = crate::R<u8, super::B0_24>;
#[doc = "Writer for register B0_24"]
pub type W = crate::W<u8, super::B0_24>;
#[doc = "Register B0_24 `reset()`'s with value 0"]
impl crate::ResetValue for super::B0_24 {
type Type = u8;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Reader of field `PBYTE`"]
pub type PBYTE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `PBYTE`"]
pub struct PBYTE_W<'a> {
w: &'a mut W,
}
impl<'a> PBYTE_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u8) & 0x01);
self.w
}
}
impl R {
#[doc = "Bit 0 - Read: state of the pin PIOm_n, regardless of direction, masking, or alternate function, except that pins configured as analog I/O always read as 0. One register for each port pin. Supported pins depends on the specific device and package. Write: loads the pin's output bit. One register for each port pin. Supported pins depends on the specific device and package."]
#[inline(always)]
pub fn pbyte(&self) -> PBYTE_R {
PBYTE_R::new((self.bits & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 0 - Read: state of the pin PIOm_n, regardless of direction, masking, or alternate function, except that pins configured as analog I/O always read as 0. One register for each port pin. Supported pins depends on the specific device and package. Write: loads the pin's output bit. One register for each port pin. Supported pins depends on the specific device and package."]
#[inline(always)]
pub fn pbyte(&mut self) -> PBYTE_W {
PBYTE_W { w: self }
}
}
| 39.254902 | 387 | 0.632368 |
4884c00943adb856ab918f73802b50bc6f275f43 | 263 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR MIT
fn main() {
let x = 2;
let f = |y| x + y;
let z = f(100);
let g = |y| z + f(y);
assert!(z == 102);
assert!(g(z) == 206);
}
| 23.909091 | 69 | 0.539924 |
ac5a10ec70364674bc46c5abf810ee108667ec88 | 86,317 | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Feature gating
//!
//! This module implements the gating necessary for preventing certain compiler
//! features from being used by default. This module will crawl a pre-expanded
//! AST to ensure that there are no features which are used that are not
//! enabled.
//!
//! Features are enabled in programs via the crate-level attributes of
//! `#![feature(...)]` with a comma-separated list of features.
//!
//! For the purpose of future feature-tracking, once code for detection of feature
//! gate usage is added, *do not remove it again* even once the feature
//! becomes stable.
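//!
//! For example, a crate opting in to two of the unstable features listed
//! below declares, at the crate root:
//!
//! ```ignore
//! #![feature(slice_patterns, box_syntax)]
//! ```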
use self::AttributeType::*;
use self::AttributeGate::*;
use abi::Abi;
use ast::{self, NodeId, PatKind, RangeEnd, RangeSyntax};
use attr;
use codemap::Spanned;
use syntax_pos::Span;
use errors::{DiagnosticBuilder, Handler, FatalError};
use visit::{self, FnKind, Visitor};
use parse::ParseSess;
use symbol::{keywords, Symbol};
use std::{env, path};
macro_rules! set {
(proc_macro) => {{
fn f(features: &mut Features, span: Span) {
features.declared_lib_features.push((Symbol::intern("proc_macro"), span));
features.proc_macro = true;
}
f as fn(&mut Features, Span)
}};
($field: ident) => {{
fn f(features: &mut Features, _: Span) {
features.$field = true;
}
f as fn(&mut Features, Span)
}}
}
macro_rules! declare_features {
($((active, $feature: ident, $ver: expr, $issue: expr),)+) => {
/// Represents active features that are currently being implemented or
/// currently being considered for addition/removal.
const ACTIVE_FEATURES:
&'static [(&'static str, &'static str, Option<u32>, fn(&mut Features, Span))] =
&[$((stringify!($feature), $ver, $issue, set!($feature))),+];
/// A set of features to be used by later passes.
pub struct Features {
/// `#![feature]` attrs for stable language features, for error reporting
pub declared_stable_lang_features: Vec<(Symbol, Span)>,
/// `#![feature]` attrs for non-language (library) features
pub declared_lib_features: Vec<(Symbol, Span)>,
$(pub $feature: bool),+
}
impl Features {
pub fn new() -> Features {
Features {
declared_stable_lang_features: Vec::new(),
declared_lib_features: Vec::new(),
$($feature: false),+
}
}
}
};
($((removed, $feature: ident, $ver: expr, $issue: expr),)+) => {
        /// Represents unstable features which have since been removed (they were once Active)
const REMOVED_FEATURES: &'static [(&'static str, &'static str, Option<u32>)] = &[
$((stringify!($feature), $ver, $issue)),+
];
};
($((stable_removed, $feature: ident, $ver: expr, $issue: expr),)+) => {
/// Represents stable features which have since been removed (it was once Accepted)
const STABLE_REMOVED_FEATURES: &'static [(&'static str, &'static str, Option<u32>)] = &[
$((stringify!($feature), $ver, $issue)),+
];
};
($((accepted, $feature: ident, $ver: expr, $issue: expr),)+) => {
        /// Represents language features that have since been Accepted (they were once Active)
const ACCEPTED_FEATURES: &'static [(&'static str, &'static str, Option<u32>)] = &[
$((stringify!($feature), $ver, $issue)),+
];
}
}
// If you change this, please modify src/doc/unstable-book as well.
//
// Don't ever remove anything from this list; set them to 'Removed'.
//
// The version numbers here correspond to the version in which the current status
// was set. This is most important for knowing when a particular feature became
// stable (active).
//
// NB: tools/tidy/src/features.rs parses this information directly out of the
// source, so take care when modifying it.
declare_features! (
(active, asm, "1.0.0", Some(29722)),
(active, concat_idents, "1.0.0", Some(29599)),
(active, link_args, "1.0.0", Some(29596)),
(active, log_syntax, "1.0.0", Some(29598)),
(active, non_ascii_idents, "1.0.0", Some(28979)),
(active, plugin_registrar, "1.0.0", Some(29597)),
(active, thread_local, "1.0.0", Some(29594)),
(active, trace_macros, "1.0.0", Some(29598)),
// rustc internal, for now:
(active, intrinsics, "1.0.0", None),
(active, lang_items, "1.0.0", None),
(active, link_llvm_intrinsics, "1.0.0", Some(29602)),
(active, linkage, "1.0.0", Some(29603)),
(active, quote, "1.0.0", Some(29601)),
// rustc internal
(active, rustc_diagnostic_macros, "1.0.0", None),
(active, rustc_const_unstable, "1.0.0", None),
(active, advanced_slice_patterns, "1.0.0", Some(23121)),
(active, box_syntax, "1.0.0", Some(27779)),
(active, placement_in_syntax, "1.0.0", Some(27779)),
(active, unboxed_closures, "1.0.0", Some(29625)),
(active, fundamental, "1.0.0", Some(29635)),
(active, main, "1.0.0", Some(29634)),
(active, needs_allocator, "1.4.0", Some(27389)),
(active, on_unimplemented, "1.0.0", Some(29628)),
(active, plugin, "1.0.0", Some(29597)),
(active, simd_ffi, "1.0.0", Some(27731)),
(active, start, "1.0.0", Some(29633)),
(active, structural_match, "1.8.0", Some(31434)),
(active, panic_runtime, "1.10.0", Some(32837)),
(active, needs_panic_runtime, "1.10.0", Some(32837)),
// OIBIT specific features
(active, optin_builtin_traits, "1.0.0", Some(13231)),
// macro re-export needs more discussion and stabilization
(active, macro_reexport, "1.0.0", Some(29638)),
// Allows use of #[staged_api]
// rustc internal
(active, staged_api, "1.0.0", None),
// Allows using #![no_core]
(active, no_core, "1.3.0", Some(29639)),
// Allows using `box` in patterns; RFC 469
(active, box_patterns, "1.0.0", Some(29641)),
// Allows using the unsafe_destructor_blind_to_params attribute;
// RFC 1238
(active, dropck_parametricity, "1.3.0", Some(28498)),
// Allows using the may_dangle attribute; RFC 1327
(active, dropck_eyepatch, "1.10.0", Some(34761)),
// Allows the use of custom attributes; RFC 572
(active, custom_attribute, "1.0.0", Some(29642)),
// Allows the use of #[derive(Anything)] as sugar for
// #[derive_Anything].
(active, custom_derive, "1.0.0", Some(29644)),
// Allows the use of rustc_* attributes; RFC 572
(active, rustc_attrs, "1.0.0", Some(29642)),
// Allows the use of non lexical lifetimes; RFC 2094
(active, nll, "1.0.0", Some(44928)),
// Allows the use of #[allow_internal_unstable]. This is an
// attribute on macro_rules! and can't use the attribute handling
// below (it has to be checked before expansion possibly makes
// macros disappear).
//
// rustc internal
(active, allow_internal_unstable, "1.0.0", None),
// Allows the use of #[allow_internal_unsafe]. This is an
// attribute on macro_rules! and can't use the attribute handling
// below (it has to be checked before expansion possibly makes
// macros disappear).
//
// rustc internal
(active, allow_internal_unsafe, "1.0.0", None),
// #23121. Array patterns have some hazards yet.
(active, slice_patterns, "1.0.0", Some(23121)),
// Allows the definition of `const fn` functions.
(active, const_fn, "1.2.0", Some(24111)),
// Allows indexing into constant arrays.
(active, const_indexing, "1.4.0", Some(29947)),
// Allows using #[prelude_import] on glob `use` items.
//
// rustc internal
(active, prelude_import, "1.2.0", None),
// Allows default type parameters to influence type inference.
(active, default_type_parameter_fallback, "1.3.0", Some(27336)),
// Allows associated type defaults
(active, associated_type_defaults, "1.2.0", Some(29661)),
// allow `repr(simd)`, and importing the various simd intrinsics
(active, repr_simd, "1.4.0", Some(27731)),
// Allows cfg(target_feature = "...").
(active, cfg_target_feature, "1.4.0", Some(29717)),
// allow `extern "platform-intrinsic" { ... }`
(active, platform_intrinsics, "1.4.0", Some(27731)),
// allow `#[unwind]`
// rust runtime internal
(active, unwind_attributes, "1.4.0", None),
// allow the use of `#[naked]` on functions.
(active, naked_functions, "1.9.0", Some(32408)),
// allow `#[no_debug]`
(active, no_debug, "1.5.0", Some(29721)),
// allow `#[omit_gdb_pretty_printer_section]`
// rustc internal.
(active, omit_gdb_pretty_printer_section, "1.5.0", None),
// Allows cfg(target_vendor = "...").
(active, cfg_target_vendor, "1.5.0", Some(29718)),
// Allow attributes on expressions and non-item statements
(active, stmt_expr_attributes, "1.6.0", Some(15701)),
// allow using type ascription in expressions
(active, type_ascription, "1.6.0", Some(23416)),
// Allows cfg(target_thread_local)
(active, cfg_target_thread_local, "1.7.0", Some(29594)),
// rustc internal
(active, abi_vectorcall, "1.7.0", None),
// a..=b and ..=b
(active, inclusive_range_syntax, "1.7.0", Some(28237)),
// X..Y patterns
(active, exclusive_range_pattern, "1.11.0", Some(37854)),
// impl specialization (RFC 1210)
(active, specialization, "1.7.0", Some(31844)),
// Allows cfg(target_has_atomic = "...").
(active, cfg_target_has_atomic, "1.9.0", Some(32976)),
// Allows `impl Trait` in function return types.
(active, conservative_impl_trait, "1.12.0", Some(34511)),
// Allows `impl Trait` in function arguments.
(active, universal_impl_trait, "1.23.0", Some(34511)),
// The `!` type
(active, never_type, "1.13.0", Some(35121)),
// Allows all literals in attribute lists and values of key-value pairs.
(active, attr_literals, "1.13.0", Some(34981)),
// Allows untagged unions `union U { ... }`
(active, untagged_unions, "1.13.0", Some(32836)),
// Used to identify the `compiler_builtins` crate
// rustc internal
(active, compiler_builtins, "1.13.0", None),
// Allows attributes on lifetime/type formal parameters in generics (RFC 1327)
(active, generic_param_attrs, "1.11.0", Some(34761)),
// Allows #[link(..., cfg(..))]
(active, link_cfg, "1.14.0", Some(37406)),
(active, use_extern_macros, "1.15.0", Some(35896)),
// Allows #[target_feature(...)]
(active, target_feature, "1.15.0", None),
// `extern "ptx-*" fn()`
(active, abi_ptx, "1.15.0", None),
// The `i128` type
(active, i128_type, "1.16.0", Some(35118)),
// The `repr(i128)` annotation for enums
(active, repr128, "1.16.0", Some(35118)),
// The `unadjusted` ABI. Perma unstable.
(active, abi_unadjusted, "1.16.0", None),
// Procedural macros 2.0.
(active, proc_macro, "1.16.0", Some(38356)),
// Declarative macros 2.0 (`macro`).
(active, decl_macro, "1.17.0", Some(39412)),
// Allows #[link(kind="static-nobundle"...]
(active, static_nobundle, "1.16.0", Some(37403)),
// `extern "msp430-interrupt" fn()`
(active, abi_msp430_interrupt, "1.16.0", Some(38487)),
// Used to identify crates that contain sanitizer runtimes
// rustc internal
(active, sanitizer_runtime, "1.17.0", None),
// Used to identify crates that contain the profiler runtime
// rustc internal
(active, profiler_runtime, "1.18.0", None),
// `extern "x86-interrupt" fn()`
(active, abi_x86_interrupt, "1.17.0", Some(40180)),
// Allows the `catch {...}` expression
(active, catch_expr, "1.17.0", Some(31436)),
// Allows `repr(align(u16))` struct attribute (RFC 1358)
(active, repr_align, "1.17.0", Some(33626)),
// Used to preserve symbols (see llvm.used)
(active, used, "1.18.0", Some(40289)),
// Allows module-level inline assembly by way of global_asm!()
(active, global_asm, "1.18.0", Some(35119)),
// Allows overlapping impls of marker traits
(active, overlapping_marker_traits, "1.18.0", Some(29864)),
// Allows use of the :vis macro fragment specifier
(active, macro_vis_matcher, "1.18.0", Some(41022)),
// rustc internal
(active, abi_thiscall, "1.19.0", None),
// Allows a test to fail without failing the whole suite
(active, allow_fail, "1.19.0", Some(42219)),
// Allows unsized tuple coercion.
(active, unsized_tuple_coercion, "1.20.0", Some(42877)),
// Generators
(active, generators, "1.21.0", None),
// Trait aliases
(active, trait_alias, "1.24.0", Some(41517)),
// global allocators and their internals
(active, global_allocator, "1.20.0", None),
(active, allocator_internals, "1.20.0", None),
// #[doc(cfg(...))]
(active, doc_cfg, "1.21.0", Some(43781)),
// #[doc(masked)]
(active, doc_masked, "1.21.0", Some(44027)),
// #[doc(spotlight)]
(active, doc_spotlight, "1.22.0", Some(45040)),
// #[doc(include="some-file")]
(active, external_doc, "1.22.0", Some(44732)),
// allow `#[must_use]` on functions and comparison operators (RFC 1940)
(active, fn_must_use, "1.21.0", Some(43302)),
// allow '|' at beginning of match arms (RFC 1925)
(active, match_beginning_vert, "1.21.0", Some(44101)),
// Future-proofing enums/structs with #[non_exhaustive] attribute (RFC 2008)
(active, non_exhaustive, "1.22.0", Some(44109)),
// Copy/Clone closures (RFC 2132)
(active, clone_closures, "1.22.0", Some(44490)),
(active, copy_closures, "1.22.0", Some(44490)),
// allow `'_` placeholder lifetimes
(active, underscore_lifetimes, "1.22.0", Some(44524)),
// allow `..=` in patterns (RFC 1192)
(active, dotdoteq_in_patterns, "1.22.0", Some(28237)),
// Default match binding modes (RFC 2005)
(active, match_default_bindings, "1.22.0", Some(42640)),
// Trait object syntax with `dyn` prefix
(active, dyn_trait, "1.22.0", Some(44662)),
// `crate` as visibility modifier, synonymous to `pub(crate)`
(active, crate_visibility_modifier, "1.23.0", Some(45388)),
// extern types
(active, extern_types, "1.23.0", Some(43467)),
// Allow trait methods with arbitrary self types
(active, arbitrary_self_types, "1.23.0", Some(44874)),
// #![wasm_import_memory] attribute
(active, wasm_import_memory, "1.22.0", None),
// `crate` in paths
(active, crate_in_paths, "1.23.0", Some(45477)),
// In-band lifetime bindings (e.g. `fn foo(x: &'a u8) -> &'a u8`)
(active, in_band_lifetimes, "1.23.0", Some(44524)),
// Nested groups in `use` (RFC 2128)
(active, use_nested_groups, "1.23.0", Some(44494)),
// generic associated types (RFC 1598)
(active, generic_associated_types, "1.23.0", Some(44265)),
// Resolve absolute paths as paths from other crates
(active, extern_absolute_paths, "1.24.0", Some(44660)),
// `foo.rs` as an alternative to `foo/mod.rs`
(active, non_modrs_mods, "1.24.0", Some(44660)),
// Nested `impl Trait`
(active, nested_impl_trait, "1.24.0", Some(34511)),
// Termination trait in main (RFC 1937)
(active, termination_trait, "1.24.0", Some(43301)),
// Allows use of the :lifetime macro fragment specifier
(active, macro_lifetime_matcher, "1.24.0", Some(46895)),
// `extern` in paths
(active, extern_in_paths, "1.23.0", Some(44660)),
// Allows `#[repr(transparent)]` attribute on newtype structs
(active, repr_transparent, "1.25.0", Some(43036)),
);
declare_features! (
(removed, import_shadowing, "1.0.0", None),
(removed, managed_boxes, "1.0.0", None),
// Allows use of unary negate on unsigned integers, e.g. -e for e: u8
(removed, negate_unsigned, "1.0.0", Some(29645)),
(removed, reflect, "1.0.0", Some(27749)),
// A way to temporarily opt out of opt in copy. This will *never* be accepted.
(removed, opt_out_copy, "1.0.0", None),
(removed, quad_precision_float, "1.0.0", None),
(removed, struct_inherit, "1.0.0", None),
(removed, test_removed_feature, "1.0.0", None),
(removed, visible_private_types, "1.0.0", None),
(removed, unsafe_no_drop_flag, "1.0.0", None),
// Allows using items which are missing stability attributes
// rustc internal
(removed, unmarked_api, "1.0.0", None),
(removed, pushpop_unsafe, "1.2.0", None),
(removed, allocator, "1.0.0", None),
// Allows the `#[simd]` attribute -- removed in favor of `#[repr(simd)]`
(removed, simd, "1.0.0", Some(27731)),
);
declare_features! (
(stable_removed, no_stack_check, "1.0.0", None),
);
declare_features! (
(accepted, associated_types, "1.0.0", None),
// allow overloading augmented assignment operations like `a += b`
(accepted, augmented_assignments, "1.8.0", Some(28235)),
// allow empty structs and enum variants with braces
(accepted, braced_empty_structs, "1.8.0", Some(29720)),
(accepted, default_type_params, "1.0.0", None),
(accepted, globs, "1.0.0", None),
(accepted, if_let, "1.0.0", None),
// A temporary feature gate used to enable parser extensions needed
// to bootstrap fix for #5723.
(accepted, issue_5723_bootstrap, "1.0.0", None),
(accepted, macro_rules, "1.0.0", None),
// Allows using #![no_std]
(accepted, no_std, "1.6.0", None),
(accepted, slicing_syntax, "1.0.0", None),
(accepted, struct_variant, "1.0.0", None),
// These are used to test this portion of the compiler, they don't actually
// mean anything
(accepted, test_accepted_feature, "1.0.0", None),
(accepted, tuple_indexing, "1.0.0", None),
// Allows macros to appear in the type position.
(accepted, type_macros, "1.13.0", Some(27245)),
(accepted, while_let, "1.0.0", None),
// Allows `#[deprecated]` attribute
(accepted, deprecated, "1.9.0", Some(29935)),
// `expr?`
(accepted, question_mark, "1.13.0", Some(31436)),
// Allows `..` in tuple (struct) patterns
(accepted, dotdot_in_tuple_patterns, "1.14.0", Some(33627)),
(accepted, item_like_imports, "1.15.0", Some(35120)),
// Allows using `Self` and associated types in struct expressions and patterns.
(accepted, more_struct_aliases, "1.16.0", Some(37544)),
// elide `'static` lifetimes in `static`s and `const`s
(accepted, static_in_const, "1.17.0", Some(35897)),
// Allows field shorthands (`x` meaning `x: x`) in struct literal expressions.
(accepted, field_init_shorthand, "1.17.0", Some(37340)),
// Allows the definition recursive static items.
(accepted, static_recursion, "1.17.0", Some(29719)),
// pub(restricted) visibilities (RFC 1422)
(accepted, pub_restricted, "1.18.0", Some(32409)),
// The #![windows_subsystem] attribute
(accepted, windows_subsystem, "1.18.0", Some(37499)),
// Allows `break {expr}` with a value inside `loop`s.
(accepted, loop_break_value, "1.19.0", Some(37339)),
// Permits numeric fields in struct expressions and patterns.
(accepted, relaxed_adts, "1.19.0", Some(35626)),
// Coerces non capturing closures to function pointers
(accepted, closure_to_fn_coercion, "1.19.0", Some(39817)),
// Allows attributes on struct literal fields.
(accepted, struct_field_attributes, "1.20.0", Some(38814)),
// Allows the definition of associated constants in `trait` or `impl`
// blocks.
(accepted, associated_consts, "1.20.0", Some(29646)),
// Usage of the `compile_error!` macro
(accepted, compile_error, "1.20.0", Some(40872)),
// See rust-lang/rfcs#1414. Allows code like `let x: &'static u32 = &42` to work.
(accepted, rvalue_static_promotion, "1.21.0", Some(38865)),
// Allow Drop types in constants (RFC 1440)
(accepted, drop_types_in_const, "1.22.0", Some(33156)),
// Allows the sysV64 ABI to be specified on all platforms
// instead of just the platforms on which it is the C ABI
(accepted, abi_sysv64, "1.24.0", Some(36167)),
);
// If you change this, please modify src/doc/unstable-book as well. You must
// move that documentation into the relevant place in the other docs, and
// remove the chapter on the flag.
#[derive(PartialEq, Copy, Clone, Debug)]
pub enum AttributeType {
/// Normal, builtin attribute that is consumed
/// by the compiler before the unused_attribute check
Normal,
/// Builtin attribute that may not be consumed by the compiler
/// before the unused_attribute check. These attributes
/// will be ignored by the unused_attribute lint
Whitelisted,
/// Builtin attribute that is only allowed at the crate level
CrateLevel,
}
pub enum AttributeGate {
/// Is gated by a given feature gate, reason
/// and function to check if enabled
Gated(Stability, &'static str, &'static str, fn(&Features) -> bool),
/// Ungated attribute, can be used on all release channels
Ungated,
}
impl AttributeGate {
fn is_deprecated(&self) -> bool {
match *self {
Gated(Stability::Deprecated(_), ..) => true,
_ => false,
}
}
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Stability {
Unstable,
// Argument is tracking issue link.
Deprecated(&'static str),
}
// fn() is not Debug
impl ::std::fmt::Debug for AttributeGate {
fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
match *self {
Gated(ref stab, name, expl, _) =>
write!(fmt, "Gated({:?}, {}, {})", stab, name, expl),
Ungated => write!(fmt, "Ungated")
}
}
}
macro_rules! cfg_fn {
($field: ident) => {{
fn f(features: &Features) -> bool {
features.$field
}
f as fn(&Features) -> bool
}}
}
pub fn deprecated_attributes() -> Vec<&'static (&'static str, AttributeType, AttributeGate)> {
BUILTIN_ATTRIBUTES.iter().filter(|a| a.2.is_deprecated()).collect()
}
pub fn is_builtin_attr(attr: &ast::Attribute) -> bool {
BUILTIN_ATTRIBUTES.iter().any(|&(builtin_name, _, _)| attr.check_name(builtin_name))
}
// Attributes that have a special meaning to rustc or rustdoc
pub const BUILTIN_ATTRIBUTES: &'static [(&'static str, AttributeType, AttributeGate)] = &[
// Normal attributes
("warn", Normal, Ungated),
("allow", Normal, Ungated),
("forbid", Normal, Ungated),
("deny", Normal, Ungated),
("macro_reexport", Normal, Ungated),
("macro_use", Normal, Ungated),
("macro_export", Normal, Ungated),
("plugin_registrar", Normal, Ungated),
("cfg", Normal, Ungated),
("cfg_attr", Normal, Ungated),
("main", Normal, Ungated),
("start", Normal, Ungated),
("test", Normal, Ungated),
("bench", Normal, Ungated),
("repr", Normal, Ungated),
("path", Normal, Ungated),
("abi", Normal, Ungated),
("automatically_derived", Normal, Ungated),
("no_mangle", Normal, Ungated),
("no_link", Normal, Ungated),
("derive", Normal, Ungated),
("should_panic", Normal, Ungated),
("ignore", Normal, Ungated),
("no_implicit_prelude", Normal, Ungated),
("reexport_test_harness_main", Normal, Ungated),
("link_args", Normal, Gated(Stability::Unstable,
"link_args",
"the `link_args` attribute is experimental and not \
portable across platforms, it is recommended to \
                                     use `#[link(name = \"foo\")]` instead",
cfg_fn!(link_args))),
("macro_escape", Normal, Ungated),
// RFC #1445.
("structural_match", Whitelisted, Gated(Stability::Unstable,
"structural_match",
"the semantics of constant patterns is \
not yet settled",
cfg_fn!(structural_match))),
// RFC #2008
("non_exhaustive", Whitelisted, Gated(Stability::Unstable,
"non_exhaustive",
"non exhaustive is an experimental feature",
cfg_fn!(non_exhaustive))),
("plugin", CrateLevel, Gated(Stability::Unstable,
"plugin",
"compiler plugins are experimental \
and possibly buggy",
cfg_fn!(plugin))),
("no_std", CrateLevel, Ungated),
("no_core", CrateLevel, Gated(Stability::Unstable,
"no_core",
"no_core is experimental",
cfg_fn!(no_core))),
("lang", Normal, Gated(Stability::Unstable,
"lang_items",
"language items are subject to change",
cfg_fn!(lang_items))),
("linkage", Whitelisted, Gated(Stability::Unstable,
"linkage",
"the `linkage` attribute is experimental \
and not portable across platforms",
cfg_fn!(linkage))),
("thread_local", Whitelisted, Gated(Stability::Unstable,
"thread_local",
"`#[thread_local]` is an experimental feature, and does \
not currently handle destructors. There is no \
corresponding `#[task_local]` mapping to the task \
model",
cfg_fn!(thread_local))),
("rustc_on_unimplemented", Normal, Gated(Stability::Unstable,
"on_unimplemented",
"the `#[rustc_on_unimplemented]` attribute \
is an experimental feature",
cfg_fn!(on_unimplemented))),
("rustc_const_unstable", Normal, Gated(Stability::Unstable,
"rustc_const_unstable",
"the `#[rustc_const_unstable]` attribute \
is an internal feature",
cfg_fn!(rustc_const_unstable))),
("global_allocator", Normal, Gated(Stability::Unstable,
"global_allocator",
"the `#[global_allocator]` attribute is \
an experimental feature",
cfg_fn!(global_allocator))),
("default_lib_allocator", Whitelisted, Gated(Stability::Unstable,
"allocator_internals",
"the `#[default_lib_allocator]` \
attribute is an experimental feature",
cfg_fn!(allocator_internals))),
("needs_allocator", Normal, Gated(Stability::Unstable,
"allocator_internals",
"the `#[needs_allocator]` \
attribute is an experimental \
feature",
cfg_fn!(allocator_internals))),
("panic_runtime", Whitelisted, Gated(Stability::Unstable,
"panic_runtime",
"the `#[panic_runtime]` attribute is \
an experimental feature",
cfg_fn!(panic_runtime))),
("needs_panic_runtime", Whitelisted, Gated(Stability::Unstable,
"needs_panic_runtime",
"the `#[needs_panic_runtime]` \
attribute is an experimental \
feature",
cfg_fn!(needs_panic_runtime))),
("rustc_variance", Normal, Gated(Stability::Unstable,
"rustc_attrs",
"the `#[rustc_variance]` attribute \
is just used for rustc unit tests \
and will never be stable",
cfg_fn!(rustc_attrs))),
("rustc_regions", Normal, Gated(Stability::Unstable,
"rustc_attrs",
"the `#[rustc_regions]` attribute \
is just used for rustc unit tests \
and will never be stable",
cfg_fn!(rustc_attrs))),
("rustc_error", Whitelisted, Gated(Stability::Unstable,
"rustc_attrs",
"the `#[rustc_error]` attribute \
is just used for rustc unit tests \
and will never be stable",
cfg_fn!(rustc_attrs))),
("rustc_if_this_changed", Whitelisted, Gated(Stability::Unstable,
"rustc_attrs",
"the `#[rustc_if_this_changed]` attribute \
is just used for rustc unit tests \
and will never be stable",
cfg_fn!(rustc_attrs))),
("rustc_then_this_would_need", Whitelisted, Gated(Stability::Unstable,
"rustc_attrs",
"the `#[rustc_if_this_changed]` attribute \
is just used for rustc unit tests \
and will never be stable",
cfg_fn!(rustc_attrs))),
("rustc_dirty", Whitelisted, Gated(Stability::Unstable,
"rustc_attrs",
"the `#[rustc_dirty]` attribute \
is just used for rustc unit tests \
and will never be stable",
cfg_fn!(rustc_attrs))),
("rustc_clean", Whitelisted, Gated(Stability::Unstable,
"rustc_attrs",
"the `#[rustc_clean]` attribute \
is just used for rustc unit tests \
and will never be stable",
cfg_fn!(rustc_attrs))),
("rustc_partition_reused", Whitelisted, Gated(Stability::Unstable,
"rustc_attrs",
"this attribute \
is just used for rustc unit tests \
and will never be stable",
cfg_fn!(rustc_attrs))),
("rustc_partition_translated", Whitelisted, Gated(Stability::Unstable,
"rustc_attrs",
"this attribute \
is just used for rustc unit tests \
and will never be stable",
cfg_fn!(rustc_attrs))),
("rustc_synthetic", Whitelisted, Gated(Stability::Unstable,
"rustc_attrs",
"this attribute \
is just used for rustc unit tests \
and will never be stable",
cfg_fn!(rustc_attrs))),
("rustc_symbol_name", Whitelisted, Gated(Stability::Unstable,
"rustc_attrs",
"internal rustc attributes will never be stable",
cfg_fn!(rustc_attrs))),
("rustc_item_path", Whitelisted, Gated(Stability::Unstable,
"rustc_attrs",
"internal rustc attributes will never be stable",
cfg_fn!(rustc_attrs))),
("rustc_mir", Whitelisted, Gated(Stability::Unstable,
"rustc_attrs",
"the `#[rustc_mir]` attribute \
is just used for rustc unit tests \
and will never be stable",
cfg_fn!(rustc_attrs))),
("rustc_inherit_overflow_checks", Whitelisted, Gated(Stability::Unstable,
"rustc_attrs",
"the `#[rustc_inherit_overflow_checks]` \
attribute is just used to control \
overflow checking behavior of several \
libcore functions that are inlined \
across crates and will never be stable",
cfg_fn!(rustc_attrs))),
// RFC #2094
("nll", Whitelisted, Gated(Stability::Unstable,
"nll",
"Non lexical lifetimes",
cfg_fn!(nll))),
("compiler_builtins", Whitelisted, Gated(Stability::Unstable,
"compiler_builtins",
"the `#[compiler_builtins]` attribute is used to \
identify the `compiler_builtins` crate which \
contains compiler-rt intrinsics and will never be \
stable",
cfg_fn!(compiler_builtins))),
("sanitizer_runtime", Whitelisted, Gated(Stability::Unstable,
"sanitizer_runtime",
"the `#[sanitizer_runtime]` attribute is used to \
identify crates that contain the runtime of a \
sanitizer and will never be stable",
cfg_fn!(sanitizer_runtime))),
("profiler_runtime", Whitelisted, Gated(Stability::Unstable,
"profiler_runtime",
"the `#[profiler_runtime]` attribute is used to \
identify the `profiler_builtins` crate which \
contains the profiler runtime and will never be \
stable",
cfg_fn!(profiler_runtime))),
("allow_internal_unstable", Normal, Gated(Stability::Unstable,
"allow_internal_unstable",
EXPLAIN_ALLOW_INTERNAL_UNSTABLE,
cfg_fn!(allow_internal_unstable))),
("allow_internal_unsafe", Normal, Gated(Stability::Unstable,
"allow_internal_unsafe",
EXPLAIN_ALLOW_INTERNAL_UNSAFE,
cfg_fn!(allow_internal_unsafe))),
("fundamental", Whitelisted, Gated(Stability::Unstable,
"fundamental",
"the `#[fundamental]` attribute \
is an experimental feature",
cfg_fn!(fundamental))),
("proc_macro_derive", Normal, Ungated),
("rustc_copy_clone_marker", Whitelisted, Gated(Stability::Unstable,
"rustc_attrs",
"internal implementation detail",
cfg_fn!(rustc_attrs))),
// FIXME: #14408 whitelist docs since rustdoc looks at them
("doc", Whitelisted, Ungated),
// FIXME: #14406 these are processed in trans, which happens after the
// lint pass
("cold", Whitelisted, Ungated),
("naked", Whitelisted, Gated(Stability::Unstable,
"naked_functions",
"the `#[naked]` attribute \
is an experimental feature",
cfg_fn!(naked_functions))),
("target_feature", Whitelisted, Gated(
Stability::Unstable, "target_feature",
"the `#[target_feature]` attribute is an experimental feature",
cfg_fn!(target_feature))),
("export_name", Whitelisted, Ungated),
("inline", Whitelisted, Ungated),
("link", Whitelisted, Ungated),
("link_name", Whitelisted, Ungated),
("link_section", Whitelisted, Ungated),
("no_builtins", Whitelisted, Ungated),
("no_mangle", Whitelisted, Ungated),
("no_debug", Whitelisted, Gated(
Stability::Deprecated("https://github.com/rust-lang/rust/issues/29721"),
"no_debug",
"the `#[no_debug]` attribute was an experimental feature that has been \
deprecated due to lack of demand",
cfg_fn!(no_debug))),
("omit_gdb_pretty_printer_section", Whitelisted, Gated(Stability::Unstable,
"omit_gdb_pretty_printer_section",
"the `#[omit_gdb_pretty_printer_section]` \
attribute is just used for the Rust test \
suite",
cfg_fn!(omit_gdb_pretty_printer_section))),
("unsafe_destructor_blind_to_params",
Normal,
Gated(Stability::Deprecated("https://github.com/rust-lang/rust/issues/34761"),
"dropck_parametricity",
"unsafe_destructor_blind_to_params has been replaced by \
may_dangle and will be removed in the future",
cfg_fn!(dropck_parametricity))),
("may_dangle",
Normal,
Gated(Stability::Unstable,
"dropck_eyepatch",
"may_dangle has unstable semantics and may be removed in the future",
cfg_fn!(dropck_eyepatch))),
("unwind", Whitelisted, Gated(Stability::Unstable,
"unwind_attributes",
"#[unwind] is experimental",
cfg_fn!(unwind_attributes))),
("used", Whitelisted, Gated(
Stability::Unstable, "used",
"the `#[used]` attribute is an experimental feature",
cfg_fn!(used))),
// used in resolve
("prelude_import", Whitelisted, Gated(Stability::Unstable,
"prelude_import",
"`#[prelude_import]` is for use by rustc only",
cfg_fn!(prelude_import))),
// FIXME: #14407 these are only looked at on-demand so we can't
// guarantee they'll have already been checked
("rustc_deprecated", Whitelisted, Ungated),
("must_use", Whitelisted, Ungated),
("stable", Whitelisted, Ungated),
("unstable", Whitelisted, Ungated),
("deprecated", Normal, Ungated),
("rustc_paren_sugar", Normal, Gated(Stability::Unstable,
"unboxed_closures",
"unboxed_closures are still evolving",
cfg_fn!(unboxed_closures))),
("windows_subsystem", Whitelisted, Ungated),
("proc_macro_attribute", Normal, Gated(Stability::Unstable,
"proc_macro",
"attribute proc macros are currently unstable",
cfg_fn!(proc_macro))),
("proc_macro", Normal, Gated(Stability::Unstable,
"proc_macro",
"function-like proc macros are currently unstable",
cfg_fn!(proc_macro))),
("rustc_derive_registrar", Normal, Gated(Stability::Unstable,
"rustc_derive_registrar",
"used internally by rustc",
cfg_fn!(rustc_attrs))),
("allow_fail", Normal, Gated(Stability::Unstable,
"allow_fail",
"allow_fail attribute is currently unstable",
cfg_fn!(allow_fail))),
("rustc_std_internal_symbol", Whitelisted, Gated(Stability::Unstable,
"rustc_attrs",
"this is an internal attribute that will \
never be stable",
cfg_fn!(rustc_attrs))),
// whitelists "identity-like" conversion methods to suggest on type mismatch
("rustc_conversion_suggestion", Whitelisted, Gated(Stability::Unstable,
"rustc_attrs",
"this is an internal attribute that will \
never be stable",
cfg_fn!(rustc_attrs))),
("wasm_import_memory", Whitelisted, Gated(Stability::Unstable,
"wasm_import_memory",
"wasm_import_memory attribute is currently unstable",
cfg_fn!(wasm_import_memory))),
// Crate level attributes
("crate_name", CrateLevel, Ungated),
("crate_type", CrateLevel, Ungated),
("crate_id", CrateLevel, Ungated),
("feature", CrateLevel, Ungated),
("no_start", CrateLevel, Ungated),
("no_main", CrateLevel, Ungated),
("no_builtins", CrateLevel, Ungated),
("recursion_limit", CrateLevel, Ungated),
("type_length_limit", CrateLevel, Ungated),
];
// cfg(...)'s that are feature gated
const GATED_CFGS: &[(&str, &str, fn(&Features) -> bool)] = &[
// (name in cfg, feature, function to check if the feature is enabled)
("target_feature", "cfg_target_feature", cfg_fn!(cfg_target_feature)),
("target_vendor", "cfg_target_vendor", cfg_fn!(cfg_target_vendor)),
("target_thread_local", "cfg_target_thread_local", cfg_fn!(cfg_target_thread_local)),
("target_has_atomic", "cfg_target_has_atomic", cfg_fn!(cfg_target_has_atomic)),
];
#[derive(Debug, Eq, PartialEq)]
pub struct GatedCfg {
span: Span,
index: usize,
}
impl GatedCfg {
pub fn gate(cfg: &ast::MetaItem) -> Option<GatedCfg> {
let name = cfg.name().as_str();
GATED_CFGS.iter()
.position(|info| info.0 == name)
.map(|idx| {
GatedCfg {
span: cfg.span,
index: idx
}
})
}
pub fn check_and_emit(&self, sess: &ParseSess, features: &Features) {
let (cfg, feature, has_feature) = GATED_CFGS[self.index];
if !has_feature(features) && !self.span.allows_unstable() {
let explain = format!("`cfg({})` is experimental and subject to change", cfg);
emit_feature_err(sess, feature, self.span, GateIssue::Language, &explain);
}
}
}
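// Crate-side sketch of what `GatedCfg` polices (illustrative, not part of
// this file): on nightly, the following needs the matching feature flag,
// otherwise `check_and_emit` reports the cfg as experimental.
//
//     #![feature(cfg_target_feature)]
//     #[cfg(target_feature = "avx2")]
//     fn fast_path() {}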
struct Context<'a> {
features: &'a Features,
parse_sess: &'a ParseSess,
plugin_attributes: &'a [(String, AttributeType)],
}
macro_rules! gate_feature_fn {
($cx: expr, $has_feature: expr, $span: expr, $name: expr, $explain: expr, $level: expr) => {{
let (cx, has_feature, span,
name, explain, level) = ($cx, $has_feature, $span, $name, $explain, $level);
let has_feature: bool = has_feature(&$cx.features);
debug!("gate_feature(feature = {:?}, span = {:?}); has? {}", name, span, has_feature);
if !has_feature && !span.allows_unstable() {
leveled_feature_err(cx.parse_sess, name, span, GateIssue::Language, explain, level)
.emit();
}
}}
}
macro_rules! gate_feature {
($cx: expr, $feature: ident, $span: expr, $explain: expr) => {
gate_feature_fn!($cx, |x:&Features| x.$feature, $span,
stringify!($feature), $explain, GateStrength::Hard)
};
($cx: expr, $feature: ident, $span: expr, $explain: expr, $level: expr) => {
gate_feature_fn!($cx, |x:&Features| x.$feature, $span,
stringify!($feature), $explain, $level)
};
}
impl<'a> Context<'a> {
fn check_attribute(&self, attr: &ast::Attribute, is_macro: bool) {
debug!("check_attribute(attr = {:?})", attr);
let name = unwrap_or!(attr.name(), return).as_str();
for &(n, ty, ref gateage) in BUILTIN_ATTRIBUTES {
if name == n {
if let Gated(_, name, desc, ref has_feature) = *gateage {
gate_feature_fn!(self, has_feature, attr.span, name, desc, GateStrength::Hard);
} else if name == "doc" {
if let Some(content) = attr.meta_item_list() {
if content.iter().any(|c| c.check_name("include")) {
gate_feature!(self, external_doc, attr.span,
"#[doc(include = \"...\")] is experimental"
);
}
}
}
debug!("check_attribute: {:?} is builtin, {:?}, {:?}", attr.path, ty, gateage);
return;
}
}
for &(ref n, ref ty) in self.plugin_attributes {
if attr.path == &**n {
// Plugins can't gate attributes, so we don't check for it
// unlike the code above; we only use this loop to
// short-circuit to avoid the checks below
debug!("check_attribute: {:?} is registered by a plugin, {:?}", attr.path, ty);
return;
}
}
if name.starts_with("rustc_") {
gate_feature!(self, rustc_attrs, attr.span,
"unless otherwise specified, attributes \
with the prefix `rustc_` \
are reserved for internal compiler diagnostics");
} else if name.starts_with("derive_") {
gate_feature!(self, custom_derive, attr.span, EXPLAIN_DERIVE_UNDERSCORE);
} else if !attr::is_known(attr) {
// Only run the custom attribute lint during regular
// feature gate checking. Macro gating runs
// before the plugin attributes are registered
// so we skip this then
if !is_macro {
gate_feature!(self, custom_attribute, attr.span,
&format!("The attribute `{}` is currently \
unknown to the compiler and \
may have meaning \
added to it in the future",
attr.path));
}
}
}
}
pub fn check_attribute(attr: &ast::Attribute, parse_sess: &ParseSess, features: &Features) {
let cx = Context { features: features, parse_sess: parse_sess, plugin_attributes: &[] };
cx.check_attribute(attr, true);
}
pub fn find_lang_feature_accepted_version(feature: &str) -> Option<&'static str> {
ACCEPTED_FEATURES.iter().find(|t| t.0 == feature).map(|t| t.1)
}
fn find_lang_feature_issue(feature: &str) -> Option<u32> {
if let Some(info) = ACTIVE_FEATURES.iter().find(|t| t.0 == feature) {
let issue = info.2;
// FIXME (#28244): enforce that active features have issue numbers
// assert!(issue.is_some())
issue
} else {
// search in Accepted, Removed, or Stable Removed features
let found = ACCEPTED_FEATURES.iter().chain(REMOVED_FEATURES).chain(STABLE_REMOVED_FEATURES)
.find(|t| t.0 == feature);
match found {
Some(&(_, _, issue)) => issue,
None => panic!("Feature `{}` is not declared anywhere", feature),
}
}
}
pub enum GateIssue {
Language,
Library(Option<u32>)
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum GateStrength {
/// A hard error. (Most feature gates should use this.)
Hard,
/// Only a warning. (Use this only as backwards-compatibility demands.)
Soft,
}
pub fn emit_feature_err(sess: &ParseSess, feature: &str, span: Span, issue: GateIssue,
explain: &str) {
feature_err(sess, feature, span, issue, explain).emit();
}
pub fn feature_err<'a>(sess: &'a ParseSess, feature: &str, span: Span, issue: GateIssue,
explain: &str) -> DiagnosticBuilder<'a> {
leveled_feature_err(sess, feature, span, issue, explain, GateStrength::Hard)
}
fn leveled_feature_err<'a>(sess: &'a ParseSess, feature: &str, span: Span, issue: GateIssue,
explain: &str, level: GateStrength) -> DiagnosticBuilder<'a> {
let diag = &sess.span_diagnostic;
let issue = match issue {
GateIssue::Language => find_lang_feature_issue(feature),
GateIssue::Library(lib) => lib,
};
let explanation = if let Some(n) = issue {
format!("{} (see issue #{})", explain, n)
} else {
explain.to_owned()
};
let mut err = match level {
GateStrength::Hard => {
diag.struct_span_err_with_code(span, &explanation, stringify_error_code!(E0658))
}
GateStrength::Soft => diag.struct_span_warn(span, &explanation),
};
// #23973: do not suggest `#![feature(...)]` if we are in beta/stable
if sess.unstable_features.is_nightly_build() {
err.help(&format!("add #![feature({})] to the \
crate attributes to enable",
feature));
}
// If we're on stable and only emitting a "soft" warning, add a note to
// clarify that the feature isn't "on" (rather than being on but
// warning-worthy).
if !sess.unstable_features.is_nightly_build() && level == GateStrength::Soft {
err.help("a nightly build of the compiler is required to enable this feature");
}
err
}
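// Roughly what the assembled diagnostic reads like on a nightly build
// (rendering is illustrative; the issue number comes from
// find_lang_feature_issue and is elided here):
//
//     error[E0658]: yield syntax is experimental (see issue #NNNNN)
//     help: add #![feature(generators)] to the crate attributes to enable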
const EXPLAIN_BOX_SYNTAX: &'static str =
"box expression syntax is experimental; you can call `Box::new` instead.";
pub const EXPLAIN_STMT_ATTR_SYNTAX: &'static str =
"attributes on non-item statements and expressions are experimental.";
pub const EXPLAIN_ASM: &'static str =
"inline assembly is not stable enough for use and is subject to change";
pub const EXPLAIN_GLOBAL_ASM: &'static str =
"`global_asm!` is not stable enough for use and is subject to change";
pub const EXPLAIN_LOG_SYNTAX: &'static str =
"`log_syntax!` is not stable enough for use and is subject to change";
pub const EXPLAIN_CONCAT_IDENTS: &'static str =
"`concat_idents` is not stable enough for use and is subject to change";
pub const EXPLAIN_TRACE_MACROS: &'static str =
"`trace_macros` is not stable enough for use and is subject to change";
pub const EXPLAIN_ALLOW_INTERNAL_UNSTABLE: &'static str =
"allow_internal_unstable side-steps feature gating and stability checks";
pub const EXPLAIN_ALLOW_INTERNAL_UNSAFE: &'static str =
"allow_internal_unsafe side-steps the unsafe_code lint";
pub const EXPLAIN_CUSTOM_DERIVE: &'static str =
"`#[derive]` for custom traits is deprecated and will be removed in the future.";
pub const EXPLAIN_DEPR_CUSTOM_DERIVE: &'static str =
"`#[derive]` for custom traits is deprecated and will be removed in the future. \
Prefer using procedural macro custom derive.";
pub const EXPLAIN_DERIVE_UNDERSCORE: &'static str =
"attributes of the form `#[derive_*]` are reserved for the compiler";
pub const EXPLAIN_VIS_MATCHER: &'static str =
":vis fragment specifier is experimental and subject to change";
pub const EXPLAIN_LIFETIME_MATCHER: &'static str =
":lifetime fragment specifier is experimental and subject to change";
pub const EXPLAIN_PLACEMENT_IN: &'static str =
"placement-in expression syntax is experimental and subject to change.";
pub const EXPLAIN_UNSIZED_TUPLE_COERCION: &'static str =
"Unsized tuple coercion is not stable enough for use and is subject to change";
struct PostExpansionVisitor<'a> {
context: &'a Context<'a>,
}
macro_rules! gate_feature_post {
($cx: expr, $feature: ident, $span: expr, $explain: expr) => {{
let (cx, span) = ($cx, $span);
if !span.allows_unstable() {
gate_feature!(cx.context, $feature, span, $explain)
}
}};
($cx: expr, $feature: ident, $span: expr, $explain: expr, $level: expr) => {{
let (cx, span) = ($cx, $span);
if !span.allows_unstable() {
gate_feature!(cx.context, $feature, span, $explain, $level)
}
}}
}
impl<'a> PostExpansionVisitor<'a> {
fn check_abi(&self, abi: Abi, span: Span) {
match abi {
Abi::RustIntrinsic => {
gate_feature_post!(&self, intrinsics, span,
"intrinsics are subject to change");
},
Abi::PlatformIntrinsic => {
gate_feature_post!(&self, platform_intrinsics, span,
"platform intrinsics are experimental and possibly buggy");
},
Abi::Vectorcall => {
gate_feature_post!(&self, abi_vectorcall, span,
"vectorcall is experimental and subject to change");
},
Abi::Thiscall => {
gate_feature_post!(&self, abi_thiscall, span,
"thiscall is experimental and subject to change");
},
Abi::RustCall => {
gate_feature_post!(&self, unboxed_closures, span,
"rust-call ABI is subject to change");
},
Abi::PtxKernel => {
gate_feature_post!(&self, abi_ptx, span,
"PTX ABIs are experimental and subject to change");
},
Abi::Unadjusted => {
gate_feature_post!(&self, abi_unadjusted, span,
"unadjusted ABI is an implementation detail and perma-unstable");
},
Abi::Msp430Interrupt => {
gate_feature_post!(&self, abi_msp430_interrupt, span,
"msp430-interrupt ABI is experimental and subject to change");
},
Abi::X86Interrupt => {
gate_feature_post!(&self, abi_x86_interrupt, span,
"x86-interrupt ABI is experimental and subject to change");
},
// Stable
Abi::Cdecl |
Abi::Stdcall |
Abi::Fastcall |
Abi::Aapcs |
Abi::Win64 |
Abi::SysV64 |
Abi::Rust |
Abi::C |
Abi::System => {}
}
}
}
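// Illustrative inputs to `check_abi` above (derived from its match arms):
//
//     extern "vectorcall" fn f() {}   // gated behind `abi_vectorcall`
//     extern "C" fn g() {}            // stable ABI, passes silently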
fn contains_novel_literal(item: &ast::MetaItem) -> bool {
use ast::MetaItemKind::*;
use ast::NestedMetaItemKind::*;
match item.node {
Word => false,
NameValue(ref lit) => !lit.node.is_str(),
List(ref list) => list.iter().any(|li| {
match li.node {
MetaItem(ref mi) => contains_novel_literal(mi),
Literal(_) => true,
}
}),
}
}
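// Truth table for `contains_novel_literal` (derived from the match above):
//
//     #[attr]               -> false (bare word)
//     #[attr = "s"]         -> false (string literal)
//     #[attr = 42]          -> true  (non-string literal)
//     #[attr(list(1, 2))]   -> true  (literals nested in a list)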
// Bans nested `impl Trait`, e.g. `impl Into<impl Debug>`.
// Nested `impl Trait` _is_ allowed in associated type position,
// e.g `impl Iterator<Item=impl Debug>`
struct NestedImplTraitVisitor<'a> {
context: &'a Context<'a>,
is_in_impl_trait: bool,
}
impl<'a> NestedImplTraitVisitor<'a> {
fn with_impl_trait<F>(&mut self, is_in_impl_trait: bool, f: F)
where F: FnOnce(&mut NestedImplTraitVisitor<'a>)
{
let old_is_in_impl_trait = self.is_in_impl_trait;
self.is_in_impl_trait = is_in_impl_trait;
f(self);
self.is_in_impl_trait = old_is_in_impl_trait;
}
}
impl<'a> Visitor<'a> for NestedImplTraitVisitor<'a> {
fn visit_ty(&mut self, t: &'a ast::Ty) {
if let ast::TyKind::ImplTrait(_) = t.node {
if self.is_in_impl_trait {
gate_feature_post!(&self, nested_impl_trait, t.span,
"nested `impl Trait` is experimental"
);
}
self.with_impl_trait(true, |this| visit::walk_ty(this, t));
} else {
visit::walk_ty(self, t);
}
}
fn visit_path_parameters(&mut self, _: Span, path_parameters: &'a ast::PathParameters) {
match *path_parameters {
ast::PathParameters::AngleBracketed(ref params) => {
for type_ in ¶ms.types {
self.visit_ty(type_);
}
for type_binding in ¶ms.bindings {
// Type bindings such as `Item=impl Debug` in `Iterator<Item=Debug>`
// are allowed to contain nested `impl Trait`.
self.with_impl_trait(false, |this| visit::walk_ty(this, &type_binding.ty));
}
}
ast::PathParameters::Parenthesized(ref params) => {
for type_ in ¶ms.inputs {
self.visit_ty(type_);
}
if let Some(ref type_) = params.output {
// `-> Foo` syntax is essentially an associated type binding,
// so it is also allowed to contain nested `impl Trait`.
self.with_impl_trait(false, |this| visit::walk_ty(this, type_));
}
}
}
}
}
impl<'a> PostExpansionVisitor<'a> {
fn whole_crate_feature_gates(&mut self, krate: &ast::Crate) {
visit::walk_crate(
&mut NestedImplTraitVisitor {
context: self.context,
is_in_impl_trait: false,
}, krate);
for &(ident, span) in &*self.context.parse_sess.non_modrs_mods.borrow() {
if !span.allows_unstable() {
let cx = &self.context;
let level = GateStrength::Hard;
let has_feature = cx.features.non_modrs_mods;
let name = "non_modrs_mods";
debug!("gate_feature(feature = {:?}, span = {:?}); has? {}",
name, span, has_feature);
if !has_feature && !span.allows_unstable() {
leveled_feature_err(
cx.parse_sess, name, span, GateIssue::Language,
"mod statements in non-mod.rs files are unstable", level
)
.help(&format!("on stable builds, rename this file to {}{}mod.rs",
ident, path::MAIN_SEPARATOR))
.emit();
}
}
}
}
}
impl<'a> Visitor<'a> for PostExpansionVisitor<'a> {
fn visit_attribute(&mut self, attr: &ast::Attribute) {
if !attr.span.allows_unstable() {
// check for gated attributes
self.context.check_attribute(attr, false);
}
if attr.check_name("doc") {
if let Some(content) = attr.meta_item_list() {
if content.len() == 1 && content[0].check_name("cfg") {
gate_feature_post!(&self, doc_cfg, attr.span,
"#[doc(cfg(...))] is experimental"
);
} else if content.iter().any(|c| c.check_name("masked")) {
gate_feature_post!(&self, doc_masked, attr.span,
"#[doc(masked)] is experimental"
);
} else if content.iter().any(|c| c.check_name("spotlight")) {
gate_feature_post!(&self, doc_spotlight, attr.span,
"#[doc(spotlight)] is experimental"
);
}
}
}
if self.context.features.proc_macro && attr::is_known(attr) {
return
}
let meta = panictry!(attr.parse_meta(self.context.parse_sess));
if contains_novel_literal(&meta) {
gate_feature_post!(&self, attr_literals, attr.span,
"non-string literals in attributes, or string \
literals in top-level positions, are experimental");
}
}
fn visit_name(&mut self, sp: Span, name: ast::Name) {
if !name.as_str().is_ascii() {
gate_feature_post!(&self,
non_ascii_idents,
self.context.parse_sess.codemap().def_span(sp),
"non-ascii idents are not fully supported.");
}
}
fn visit_item(&mut self, i: &'a ast::Item) {
match i.node {
ast::ItemKind::ExternCrate(_) => {
if let Some(attr) = attr::find_by_name(&i.attrs[..], "macro_reexport") {
gate_feature_post!(&self, macro_reexport, attr.span,
"macros re-exports are experimental \
and possibly buggy");
}
}
ast::ItemKind::ForeignMod(ref foreign_module) => {
self.check_abi(foreign_module.abi, i.span);
}
ast::ItemKind::Fn(..) => {
if attr::contains_name(&i.attrs[..], "plugin_registrar") {
gate_feature_post!(&self, plugin_registrar, i.span,
"compiler plugins are experimental and possibly buggy");
}
if attr::contains_name(&i.attrs[..], "start") {
gate_feature_post!(&self, start, i.span,
"a #[start] function is an experimental \
feature whose signature may change \
over time");
}
if attr::contains_name(&i.attrs[..], "main") {
gate_feature_post!(&self, main, i.span,
"declaration of a nonstandard #[main] \
function may change over time, for now \
a top-level `fn main()` is required");
}
if let Some(attr) = attr::find_by_name(&i.attrs[..], "must_use") {
gate_feature_post!(&self, fn_must_use, attr.span,
"`#[must_use]` on functions is experimental",
GateStrength::Soft);
}
}
ast::ItemKind::Struct(..) => {
if let Some(attr) = attr::find_by_name(&i.attrs[..], "repr") {
for item in attr.meta_item_list().unwrap_or_else(Vec::new) {
if item.check_name("simd") {
gate_feature_post!(&self, repr_simd, attr.span,
"SIMD types are experimental and possibly buggy");
}
if item.check_name("align") {
gate_feature_post!(&self, repr_align, attr.span,
"the struct `#[repr(align(u16))]` attribute \
is experimental");
}
if item.check_name("transparent") {
gate_feature_post!(&self, repr_transparent, attr.span,
"the `#[repr(transparent)]` attribute \
is experimental");
}
}
}
}
ast::ItemKind::TraitAlias(..) => {
gate_feature_post!(&self, trait_alias,
i.span,
"trait aliases are not yet fully implemented");
}
ast::ItemKind::Impl(_, polarity, defaultness, _, _, _, ref impl_items) => {
if polarity == ast::ImplPolarity::Negative {
gate_feature_post!(&self, optin_builtin_traits,
i.span,
"negative trait bounds are not yet fully implemented; \
use marker types for now");
}
if let ast::Defaultness::Default = defaultness {
gate_feature_post!(&self, specialization,
i.span,
"specialization is unstable");
}
for impl_item in impl_items {
if let ast::ImplItemKind::Method(..) = impl_item.node {
if let Some(attr) = attr::find_by_name(&impl_item.attrs[..], "must_use") {
gate_feature_post!(&self, fn_must_use, attr.span,
"`#[must_use]` on methods is experimental",
GateStrength::Soft);
}
}
}
}
ast::ItemKind::Trait(ast::IsAuto::Yes, ..) => {
gate_feature_post!(&self, optin_builtin_traits,
i.span,
"auto traits are experimental and possibly buggy");
}
ast::ItemKind::MacroDef(ast::MacroDef { legacy: false, .. }) => {
let msg = "`macro` is experimental";
gate_feature_post!(&self, decl_macro, i.span, msg);
}
_ => {}
}
visit::walk_item(self, i);
}
fn visit_foreign_item(&mut self, i: &'a ast::ForeignItem) {
match i.node {
ast::ForeignItemKind::Fn(..) |
ast::ForeignItemKind::Static(..) => {
let link_name = attr::first_attr_value_str_by_name(&i.attrs, "link_name");
let links_to_llvm = match link_name {
Some(val) => val.as_str().starts_with("llvm."),
_ => false
};
if links_to_llvm {
gate_feature_post!(&self, link_llvm_intrinsics, i.span,
"linking to LLVM intrinsics is experimental");
}
}
ast::ForeignItemKind::Ty => {
gate_feature_post!(&self, extern_types, i.span,
"extern types are experimental");
}
}
visit::walk_foreign_item(self, i)
}
fn visit_ty(&mut self, ty: &'a ast::Ty) {
match ty.node {
ast::TyKind::BareFn(ref bare_fn_ty) => {
self.check_abi(bare_fn_ty.abi, ty.span);
}
ast::TyKind::Never => {
gate_feature_post!(&self, never_type, ty.span,
"The `!` type is experimental");
},
ast::TyKind::TraitObject(_, ast::TraitObjectSyntax::Dyn) => {
gate_feature_post!(&self, dyn_trait, ty.span,
"`dyn Trait` syntax is unstable");
}
_ => {}
}
visit::walk_ty(self, ty)
}
fn visit_fn_ret_ty(&mut self, ret_ty: &'a ast::FunctionRetTy) {
if let ast::FunctionRetTy::Ty(ref output_ty) = *ret_ty {
if output_ty.node != ast::TyKind::Never {
self.visit_ty(output_ty)
}
}
}
fn visit_expr(&mut self, e: &'a ast::Expr) {
match e.node {
ast::ExprKind::Box(_) => {
gate_feature_post!(&self, box_syntax, e.span, EXPLAIN_BOX_SYNTAX);
}
ast::ExprKind::Type(..) => {
gate_feature_post!(&self, type_ascription, e.span,
"type ascription is experimental");
}
ast::ExprKind::Range(_, _, ast::RangeLimits::Closed) => {
gate_feature_post!(&self, inclusive_range_syntax,
e.span,
"inclusive range syntax is experimental");
}
ast::ExprKind::InPlace(..) => {
gate_feature_post!(&self, placement_in_syntax, e.span, EXPLAIN_PLACEMENT_IN);
}
ast::ExprKind::Yield(..) => {
gate_feature_post!(&self, generators,
e.span,
"yield syntax is experimental");
}
ast::ExprKind::Lit(ref lit) => {
if let ast::LitKind::Int(_, ref ty) = lit.node {
match *ty {
ast::LitIntType::Signed(ast::IntTy::I128) |
ast::LitIntType::Unsigned(ast::UintTy::U128) => {
gate_feature_post!(&self, i128_type, e.span,
"128-bit integers are not stable");
}
_ => {}
}
}
}
ast::ExprKind::Catch(_) => {
gate_feature_post!(&self, catch_expr, e.span, "`catch` expression is experimental");
}
_ => {}
}
visit::walk_expr(self, e);
}
fn visit_arm(&mut self, arm: &'a ast::Arm) {
if let Some(span) = arm.beginning_vert {
gate_feature_post!(&self, match_beginning_vert,
span,
"Use of a '|' at the beginning of a match arm is experimental")
}
visit::walk_arm(self, arm)
}
fn visit_pat(&mut self, pattern: &'a ast::Pat) {
match pattern.node {
PatKind::Slice(_, Some(_), ref last) if !last.is_empty() => {
gate_feature_post!(&self, advanced_slice_patterns,
pattern.span,
"multiple-element slice matches anywhere \
but at the end of a slice (e.g. \
`[0, ..xs, 0]`) are experimental")
}
PatKind::Slice(..) => {
gate_feature_post!(&self, slice_patterns,
pattern.span,
"slice pattern syntax is experimental");
}
PatKind::Box(..) => {
gate_feature_post!(&self, box_patterns,
pattern.span,
"box pattern syntax is experimental");
}
PatKind::Range(_, _, RangeEnd::Excluded) => {
gate_feature_post!(&self, exclusive_range_pattern, pattern.span,
"exclusive range pattern syntax is experimental");
}
PatKind::Range(_, _, RangeEnd::Included(RangeSyntax::DotDotEq)) => {
gate_feature_post!(&self, dotdoteq_in_patterns, pattern.span,
"`..=` syntax in patterns is experimental");
}
_ => {}
}
visit::walk_pat(self, pattern)
}
fn visit_fn(&mut self,
fn_kind: FnKind<'a>,
fn_decl: &'a ast::FnDecl,
span: Span,
_node_id: NodeId) {
// check for const fn declarations
if let FnKind::ItemFn(_, _, Spanned { node: ast::Constness::Const, .. }, _, _, _) =
fn_kind {
gate_feature_post!(&self, const_fn, span, "const fn is unstable");
}
        // stability of const fn methods is covered in
// visit_trait_item and visit_impl_item below; this is
// because default methods don't pass through this
// point.
match fn_kind {
FnKind::ItemFn(_, _, _, abi, _, _) |
FnKind::Method(_, &ast::MethodSig { abi, .. }, _, _) => {
self.check_abi(abi, span);
}
_ => {}
}
visit::walk_fn(self, fn_kind, fn_decl, span);
}
fn visit_trait_item(&mut self, ti: &'a ast::TraitItem) {
match ti.node {
ast::TraitItemKind::Method(ref sig, ref block) => {
if block.is_none() {
self.check_abi(sig.abi, ti.span);
}
if sig.constness.node == ast::Constness::Const {
gate_feature_post!(&self, const_fn, ti.span, "const fn is unstable");
}
}
ast::TraitItemKind::Type(_, ref default) => {
// We use two if statements instead of something like match guards so that both
// of these errors can be emitted if both cases apply.
if default.is_some() {
gate_feature_post!(&self, associated_type_defaults, ti.span,
"associated type defaults are unstable");
}
if ti.generics.is_parameterized() {
gate_feature_post!(&self, generic_associated_types, ti.span,
"generic associated types are unstable");
}
}
_ => {}
}
visit::walk_trait_item(self, ti);
}
fn visit_impl_item(&mut self, ii: &'a ast::ImplItem) {
if ii.defaultness == ast::Defaultness::Default {
gate_feature_post!(&self, specialization,
ii.span,
"specialization is unstable");
}
match ii.node {
ast::ImplItemKind::Method(ref sig, _) => {
if sig.constness.node == ast::Constness::Const {
gate_feature_post!(&self, const_fn, ii.span, "const fn is unstable");
}
}
ast::ImplItemKind::Type(_) if ii.generics.is_parameterized() => {
gate_feature_post!(&self, generic_associated_types, ii.span,
"generic associated types are unstable");
}
_ => {}
}
visit::walk_impl_item(self, ii);
}
fn visit_path(&mut self, path: &'a ast::Path, _id: NodeId) {
for segment in &path.segments {
if segment.identifier.name == keywords::Crate.name() {
gate_feature_post!(&self, crate_in_paths, segment.span,
"`crate` in paths is experimental");
} else if segment.identifier.name == keywords::Extern.name() {
gate_feature_post!(&self, extern_in_paths, segment.span,
"`extern` in paths is experimental");
}
}
visit::walk_path(self, path);
}
fn visit_use_tree(&mut self, use_tree: &'a ast::UseTree, id: NodeId, nested: bool) {
if nested {
match use_tree.kind {
ast::UseTreeKind::Simple(_) => {
if use_tree.prefix.segments.len() != 1 {
gate_feature_post!(&self, use_nested_groups, use_tree.span,
"paths in `use` groups are experimental");
}
}
ast::UseTreeKind::Glob => {
gate_feature_post!(&self, use_nested_groups, use_tree.span,
"glob imports in `use` groups are experimental");
}
ast::UseTreeKind::Nested(_) => {
gate_feature_post!(&self, use_nested_groups, use_tree.span,
"nested groups in `use` are experimental");
}
}
}
visit::walk_use_tree(self, use_tree, id);
}
fn visit_vis(&mut self, vis: &'a ast::Visibility) {
if let ast::Visibility::Crate(span, ast::CrateSugar::JustCrate) = *vis {
gate_feature_post!(&self, crate_visibility_modifier, span,
"`crate` visibility modifier is experimental");
}
visit::walk_vis(self, vis);
}
fn visit_generic_param(&mut self, param: &'a ast::GenericParam) {
let (attrs, explain) = match *param {
ast::GenericParam::Lifetime(ref ld) =>
(&ld.attrs, "attributes on lifetime bindings are experimental"),
ast::GenericParam::Type(ref t) =>
(&t.attrs, "attributes on type parameter bindings are experimental"),
};
if !attrs.is_empty() {
gate_feature_post!(&self, generic_param_attrs, attrs[0].span, explain);
}
visit::walk_generic_param(self, param)
}
fn visit_lifetime(&mut self, lt: &'a ast::Lifetime) {
if lt.ident.name == "'_" {
gate_feature_post!(&self, underscore_lifetimes, lt.span,
"underscore lifetimes are unstable");
}
visit::walk_lifetime(self, lt)
}
}
pub fn get_features(span_handler: &Handler, krate_attrs: &[ast::Attribute]) -> Features {
let mut features = Features::new();
let mut feature_checker = FeatureChecker::default();
for attr in krate_attrs {
if !attr.check_name("feature") {
continue
}
match attr.meta_item_list() {
None => {
span_err!(span_handler, attr.span, E0555,
"malformed feature attribute, expected #![feature(...)]");
}
Some(list) => {
for mi in list {
let name = if let Some(word) = mi.word() {
word.name()
} else {
span_err!(span_handler, mi.span, E0556,
"malformed feature, expected just one word");
continue
};
if let Some(&(_, _, _, set)) = ACTIVE_FEATURES.iter()
.find(|& &(n, _, _, _)| name == n) {
set(&mut features, mi.span);
feature_checker.collect(&features, mi.span);
}
else if let Some(&(_, _, _)) = REMOVED_FEATURES.iter()
.find(|& &(n, _, _)| name == n)
.or_else(|| STABLE_REMOVED_FEATURES.iter()
.find(|& &(n, _, _)| name == n)) {
span_err!(span_handler, mi.span, E0557, "feature has been removed");
}
else if let Some(&(_, _, _)) = ACCEPTED_FEATURES.iter()
.find(|& &(n, _, _)| name == n) {
features.declared_stable_lang_features.push((name, mi.span));
} else {
features.declared_lib_features.push((name, mi.span));
}
}
}
}
}
feature_checker.check(span_handler);
features
}
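// The crate-attribute shapes `get_features` distinguishes (feature names
// here are illustrative):
//
//     #![feature]                 -> E0555: malformed feature attribute
//     #![feature(foo = "bar")]    -> E0556: expected just one word
//     #![feature(some_removed)]   -> E0557: feature has been removed
//     #![feature(box_syntax)]     -> recorded as an active language feature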
/// A collector for mutually exclusive and interdependent features and their flag spans.
#[derive(Default)]
struct FeatureChecker {
proc_macro: Option<Span>,
custom_attribute: Option<Span>,
copy_closures: Option<Span>,
clone_closures: Option<Span>,
}
impl FeatureChecker {
// If this method turns out to be a hotspot due to branching,
// the branching can be eliminated by modifying `set!()` to set these spans
// only for the features that need to be checked for mutual exclusion.
fn collect(&mut self, features: &Features, span: Span) {
if features.proc_macro {
// If self.proc_macro is None, set to Some(span)
self.proc_macro = self.proc_macro.or(Some(span));
}
if features.custom_attribute {
self.custom_attribute = self.custom_attribute.or(Some(span));
}
if features.copy_closures {
self.copy_closures = self.copy_closures.or(Some(span));
}
if features.clone_closures {
self.clone_closures = self.clone_closures.or(Some(span));
}
}
fn check(self, handler: &Handler) {
if let (Some(pm_span), Some(ca_span)) = (self.proc_macro, self.custom_attribute) {
handler.struct_span_err(pm_span, "Cannot use `#![feature(proc_macro)]` and \
                                              `#![feature(custom_attribute)]` at the same time")
.span_note(ca_span, "`#![feature(custom_attribute)]` declared here")
.emit();
panic!(FatalError);
}
if let (Some(span), None) = (self.copy_closures, self.clone_closures) {
handler.struct_span_err(span, "`#![feature(copy_closures)]` can only be used with \
`#![feature(clone_closures)]`")
.span_note(span, "`#![feature(copy_closures)]` declared here")
.emit();
panic!(FatalError);
}
}
}
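// The flag combinations `check` rejects (illustrative crate attributes):
//
//     #![feature(proc_macro)]
//     #![feature(custom_attribute)]   // -> hard error: mutually exclusive
//
//     #![feature(copy_closures)]      // -> hard error unless
//                                     //    clone_closures is also enabled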
pub fn check_crate(krate: &ast::Crate,
sess: &ParseSess,
features: &Features,
plugin_attributes: &[(String, AttributeType)],
unstable: UnstableFeatures) {
maybe_stage_features(&sess.span_diagnostic, krate, unstable);
let ctx = Context {
features,
parse_sess: sess,
plugin_attributes,
};
let visitor = &mut PostExpansionVisitor { context: &ctx };
visitor.whole_crate_feature_gates(krate);
visit::walk_crate(visitor, krate);
}
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub enum UnstableFeatures {
/// Hard errors for unstable features are active, as on
/// beta/stable channels.
Disallow,
/// Allow features to be activated, as on nightly.
Allow,
/// Errors are bypassed for bootstrapping. This is required any time
/// during the build that feature-related lints are set to warn or above
/// because the build turns on warnings-as-errors and uses lots of unstable
/// features. As a result, this is always required for building Rust itself.
Cheat
}
impl UnstableFeatures {
pub fn from_environment() -> UnstableFeatures {
// Whether this is a feature-staged build, i.e. on the beta or stable channel
let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
// Whether we should enable unstable features for bootstrapping
let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();
match (disable_unstable_features, bootstrap) {
(_, true) => UnstableFeatures::Cheat,
(true, _) => UnstableFeatures::Disallow,
(false, _) => UnstableFeatures::Allow
}
}
pub fn is_nightly_build(&self) -> bool {
match *self {
UnstableFeatures::Allow | UnstableFeatures::Cheat => true,
_ => false,
}
}
}
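// Decision table implemented by `from_environment` (from the match above):
//
//     RUSTC_BOOTSTRAP set                 -> Cheat (wins over everything)
//     CFG_DISABLE_UNSTABLE_FEATURES set   -> Disallow (beta/stable channel)
//     neither                             -> Allow (nightly)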
fn maybe_stage_features(span_handler: &Handler, krate: &ast::Crate,
unstable: UnstableFeatures) {
let allow_features = match unstable {
UnstableFeatures::Allow => true,
UnstableFeatures::Disallow => false,
UnstableFeatures::Cheat => true
};
if !allow_features {
for attr in &krate.attrs {
if attr.check_name("feature") {
let release_channel = option_env!("CFG_RELEASE_CHANNEL").unwrap_or("(unknown)");
span_err!(span_handler, attr.span, E0554,
"#![feature] may not be used on the {} release channel",
release_channel);
}
}
}
}
| 42.395383 | 100 | 0.522064 |
50c5aa3d3cd19a42d966c60d70e085d90f099b1c | 1,782 | extern crate rusoto_cloudfront;
extern crate rusoto_core;
extern crate clap;
use clap::{App, Arg};
use rusoto_cloudfront::{
CloudFront, CloudFrontClient, GetDistributionRequest, GetDistributionResult,
};
use rusoto_core::Region;
async fn get_distribution(
client: &CloudFrontClient,
get_distribution_request: GetDistributionRequest,
) -> GetDistributionResult {
    client
        .get_distribution(get_distribution_request)
        .await
        .unwrap()
}
fn get_distribution_result(resp: GetDistributionResult) {
match resp.distribution {
Some(distribution) => {
println!(
"Distribution ARN:{}\n Domain Name: {}",
distribution.arn, distribution.domain_name
)
}
None => println!("Unable to retrieve distribution information"),
}
}
fn main() {
let matches = App::new("Example Get Distributions call using Rust")
.version("1.0")
.author("rilindo.foster@<[email protected]")
.about("Get Distributions")
.arg(
Arg::with_name("distribution_id")
.short("d")
.long("distribution_id")
.help("Set Distribution ID")
.required(true)
.takes_value(true),
)
.get_matches();
let distribution_id = matches.value_of("distribution_id").unwrap().to_string();
let client = CloudFrontClient::new(Region::default());
let get_distribution_request = GetDistributionRequest {
id: distribution_id,
..Default::default()
};
let mut rt = tokio::runtime::Runtime::new().unwrap();
let resp = rt.block_on(get_distribution(&client, get_distribution_request));
    get_distribution_result(resp);
}
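// Example invocation (the distribution ID below is illustrative):
//
//     cargo run -- --distribution_id E2EXAMPLEID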
| 28.741935 | 83 | 0.639731 |
0ed38638ed8dfea272c25ba5244b2bf598725485 | 1,063 | use crate::prelude::*;
use nu_engine::WholeStreamCommand;
use nu_errors::ShellError;
use nu_protocol::{dataframe::NuDataFrame, Signature, UntaggedValue};
pub struct Command;
impl WholeStreamCommand for Command {
fn name(&self) -> &str {
"dataframe"
}
fn usage(&self) -> &str {
"Creates a dataframe from pipelined Table or List "
}
fn signature(&self) -> Signature {
Signature::build("dataframe")
}
fn run(&self, args: CommandArgs) -> Result<OutputStream, ShellError> {
let tag = args.call_info.name_tag.clone();
let args = args.evaluate_once()?;
let df = NuDataFrame::try_from_iter(args.input, &tag)?;
let init = InputStream::one(UntaggedValue::Dataframe(df).into_value(&tag));
Ok(init.to_output_stream())
}
fn examples(&self) -> Vec<Example> {
vec![Example {
description: "Takes an input stream and converts it to a dataframe",
example: "echo [[a b];[1 2] [3 4]] | dataframe",
result: None,
}]
}
}
| 27.25641 | 83 | 0.609595 |
ff2aa5883382dae1ce32af03c30969e63ca28eba | 4,003 | use super::{Engine, SpecTransform};
use crate::pb::*;
use anyhow::Result;
use bytes::Bytes;
use image::{DynamicImage, ImageBuffer, ImageOutputFormat};
use lazy_static::lazy_static;
use photon_rs::{
effects, filters, multiple, native::open_image_from_bytes, transform, PhotonImage,
};
use std::convert::TryFrom;
lazy_static! {
    // Pre-load the watermark file into a static variable
static ref WATERMARK: PhotonImage = {
        // Here you need to copy the corresponding image from the github project
        // (https://github.com/tyrchen/geektime-rust/blob/master/05_thumbor/rust-logo.png)
        // into the project root directory.
        // At compile time, the include_bytes! macro reads the file directly into the compiled binary.
let data = include_bytes!("../../rust-logo.png");
let watermark = open_image_from_bytes(data).unwrap();
transform::resize(&watermark, 64, 64, transform::SamplingFilter::Nearest)
};
}
// We currently support the Photon engine
pub struct Photon(PhotonImage);
// Convert from Bytes into the Photon struct
impl TryFrom<Bytes> for Photon {
type Error = anyhow::Error;
fn try_from(data: Bytes) -> Result<Self, Self::Error> {
Ok(Self(open_image_from_bytes(&data)?))
}
}
impl Engine for Photon {
fn apply(&mut self, specs: &[Spec]) {
for spec in specs.iter() {
match spec.data {
Some(spec::Data::Crop(ref v)) => self.transform(v),
Some(spec::Data::Contrast(ref v)) => self.transform(v),
Some(spec::Data::Filter(ref v)) => self.transform(v),
Some(spec::Data::Fliph(ref v)) => self.transform(v),
Some(spec::Data::Flipv(ref v)) => self.transform(v),
Some(spec::Data::Resize(ref v)) => self.transform(v),
Some(spec::Data::Watermark(ref v)) => self.transform(v),
                // Ignore any spec we don't recognize yet
_ => {}
}
}
}
fn generate(self, format: ImageOutputFormat) -> Vec<u8> {
image_to_buf(self.0, format)
}
}
impl SpecTransform<&Crop> for Photon {
fn transform(&mut self, op: &Crop) {
let img = transform::crop(&mut self.0, op.x1, op.y1, op.x2, op.y2);
self.0 = img;
}
}
impl SpecTransform<&Contrast> for Photon {
fn transform(&mut self, op: &Contrast) {
effects::adjust_contrast(&mut self.0, op.contrast);
}
}
impl SpecTransform<&Flipv> for Photon {
fn transform(&mut self, _op: &Flipv) {
transform::flipv(&mut self.0)
}
}
impl SpecTransform<&Fliph> for Photon {
fn transform(&mut self, _op: &Fliph) {
transform::fliph(&mut self.0)
}
}
impl SpecTransform<&Filter> for Photon {
fn transform(&mut self, op: &Filter) {
match filter::Filter::from_i32(op.filter) {
Some(filter::Filter::Unspecified) => {}
Some(f) => filters::filter(&mut self.0, f.to_str().unwrap()),
_ => {}
}
}
}
impl SpecTransform<&Resize> for Photon {
fn transform(&mut self, op: &Resize) {
let img = match resize::ResizeType::from_i32(op.rtype).unwrap() {
resize::ResizeType::Normal => transform::resize(
&mut self.0,
op.width,
op.height,
resize::SampleFilter::from_i32(op.filter).unwrap().into(),
),
resize::ResizeType::SeamCarve => {
transform::seam_carve(&mut self.0, op.width, op.height)
}
};
self.0 = img;
}
}
impl SpecTransform<&Watermark> for Photon {
fn transform(&mut self, op: &Watermark) {
multiple::watermark(&mut self.0, &WATERMARK, op.x, op.y);
}
}
// The photon crate surprisingly provides no in-memory format conversion, so we implement it by hand
fn image_to_buf(img: PhotonImage, format: ImageOutputFormat) -> Vec<u8> {
let raw_pixels = img.get_raw_pixels();
let width = img.get_width();
let height = img.get_height();
let img_buffer = ImageBuffer::from_vec(width, height, raw_pixels).unwrap();
let dynimage = DynamicImage::ImageRgb8(img_buffer);
let mut buffer = Vec::with_capacity(32768);
dynimage.write_to(&mut buffer, format).unwrap();
buffer
}
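// A minimal end-to-end sketch using only items defined or imported above;
// the JPEG quality value is an arbitrary illustration.
#[allow(dead_code)]
fn process(data: Bytes, specs: &[Spec]) -> Result<Vec<u8>> {
    let mut engine = Photon::try_from(data)?;
    engine.apply(specs);
    Ok(engine.generate(ImageOutputFormat::Jpeg(85)))
}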
| 30.792308 | 95 | 0.595553 |
9bb4e191df7e2ab525d2c6713bda12ccfaba6b31 | 2,761 | use serde::Deserialize;
#[derive(Debug, Eq, PartialEq, Clone, Deserialize)]
pub struct Chip {
pub name: String,
pub family: String,
pub line: String,
pub cores: Vec<Core>,
pub memory: Vec<MemoryRegion>,
pub packages: Vec<Package>,
}
#[derive(Debug, Eq, PartialEq, Clone, Deserialize)]
pub struct MemoryRegion {
pub name: String,
pub kind: MemoryRegionKind,
pub address: u32,
pub size: u32,
}
#[derive(Debug, Eq, PartialEq, Clone, Deserialize)]
pub enum MemoryRegionKind {
#[serde(rename = "flash")]
Flash,
#[serde(rename = "ram")]
Ram,
}
#[derive(Debug, Eq, PartialEq, Clone, Deserialize)]
pub struct Core {
pub name: String,
pub peripherals: Vec<Peripheral>,
pub interrupts: Vec<Interrupt>,
pub dma_channels: Vec<DmaChannel>,
}
#[derive(Debug, Eq, PartialEq, Clone, Deserialize)]
pub struct Interrupt {
pub name: String,
pub number: u32,
}
#[derive(Debug, Eq, PartialEq, Clone, Deserialize)]
pub struct Package {
pub name: String,
pub package: String,
}
#[derive(Debug, Eq, PartialEq, Clone, Deserialize)]
pub struct Peripheral {
pub name: String,
pub address: u64,
#[serde(default)]
pub registers: Option<PeripheralRegisters>,
#[serde(default)]
pub rcc: Option<PeripheralRcc>,
#[serde(default)]
pub pins: Vec<PeripheralPin>,
#[serde(default)]
pub dma_channels: Vec<PeripheralDmaChannel>,
#[serde(default)]
pub interrupts: Vec<PeripheralInterrupt>,
}
#[derive(Debug, Eq, PartialEq, Clone, Deserialize)]
pub struct PeripheralInterrupt {
pub signal: String,
pub interrupt: String,
}
#[derive(Debug, Eq, PartialEq, Clone, Deserialize)]
pub struct PeripheralRcc {
pub clock: String,
#[serde(default)]
pub enable: Option<PeripheralRccRegister>,
#[serde(default)]
pub reset: Option<PeripheralRccRegister>,
}
#[derive(Debug, Eq, PartialEq, Clone, Deserialize)]
pub struct PeripheralRccRegister {
pub register: String,
pub field: String,
}
#[derive(Debug, Eq, PartialEq, Clone, Deserialize)]
pub struct PeripheralPin {
pub pin: String,
pub signal: String,
pub af: Option<String>,
}
#[derive(Debug, Eq, PartialEq, Clone, Deserialize)]
pub struct DmaChannel {
pub name: String,
pub dma: String,
pub channel: u32,
pub dmamux: Option<String>,
pub dmamux_channel: Option<u32>,
}
#[derive(Debug, Eq, PartialEq, Clone, Deserialize, Hash)]
pub struct PeripheralDmaChannel {
pub signal: String,
pub channel: Option<String>,
pub dmamux: Option<String>,
pub request: Option<u32>,
}
#[derive(Debug, Eq, PartialEq, Clone, Deserialize, Hash)]
pub struct PeripheralRegisters {
pub kind: String,
pub version: String,
pub block: String,
}
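// Hedged loading sketch: these types deserialize from the generator's chip
// descriptions. The JSON file format and the serde_json dependency are
// assumptions; only the `Chip` type itself is defined above.
//
//     fn load_chip(path: &std::path::Path) -> serde_json::Result<Chip> {
//         let text = std::fs::read_to_string(path).expect("readable chip file");
//         serde_json::from_str(&text)
//     }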
| 23.801724 | 57 | 0.677653 |
eb68908830093f3c4847ef39b7b90d66c1598914 | 2,643 | use std::time::Instant;
use crate::analyzer::analyze;
use crate::analyzer::ProgramData;
use crate::pass::hygiene::analyzer::HygieneAnalyzer;
use crate::pass::hygiene::analyzer::HygieneData;
use crate::util::now;
use swc_common::Mark;
use swc_common::SyntaxContext;
use swc_common::DUMMY_SP;
use swc_ecma_ast::*;
use swc_ecma_utils::ident::IdentLike;
use swc_ecma_visit::noop_visit_mut_type;
use swc_ecma_visit::VisitMut;
use swc_ecma_visit::VisitMutWith;
use swc_ecma_visit::VisitWith;
mod analyzer;
pub fn optimize_hygiene(m: &mut Module, top_level_mark: Mark) {
let data = analyze(&*m);
m.visit_mut_with(&mut hygiene_optimizer(data, top_level_mark))
}
/// Create a hygiene optimizer.
///
/// The hygiene optimizer removes span hygiene without renaming when it is safe to do so.
pub(crate) fn hygiene_optimizer(
data: ProgramData,
top_level_mark: Mark,
) -> impl 'static + VisitMut {
Optimizer {
data,
hygiene: Default::default(),
top_level_mark,
}
}
struct Optimizer {
data: ProgramData,
hygiene: HygieneData,
top_level_mark: Mark,
}
impl Optimizer {}
impl VisitMut for Optimizer {
noop_visit_mut_type!();
fn visit_mut_ident(&mut self, i: &mut Ident) {
if i.span.ctxt == SyntaxContext::empty() {
return;
}
if self.hygiene.preserved.contains(&i.to_id())
|| !self.hygiene.modified.contains(&i.to_id())
{
return;
}
i.span.ctxt = SyntaxContext::empty().apply_mark(self.top_level_mark);
}
fn visit_mut_member_expr(&mut self, n: &mut MemberExpr) {
n.obj.visit_mut_with(self);
if n.computed {
n.prop.visit_mut_with(self);
}
}
fn visit_mut_module(&mut self, n: &mut Module) {
log::info!("hygiene: Analyzing span hygiene");
let start = now();
let mut analyzer = HygieneAnalyzer {
data: &self.data,
hygiene: Default::default(),
top_level_mark: self.top_level_mark,
cur_scope: None,
};
n.visit_with(&Invalid { span: DUMMY_SP }, &mut analyzer);
self.hygiene = analyzer.hygiene;
if let Some(start) = start {
let end = Instant::now();
log::info!("hygiene: Span hygiene analysis took {:?}", end - start);
}
let start = now();
log::info!("hygiene: Optimizing span hygiene");
n.visit_mut_children_with(self);
if let Some(start) = start {
let end = Instant::now();
log::info!("hygiene: Span hygiene optimiation took {:?}", end - start);
}
}
}
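// Hedged usage sketch (a parsed `Module` and the minifier's top-level mark
// are assumed to come from the surrounding crate):
//
//     let top_level_mark = Mark::fresh(Mark::root());
//     optimize_hygiene(&mut module, top_level_mark);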
| 26.43 | 83 | 0.61975 |
7637e28eb0752a4dc202ec6aa02fede62a6f21a3 | 14,270 | //! # Task management
//!
//! Create a task management to leverage the tokio framework
//! in order to more finely organize and control the different
//! modules utilized in jormungandr.
//!
use futures::prelude::*;
use futures::stream::FuturesUnordered;
use thiserror::Error;
use tokio::runtime::{Handle, Runtime};
use tokio::task::JoinHandle;
use tracing::{span, Level, Span};
use tracing_futures::Instrument;
use std::error;
use std::fmt::Debug;
use std::future::Future;
use std::sync::mpsc::Sender;
use std::time::{Duration, Instant};
/// hold onto the different services created
pub struct Services {
services: Vec<Service>,
finish_listener: FuturesUnordered<JoinHandle<Result<(), Box<dyn error::Error + Send + Sync>>>>,
runtime: Runtime,
}
#[derive(Debug, Error)]
pub enum ServiceError {
#[error(
"service panicked: {}",
.0
.as_ref()
.map(|reason| reason.as_ref())
.unwrap_or("could not serialize the panic"),
)]
Panic(Option<String>),
#[error("service future cancelled")]
Cancelled,
#[error("service error")]
Service(#[source] Box<dyn error::Error>),
}
/// wrap up a service
///
/// A service will run with its own runtime system. It will be able
/// (if configured to do so) to spawn new async tasks that will share that
/// same runtime.
pub struct Service {
/// this is the name of the service task, useful for logging and
/// following activity of a given task within the app
name: &'static str,
/// provides us with information regarding the up time of the Service
/// this will allow us to monitor if a service has been restarted
/// without having to follow the log history of the service.
up_time: Instant,
}
/// the current future service information
///
/// retrieve the name, the up time, the tracing span and the handle
pub struct TokioServiceInfo {
name: &'static str,
up_time: Instant,
span: tracing::Span,
handle: Handle,
}
pub struct TaskMessageBox<Msg>(Sender<Msg>);
/// Input for the different task with input service
///
/// If `Shutdown` is passed on, it means either there are
/// no more inputs to read (the Senders have been dropped), or the
/// service has been required to shut down.
pub enum Input<Msg> {
/// the service has been required to shutdown
Shutdown,
/// input for the task
Input(Msg),
}
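// Sketch of the receive loop a service is expected to run (the `inputs`
// stream is an assumption; only the `Input` contract is defined here):
//
//     while let Some(input) = inputs.next().await {
//         match input {
//             Input::Shutdown => break,
//             Input::Input(msg) => handle(msg).await,
//         }
//     }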
impl Services {
/// create a new set of services
pub fn new() -> Self {
Services {
services: Vec::new(),
finish_listener: FuturesUnordered::new(),
runtime: Runtime::new().unwrap(),
}
}
/// Spawn the given Future in a new dedicated runtime
pub fn spawn_future<F, T>(&mut self, name: &'static str, f: F)
where
F: FnOnce(TokioServiceInfo) -> T,
F: Send + 'static,
T: Future<Output = ()> + Send + 'static,
{
let handle = self.runtime.handle().clone();
let now = Instant::now();
let tracing_span = span!(Level::TRACE, "service", kind = name);
let future_service_info = TokioServiceInfo {
name,
up_time: now,
span: tracing_span,
handle,
};
let span_parent = future_service_info.span.clone();
let handle = self.runtime.spawn(
async move {
f(future_service_info).await;
tracing::info!("service `{}` finished", name);
Ok::<_, std::convert::Infallible>(()).map_err(Into::into)
}
.instrument(span!(
parent: span_parent,
Level::TRACE,
"service",
kind = name
)),
);
self.finish_listener.push(handle);
let task = Service::new(name, now);
self.services.push(task);
}
/// Spawn the given Future in a new dedicated runtime
pub fn spawn_try_future<F, T, E>(&mut self, name: &'static str, f: F)
where
F: FnOnce(TokioServiceInfo) -> T,
F: Send + 'static,
T: Future<Output = Result<(), E>> + Send + 'static,
E: error::Error + Send + Sync + 'static,
{
let handle = self.runtime.handle().clone();
let now = Instant::now();
let tracing_span = span!(Level::TRACE, "service", kind = name);
let future_service_info = TokioServiceInfo {
name,
up_time: now,
span: tracing_span,
handle,
};
let parent_span = future_service_info.span.clone();
let handle = self.runtime.spawn(
async move {
let res = f(future_service_info).await;
if let Err(err) = &res {
tracing::error!(reason = %err.to_string(), "service finished with error");
} else {
tracing::info!("service `{}` finished successfully", name);
}
res.map_err(Into::into)
}
.instrument(span!(
parent: parent_span,
Level::TRACE,
"service",
kind = name
)),
);
self.finish_listener.push(handle);
let task = Service::new(name, now);
self.services.push(task);
}
    /// Select on all the started services. This function will block until the first service returns.
pub fn wait_any_finished(self) -> Result<(), ServiceError> {
let finish_listener = self.finish_listener;
let result = self
.runtime
.block_on(async move { finish_listener.into_future().await.0 });
match result {
// No services were started or some service exited successfully
None | Some(Ok(Ok(()))) => Ok(()),
// Error produced by a service
Some(Ok(Err(service_error))) => Err(ServiceError::Service(service_error)),
// A service panicked or was cancelled by the environment
Some(Err(join_error)) => {
if join_error.is_cancelled() {
Err(ServiceError::Cancelled)
} else if join_error.is_panic() {
let desc = join_error.into_panic().downcast_ref::<String>().cloned();
Err(ServiceError::Panic(desc))
} else {
unreachable!("JoinError is either Cancelled or Panic")
}
}
}
}
// Run the task to completion
pub fn block_on_task<F, Fut, T>(&mut self, name: &'static str, f: F) -> T
where
F: FnOnce(TokioServiceInfo) -> Fut,
Fut: Future<Output = T>,
{
let handle = self.runtime.handle().clone();
let now = Instant::now();
let tracing_span = span!(Level::TRACE, "service", kind = name);
let future_service_info = TokioServiceInfo {
name,
up_time: now,
span: tracing_span,
handle,
};
let parent_span = future_service_info.span.clone();
self.runtime
.block_on(f(future_service_info).instrument(span!(
parent: parent_span,
Level::TRACE,
"service",
kind = name
)))
}
}
impl Default for Services {
fn default() -> Self {
Self::new()
}
}
impl TokioServiceInfo {
/// get the time this service has been running since
#[inline]
pub fn up_time(&self) -> Duration {
Instant::now().duration_since(self.up_time)
}
/// get the name of this Service
#[inline]
pub fn name(&self) -> &'static str {
self.name
}
/// Access the service's handle
#[inline]
pub fn runtime_handle(&self) -> &Handle {
&self.handle
}
/// Access the parent service span
#[inline]
pub fn span(&self) -> &Span {
&self.span
}
/// spawn a std::future within the service's tokio handle
pub fn spawn<F>(&self, name: &'static str, future: F)
where
F: Future<Output = ()> + Send + 'static,
{
tracing::trace!("service `{}` spawning task `{}`", self.name, name);
self.handle
.spawn(future.instrument(span!(parent: &self.span, Level::TRACE, "task", kind = name)));
}
/// just like spawn but instead log an error on Result::Err
pub fn spawn_fallible<F, E>(&self, name: &'static str, future: F)
where
F: Send + 'static,
E: Debug,
F: Future<Output = Result<(), E>>,
{
tracing::trace!("service `{}` spawning task `{}`", self.name, name);
self.handle.spawn(
async move {
match future.await {
Ok(()) => tracing::trace!("task {} finished successfully", name),
Err(e) => {
tracing::error!(reason = ?e, "task {} finished with error", name)
}
}
}
.instrument(span!(parent: &self.span, Level::TRACE, "task", kind = name)),
);
}
/// just like spawn but add a timeout
pub fn timeout_spawn<F>(&self, name: &'static str, timeout: Duration, future: F)
where
F: Future<Output = ()> + Send + 'static,
{
tracing::trace!("spawning {}", name);
self.handle.spawn(
async move {
match tokio::time::timeout(timeout, future).await {
Err(_) => tracing::error!("task {} timed out", name),
Ok(()) => {}
};
}
.instrument(span!(parent: &self.span, Level::TRACE, "task", kind = name)),
);
}
    /// just like spawn_fallible but add a timeout
pub fn timeout_spawn_fallible<F, E>(&self, name: &'static str, timeout: Duration, future: F)
where
F: Send + 'static,
E: Debug,
F: Future<Output = Result<(), E>>,
{
tracing::trace!("spawning {}", name);
self.handle.spawn(
async move {
match tokio::time::timeout(timeout, future).await {
Err(_) => tracing::error!("task {} timed out", name),
Ok(Err(e)) => tracing::error!(reason = ?e, "task {} finished with error", name),
Ok(Ok(())) => {}
};
}
.instrument(span!(parent: &self.span, Level::TRACE, "task", kind = name)),
);
}
// Run the closure with the specified period on the handle
    // and execute the resulting future.
pub fn run_periodic<F, U>(&self, name: &'static str, period: Duration, mut f: F)
where
F: FnMut() -> U,
F: Send + 'static,
U: Future<Output = ()> + Send + 'static,
{
self.spawn(
name,
async move {
let mut interval = tokio::time::interval(period);
loop {
let t_now = Instant::now();
interval.tick().await;
let t_last = Instant::now();
let elapsed = t_last.duration_since(t_now);
if elapsed > period * 2 {
tracing::warn!(
period = ?period,
elapsed = ?elapsed,
"periodic task `{}` started late", name
);
}
f().await;
tracing::trace!(
triggered_at = ?t_now,
"periodic task `{}` finished successfully",
name
);
}
}
.instrument(span!(parent: &self.span, Level::TRACE, "task", kind = name)),
);
}
// Run the closure with the specified period on the handle
    // and execute the resulting fallible future.
    // If the future resolves to an Err, log it.
pub fn run_periodic_fallible<F, U, E>(&self, name: &'static str, period: Duration, mut f: F)
where
F: FnMut() -> U,
F: Send + 'static,
E: Debug,
U: Future<Output = Result<(), E>> + Send + 'static,
{
self.spawn(
name,
async move {
let mut interval = tokio::time::interval(period);
loop {
let t_now = Instant::now();
interval.tick().await;
let t_last = Instant::now();
let elapsed = t_last.duration_since(t_now);
if elapsed > period * 2 {
tracing::warn!(
period = ?period,
elapsed = ?elapsed,
"periodic task `{}` started late", name
);
}
match f().await {
Ok(()) => {
tracing::trace!(
triggered_at = ?t_now,
"periodic task `{}` finished successfully",
name,
);
}
Err(e) => {
tracing::error!(
triggered_at = ?t_now,
error = ?e,
"periodic task `{}` failed", name
);
}
};
}
}
.instrument(span!(parent: &self.span, Level::TRACE, "task", kind = name)),
);
}
}
impl Service {
/// get the time this service has been running since
#[inline]
pub fn up_time(&self) -> Duration {
Instant::now().duration_since(self.up_time)
}
/// get the name of this Service
#[inline]
pub fn name(&self) -> &'static str {
self.name
}
#[inline]
fn new(name: &'static str, now: Instant) -> Self {
Service { name, up_time: now }
}
}
impl<Msg> Clone for TaskMessageBox<Msg> {
fn clone(&self) -> Self {
TaskMessageBox(self.0.clone())
}
}
impl<Msg> TaskMessageBox<Msg> {
pub fn send_to(&self, a: Msg) {
self.0.send(a).unwrap()
}
}
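// Minimal end-to-end sketch of how the pieces above compose; the service
// body is illustrative, everything else is the API defined in this module.
#[allow(dead_code)]
fn example_usage() -> Result<(), ServiceError> {
    let mut services = Services::new();
    services.spawn_future("ticker", |info| async move {
        let mut interval = tokio::time::interval(Duration::from_secs(1));
        loop {
            interval.tick().await;
            tracing::trace!("`{}` up for {:?}", info.name(), info.up_time());
        }
    });
    services.wait_any_finished()
}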
| 32.579909 | 100 | 0.508339 |
e6ffe9cb690875a991a318f471742280f653793b | 3,431 | use structopt::StructOpt;
use fluvio_index::{PackageId, HttpAgent, MaybeVersion};
use crate::CliError;
use crate::install::{
fetch_latest_version, fetch_package_file, fluvio_extensions_dir, install_bin, install_println,
};
use crate::install::update::{
check_update_required, prompt_required_update, check_update_available, prompt_available_update,
};
#[derive(StructOpt, Debug)]
pub struct InstallOpt {
/// The ID of a package to install, e.g. "fluvio/fluvio-cloud".
package: PackageId<MaybeVersion>,
/// Used for testing. Specifies alternate package location, e.g. "test/"
#[structopt(hidden = true, long)]
prefix: Option<String>,
/// Install the latest prerelease rather than the latest release
///
/// If the package ID contains a version (e.g. `fluvio/fluvio:0.6.0`), this is ignored
#[structopt(long)]
develop: bool,
}
impl InstallOpt {
pub async fn process(self) -> Result<(), CliError> {
let agent = match &self.prefix {
Some(prefix) => HttpAgent::with_prefix(prefix)?,
None => HttpAgent::default(),
};
// Before any "install" type command, check if the CLI needs updating.
// This may be the case if the index schema has updated.
let require_update = check_update_required(&agent).await?;
if require_update {
prompt_required_update(&agent).await?;
return Ok(());
}
self.install_plugin(&agent).await?;
// After any "install" command, check if the CLI has an available update,
// i.e. one that is not required, but present.
let maybe_latest = check_update_available(&agent, false).await?;
if let Some(latest_version) = maybe_latest {
prompt_available_update(&latest_version);
}
Ok(())
}
async fn install_plugin(self, agent: &HttpAgent) -> Result<(), CliError> {
let target = fluvio_index::package_target()?;
// If a version is given in the package ID, use it. Otherwise, use latest
let id = match self.package.maybe_version() {
Some(version) => {
install_println(format!(
"⏳ Downloading package with provided version: {}...",
&self.package
));
let version = version.clone();
self.package.into_versioned(version)
}
None => {
let id = self.package;
install_println(format!(
"🎣 Fetching latest version for package: {}...",
&id
));
let version = fetch_latest_version(agent, &id, &target, self.develop).await?;
let id = id.into_versioned(version);
install_println(format!(
"⏳ Downloading package with latest version: {}...",
&id
));
id
}
};
// Download the package file from the package registry
let package_file = fetch_package_file(agent, &id, &target).await?;
install_println("🔑 Downloaded and verified package file");
// Install the package to the ~/.fluvio/bin/ dir
let fluvio_dir = fluvio_extensions_dir()?;
let package_path = fluvio_dir.join(id.name().as_str());
install_bin(&package_path, &package_file)?;
Ok(())
}
}
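// Illustrative CLI flows this subcommand implements (package ids are
// examples; the `install` subcommand name is an assumption of the wider CLI):
//
//     fluvio install fluvio/fluvio-cloud            # resolve latest release
//     fluvio install fluvio/fluvio-cloud:0.1.4      # use the pinned version
//     fluvio install --develop fluvio/fluvio-cloud  # latest prerelease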
| 36.5 | 99 | 0.588167 |
e44fc5243ab11256167c9e01417b576cc8a4bcd7 | 5,819 | #[doc = "Register `PUBLISH_LASTRX` reader"]
pub struct R(crate::R<PUBLISH_LASTRX_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<PUBLISH_LASTRX_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<PUBLISH_LASTRX_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<PUBLISH_LASTRX_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `PUBLISH_LASTRX` writer"]
pub struct W(crate::W<PUBLISH_LASTRX_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<PUBLISH_LASTRX_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<PUBLISH_LASTRX_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<PUBLISH_LASTRX_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `CHIDX` reader - DPPI channel that event LASTRX will publish to."]
pub struct CHIDX_R(crate::FieldReader<u8, u8>);
impl CHIDX_R {
#[inline(always)]
pub(crate) fn new(bits: u8) -> Self {
CHIDX_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for CHIDX_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `CHIDX` writer - DPPI channel that event LASTRX will publish to."]
pub struct CHIDX_W<'a> {
w: &'a mut W,
}
impl<'a> CHIDX_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !0xff) | (value as u32 & 0xff);
self.w
}
}
#[doc = "\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EN_A {
#[doc = "0: Disable publishing"]
DISABLED = 0,
#[doc = "1: Enable publishing"]
ENABLED = 1,
}
impl From<EN_A> for bool {
#[inline(always)]
fn from(variant: EN_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `EN` reader - "]
pub struct EN_R(crate::FieldReader<bool, EN_A>);
impl EN_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
EN_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> EN_A {
match self.bits {
false => EN_A::DISABLED,
true => EN_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `DISABLED`"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
**self == EN_A::DISABLED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
**self == EN_A::ENABLED
}
}
impl core::ops::Deref for EN_R {
type Target = crate::FieldReader<bool, EN_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `EN` writer - "]
pub struct EN_W<'a> {
w: &'a mut W,
}
impl<'a> EN_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: EN_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Disable publishing"]
#[inline(always)]
pub fn disabled(self) -> &'a mut W {
self.variant(EN_A::DISABLED)
}
#[doc = "Enable publishing"]
#[inline(always)]
pub fn enabled(self) -> &'a mut W {
self.variant(EN_A::ENABLED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 31)) | ((value as u32 & 0x01) << 31);
self.w
}
}
impl R {
#[doc = "Bits 0:7 - DPPI channel that event LASTRX will publish to."]
#[inline(always)]
pub fn chidx(&self) -> CHIDX_R {
CHIDX_R::new((self.bits & 0xff) as u8)
}
#[doc = "Bit 31"]
#[inline(always)]
pub fn en(&self) -> EN_R {
EN_R::new(((self.bits >> 31) & 0x01) != 0)
}
}
impl W {
#[doc = "Bits 0:7 - DPPI channel that event LASTRX will publish to."]
#[inline(always)]
pub fn chidx(&mut self) -> CHIDX_W {
CHIDX_W { w: self }
}
#[doc = "Bit 31"]
#[inline(always)]
pub fn en(&mut self) -> EN_W {
EN_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "Publish configuration for event LASTRX\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [publish_lastrx](index.html) module"]
pub struct PUBLISH_LASTRX_SPEC;
impl crate::RegisterSpec for PUBLISH_LASTRX_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [publish_lastrx::R](R) reader structure"]
impl crate::Readable for PUBLISH_LASTRX_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [publish_lastrx::W](W) writer structure"]
impl crate::Writable for PUBLISH_LASTRX_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets PUBLISH_LASTRX to value 0"]
impl crate::Resettable for PUBLISH_LASTRX_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
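// A minimal sketch (not part of the generated file), assuming an nRF-style
// PAC where this register lives on a UARTE register block (the peripheral
// path below is hypothetical): route the LASTRX event onto a DPPI channel.
//
// fn route_lastrx(uarte: &pac::uarte0::RegisterBlock, channel: u8) {
//     uarte
//         .publish_lastrx
//         .write(|w| unsafe { w.chidx().bits(channel) }.en().enabled());
// }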
| 29.388889 | 433 | 0.582059 |
33ca131a13794a54e6aa9be35533fc258b235fc5 | 619 | use clap::{app_from_crate, arg};
fn main() {
let matches = app_from_crate!()
.arg(
arg!(<MODE>)
.help("What mode to run the program in")
.possible_values(["fast", "slow"]),
)
.get_matches();
    // Note, it's safe to call expect() because the arg is required
match matches
.value_of("MODE")
.expect("'MODE' is required and parsing will fail if its missing")
{
"fast" => {
println!("Hare");
}
"slow" => {
println!("Tortoise");
}
_ => unreachable!(),
}
}
| 23.807692 | 74 | 0.473344 |
7615b14a14d50937a1abbc4501dfa5659493485b | 1,757 | use crate::{
alg::lkh::move_2_opt,
tour::{NodeRel, Tour, TourNode, UpdateTourError},
Scalar,
};
use super::types::SearchResult;
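/// Searches for a gainful 2-opt move starting from the tour edge
/// `(base, base_s)`.
///
/// `g0` is the length of the edge being removed. For each candidate
/// neighbour `cand` of `base_s`, the partial gain
/// `g1 = g0 - d(base_s, cand)` must be positive, and the closing gain
/// `g2 = g1 + d(cand_p, cand) - d(base, cand_p)` (where `cand_p` is the
/// predecessor of `cand`) decides whether the exchange shortens the tour.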
pub fn search_2_opt<T>(
tour: &mut T,
base: &TourNode,
base_s: &TourNode,
) -> Result<SearchResult, UpdateTourError>
where
T: Tour,
{
let g0 = tour.distance(base, base_s);
let mut _g2_best = Scalar::MIN;
let pair = None;
for cand in base_s.candidates() {
let g1 = g0 - tour.distance(base_s, cand);
if tour.relation(base_s, cand) != NodeRel::None || g1 <= 0. {
continue;
}
let cand_p = match tour.predecessor(cand) {
Some(node) => node,
None => return Err(UpdateTourError::NodeNotFound),
};
// g2
// let delta = tour.distance(&cand_p, cand) - tour.distance(base, &cand_p);
let g2 = g1 + tour.distance(&cand_p, cand) - tour.distance(base, &cand_p);
if g2 > 0. {
// gain criterion satisfied.
move_2_opt(tour, base, base_s, &cand_p, cand);
return Ok(SearchResult::Gainful(g2));
} else {
// Non-gainful move.
// if g2 > g2_best && is_excludable(&cand_p, cand) {
// g2_best = g2;
// pair = Some((cand_p, *cand));
// // check if t3 and t4 can be excluded
// }
}
}
if let Some((cand_1, cand_2)) = pair {
move_2_opt(tour, base, base_s, &cand_1, &cand_2);
return Ok(SearchResult::NonGainful(cand_1));
}
Err(UpdateTourError::SearchFailed)
}
pub fn search_3_opt<T>(
_tour: &mut T,
_head_1: &TourNode,
_tail_1: &TourNode,
) -> Result<SearchResult, UpdateTourError>
where
T: Tour,
{
todo!()
}
| 25.463768 | 83 | 0.549801 |
6ab913b03dcb9ada19c532bfe222313a008102dd | 1,623 | use chrono::NaiveDate;
use diesel::sql_types::{BigInt, Double, Integer, Nullable, Text};
use diesel::QueryableByName;
use serde::Serialize;
use typescript_definitions::TypeScriptify;
// Includes wine info for convenience
#[derive(Queryable, Serialize, TypeScriptify, Debug)]
#[serde(rename_all = "camelCase")]
pub struct RecentPurchase {
pub id: i32,
pub price: Option<f64>,
pub quantity: i32,
pub vintage: Option<i32>,
pub memo: Option<String>,
pub store: Option<String>,
#[ts(ts_type = "Date | null")]
pub date: Option<NaiveDate>,
pub wine_id: i32,
pub wine_name: Option<String>,
pub producer_id: i32,
pub producer: String,
pub region_id: i32,
pub region: String,
pub wine_type_id: i32,
pub wine_type: String,
}
#[derive(QueryableByName, Serialize, TypeScriptify, Debug)]
#[serde(rename_all = "camelCase")]
pub struct YearsPurchases {
#[sql_type = "Integer"]
pub year: i32,
#[sql_type = "BigInt"]
pub quantity: i64,
#[sql_type = "Nullable<Double>"]
pub total_price: Option<f64>,
#[sql_type = "Nullable<Double>"]
pub avg_price: Option<f64>,
}
#[derive(Serialize, TypeScriptify, Debug)]
#[serde(rename_all = "camelCase")]
pub struct TotalLiters {
pub total_liters: f64,
}
#[derive(Serialize, TypeScriptify, Debug)]
#[serde(rename_all = "camelCase")]
pub struct PurchaseCount {
pub count: i64,
}
#[derive(Serialize, QueryableByName, TypeScriptify, Debug)]
#[serde(rename_all = "camelCase")]
pub struct MostCommonPurchaseDate {
#[sql_type = "Nullable<Text>"]
pub most_common_purchase_date: Option<String>,
}
| 27.05 | 65 | 0.691312 |
e221b6a6a2acaa855a3b8e2297ff13167e80019a | 1,563 | // box1.rs
//
// At compile time, Rust needs to know how much space a type takes up. This becomes problematic
// for recursive types, where a value can have as part of itself another value of the same type.
// To get around the issue, we can use a `Box` - a smart pointer used to store data on the heap,
// which also allows us to wrap a recursive type.
//
// The recursive type we're implementing in this exercise is the `cons list` - a data structure
// frequently found in functional programming languages. Each item in a cons list contains two
// elements: the value of the current item and the next item. The last item is a value called `Nil`.
//
// Step 1: use a `Box` in the enum definition to make the code compile
// Step 2: create both empty and non-empty cons lists by replacing `unimplemented!()`
//
// Note: the tests should not be changed
//
// Execute `rustlings hint box1` for hints :)
#[derive(PartialEq, Debug)]
pub enum List {
Cons(i32, List),
Nil,
}
fn main() {
println!("This is an empty cons list: {:?}", create_empty_list());
println!(
"This is a non-empty cons list: {:?}",
create_non_empty_list()
);
}
pub fn create_empty_list() -> List {
    List::Nil
}
pub fn create_non_empty_list() -> List {
    List::Cons(1, Box::new(List::Nil))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_create_empty_list() {
assert_eq!(List::Nil, create_empty_list())
}
#[test]
fn test_create_non_empty_list() {
assert_ne!(create_empty_list(), create_non_empty_list())
}
}
| 28.418182 | 100 | 0.673065 |
1a044680ef114b974f21d3e82c5cca0e9b220167 | 6,296 | // Copyright 2019 Jared Samet
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! This module contains the "strategy choice" logic for which specific contractor
//! should be used for a given mini-contraction.
//!
//! In general, `DiagonalizationAndSummation` should be able to accommodate all singleton
//! contractions and `StackedTensordotGeneral` should be able to handle all pairs; however,
//! other trait implementations might be faster.
//!
//! The code here has some duplication and is probably not the most idiomatic way to accomplish this.
use crate::SizedContraction;
use std::collections::{HashMap, HashSet};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, Debug)]
pub enum SingletonMethod {
Identity,
Permutation,
Summation,
Diagonalization,
PermutationAndSummation,
DiagonalizationAndSummation,
}
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Copy, Clone, Debug)]
pub struct SingletonSummary {
num_summed_axes: usize,
num_diagonalized_axes: usize,
num_reordered_axes: usize,
}
impl SingletonSummary {
pub fn new(sc: &SizedContraction) -> Self {
assert_eq!(sc.contraction.operand_indices.len(), 1);
let output_indices = &sc.contraction.output_indices;
let input_indices = &sc.contraction.operand_indices[0];
SingletonSummary::from_indices(&input_indices, &output_indices)
}
fn from_indices(input_indices: &[char], output_indices: &[char]) -> Self {
let mut input_counts = HashMap::new();
for &c in input_indices.iter() {
*input_counts.entry(c).or_insert(0) += 1;
}
let num_summed_axes = input_counts.len() - output_indices.len();
let num_diagonalized_axes = input_counts.iter().filter(|(_, &v)| v > 1).count();
let num_reordered_axes = output_indices
.iter()
.zip(input_indices.iter())
.filter(|(&output_char, &input_char)| output_char != input_char)
.count();
SingletonSummary {
num_summed_axes,
num_diagonalized_axes,
num_reordered_axes,
}
}
pub fn get_strategy(&self) -> SingletonMethod {
match (
self.num_summed_axes,
self.num_diagonalized_axes,
self.num_reordered_axes,
) {
(0, 0, 0) => SingletonMethod::Identity,
(0, 0, _) => SingletonMethod::Permutation,
(_, 0, 0) => SingletonMethod::Summation,
(0, _, _) => SingletonMethod::Diagonalization,
(_, 0, _) => SingletonMethod::PermutationAndSummation,
(_, _, _) => SingletonMethod::DiagonalizationAndSummation,
}
}
}
#[allow(dead_code)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug, Copy, Clone)]
pub enum PairMethod {
HadamardProduct,
HadamardProductGeneral,
TensordotFixedPosition,
TensordotGeneral,
ScalarMatrixProduct,
ScalarMatrixProductGeneral,
MatrixScalarProduct,
MatrixScalarProductGeneral,
BroadcastProductGeneral,
StackedTensordotGeneral,
}
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Debug, Clone)]
pub struct PairSummary {
num_stacked_axes: usize,
num_lhs_outer_axes: usize,
num_rhs_outer_axes: usize,
num_contracted_axes: usize,
}
impl PairSummary {
pub fn new(sc: &SizedContraction) -> Self {
assert_eq!(sc.contraction.operand_indices.len(), 2);
let output_indices = &sc.contraction.output_indices;
let lhs_indices = &sc.contraction.operand_indices[0];
let rhs_indices = &sc.contraction.operand_indices[1];
PairSummary::from_indices(&lhs_indices, &rhs_indices, &output_indices)
}
fn from_indices(lhs_indices: &[char], rhs_indices: &[char], output_indices: &[char]) -> Self {
let lhs_uniques: HashSet<char> = lhs_indices.iter().cloned().collect();
let rhs_uniques: HashSet<char> = rhs_indices.iter().cloned().collect();
let output_uniques: HashSet<char> = output_indices.iter().cloned().collect();
assert_eq!(lhs_indices.len(), lhs_uniques.len());
assert_eq!(rhs_indices.len(), rhs_uniques.len());
assert_eq!(output_indices.len(), output_uniques.len());
let lhs_and_rhs: HashSet<char> = lhs_uniques.intersection(&rhs_uniques).cloned().collect();
let stacked: HashSet<char> = lhs_and_rhs.intersection(&output_uniques).cloned().collect();
let num_stacked_axes = stacked.len();
let num_contracted_axes = lhs_and_rhs.len() - num_stacked_axes;
let num_lhs_outer_axes = lhs_uniques.len() - num_stacked_axes - num_contracted_axes;
let num_rhs_outer_axes = rhs_uniques.len() - num_stacked_axes - num_contracted_axes;
PairSummary {
num_stacked_axes,
num_lhs_outer_axes,
num_rhs_outer_axes,
num_contracted_axes,
}
}
pub fn get_strategy(&self) -> PairMethod {
match (
self.num_contracted_axes,
self.num_lhs_outer_axes,
self.num_rhs_outer_axes,
self.num_stacked_axes,
) {
(0, 0, 0, _) => PairMethod::HadamardProductGeneral,
(0, 0, _, 0) => PairMethod::ScalarMatrixProductGeneral,
(0, _, 0, 0) => PairMethod::MatrixScalarProductGeneral,
// This contractor works, but appears to be slower
// than StackedTensordotGeneral
// (0, _, _, _) => PairMethod::BroadcastProductGeneral,
(_, _, _, 0) => PairMethod::TensordotGeneral,
(_, _, _, _) => PairMethod::StackedTensordotGeneral,
}
}
}
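// A minimal sketch (not from the original source): tests exercising the
// strategy choice described in the module docs, using einsum-style index
// lists ("ii->" is a trace, "ij,jk->ik" is a matrix product).
#[cfg(test)]
mod strategy_sketch_tests {
    use super::*;
    #[test]
    fn singleton_trace_needs_diagonalization_and_summation() {
        // One summed axis and one diagonalized axis.
        let summary = SingletonSummary::from_indices(&['i', 'i'], &[]);
        assert!(matches!(
            summary.get_strategy(),
            SingletonMethod::DiagonalizationAndSummation
        ));
    }
    #[test]
    fn pair_matmul_is_plain_tensordot() {
        // `j` is contracted, `i` and `k` are outer axes, nothing is stacked.
        let summary = PairSummary::from_indices(&['i', 'j'], &['j', 'k'], &['i', 'k']);
        assert!(matches!(summary.get_strategy(), PairMethod::TensordotGeneral));
    }
}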
| 36.818713 | 101 | 0.661531 |
267008a9fb3f084161302035a235c4af7e0d7269 | 281,913 | use std::collections::HashMap;
use std::cell::RefCell;
use std::default::Default;
use std::collections::BTreeMap;
use serde_json as json;
use std::io;
use std::fs;
use std::mem;
use std::thread::sleep;
use crate::client;
// ##############
// UTILITIES ###
// ############
/// Identifies an OAuth2 authorization scope.
/// A scope is needed when requesting an
/// [authorization token](https://developers.google.com/youtube/v3/guides/authentication).
#[derive(PartialEq, Eq, Hash)]
pub enum Scope {
/// See, edit, configure, and delete your Google Cloud data and see the email address for your Google Account.
CloudPlatform,
/// View and manage your Google Cloud Datastore data
Full,
}
impl AsRef<str> for Scope {
fn as_ref(&self) -> &str {
match *self {
Scope::CloudPlatform => "https://www.googleapis.com/auth/cloud-platform",
Scope::Full => "https://www.googleapis.com/auth/datastore",
}
}
}
impl Default for Scope {
fn default() -> Scope {
Scope::Full
}
}
// ########
// HUB ###
// ######
/// Central instance to access all Datastore related resource activities
///
/// # Examples
///
/// Instantiate a new hub
///
/// ```test_harness,no_run
/// extern crate hyper;
/// extern crate hyper_rustls;
/// extern crate google_datastore1 as datastore1;
/// use datastore1::api::GoogleDatastoreAdminV1Index;
/// use datastore1::{Result, Error};
/// # async fn dox() {
/// use std::default::Default;
/// use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
///
/// // Get an ApplicationSecret instance by some means. It contains the `client_id` and
/// // `client_secret`, among other things.
/// let secret: oauth2::ApplicationSecret = Default::default();
/// // Instantiate the authenticator. It will choose a suitable authentication flow for you,
/// // unless you replace `None` with the desired Flow.
/// // Provide your own `AuthenticatorDelegate` to adjust the way it operates and get feedback about
/// // what's going on. You probably want to bring in your own `TokenStorage` to persist tokens and
/// // retrieve them from storage.
/// let auth = oauth2::InstalledFlowAuthenticator::builder(
/// secret,
/// oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// ).build().await.unwrap();
/// let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = GoogleDatastoreAdminV1Index::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().indexes_create(req, "projectId")
/// .doit().await;
///
/// match result {
/// Err(e) => match e {
/// // The Error enum provides details about what exactly happened.
/// // You can also just use its `Debug`, `Display` or `Error` traits
/// Error::HttpError(_)
/// |Error::Io(_)
/// |Error::MissingAPIKey
/// |Error::MissingToken(_)
/// |Error::Cancelled
/// |Error::UploadSizeLimitExceeded(_, _)
/// |Error::Failure(_)
/// |Error::BadRequest(_)
/// |Error::FieldClash(_)
/// |Error::JsonDecodeError(_, _) => println!("{}", e),
/// },
/// Ok(res) => println!("Success: {:?}", res),
/// }
/// # }
/// ```
#[derive(Clone)]
pub struct Datastore<> {
pub client: hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>,
pub auth: oauth2::authenticator::Authenticator<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>>,
_user_agent: String,
_base_url: String,
_root_url: String,
}
impl<'a, > client::Hub for Datastore<> {}
impl<'a, > Datastore<> {
pub fn new(client: hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>, authenticator: oauth2::authenticator::Authenticator<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>>) -> Datastore<> {
Datastore {
client,
auth: authenticator,
_user_agent: "google-api-rust-client/3.0.0".to_string(),
_base_url: "https://datastore.googleapis.com/".to_string(),
_root_url: "https://datastore.googleapis.com/".to_string(),
}
}
pub fn projects(&'a self) -> ProjectMethods<'a> {
ProjectMethods { hub: &self }
}
/// Set the user-agent header field to use in all requests to the server.
/// It defaults to `google-api-rust-client/3.0.0`.
///
/// Returns the previously set user-agent.
pub fn user_agent(&mut self, agent_name: String) -> String {
mem::replace(&mut self._user_agent, agent_name)
}
/// Set the base url to use in all requests to the server.
/// It defaults to `https://datastore.googleapis.com/`.
///
/// Returns the previously set base url.
pub fn base_url(&mut self, new_base_url: String) -> String {
mem::replace(&mut self._base_url, new_base_url)
}
/// Set the root url to use in all requests to the server.
/// It defaults to `https://datastore.googleapis.com/`.
///
/// Returns the previously set root url.
pub fn root_url(&mut self, new_root_url: String) -> String {
mem::replace(&mut self._root_url, new_root_url)
}
}
// ############
// SCHEMAS ###
// ##########
/// The request for Datastore.AllocateIds.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [allocate ids projects](ProjectAllocateIdCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct AllocateIdsRequest {
/// Required. A list of keys with incomplete key paths for which to allocate IDs. No key may be reserved/read-only.
pub keys: Option<Vec<Key>>,
}
impl client::RequestValue for AllocateIdsRequest {}
/// The response for Datastore.AllocateIds.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [allocate ids projects](ProjectAllocateIdCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct AllocateIdsResponse {
/// The keys specified in the request (in the same order), each with its key path completed with a newly allocated ID.
pub keys: Option<Vec<Key>>,
}
impl client::ResponseResult for AllocateIdsResponse {}
/// An array value.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ArrayValue {
/// Values in the array. The order of values in an array is preserved as long as all values have identical settings for 'exclude_from_indexes'.
pub values: Option<Vec<Value>>,
}
impl client::Part for ArrayValue {}
/// The request for Datastore.BeginTransaction.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [begin transaction projects](ProjectBeginTransactionCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct BeginTransactionRequest {
/// Options for a new transaction.
#[serde(rename="transactionOptions")]
pub transaction_options: Option<TransactionOptions>,
}
impl client::RequestValue for BeginTransactionRequest {}
/// The response for Datastore.BeginTransaction.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [begin transaction projects](ProjectBeginTransactionCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct BeginTransactionResponse {
/// The transaction identifier (always present).
pub transaction: Option<String>,
}
impl client::ResponseResult for BeginTransactionResponse {}
/// The request for Datastore.Commit.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [commit projects](ProjectCommitCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct CommitRequest {
/// The type of commit to perform. Defaults to `TRANSACTIONAL`.
pub mode: Option<String>,
/// The mutations to perform. When mode is `TRANSACTIONAL`, mutations affecting a single entity are applied in order. The following sequences of mutations affecting a single entity are not permitted in a single `Commit` request: - `insert` followed by `insert` - `update` followed by `insert` - `upsert` followed by `insert` - `delete` followed by `update` When mode is `NON_TRANSACTIONAL`, no two mutations may affect a single entity.
pub mutations: Option<Vec<Mutation>>,
/// The identifier of the transaction associated with the commit. A transaction identifier is returned by a call to Datastore.BeginTransaction.
pub transaction: Option<String>,
}
impl client::RequestValue for CommitRequest {}
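// A minimal sketch (not part of the generated API): assembling a
// transactional commit with a single upsert. The transaction token would
// come from a prior Datastore.BeginTransaction call.
#[allow(dead_code)]
fn example_commit(txn: String, entity: Entity) -> CommitRequest {
    CommitRequest {
        mode: Some("TRANSACTIONAL".to_string()),
        mutations: Some(vec![Mutation {
            upsert: Some(entity),
            ..Default::default()
        }]),
        transaction: Some(txn),
    }
}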
/// The response for Datastore.Commit.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [commit projects](ProjectCommitCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct CommitResponse {
/// The number of index entries updated during the commit, or zero if none were updated.
#[serde(rename="indexUpdates")]
pub index_updates: Option<i32>,
/// The result of performing the mutations. The i-th mutation result corresponds to the i-th mutation in the request.
#[serde(rename="mutationResults")]
pub mutation_results: Option<Vec<MutationResult>>,
}
impl client::ResponseResult for CommitResponse {}
/// A filter that merges multiple other filters using the given operator.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct CompositeFilter {
/// The list of filters to combine. Must contain at least one filter.
pub filters: Option<Vec<Filter>>,
/// The operator for combining multiple filters.
pub op: Option<String>,
}
impl client::Part for CompositeFilter {}
/// A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is an empty JSON object `{}`.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [operations cancel projects](ProjectOperationCancelCall) (response)
/// * [operations delete projects](ProjectOperationDeleteCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Empty { _never_set: Option<bool> }
impl client::ResponseResult for Empty {}
/// A Datastore data object. An entity is limited to 1 megabyte when stored. That _roughly_ corresponds to a limit of 1 megabyte for the serialized form of this message.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Entity {
/// The entity's key. An entity must have a key, unless otherwise documented (for example, an entity in `Value.entity_value` may have no key). An entity's kind is its key path's last element's kind, or null if it has no key.
pub key: Option<Key>,
/// The entity's properties. The map's keys are property names. A property name matching regex `__.*__` is reserved. A reserved property name is forbidden in certain documented contexts. The name must not contain more than 500 characters. The name cannot be `""`.
pub properties: Option<HashMap<String, Value>>,
}
impl client::Part for Entity {}
/// The result of fetching an entity from Datastore.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct EntityResult {
/// A cursor that points to the position after the result entity. Set only when the `EntityResult` is part of a `QueryResultBatch` message.
pub cursor: Option<String>,
/// The resulting entity.
pub entity: Option<Entity>,
/// The version of the entity, a strictly positive number that monotonically increases with changes to the entity. This field is set for `FULL` entity results. For missing entities in `LookupResponse`, this is the version of the snapshot that was used to look up the entity, and it is always set except for eventually consistent reads.
pub version: Option<String>,
}
impl client::Part for EntityResult {}
/// A holder for any type of filter.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Filter {
/// A composite filter.
#[serde(rename="compositeFilter")]
pub composite_filter: Option<CompositeFilter>,
/// A filter on a property.
#[serde(rename="propertyFilter")]
pub property_filter: Option<PropertyFilter>,
}
impl client::Part for Filter {}
/// Identifies a subset of entities in a project. This is specified as combinations of kinds and namespaces (either or both of which may be all, as described in the following examples). Example usage: Entire project: kinds=[], namespace_ids=[] Kinds Foo and Bar in all namespaces: kinds=['Foo', 'Bar'], namespace_ids=[] Kinds Foo and Bar only in the default namespace: kinds=['Foo', 'Bar'], namespace_ids=[''] Kinds Foo and Bar in both the default and Baz namespaces: kinds=['Foo', 'Bar'], namespace_ids=['', 'Baz'] The entire Baz namespace: kinds=[], namespace_ids=['Baz']
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct GoogleDatastoreAdminV1EntityFilter {
/// If empty, then this represents all kinds.
pub kinds: Option<Vec<String>>,
/// An empty list represents all namespaces. This is the preferred usage for projects that don't use namespaces. An empty string element represents the default namespace. This should be used if the project has data in non-default namespaces, but doesn't want to include them. Each namespace in this list must be unique.
#[serde(rename="namespaceIds")]
pub namespace_ids: Option<Vec<String>>,
}
impl client::Part for GoogleDatastoreAdminV1EntityFilter {}
/// The request for google.datastore.admin.v1.DatastoreAdmin.ExportEntities.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [export projects](ProjectExportCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct GoogleDatastoreAdminV1ExportEntitiesRequest {
/// Description of what data from the project is included in the export.
#[serde(rename="entityFilter")]
pub entity_filter: Option<GoogleDatastoreAdminV1EntityFilter>,
/// Client-assigned labels.
pub labels: Option<HashMap<String, String>>,
/// Required. Location for the export metadata and data files. The full resource URL of the external storage location. Currently, only Google Cloud Storage is supported. So output_url_prefix should be of the form: `gs://BUCKET_NAME[/NAMESPACE_PATH]`, where `BUCKET_NAME` is the name of the Cloud Storage bucket and `NAMESPACE_PATH` is an optional Cloud Storage namespace path (this is not a Cloud Datastore namespace). For more information about Cloud Storage namespace paths, see [Object name considerations](https://cloud.google.com/storage/docs/naming#object-considerations). The resulting files will be nested deeper than the specified URL prefix. The final output URL will be provided in the google.datastore.admin.v1.ExportEntitiesResponse.output_url field. That value should be used for subsequent ImportEntities operations. By nesting the data files deeper, the same Cloud Storage bucket can be used in multiple ExportEntities operations without conflict.
#[serde(rename="outputUrlPrefix")]
pub output_url_prefix: Option<String>,
}
impl client::RequestValue for GoogleDatastoreAdminV1ExportEntitiesRequest {}
/// The request for google.datastore.admin.v1.DatastoreAdmin.ImportEntities.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [import projects](ProjectImportCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct GoogleDatastoreAdminV1ImportEntitiesRequest {
/// Optionally specify which kinds/namespaces are to be imported. If provided, the list must be a subset of the EntityFilter used in creating the export, otherwise a FAILED_PRECONDITION error will be returned. If no filter is specified then all entities from the export are imported.
#[serde(rename="entityFilter")]
pub entity_filter: Option<GoogleDatastoreAdminV1EntityFilter>,
/// Required. The full resource URL of the external storage location. Currently, only Google Cloud Storage is supported. So input_url should be of the form: `gs://BUCKET_NAME[/NAMESPACE_PATH]/OVERALL_EXPORT_METADATA_FILE`, where `BUCKET_NAME` is the name of the Cloud Storage bucket, `NAMESPACE_PATH` is an optional Cloud Storage namespace path (this is not a Cloud Datastore namespace), and `OVERALL_EXPORT_METADATA_FILE` is the metadata file written by the ExportEntities operation. For more information about Cloud Storage namespace paths, see [Object name considerations](https://cloud.google.com/storage/docs/naming#object-considerations). For more information, see google.datastore.admin.v1.ExportEntitiesResponse.output_url.
#[serde(rename="inputUrl")]
pub input_url: Option<String>,
/// Client-assigned labels.
pub labels: Option<HashMap<String, String>>,
}
impl client::RequestValue for GoogleDatastoreAdminV1ImportEntitiesRequest {}
/// Datastore composite index definition.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [indexes create projects](ProjectIndexeCreateCall) (request)
/// * [indexes get projects](ProjectIndexeGetCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct GoogleDatastoreAdminV1Index {
/// Required. The index's ancestor mode. Must not be ANCESTOR_MODE_UNSPECIFIED.
pub ancestor: Option<String>,
/// Output only. The resource ID of the index.
#[serde(rename="indexId")]
pub index_id: Option<String>,
/// Required. The entity kind to which this index applies.
pub kind: Option<String>,
/// Output only. Project ID.
#[serde(rename="projectId")]
pub project_id: Option<String>,
/// Required. An ordered sequence of property names and their index attributes.
pub properties: Option<Vec<GoogleDatastoreAdminV1IndexedProperty>>,
/// Output only. The state of the index.
pub state: Option<String>,
}
impl client::RequestValue for GoogleDatastoreAdminV1Index {}
impl client::ResponseResult for GoogleDatastoreAdminV1Index {}
/// A property of an index.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct GoogleDatastoreAdminV1IndexedProperty {
/// Required. The indexed property's direction. Must not be DIRECTION_UNSPECIFIED.
pub direction: Option<String>,
/// Required. The property name to index.
pub name: Option<String>,
}
impl client::Part for GoogleDatastoreAdminV1IndexedProperty {}
/// The response for google.datastore.admin.v1.DatastoreAdmin.ListIndexes.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [indexes list projects](ProjectIndexeListCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct GoogleDatastoreAdminV1ListIndexesResponse {
/// The indexes.
pub indexes: Option<Vec<GoogleDatastoreAdminV1Index>>,
/// The standard List next-page token.
#[serde(rename="nextPageToken")]
pub next_page_token: Option<String>,
}
impl client::ResponseResult for GoogleDatastoreAdminV1ListIndexesResponse {}
/// The response message for Operations.ListOperations.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [operations list projects](ProjectOperationListCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct GoogleLongrunningListOperationsResponse {
/// The standard List next-page token.
#[serde(rename="nextPageToken")]
pub next_page_token: Option<String>,
/// A list of operations that matches the specified filter in the request.
pub operations: Option<Vec<GoogleLongrunningOperation>>,
}
impl client::ResponseResult for GoogleLongrunningListOperationsResponse {}
/// This resource represents a long-running operation that is the result of a network API call.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [indexes create projects](ProjectIndexeCreateCall) (response)
/// * [indexes delete projects](ProjectIndexeDeleteCall) (response)
/// * [operations get projects](ProjectOperationGetCall) (response)
/// * [export projects](ProjectExportCall) (response)
/// * [import projects](ProjectImportCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct GoogleLongrunningOperation {
/// If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
pub done: Option<bool>,
/// The error result of the operation in case of failure or cancellation.
pub error: Option<Status>,
/// Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
pub metadata: Option<HashMap<String, serde_json::Value>>,
/// The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
pub name: Option<String>,
/// The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
pub response: Option<HashMap<String, serde_json::Value>>,
}
impl client::ResponseResult for GoogleLongrunningOperation {}
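// A minimal sketch (not part of the generated API): interpreting a polled
// operation. Per the field docs, `done` plus `error`/`response` determine
// the outcome.
#[allow(dead_code)]
fn operation_outcome(op: &GoogleLongrunningOperation) -> &'static str {
    match (op.done, &op.error) {
        (Some(true), Some(_)) => "finished with an error; inspect `error`",
        (Some(true), None) => "finished successfully; inspect `response`",
        _ => "still running; poll again later",
    }
}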
/// A [GQL query](https://cloud.google.com/datastore/docs/apis/gql/gql_reference).
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct GqlQuery {
/// When false, the query string must not contain any literals and instead must bind all values. For example, `SELECT * FROM Kind WHERE a = 'string literal'` is not allowed, while `SELECT * FROM Kind WHERE a = @value` is.
#[serde(rename="allowLiterals")]
pub allow_literals: Option<bool>,
    /// For each non-reserved named binding site in the query string, there must be a named parameter with that name, but not necessarily the inverse. Key must match regex `[A-Za-z_$][A-Za-z_$0-9]*`, must not match regex `__.*__`, and must not be `""`.
#[serde(rename="namedBindings")]
pub named_bindings: Option<HashMap<String, GqlQueryParameter>>,
/// Numbered binding site @1 references the first numbered parameter, effectively using 1-based indexing, rather than the usual 0. For each binding site numbered i in `query_string`, there must be an i-th numbered parameter. The inverse must also be true.
#[serde(rename="positionalBindings")]
pub positional_bindings: Option<Vec<GqlQueryParameter>>,
/// A string of the format described [here](https://cloud.google.com/datastore/docs/apis/gql/gql_reference).
#[serde(rename="queryString")]
pub query_string: Option<String>,
}
impl client::Part for GqlQuery {}
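// A minimal sketch (not part of the generated API): a GQL query with one
// named binding. The kind and property names are hypothetical, and the
// parameter `Value` is left at its default (assuming `Value` derives
// `Default` like the other schema types in this module).
#[allow(dead_code)]
fn example_gql_query() -> GqlQuery {
    let mut bindings = HashMap::new();
    bindings.insert(
        "owner".to_string(),
        GqlQueryParameter {
            value: Some(Value::default()), // fill in the actual parameter value
            cursor: None,
        },
    );
    GqlQuery {
        query_string: Some("SELECT * FROM Task WHERE owner = @owner".to_string()),
        allow_literals: Some(false),
        named_bindings: Some(bindings),
        positional_bindings: None,
    }
}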
/// A binding parameter for a GQL query.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct GqlQueryParameter {
/// A query cursor. Query cursors are returned in query result batches.
pub cursor: Option<String>,
/// A value parameter.
pub value: Option<Value>,
}
impl client::Part for GqlQueryParameter {}
/// A unique identifier for an entity. If a key's partition ID or any of its path kinds or names are reserved/read-only, the key is reserved/read-only. A reserved/read-only key is forbidden in certain documented contexts.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Key {
/// Entities are partitioned into subsets, currently identified by a project ID and namespace ID. Queries are scoped to a single partition.
#[serde(rename="partitionId")]
pub partition_id: Option<PartitionId>,
/// The entity path. An entity path consists of one or more elements composed of a kind and a string or numerical identifier, which identify entities. The first element identifies a _root entity_, the second element identifies a _child_ of the root entity, the third element identifies a child of the second entity, and so forth. The entities identified by all prefixes of the path are called the element's _ancestors_. An entity path is always fully complete: *all* of the entity's ancestors are required to be in the path along with the entity identifier itself. The only exception is that in some documented cases, the identifier in the last path element (for the entity) itself may be omitted. For example, the last path element of the key of `Mutation.insert` may have no identifier. A path can never be empty, and a path can have at most 100 elements.
pub path: Option<Vec<PathElement>>,
}
impl client::Part for Key {}
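// A minimal sketch (not part of the generated API): a complete key for a
// `Task` entity under a `User` ancestor. Kinds, names, and IDs here are
// hypothetical.
#[allow(dead_code)]
fn example_task_key(project: &str) -> Key {
    Key {
        partition_id: Some(PartitionId {
            project_id: Some(project.to_string()),
            namespace_id: None,
        }),
        path: Some(vec![
            PathElement {
                kind: Some("User".to_string()),
                name: Some("alice".to_string()),
                id: None,
            },
            PathElement {
                kind: Some("Task".to_string()),
                id: Some("42".to_string()),
                name: None,
            },
        ]),
    }
}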
/// A representation of a kind.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct KindExpression {
/// The name of the kind.
pub name: Option<String>,
}
impl client::Part for KindExpression {}
/// An object that represents a latitude/longitude pair. This is expressed as a pair of doubles to represent degrees latitude and degrees longitude. Unless specified otherwise, this object must conform to the WGS84 standard. Values must be within normalized ranges.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct LatLng {
/// The latitude in degrees. It must be in the range [-90.0, +90.0].
pub latitude: Option<f64>,
/// The longitude in degrees. It must be in the range [-180.0, +180.0].
pub longitude: Option<f64>,
}
impl client::Part for LatLng {}
/// The request for Datastore.Lookup.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [lookup projects](ProjectLookupCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct LookupRequest {
/// Required. Keys of entities to look up.
pub keys: Option<Vec<Key>>,
/// The options for this lookup request.
#[serde(rename="readOptions")]
pub read_options: Option<ReadOptions>,
}
impl client::RequestValue for LookupRequest {}
/// The response for Datastore.Lookup.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [lookup projects](ProjectLookupCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct LookupResponse {
/// A list of keys that were not looked up due to resource constraints. The order of results in this field is undefined and has no relation to the order of the keys in the input.
pub deferred: Option<Vec<Key>>,
/// Entities found as `ResultType.FULL` entities. The order of results in this field is undefined and has no relation to the order of the keys in the input.
pub found: Option<Vec<EntityResult>>,
/// Entities not found as `ResultType.KEY_ONLY` entities. The order of results in this field is undefined and has no relation to the order of the keys in the input.
pub missing: Option<Vec<EntityResult>>,
}
impl client::ResponseResult for LookupResponse {}
/// A mutation to apply to an entity.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Mutation {
/// The version of the entity that this mutation is being applied to. If this does not match the current version on the server, the mutation conflicts.
#[serde(rename="baseVersion")]
pub base_version: Option<String>,
/// The key of the entity to delete. The entity may or may not already exist. Must have a complete key path and must not be reserved/read-only.
pub delete: Option<Key>,
/// The entity to insert. The entity must not already exist. The entity key's final path element may be incomplete.
pub insert: Option<Entity>,
/// The entity to update. The entity must already exist. Must have a complete key path.
pub update: Option<Entity>,
/// The entity to upsert. The entity may or may not already exist. The entity key's final path element may be incomplete.
pub upsert: Option<Entity>,
}
impl client::Part for Mutation {}
/// The result of applying a mutation.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct MutationResult {
/// Whether a conflict was detected for this mutation. Always false when a conflict detection strategy field is not set in the mutation.
#[serde(rename="conflictDetected")]
pub conflict_detected: Option<bool>,
/// The automatically allocated key. Set only when the mutation allocated a key.
pub key: Option<Key>,
/// The version of the entity on the server after processing the mutation. If the mutation doesn't change anything on the server, then the version will be the version of the current entity or, if no entity is present, a version that is strictly greater than the version of any previous entity and less than the version of any possible future entity.
pub version: Option<String>,
}
impl client::Part for MutationResult {}
/// A partition ID identifies a grouping of entities. The grouping is always by project and namespace, however the namespace ID may be empty. A partition ID contains several dimensions: project ID and namespace ID. Partition dimensions: - May be `""`. - Must be valid UTF-8 bytes. - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` If the value of any dimension matches regex `__.*__`, the partition is reserved/read-only. A reserved/read-only partition ID is forbidden in certain documented contexts. Foreign partition IDs (in which the project ID does not match the context project ID ) are discouraged. Reads and writes of foreign partition IDs may fail if the project is not in an active state.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct PartitionId {
/// If not empty, the ID of the namespace to which the entities belong.
#[serde(rename="namespaceId")]
pub namespace_id: Option<String>,
/// The ID of the project to which the entities belong.
#[serde(rename="projectId")]
pub project_id: Option<String>,
}
impl client::Part for PartitionId {}
/// A (kind, ID/name) pair used to construct a key path. If either name or ID is set, the element is complete. If neither is set, the element is incomplete.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct PathElement {
/// The auto-allocated ID of the entity. Never equal to zero. Values less than zero are discouraged and may not be supported in the future.
pub id: Option<String>,
/// The kind of the entity. A kind matching regex `__.*__` is reserved/read-only. A kind must not contain more than 1500 bytes when UTF-8 encoded. Cannot be `""`.
pub kind: Option<String>,
/// The name of the entity. A name matching regex `__.*__` is reserved/read-only. A name must not be more than 1500 bytes when UTF-8 encoded. Cannot be `""`.
pub name: Option<String>,
}
impl client::Part for PathElement {}
/// A representation of a property in a projection.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Projection {
/// The property to project.
pub property: Option<PropertyReference>,
}
impl client::Part for Projection {}
/// A filter on a specific property.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct PropertyFilter {
/// The operator to filter by.
pub op: Option<String>,
/// The property to filter by.
pub property: Option<PropertyReference>,
/// The value to compare the property to.
pub value: Option<Value>,
}
impl client::Part for PropertyFilter {}
/// The desired order for a specific property.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct PropertyOrder {
/// The direction to order by. Defaults to `ASCENDING`.
pub direction: Option<String>,
/// The property to order by.
pub property: Option<PropertyReference>,
}
impl client::Part for PropertyOrder {}
/// A reference to a property relative to the kind expressions.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct PropertyReference {
/// The name of the property. If name includes "."s, it may be interpreted as a property name path.
pub name: Option<String>,
}
impl client::Part for PropertyReference {}
/// A query for entities.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Query {
/// The properties to make distinct. The query results will contain the first result for each distinct combination of values for the given properties (if empty, all results are returned).
#[serde(rename="distinctOn")]
pub distinct_on: Option<Vec<PropertyReference>>,
/// An ending point for the query results. Query cursors are returned in query result batches and [can only be used to limit the same query](https://cloud.google.com/datastore/docs/concepts/queries#cursors_limits_and_offsets).
#[serde(rename="endCursor")]
pub end_cursor: Option<String>,
/// The filter to apply.
pub filter: Option<Filter>,
/// The kinds to query (if empty, returns entities of all kinds). Currently at most 1 kind may be specified.
pub kind: Option<Vec<KindExpression>>,
/// The maximum number of results to return. Applies after all other constraints. Optional. Unspecified is interpreted as no limit. Must be >= 0 if specified.
pub limit: Option<i32>,
/// The number of results to skip. Applies before limit, but after all other constraints. Optional. Must be >= 0 if specified.
pub offset: Option<i32>,
/// The order to apply to the query results (if empty, order is unspecified).
pub order: Option<Vec<PropertyOrder>>,
/// The projection to return. Defaults to returning all properties.
pub projection: Option<Vec<Projection>>,
/// A starting point for the query results. Query cursors are returned in query result batches and [can only be used to continue the same query](https://cloud.google.com/datastore/docs/concepts/queries#cursors_limits_and_offsets).
#[serde(rename="startCursor")]
pub start_cursor: Option<String>,
}
impl client::Part for Query {}
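// A minimal sketch (not part of the generated API): a filtered, ordered
// query over a hypothetical `Task` kind. The filter `Value` is left at its
// default here; `EQUAL` and `DESCENDING` are standard Datastore enum strings.
#[allow(dead_code)]
fn example_task_query() -> Query {
    Query {
        kind: Some(vec![KindExpression {
            name: Some("Task".to_string()),
        }]),
        filter: Some(Filter {
            property_filter: Some(PropertyFilter {
                property: Some(PropertyReference {
                    name: Some("done".to_string()),
                }),
                op: Some("EQUAL".to_string()),
                value: Some(Value::default()), // fill in the comparison value
            }),
            composite_filter: None,
        }),
        order: Some(vec![PropertyOrder {
            property: Some(PropertyReference {
                name: Some("created".to_string()),
            }),
            direction: Some("DESCENDING".to_string()),
        }]),
        limit: Some(10),
        ..Default::default()
    }
}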
/// A batch of results produced by a query.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct QueryResultBatch {
/// A cursor that points to the position after the last result in the batch.
#[serde(rename="endCursor")]
pub end_cursor: Option<String>,
/// The result type for every entity in `entity_results`.
#[serde(rename="entityResultType")]
pub entity_result_type: Option<String>,
/// The results for this batch.
#[serde(rename="entityResults")]
pub entity_results: Option<Vec<EntityResult>>,
/// The state of the query after the current batch.
#[serde(rename="moreResults")]
pub more_results: Option<String>,
/// A cursor that points to the position after the last skipped result. Will be set when `skipped_results` != 0.
#[serde(rename="skippedCursor")]
pub skipped_cursor: Option<String>,
/// The number of results skipped, typically because of an offset.
#[serde(rename="skippedResults")]
pub skipped_results: Option<i32>,
/// The version number of the snapshot this batch was returned from. This applies to the range of results from the query's `start_cursor` (or the beginning of the query if no cursor was given) to this batch's `end_cursor` (not the query's `end_cursor`). In a single transaction, subsequent query result batches for the same query can have a greater snapshot version number. Each batch's snapshot version is valid for all preceding batches. The value will be zero for eventually consistent queries.
#[serde(rename="snapshotVersion")]
pub snapshot_version: Option<String>,
}
impl client::Part for QueryResultBatch {}
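// A minimal sketch (not part of the generated API): cursor-based paging.
// Reuse the previous batch's end cursor as the next query's start cursor
// while the server reports `NOT_FINISHED`.
#[allow(dead_code)]
fn next_page(prev: &QueryResultBatch, mut query: Query) -> Option<Query> {
    if prev.more_results.as_deref() == Some("NOT_FINISHED") {
        query.start_cursor = prev.end_cursor.clone();
        Some(query)
    } else {
        None
    }
}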
/// Options specific to read-only transactions.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ReadOnly { _never_set: Option<bool> }
impl client::Part for ReadOnly {}
/// The options shared by read requests.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ReadOptions {
/// The non-transactional read consistency to use. Cannot be set to `STRONG` for global queries.
#[serde(rename="readConsistency")]
pub read_consistency: Option<String>,
/// The identifier of the transaction in which to read. A transaction identifier is returned by a call to Datastore.BeginTransaction.
pub transaction: Option<String>,
}
impl client::Part for ReadOptions {}
/// Options specific to read / write transactions.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ReadWrite {
/// The transaction identifier of the transaction being retried.
#[serde(rename="previousTransaction")]
pub previous_transaction: Option<String>,
}
impl client::Part for ReadWrite {}
/// The request for Datastore.ReserveIds.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [reserve ids projects](ProjectReserveIdCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ReserveIdsRequest {
/// If not empty, the ID of the database against which to make the request.
#[serde(rename="databaseId")]
pub database_id: Option<String>,
/// Required. A list of keys with complete key paths whose numeric IDs should not be auto-allocated.
pub keys: Option<Vec<Key>>,
}
impl client::RequestValue for ReserveIdsRequest {}
/// The response for Datastore.ReserveIds.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or in which this type is involved.
/// The list links each activity name with information about where it is used (one of *request* or *response*).
///
/// * [reserve ids projects](ProjectReserveIdCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ReserveIdsResponse { _never_set: Option<bool> }
impl client::ResponseResult for ReserveIdsResponse {}
/// The request for Datastore.Rollback.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or in which this type is involved.
/// The list links each activity name with information about where it is used (one of *request* or *response*).
///
/// * [rollback projects](ProjectRollbackCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct RollbackRequest {
/// Required. The transaction identifier, returned by a call to Datastore.BeginTransaction.
pub transaction: Option<String>,
}
impl client::RequestValue for RollbackRequest {}
/// The response for Datastore.Rollback. (an empty message).
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or in which this type is involved.
/// The list links each activity name with information about where it is used (one of *request* or *response*).
///
/// * [rollback projects](ProjectRollbackCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct RollbackResponse { _never_set: Option<bool> }
impl client::ResponseResult for RollbackResponse {}
/// The request for Datastore.RunQuery.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or in which this type is involved.
/// The list links each activity name with information about where it is used (one of *request* or *response*).
///
/// * [run query projects](ProjectRunQueryCall) (request)
///
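/// # Example
///
/// A minimal sketch of a request for an eventually consistent read;
/// `"EVENTUAL"` is one of the `read_consistency` enum strings, and the
/// query itself is left at its default here.
///
/// ```test_harness,no_run
/// # extern crate google_datastore1 as datastore1;
/// use datastore1::api::{Query, ReadOptions, RunQueryRequest};
///
/// let req = RunQueryRequest {
///     query: Some(Query::default()), // populate kind, filter, etc. as needed
///     read_options: Some(ReadOptions {
///         read_consistency: Some("EVENTUAL".to_string()),
///         ..Default::default()
///     }),
///     ..Default::default()
/// };
/// ```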
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct RunQueryRequest {
/// The GQL query to run.
#[serde(rename="gqlQuery")]
pub gql_query: Option<GqlQuery>,
/// Entities are partitioned into subsets, identified by a partition ID. Queries are scoped to a single partition. This partition ID is normalized with the standard default context partition ID.
#[serde(rename="partitionId")]
pub partition_id: Option<PartitionId>,
/// The query to run.
pub query: Option<Query>,
/// The options for this query.
#[serde(rename="readOptions")]
pub read_options: Option<ReadOptions>,
}
impl client::RequestValue for RunQueryRequest {}
/// The response for Datastore.RunQuery.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or in which this type is involved.
/// The list links each activity name with information about where it is used (one of *request* or *response*).
///
/// * [run query projects](ProjectRunQueryCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct RunQueryResponse {
/// A batch of query results (always present).
pub batch: Option<QueryResultBatch>,
/// The parsed form of the `GqlQuery` from the request, if it was set.
pub query: Option<Query>,
}
impl client::ResponseResult for RunQueryResponse {}
/// The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
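/// # Example
///
/// A minimal sketch of rendering a `Status` as a readable message; the
/// fallback strings are arbitrary.
///
/// ```test_harness,no_run
/// # extern crate google_datastore1 as datastore1;
/// use datastore1::api::Status;
///
/// fn describe(status: &Status) -> String {
///     format!(
///         "rpc error {}: {}",
///         status.code.unwrap_or(0),
///         status.message.as_deref().unwrap_or("<no message>"),
///     )
/// }
/// ```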
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Status {
/// The status code, which should be an enum value of google.rpc.Code.
pub code: Option<i32>,
/// A list of messages that carry the error details. There is a common set of message types for APIs to use.
pub details: Option<Vec<HashMap<String, serde_json::Value>>>,
/// A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
pub message: Option<String>,
}
impl client::Part for Status {}
/// Options for beginning a new transaction. Transactions can be created explicitly with calls to Datastore.BeginTransaction or implicitly by setting ReadOptions.new_transaction in read requests.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
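/// # Example
///
/// A minimal sketch of the two modes; typically only one of the fields is
/// set when beginning a transaction, and the id below is a placeholder.
///
/// ```test_harness,no_run
/// # extern crate google_datastore1 as datastore1;
/// use datastore1::api::{ReadOnly, ReadWrite, TransactionOptions};
///
/// // A read-only transaction.
/// let read_only = TransactionOptions {
///     read_only: Some(ReadOnly::default()),
///     ..Default::default()
/// };
///
/// // A read/write transaction retrying a previous one.
/// let read_write = TransactionOptions {
///     read_write: Some(ReadWrite {
///         previous_transaction: Some("previous-transaction-id".to_string()),
///     }),
///     ..Default::default()
/// };
/// ```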
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct TransactionOptions {
/// The transaction should only allow reads.
#[serde(rename="readOnly")]
pub read_only: Option<ReadOnly>,
/// The transaction should allow both reads and writes.
#[serde(rename="readWrite")]
pub read_write: Option<ReadWrite>,
}
impl client::Part for TransactionOptions {}
/// A message that can hold any of the supported value types and associated metadata.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
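/// # Example
///
/// A minimal sketch of an indexed string value; the contents are
/// illustrative.
///
/// ```test_harness,no_run
/// # extern crate google_datastore1 as datastore1;
/// use datastore1::api::Value;
///
/// let v = Value {
///     string_value: Some("hello".to_string()),
///     exclude_from_indexes: Some(false), // keep the property indexed
///     ..Default::default()
/// };
/// ```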
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Value {
/// An array value. Cannot contain another array value. A `Value` instance that sets field `array_value` must not set fields `meaning` or `exclude_from_indexes`.
#[serde(rename="arrayValue")]
pub array_value: Option<ArrayValue>,
/// A blob value. May have at most 1,000,000 bytes. When `exclude_from_indexes` is false, may have at most 1500 bytes. In JSON requests, must be base64-encoded.
#[serde(rename="blobValue")]
pub blob_value: Option<String>,
/// A boolean value.
#[serde(rename="booleanValue")]
pub boolean_value: Option<bool>,
/// A double value.
#[serde(rename="doubleValue")]
pub double_value: Option<f64>,
    /// An entity value.
    ///
    /// - May have no key.
    /// - May have a key with an incomplete key path.
    /// - May have a reserved/read-only key.
#[serde(rename="entityValue")]
pub entity_value: Option<Entity>,
/// If the value should be excluded from all indexes including those defined explicitly.
#[serde(rename="excludeFromIndexes")]
pub exclude_from_indexes: Option<bool>,
/// A geo point value representing a point on the surface of Earth.
#[serde(rename="geoPointValue")]
pub geo_point_value: Option<LatLng>,
/// An integer value.
#[serde(rename="integerValue")]
pub integer_value: Option<String>,
/// A key value.
#[serde(rename="keyValue")]
pub key_value: Option<Key>,
/// The `meaning` field should only be populated for backwards compatibility.
pub meaning: Option<i32>,
/// A null value.
#[serde(rename="nullValue")]
pub null_value: Option<String>,
/// A UTF-8 encoded string value. When `exclude_from_indexes` is false (it is indexed) , may have at most 1500 bytes. Otherwise, may be set to at most 1,000,000 bytes.
#[serde(rename="stringValue")]
pub string_value: Option<String>,
/// A timestamp value. When stored in the Datastore, precise only to microseconds; any additional precision is rounded down.
#[serde(rename="timestampValue")]
pub timestamp_value: Option<String>,
}
impl client::Part for Value {}
// ###################
// MethodBuilders ###
// #################
/// A builder providing access to all methods supported on *project* resources.
/// It is not used directly, but through the `Datastore` hub.
///
/// # Example
///
/// Instantiate a resource builder
///
/// ```test_harness,no_run
/// extern crate hyper;
/// extern crate hyper_rustls;
/// extern crate google_datastore1 as datastore1;
///
/// # async fn dox() {
/// use std::default::Default;
/// use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
///
/// let secret: oauth2::ApplicationSecret = Default::default();
/// let auth = oauth2::InstalledFlowAuthenticator::builder(
/// secret,
/// oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// ).build().await.unwrap();
/// let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // Usually you wouldn't bind this to a variable, but keep calling *CallBuilders*
/// // like `allocate_ids(...)`, `begin_transaction(...)`, `commit(...)`, `export(...)`, `import(...)`, `indexes_create(...)`, `indexes_delete(...)`, `indexes_get(...)`, `indexes_list(...)`, `lookup(...)`, `operations_cancel(...)`, `operations_delete(...)`, `operations_get(...)`, `operations_list(...)`, `reserve_ids(...)`, `rollback(...)` and `run_query(...)`
/// // to build up your call.
/// let rb = hub.projects();
/// # }
/// ```
pub struct ProjectMethods<'a> {
hub: &'a Datastore<>,
}
impl<'a> client::MethodsBuilder for ProjectMethods<'a> {}
impl<'a> ProjectMethods<'a> {
/// Create a builder to help you perform the following task:
///
/// Creates the specified index. A newly created index's initial state is `CREATING`. On completion of the returned google.longrunning.Operation, the state will be `READY`. If the index already exists, the call will return an `ALREADY_EXISTS` status. During index creation, the process could result in an error, in which case the index will move to the `ERROR` state. The process can be recovered by fixing the data that caused the error, removing the index with delete, then re-creating the index with create. Indexes with a single property cannot be created.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `projectId` - Project ID against which to make the request.
pub fn indexes_create(&self, request: GoogleDatastoreAdminV1Index, project_id: &str) -> ProjectIndexeCreateCall<'a> {
ProjectIndexeCreateCall {
hub: self.hub,
_request: request,
_project_id: project_id.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Deletes an existing index. An index can only be deleted if it is in a `READY` or `ERROR` state. On successful execution of the request, the index will be in a `DELETING` state. And on completion of the returned google.longrunning.Operation, the index will be removed. During index deletion, the process could result in an error, in which case the index will move to the `ERROR` state. The process can be recovered by fixing the data that caused the error, followed by calling delete again.
///
/// # Arguments
///
/// * `projectId` - Project ID against which to make the request.
/// * `indexId` - The resource ID of the index to delete.
pub fn indexes_delete(&self, project_id: &str, index_id: &str) -> ProjectIndexeDeleteCall<'a> {
ProjectIndexeDeleteCall {
hub: self.hub,
_project_id: project_id.to_string(),
_index_id: index_id.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Gets an index.
///
/// # Arguments
///
/// * `projectId` - Project ID against which to make the request.
/// * `indexId` - The resource ID of the index to get.
pub fn indexes_get(&self, project_id: &str, index_id: &str) -> ProjectIndexeGetCall<'a> {
ProjectIndexeGetCall {
hub: self.hub,
_project_id: project_id.to_string(),
_index_id: index_id.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Lists the indexes that match the specified filters. Datastore uses an eventually consistent query to fetch the list of indexes and may occasionally return stale results.
///
/// # Arguments
///
/// * `projectId` - Project ID against which to make the request.
pub fn indexes_list(&self, project_id: &str) -> ProjectIndexeListCall<'a> {
ProjectIndexeListCall {
hub: self.hub,
_project_id: project_id.to_string(),
_page_token: Default::default(),
_page_size: Default::default(),
_filter: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
///
/// # Arguments
///
/// * `name` - The name of the operation resource to be cancelled.
pub fn operations_cancel(&self, name: &str) -> ProjectOperationCancelCall<'a> {
ProjectOperationCancelCall {
hub: self.hub,
_name: name.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.
///
/// # Arguments
///
/// * `name` - The name of the operation resource to be deleted.
pub fn operations_delete(&self, name: &str) -> ProjectOperationDeleteCall<'a> {
ProjectOperationDeleteCall {
hub: self.hub,
_name: name.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
///
/// # Arguments
///
/// * `name` - The name of the operation resource.
pub fn operations_get(&self, name: &str) -> ProjectOperationGetCall<'a> {
ProjectOperationGetCall {
hub: self.hub,
_name: name.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `"/v1/{name=users/*}/operations"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.
///
/// # Arguments
///
/// * `name` - The name of the operation's parent resource.
pub fn operations_list(&self, name: &str) -> ProjectOperationListCall<'a> {
ProjectOperationListCall {
hub: self.hub,
_name: name.to_string(),
_page_token: Default::default(),
_page_size: Default::default(),
_filter: Default::default(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Allocates IDs for the given keys, which is useful for referencing an entity before it is inserted.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `projectId` - Required. The ID of the project against which to make the request.
pub fn allocate_ids(&self, request: AllocateIdsRequest, project_id: &str) -> ProjectAllocateIdCall<'a> {
ProjectAllocateIdCall {
hub: self.hub,
_request: request,
_project_id: project_id.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Begins a new transaction.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `projectId` - Required. The ID of the project against which to make the request.
pub fn begin_transaction(&self, request: BeginTransactionRequest, project_id: &str) -> ProjectBeginTransactionCall<'a> {
ProjectBeginTransactionCall {
hub: self.hub,
_request: request,
_project_id: project_id.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Commits a transaction, optionally creating, deleting or modifying some entities.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `projectId` - Required. The ID of the project against which to make the request.
pub fn commit(&self, request: CommitRequest, project_id: &str) -> ProjectCommitCall<'a> {
ProjectCommitCall {
hub: self.hub,
_request: request,
_project_id: project_id.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Exports a copy of all or a subset of entities from Google Cloud Datastore to another storage system, such as Google Cloud Storage. Recent updates to entities may not be reflected in the export. The export occurs in the background and its progress can be monitored and managed via the Operation resource that is created. The output of an export may only be used once the associated operation is done. If an export operation is cancelled before completion it may leave partial data behind in Google Cloud Storage.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `projectId` - Required. Project ID against which to make the request.
pub fn export(&self, request: GoogleDatastoreAdminV1ExportEntitiesRequest, project_id: &str) -> ProjectExportCall<'a> {
ProjectExportCall {
hub: self.hub,
_request: request,
_project_id: project_id.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Imports entities into Google Cloud Datastore. Existing entities with the same key are overwritten. The import occurs in the background and its progress can be monitored and managed via the Operation resource that is created. If an ImportEntities operation is cancelled, it is possible that a subset of the data has already been imported to Cloud Datastore.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `projectId` - Required. Project ID against which to make the request.
pub fn import(&self, request: GoogleDatastoreAdminV1ImportEntitiesRequest, project_id: &str) -> ProjectImportCall<'a> {
ProjectImportCall {
hub: self.hub,
_request: request,
_project_id: project_id.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Looks up entities by key.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `projectId` - Required. The ID of the project against which to make the request.
pub fn lookup(&self, request: LookupRequest, project_id: &str) -> ProjectLookupCall<'a> {
ProjectLookupCall {
hub: self.hub,
_request: request,
_project_id: project_id.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Prevents the supplied keys' IDs from being auto-allocated by Cloud Datastore.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `projectId` - Required. The ID of the project against which to make the request.
pub fn reserve_ids(&self, request: ReserveIdsRequest, project_id: &str) -> ProjectReserveIdCall<'a> {
ProjectReserveIdCall {
hub: self.hub,
_request: request,
_project_id: project_id.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Rolls back a transaction.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `projectId` - Required. The ID of the project against which to make the request.
pub fn rollback(&self, request: RollbackRequest, project_id: &str) -> ProjectRollbackCall<'a> {
ProjectRollbackCall {
hub: self.hub,
_request: request,
_project_id: project_id.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
/// Create a builder to help you perform the following task:
///
/// Queries for entities.
///
/// # Arguments
///
/// * `request` - No description provided.
/// * `projectId` - Required. The ID of the project against which to make the request.
pub fn run_query(&self, request: RunQueryRequest, project_id: &str) -> ProjectRunQueryCall<'a> {
ProjectRunQueryCall {
hub: self.hub,
_request: request,
_project_id: project_id.to_string(),
_delegate: Default::default(),
_additional_params: Default::default(),
_scopes: Default::default(),
}
}
}
// ###################
// CallBuilders ###
// #################
/// Creates the specified index. A newly created index's initial state is `CREATING`. On completion of the returned google.longrunning.Operation, the state will be `READY`. If the index already exists, the call will return an `ALREADY_EXISTS` status. During index creation, the process could result in an error, in which case the index will move to the `ERROR` state. The process can be recovered by fixing the data that caused the error, removing the index with delete, then re-creating the index with create. Indexes with a single property cannot be created.
///
/// A builder for the *indexes.create* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_datastore1 as datastore1;
/// use datastore1::api::GoogleDatastoreAdminV1Index;
/// # async fn dox() {
/// # use std::default::Default;
/// # use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable!
/// // Values shown here are possibly random and not representative!
/// let mut req = GoogleDatastoreAdminV1Index::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.projects().indexes_create(req, "projectId")
/// .doit().await;
/// # }
/// ```
pub struct ProjectIndexeCreateCall<'a> {
hub: &'a Datastore<>,
_request: GoogleDatastoreAdminV1Index,
_project_id: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectIndexeCreateCall<'a> {}
impl<'a> ProjectIndexeCreateCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, GoogleLongrunningOperation)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "datastore.projects.indexes.create",
http_method: hyper::Method::POST });
let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
params.push(("projectId", self._project_id.to_string()));
for &field in ["alt", "projectId"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}/indexes";
        if self._scopes.is_empty() {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{projectId}", "projectId")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["projectId"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
        let json_mime_type: mime::Mime = "application/json".parse().unwrap();
let mut request_value_reader =
{
let mut value = json::value::to_value(&self._request).expect("serde to work");
client::remove_json_null_values(&mut value);
let mut dst = io::Cursor::new(Vec::with_capacity(128));
json::to_writer(&mut dst, &value).unwrap();
dst
};
let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
                let req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
                        .header(CONTENT_TYPE, json_mime_type.to_string())
.header(CONTENT_LENGTH, request_size as u64)
.body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
///
/// Sets the *request* property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: GoogleDatastoreAdminV1Index) -> ProjectIndexeCreateCall<'a> {
self._request = new_value;
self
}
/// Project ID against which to make the request.
///
/// Sets the *project id* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project_id(mut self, new_value: &str) -> ProjectIndexeCreateCall<'a> {
self._project_id = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
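    ///
    /// A sketch of a no-op delegate that relies on the trait's default
    /// method implementations (assumed here, as elsewhere in this crate):
    ///
    /// ```text
    /// struct Noop;
    /// impl client::Delegate for Noop {}
    /// let mut d = Noop;
    /// let call = call.delegate(&mut d);
    /// ```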
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectIndexeCreateCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
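    ///
    /// A sketch of setting one of the parameters listed above:
    ///
    /// ```text
    /// .param("prettyPrint", "false")
    /// ```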
pub fn param<T>(mut self, name: T, value: T) -> ProjectIndexeCreateCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important, as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient; a read-write scope will do as well.
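    ///
    /// For example, to restrict this call to the Datastore-only scope
    /// (scope URL as published for this API; shown for illustration):
    ///
    /// ```text
    /// .add_scope("https://www.googleapis.com/auth/datastore")
    /// ```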
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectIndexeCreateCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Deletes an existing index. An index can only be deleted if it is in a `READY` or `ERROR` state. On successful execution of the request, the index will be in a `DELETING` state. And on completion of the returned google.longrunning.Operation, the index will be removed. During index deletion, the process could result in an error, in which case the index will move to the `ERROR` state. The process can be recovered by fixing the data that caused the error, followed by calling delete again.
///
/// A builder for the *indexes.delete* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_datastore1 as datastore1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.projects().indexes_delete("projectId", "indexId")
/// .doit().await;
/// # }
/// ```
pub struct ProjectIndexeDeleteCall<'a> {
hub: &'a Datastore<>,
_project_id: String,
_index_id: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectIndexeDeleteCall<'a> {}
impl<'a> ProjectIndexeDeleteCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, GoogleLongrunningOperation)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "datastore.projects.indexes.delete",
http_method: hyper::Method::DELETE });
let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
params.push(("projectId", self._project_id.to_string()));
params.push(("indexId", self._index_id.to_string()));
for &field in ["alt", "projectId", "indexId"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}/indexes/{indexId}";
        if self._scopes.is_empty() {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{projectId}", "projectId"), ("{indexId}", "indexId")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(2);
for param_name in ["indexId", "projectId"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
                let req_builder = hyper::Request::builder().method(hyper::Method::DELETE).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.body(hyper::body::Body::empty());
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// Project ID against which to make the request.
///
/// Sets the *project id* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project_id(mut self, new_value: &str) -> ProjectIndexeDeleteCall<'a> {
self._project_id = new_value.to_string();
self
}
/// The resource ID of the index to delete.
///
/// Sets the *index id* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn index_id(mut self, new_value: &str) -> ProjectIndexeDeleteCall<'a> {
self._index_id = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectIndexeDeleteCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> ProjectIndexeDeleteCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important, as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient; a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectIndexeDeleteCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Gets an index.
///
/// A builder for the *indexes.get* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_datastore1 as datastore1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.projects().indexes_get("projectId", "indexId")
/// .doit().await;
/// # }
/// ```
pub struct ProjectIndexeGetCall<'a> {
hub: &'a Datastore<>,
_project_id: String,
_index_id: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectIndexeGetCall<'a> {}
impl<'a> ProjectIndexeGetCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, GoogleDatastoreAdminV1Index)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "datastore.projects.indexes.get",
http_method: hyper::Method::GET });
let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
params.push(("projectId", self._project_id.to_string()));
params.push(("indexId", self._index_id.to_string()));
for &field in ["alt", "projectId", "indexId"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}/indexes/{indexId}";
        if self._scopes.is_empty() {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{projectId}", "projectId"), ("{indexId}", "indexId")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(2);
for param_name in ["indexId", "projectId"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
                let req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.body(hyper::body::Body::empty());
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// Project ID against which to make the request.
///
/// Sets the *project id* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project_id(mut self, new_value: &str) -> ProjectIndexeGetCall<'a> {
self._project_id = new_value.to_string();
self
}
/// The resource ID of the index to get.
///
/// Sets the *index id* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn index_id(mut self, new_value: &str) -> ProjectIndexeGetCall<'a> {
self._index_id = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectIndexeGetCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> ProjectIndexeGetCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important, as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient; a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectIndexeGetCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Lists the indexes that match the specified filters. Datastore uses an eventually consistent query to fetch the list of indexes and may occasionally return stale results.
///
/// A builder for the *indexes.list* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_datastore1 as datastore1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.projects().indexes_list("projectId")
/// .page_token("takimata")
/// .page_size(-52)
/// .filter("duo")
/// .doit().await;
/// # }
/// ```
pub struct ProjectIndexeListCall<'a> {
hub: &'a Datastore<>,
_project_id: String,
_page_token: Option<String>,
_page_size: Option<i32>,
_filter: Option<String>,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectIndexeListCall<'a> {}
impl<'a> ProjectIndexeListCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, GoogleDatastoreAdminV1ListIndexesResponse)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "datastore.projects.indexes.list",
http_method: hyper::Method::GET });
let mut params: Vec<(&str, String)> = Vec::with_capacity(6 + self._additional_params.len());
params.push(("projectId", self._project_id.to_string()));
if let Some(value) = self._page_token {
params.push(("pageToken", value.to_string()));
}
if let Some(value) = self._page_size {
params.push(("pageSize", value.to_string()));
}
if let Some(value) = self._filter {
params.push(("filter", value.to_string()));
}
for &field in ["alt", "projectId", "pageToken", "pageSize", "filter"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}/indexes";
        if self._scopes.is_empty() {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{projectId}", "projectId")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["projectId"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
                let req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.body(hyper::body::Body::empty());
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// Project ID against which to make the request.
///
/// Sets the *project id* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project_id(mut self, new_value: &str) -> ProjectIndexeListCall<'a> {
self._project_id = new_value.to_string();
self
}
/// The next_page_token value returned from a previous List request, if any.
///
/// Sets the *page token* query property to the given value.
pub fn page_token(mut self, new_value: &str) -> ProjectIndexeListCall<'a> {
self._page_token = Some(new_value.to_string());
self
}
/// The maximum number of items to return. If zero, then all results will be returned.
///
/// Sets the *page size* query property to the given value.
pub fn page_size(mut self, new_value: i32) -> ProjectIndexeListCall<'a> {
self._page_size = Some(new_value);
self
}
///
/// Sets the *filter* query property to the given value.
pub fn filter(mut self, new_value: &str) -> ProjectIndexeListCall<'a> {
self._filter = Some(new_value.to_string());
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectIndexeListCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> ProjectIndexeListCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, though a *read-write* scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectIndexeListCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
///
/// A builder for the *operations.cancel* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_datastore1 as datastore1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.projects().operations_cancel("name")
/// .doit().await;
/// # }
/// ```
pub struct ProjectOperationCancelCall<'a> {
hub: &'a Datastore<>,
_name: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectOperationCancelCall<'a> {}
impl<'a> ProjectOperationCancelCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Empty)> {
use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "datastore.projects.operations.cancel",
http_method: hyper::Method::POST });
let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
params.push(("name", self._name.to_string()));
for &field in ["alt", "name"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1/{+name}:cancel";
        if self._scopes.is_empty() {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{+name}", "name")].iter() {
let mut replace_with = String::new();
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = value.to_string();
break;
}
}
            if find_this.as_bytes()[1] == b'+' {
replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
}
url = url.replace(find_this, &replace_with);
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["name"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
                let req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                    .header(USER_AGENT, self.hub._user_agent.clone())
                    .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.body(hyper::body::Body::empty());
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// The name of the operation resource to be cancelled.
///
/// Sets the *name* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn name(mut self, new_value: &str) -> ProjectOperationCancelCall<'a> {
self._name = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
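    ///
    /// A minimal custom-delegate sketch. It assumes, as is conventional for these
    /// generated crates, that every `client::Delegate` method has a default
    /// implementation, so only the hooks of interest need overriding:
    ///
    /// ```test_harness,no_run
    /// # extern crate hyper;
    /// # extern crate hyper_rustls;
    /// # extern crate google_datastore1 as datastore1;
    /// use datastore1::client;
    ///
    /// struct LoggingDelegate;
    ///
    /// impl client::Delegate for LoggingDelegate {
    ///     // Log the start of every call; all other hooks keep their defaults.
    ///     fn begin(&mut self, info: client::MethodInfo) {
    ///         println!("starting call: {}", info.id);
    ///     }
    /// }
    ///
    /// # async fn dox() {
    /// # use std::default::Default;
    /// # use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
    /// # let secret: oauth2::ApplicationSecret = Default::default();
    /// # let auth = oauth2::InstalledFlowAuthenticator::builder(
    /// # secret,
    /// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
    /// # ).build().await.unwrap();
    /// # let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
    /// let mut delegate = LoggingDelegate;
    /// let result = hub.projects().operations_cancel("name")
    ///     .delegate(&mut delegate)
    ///     .doit().await;
    /// # }
    /// ```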
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectOperationCancelCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> ProjectOperationCancelCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, though a *read-write* scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectOperationCancelCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.
///
/// A builder for the *operations.delete* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_datastore1 as datastore1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.projects().operations_delete("name")
/// .doit().await;
/// # }
/// ```
pub struct ProjectOperationDeleteCall<'a> {
hub: &'a Datastore<>,
_name: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectOperationDeleteCall<'a> {}
impl<'a> ProjectOperationDeleteCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Empty)> {
use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "datastore.projects.operations.delete",
http_method: hyper::Method::DELETE });
let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
params.push(("name", self._name.to_string()));
for &field in ["alt", "name"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1/{+name}";
        if self._scopes.is_empty() {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{+name}", "name")].iter() {
let mut replace_with = String::new();
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = value.to_string();
break;
}
}
            if find_this.as_bytes()[1] == b'+' {
replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
}
url = url.replace(find_this, &replace_with);
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["name"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
                let req_builder = hyper::Request::builder().method(hyper::Method::DELETE).uri(url.clone().into_string())
                    .header(USER_AGENT, self.hub._user_agent.clone())
                    .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.body(hyper::body::Body::empty());
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// The name of the operation resource to be deleted.
///
/// Sets the *name* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn name(mut self, new_value: &str) -> ProjectOperationDeleteCall<'a> {
self._name = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectOperationDeleteCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> ProjectOperationDeleteCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, though a *read-write* scope will do as well.
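    ///
    /// A usage sketch: add an explicit scope by its URL, or pass `None` to clear
    /// the scope set entirely (the URL shown is the documented Cloud Datastore
    /// scope; adjust it to your needs):
    ///
    /// ```test_harness,no_run
    /// # extern crate hyper;
    /// # extern crate hyper_rustls;
    /// # extern crate google_datastore1 as datastore1;
    /// # async fn dox() {
    /// # use std::default::Default;
    /// # use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
    /// # let secret: oauth2::ApplicationSecret = Default::default();
    /// # let auth = oauth2::InstalledFlowAuthenticator::builder(
    /// # secret,
    /// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
    /// # ).build().await.unwrap();
    /// # let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
    /// let result = hub.projects().operations_delete("name")
    ///     .add_scope("https://www.googleapis.com/auth/datastore")
    ///     .doit().await;
    /// // Alternatively, clear all scopes and rely on an API key instead:
    /// // let call = hub.projects().operations_delete("name").add_scope(None::<&str>);
    /// # }
    /// ```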
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectOperationDeleteCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
///
/// A builder for the *operations.get* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_datastore1 as datastore1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.projects().operations_get("name")
/// .doit().await;
/// # }
/// ```
pub struct ProjectOperationGetCall<'a> {
hub: &'a Datastore<>,
_name: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectOperationGetCall<'a> {}
impl<'a> ProjectOperationGetCall<'a> {
    /// Perform the operation you have built so far.
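    ///
    /// A sketch of handling the outcome by matching on the `client::Error`
    /// variants this method can produce (assuming, as is usual for these
    /// generated crates, that the response types derive `Debug`):
    ///
    /// ```test_harness,no_run
    /// # extern crate hyper;
    /// # extern crate hyper_rustls;
    /// # extern crate google_datastore1 as datastore1;
    /// use datastore1::client;
    /// # async fn dox() {
    /// # use std::default::Default;
    /// # use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
    /// # let secret: oauth2::ApplicationSecret = Default::default();
    /// # let auth = oauth2::InstalledFlowAuthenticator::builder(
    /// # secret,
    /// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
    /// # ).build().await.unwrap();
    /// # let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
    /// match hub.projects().operations_get("name").doit().await {
    ///     Ok((_response, operation)) => println!("operation: {:?}", operation),
    ///     Err(client::Error::BadRequest(value)) => eprintln!("request was rejected: {}", value),
    ///     Err(other) => eprintln!("call failed: {}", other),
    /// }
    /// # }
    /// ```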
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, GoogleLongrunningOperation)> {
use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "datastore.projects.operations.get",
http_method: hyper::Method::GET });
let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
params.push(("name", self._name.to_string()));
for &field in ["alt", "name"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1/{+name}";
        if self._scopes.is_empty() {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{+name}", "name")].iter() {
let mut replace_with = String::new();
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = value.to_string();
break;
}
}
            if find_this.as_bytes()[1] == b'+' {
replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
}
url = url.replace(find_this, &replace_with);
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["name"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
                let req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
                    .header(USER_AGENT, self.hub._user_agent.clone())
                    .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.body(hyper::body::Body::empty());
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// The name of the operation resource.
///
/// Sets the *name* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn name(mut self, new_value: &str) -> ProjectOperationGetCall<'a> {
self._name = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectOperationGetCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> ProjectOperationGetCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, though a *read-write* scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectOperationGetCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `"/v1/{name=users/*}/operations"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.
///
/// A builder for the *operations.list* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_datastore1 as datastore1;
/// # async fn dox() {
/// # use std::default::Default;
/// # use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.projects().operations_list("name")
/// .page_token("eos")
/// .page_size(-4)
/// .filter("ea")
/// .doit().await;
/// # }
/// ```
pub struct ProjectOperationListCall<'a> {
hub: &'a Datastore<>,
_name: String,
_page_token: Option<String>,
_page_size: Option<i32>,
_filter: Option<String>,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectOperationListCall<'a> {}
impl<'a> ProjectOperationListCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, GoogleLongrunningListOperationsResponse)> {
use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "datastore.projects.operations.list",
http_method: hyper::Method::GET });
let mut params: Vec<(&str, String)> = Vec::with_capacity(6 + self._additional_params.len());
params.push(("name", self._name.to_string()));
if let Some(value) = self._page_token {
params.push(("pageToken", value.to_string()));
}
if let Some(value) = self._page_size {
params.push(("pageSize", value.to_string()));
}
if let Some(value) = self._filter {
params.push(("filter", value.to_string()));
}
for &field in ["alt", "name", "pageToken", "pageSize", "filter"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1/{+name}/operations";
        if self._scopes.is_empty() {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{+name}", "name")].iter() {
let mut replace_with = String::new();
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = value.to_string();
break;
}
}
            if find_this.as_bytes()[1] == b'+' {
replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
}
url = url.replace(find_this, &replace_with);
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["name"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
                let req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
                    .header(USER_AGENT, self.hub._user_agent.clone())
                    .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.body(hyper::body::Body::empty());
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// The name of the operation's parent resource.
///
/// Sets the *name* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn name(mut self, new_value: &str) -> ProjectOperationListCall<'a> {
self._name = new_value.to_string();
self
}
/// The standard list page token.
///
/// Sets the *page token* query property to the given value.
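    ///
    /// A minimal pagination sketch. This assumes the response type exposes
    /// `next_page_token` and `operations` fields, as the standard
    /// `ListOperationsResponse` schema does:
    ///
    /// ```test_harness,no_run
    /// # extern crate hyper;
    /// # extern crate hyper_rustls;
    /// # extern crate google_datastore1 as datastore1;
    /// # async fn dox() {
    /// # use std::default::Default;
    /// # use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
    /// # let secret: oauth2::ApplicationSecret = Default::default();
    /// # let auth = oauth2::InstalledFlowAuthenticator::builder(
    /// # secret,
    /// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
    /// # ).build().await.unwrap();
    /// # let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
    /// let mut page_token: Option<String> = None;
    /// loop {
    ///     let mut call = hub.projects().operations_list("name");
    ///     if let Some(ref token) = page_token {
    ///         call = call.page_token(token);
    ///     }
    ///     let (_, response) = call.doit().await.unwrap();
    ///     // ... consume `response.operations` here ...
    ///     match response.next_page_token {
    ///         Some(token) if !token.is_empty() => page_token = Some(token),
    ///         _ => break,
    ///     }
    /// }
    /// # }
    /// ```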
pub fn page_token(mut self, new_value: &str) -> ProjectOperationListCall<'a> {
self._page_token = Some(new_value.to_string());
self
}
/// The standard list page size.
///
/// Sets the *page size* query property to the given value.
pub fn page_size(mut self, new_value: i32) -> ProjectOperationListCall<'a> {
self._page_size = Some(new_value);
self
}
/// The standard list filter.
///
/// Sets the *filter* query property to the given value.
pub fn filter(mut self, new_value: &str) -> ProjectOperationListCall<'a> {
self._filter = Some(new_value.to_string());
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectOperationListCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
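    ///
    /// A minimal usage sketch: `prettyPrint` is taken from the list above and
    /// stands in for any parameter without a dedicated setter:
    ///
    /// ```test_harness,no_run
    /// # extern crate hyper;
    /// # extern crate hyper_rustls;
    /// # extern crate google_datastore1 as datastore1;
    /// # async fn dox() {
    /// # use std::default::Default;
    /// # use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
    /// # let secret: oauth2::ApplicationSecret = Default::default();
    /// # let auth = oauth2::InstalledFlowAuthenticator::builder(
    /// # secret,
    /// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
    /// # ).build().await.unwrap();
    /// # let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
    /// let result = hub.projects().operations_list("name")
    ///     .param("prettyPrint", "true")
    ///     .doit().await;
    /// # }
    /// ```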
pub fn param<T>(mut self, name: T, value: T) -> ProjectOperationListCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, though a *read-write* scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectOperationListCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Allocates IDs for the given keys, which is useful for referencing an entity before it is inserted.
///
/// A builder for the *allocateIds* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_datastore1 as datastore1;
/// use datastore1::api::AllocateIdsRequest;
/// # async fn dox() {
/// # use std::default::Default;
/// # use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable!
/// // Values shown here are possibly random and not representative!
/// let mut req = AllocateIdsRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.projects().allocate_ids(req, "projectId")
/// .doit().await;
/// # }
/// ```
pub struct ProjectAllocateIdCall<'a> {
hub: &'a Datastore<>,
_request: AllocateIdsRequest,
_project_id: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectAllocateIdCall<'a> {}
impl<'a> ProjectAllocateIdCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, AllocateIdsResponse)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "datastore.projects.allocateIds",
http_method: hyper::Method::POST });
let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
params.push(("projectId", self._project_id.to_string()));
for &field in ["alt", "projectId"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}:allocateIds";
        if self._scopes.is_empty() {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{projectId}", "projectId")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["projectId"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
        let json_mime_type: mime::Mime = "application/json".parse().unwrap();
let mut request_value_reader =
{
let mut value = json::value::to_value(&self._request).expect("serde to work");
client::remove_json_null_values(&mut value);
let mut dst = io::Cursor::new(Vec::with_capacity(128));
json::to_writer(&mut dst, &value).unwrap();
dst
};
let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
                let req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                    .header(USER_AGENT, self.hub._user_agent.clone())
                    .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
                    .header(CONTENT_TYPE, json_mime_type.to_string())
.header(CONTENT_LENGTH, request_size as u64)
.body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
///
/// Sets the *request* property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
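    ///
    /// A sketch of a populated request rather than the default one. The `keys`
    /// field is assumed from the Datastore `allocateIds` schema; it takes
    /// incomplete keys whose final path element has neither a name nor an ID:
    ///
    /// ```test_harness,no_run
    /// # extern crate hyper;
    /// # extern crate hyper_rustls;
    /// # extern crate google_datastore1 as datastore1;
    /// use datastore1::api::AllocateIdsRequest;
    /// # async fn dox() {
    /// # use std::default::Default;
    /// # use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
    /// # let secret: oauth2::ApplicationSecret = Default::default();
    /// # let auth = oauth2::InstalledFlowAuthenticator::builder(
    /// # secret,
    /// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
    /// # ).build().await.unwrap();
    /// # let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
    /// let req = AllocateIdsRequest {
    ///     keys: Some(Vec::new()), // push the incomplete `Key` values to allocate IDs for
    ///     ..Default::default()
    /// };
    /// let result = hub.projects().allocate_ids(req, "projectId")
    ///     .doit().await;
    /// # }
    /// ```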
pub fn request(mut self, new_value: AllocateIdsRequest) -> ProjectAllocateIdCall<'a> {
self._request = new_value;
self
}
/// Required. The ID of the project against which to make the request.
///
/// Sets the *project id* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project_id(mut self, new_value: &str) -> ProjectAllocateIdCall<'a> {
self._project_id = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectAllocateIdCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> ProjectAllocateIdCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, though a *read-write* scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectAllocateIdCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Begins a new transaction.
///
/// A builder for the *beginTransaction* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_datastore1 as datastore1;
/// use datastore1::api::BeginTransactionRequest;
/// # async fn dox() {
/// # use std::default::Default;
/// # use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable!
/// // Values shown here are possibly random and not representative!
/// let mut req = BeginTransactionRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.projects().begin_transaction(req, "projectId")
/// .doit().await;
/// # }
/// ```
pub struct ProjectBeginTransactionCall<'a> {
hub: &'a Datastore<>,
_request: BeginTransactionRequest,
_project_id: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectBeginTransactionCall<'a> {}
impl<'a> ProjectBeginTransactionCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, BeginTransactionResponse)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "datastore.projects.beginTransaction",
http_method: hyper::Method::POST });
let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
params.push(("projectId", self._project_id.to_string()));
for &field in ["alt", "projectId"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}:beginTransaction";
        if self._scopes.is_empty() {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{projectId}", "projectId")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["projectId"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
        let json_mime_type: mime::Mime = "application/json".parse().unwrap();
let mut request_value_reader =
{
let mut value = json::value::to_value(&self._request).expect("serde to work");
client::remove_json_null_values(&mut value);
let mut dst = io::Cursor::new(Vec::with_capacity(128));
json::to_writer(&mut dst, &value).unwrap();
dst
};
let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
                let req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                    .header(USER_AGENT, self.hub._user_agent.clone())
                    .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
                    .header(CONTENT_TYPE, json_mime_type.to_string())
.header(CONTENT_LENGTH, request_size as u64)
.body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// Sets the *request* property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: BeginTransactionRequest) -> ProjectBeginTransactionCall<'a> {
self._request = new_value;
self
}
/// Required. The ID of the project against which to make the request.
///
/// Sets the *project id* path property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project_id(mut self, new_value: &str) -> ProjectBeginTransactionCall<'a> {
self._project_id = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
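///
/// A minimal sketch of wiring in a delegate, reusing the `hub` and `req` from the
/// example at the top of this builder's docs. `client::DefaultDelegate` is the no-op
/// implementation this crate itself falls back to; the module path is assumed here.
///
/// ```ignore
/// // Any type implementing `client::Delegate` works in place of the default.
/// let mut dlg = client::DefaultDelegate;
/// let result = hub.projects().begin_transaction(req, "projectId")
///     .delegate(&mut dlg)
///     .doit().await;
/// ```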
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectBeginTransactionCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
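///
/// For instance, a hedged sketch of asking for a partial response via the `fields`
/// parameter listed above (the selector value is illustrative, not verified against
/// the API):
///
/// ```ignore
/// let result = hub.projects().begin_transaction(req, "projectId")
///     .param("fields", "transaction")
///     .doit().await;
/// ```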
pub fn param<T>(mut self, name: T, value: T) -> ProjectBeginTransactionCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead of the
/// default `Scope` variant, `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, though a read-write scope will do as well.
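///
/// A short sketch, assuming the URL below is the Datastore-specific OAuth scope
/// accepted by this API (and reusing `hub` and `req` from the example above):
///
/// ```ignore
/// let result = hub.projects().begin_transaction(req, "projectId")
///     .add_scope("https://www.googleapis.com/auth/datastore")
///     .doit().await;
/// ```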
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectBeginTransactionCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Commits a transaction, optionally creating, deleting or modifying some entities.
///
/// A builder for the *commit* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_datastore1 as datastore1;
/// use datastore1::api::CommitRequest;
/// # async fn dox() {
/// # use std::default::Default;
/// # use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable!
/// // Values shown here are possibly random and not representative!
/// let mut req = CommitRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.projects().commit(req, "projectId")
/// .doit().await;
/// # }
/// ```
pub struct ProjectCommitCall<'a> {
hub: &'a Datastore<>,
_request: CommitRequest,
_project_id: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectCommitCall<'a> {}
impl<'a> ProjectCommitCall<'a> {
/// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, CommitResponse)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "datastore.projects.commit",
http_method: hyper::Method::POST });
let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
params.push(("projectId", self._project_id.to_string()));
for &field in ["alt", "projectId"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}:commit";
if self._scopes.len() == 0 {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{projectId}", "projectId")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["projectId"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
let mut request_value_reader =
{
let mut value = json::value::to_value(&self._request).expect("serde to work");
client::remove_json_null_values(&mut value);
let mut dst = io::Cursor::new(Vec::with_capacity(128));
json::to_writer(&mut dst, &value).unwrap();
dst
};
let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
.header(CONTENT_LENGTH, request_size as u64)
.body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// Sets the *request* property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: CommitRequest) -> ProjectCommitCall<'a> {
self._request = new_value;
self
}
/// Required. The ID of the project against which to make the request.
///
/// Sets the *project id* path property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project_id(mut self, new_value: &str) -> ProjectCommitCall<'a> {
self._project_id = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
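///
/// A minimal sketch of wiring in a delegate, reusing the `hub` and `req` from the
/// example above. `client::DefaultDelegate` is the no-op implementation this crate
/// itself falls back to; the module path is assumed here.
///
/// ```ignore
/// // Any type implementing `client::Delegate` works in place of the default.
/// let mut dlg = client::DefaultDelegate;
/// let result = hub.projects().commit(req, "projectId")
///     .delegate(&mut dlg)
///     .doit().await;
/// ```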
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectCommitCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
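///
/// For instance, a hedged sketch of disabling pretty-printed responses via the
/// `prettyPrint` parameter listed above (reusing `hub` and `req` from the example
/// at the top of this builder's docs):
///
/// ```ignore
/// let result = hub.projects().commit(req, "projectId")
///     .param("prettyPrint", "false")
///     .doit().await;
/// ```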
pub fn param<T>(mut self, name: T, value: T) -> ProjectCommitCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead of the
/// default `Scope` variant, `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, though a read-write scope will do as well.
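///
/// A short sketch, assuming the URL below is the Datastore-specific OAuth scope
/// accepted by this API (and reusing `hub` and `req` from the example above):
///
/// ```ignore
/// let result = hub.projects().commit(req, "projectId")
///     .add_scope("https://www.googleapis.com/auth/datastore")
///     .doit().await;
/// ```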
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectCommitCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Exports a copy of all or a subset of entities from Google Cloud Datastore to another storage system, such as Google Cloud Storage. Recent updates to entities may not be reflected in the export. The export occurs in the background and its progress can be monitored and managed via the Operation resource that is created. The output of an export may only be used once the associated operation is done. If an export operation is cancelled before completion it may leave partial data behind in Google Cloud Storage.
///
/// A builder for the *export* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_datastore1 as datastore1;
/// use datastore1::api::GoogleDatastoreAdminV1ExportEntitiesRequest;
/// # async fn dox() {
/// # use std::default::Default;
/// # use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable!
/// // Values shown here are possibly random and not representative!
/// let mut req = GoogleDatastoreAdminV1ExportEntitiesRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.projects().export(req, "projectId")
/// .doit().await;
/// # }
/// ```
pub struct ProjectExportCall<'a> {
hub: &'a Datastore<>,
_request: GoogleDatastoreAdminV1ExportEntitiesRequest,
_project_id: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectExportCall<'a> {}
impl<'a> ProjectExportCall<'a> {
/// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, GoogleLongrunningOperation)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "datastore.projects.export",
http_method: hyper::Method::POST });
let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
params.push(("projectId", self._project_id.to_string()));
for &field in ["alt", "projectId"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}:export";
if self._scopes.len() == 0 {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{projectId}", "projectId")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["projectId"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
let mut request_value_reader =
{
let mut value = json::value::to_value(&self._request).expect("serde to work");
client::remove_json_null_values(&mut value);
let mut dst = io::Cursor::new(Vec::with_capacity(128));
json::to_writer(&mut dst, &value).unwrap();
dst
};
let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
.header(CONTENT_LENGTH, request_size as u64)
.body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// Sets the *request* property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: GoogleDatastoreAdminV1ExportEntitiesRequest) -> ProjectExportCall<'a> {
self._request = new_value;
self
}
/// Required. Project ID against which to make the request.
///
/// Sets the *project id* path property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project_id(mut self, new_value: &str) -> ProjectExportCall<'a> {
self._project_id = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
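///
/// A minimal sketch of wiring in a delegate, reusing the `hub` and `req` from the
/// example above. `client::DefaultDelegate` is the no-op implementation this crate
/// itself falls back to; the module path is assumed here.
///
/// ```ignore
/// // Any type implementing `client::Delegate` works in place of the default.
/// let mut dlg = client::DefaultDelegate;
/// let result = hub.projects().export(req, "projectId")
///     .delegate(&mut dlg)
///     .doit().await;
/// ```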
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectExportCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
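///
/// For instance, a hedged sketch of requesting only the operation `name` via the
/// `fields` parameter listed above (the selector value is illustrative, not
/// verified against the API):
///
/// ```ignore
/// let result = hub.projects().export(req, "projectId")
///     .param("fields", "name")
///     .doit().await;
/// ```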
pub fn param<T>(mut self, name: T, value: T) -> ProjectExportCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead of the
/// default `Scope` variant, `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, though a read-write scope will do as well.
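///
/// A short sketch, assuming the URL below is the Datastore-specific OAuth scope
/// accepted by this API (and reusing `hub` and `req` from the example above):
///
/// ```ignore
/// let result = hub.projects().export(req, "projectId")
///     .add_scope("https://www.googleapis.com/auth/datastore")
///     .doit().await;
/// ```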
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectExportCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Imports entities into Google Cloud Datastore. Existing entities with the same key are overwritten. The import occurs in the background and its progress can be monitored and managed via the Operation resource that is created. If an ImportEntities operation is cancelled, it is possible that a subset of the data has already been imported to Cloud Datastore.
///
/// A builder for the *import* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_datastore1 as datastore1;
/// use datastore1::api::GoogleDatastoreAdminV1ImportEntitiesRequest;
/// # async fn dox() {
/// # use std::default::Default;
/// # use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable!
/// // Values shown here are possibly random and not representative!
/// let mut req = GoogleDatastoreAdminV1ImportEntitiesRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.projects().import(req, "projectId")
/// .doit().await;
/// # }
/// ```
pub struct ProjectImportCall<'a> {
hub: &'a Datastore<>,
_request: GoogleDatastoreAdminV1ImportEntitiesRequest,
_project_id: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectImportCall<'a> {}
impl<'a> ProjectImportCall<'a> {
/// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, GoogleLongrunningOperation)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "datastore.projects.import",
http_method: hyper::Method::POST });
let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
params.push(("projectId", self._project_id.to_string()));
for &field in ["alt", "projectId"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}:import";
if self._scopes.len() == 0 {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{projectId}", "projectId")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["projectId"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
let mut request_value_reader =
{
let mut value = json::value::to_value(&self._request).expect("serde to work");
client::remove_json_null_values(&mut value);
let mut dst = io::Cursor::new(Vec::with_capacity(128));
json::to_writer(&mut dst, &value).unwrap();
dst
};
let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
.header(CONTENT_LENGTH, request_size as u64)
.body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// Sets the *request* property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: GoogleDatastoreAdminV1ImportEntitiesRequest) -> ProjectImportCall<'a> {
self._request = new_value;
self
}
/// Required. Project ID against which to make the request.
///
/// Sets the *project id* path property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project_id(mut self, new_value: &str) -> ProjectImportCall<'a> {
self._project_id = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
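///
/// A minimal sketch of wiring in a delegate, reusing the `hub` and `req` from the
/// example above. `client::DefaultDelegate` is the no-op implementation this crate
/// itself falls back to; the module path is assumed here.
///
/// ```ignore
/// // Any type implementing `client::Delegate` works in place of the default.
/// let mut dlg = client::DefaultDelegate;
/// let result = hub.projects().import(req, "projectId")
///     .delegate(&mut dlg)
///     .doit().await;
/// ```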
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectImportCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
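///
/// For instance, a hedged sketch of tagging the request for server-side quota
/// accounting via the `quotaUser` parameter listed above (the value is an
/// arbitrary example):
///
/// ```ignore
/// let result = hub.projects().import(req, "projectId")
///     .param("quotaUser", "example-user")
///     .doit().await;
/// ```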
pub fn param<T>(mut self, name: T, value: T) -> ProjectImportCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead of the
/// default `Scope` variant, `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, though a read-write scope will do as well.
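///
/// A short sketch, assuming the URL below is the Datastore-specific OAuth scope
/// accepted by this API (and reusing `hub` and `req` from the example above):
///
/// ```ignore
/// let result = hub.projects().import(req, "projectId")
///     .add_scope("https://www.googleapis.com/auth/datastore")
///     .doit().await;
/// ```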
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectImportCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Looks up entities by key.
///
/// A builder for the *lookup* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_datastore1 as datastore1;
/// use datastore1::api::LookupRequest;
/// # async fn dox() {
/// # use std::default::Default;
/// # use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable!
/// // Values shown here are possibly random and not representative!
/// let mut req = LookupRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.projects().lookup(req, "projectId")
/// .doit().await;
/// # }
/// ```
pub struct ProjectLookupCall<'a> {
hub: &'a Datastore<>,
_request: LookupRequest,
_project_id: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectLookupCall<'a> {}
impl<'a> ProjectLookupCall<'a> {
/// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, LookupResponse)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "datastore.projects.lookup",
http_method: hyper::Method::POST });
let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
params.push(("projectId", self._project_id.to_string()));
for &field in ["alt", "projectId"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}:lookup";
if self._scopes.len() == 0 {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{projectId}", "projectId")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["projectId"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
let mut request_value_reader =
{
let mut value = json::value::to_value(&self._request).expect("serde to work");
client::remove_json_null_values(&mut value);
let mut dst = io::Cursor::new(Vec::with_capacity(128));
json::to_writer(&mut dst, &value).unwrap();
dst
};
let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
.header(CONTENT_LENGTH, request_size as u64)
.body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// Sets the *request* property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: LookupRequest) -> ProjectLookupCall<'a> {
self._request = new_value;
self
}
/// Required. The ID of the project against which to make the request.
///
/// Sets the *project id* path property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project_id(mut self, new_value: &str) -> ProjectLookupCall<'a> {
self._project_id = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
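///
/// A minimal sketch of wiring in a delegate, reusing the `hub` and `req` from the
/// example above. `client::DefaultDelegate` is the no-op implementation this crate
/// itself falls back to; the module path is assumed here.
///
/// ```ignore
/// // Any type implementing `client::Delegate` works in place of the default.
/// let mut dlg = client::DefaultDelegate;
/// let result = hub.projects().lookup(req, "projectId")
///     .delegate(&mut dlg)
///     .doit().await;
/// ```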
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectLookupCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
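///
/// For instance, a hedged sketch of asking for only the `found` entities via the
/// `fields` parameter listed above (the selector value is illustrative, not
/// verified against the API):
///
/// ```ignore
/// let result = hub.projects().lookup(req, "projectId")
///     .param("fields", "found")
///     .doit().await;
/// ```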
pub fn param<T>(mut self, name: T, value: T) -> ProjectLookupCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead of the
/// default `Scope` variant, `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, though a read-write scope will do as well.
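///
/// A short sketch, assuming the URL below is the Datastore-specific OAuth scope
/// accepted by this API (and reusing `hub` and `req` from the example above):
///
/// ```ignore
/// let result = hub.projects().lookup(req, "projectId")
///     .add_scope("https://www.googleapis.com/auth/datastore")
///     .doit().await;
/// ```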
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLookupCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Prevents the supplied keys' IDs from being auto-allocated by Cloud Datastore.
///
/// A builder for the *reserveIds* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_datastore1 as datastore1;
/// use datastore1::api::ReserveIdsRequest;
/// # async fn dox() {
/// # use std::default::Default;
/// # use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable!
/// // Values shown here are possibly random and not representative!
/// let mut req = ReserveIdsRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.projects().reserve_ids(req, "projectId")
/// .doit().await;
/// # }
/// ```
pub struct ProjectReserveIdCall<'a> {
hub: &'a Datastore<>,
_request: ReserveIdsRequest,
_project_id: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectReserveIdCall<'a> {}
impl<'a> ProjectReserveIdCall<'a> {
/// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ReserveIdsResponse)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "datastore.projects.reserveIds",
http_method: hyper::Method::POST });
let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
params.push(("projectId", self._project_id.to_string()));
for &field in ["alt", "projectId"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}:reserveIds";
if self._scopes.len() == 0 {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{projectId}", "projectId")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["projectId"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
let mut request_value_reader =
{
let mut value = json::value::to_value(&self._request).expect("serde to work");
client::remove_json_null_values(&mut value);
let mut dst = io::Cursor::new(Vec::with_capacity(128));
json::to_writer(&mut dst, &value).unwrap();
dst
};
let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
.header(CONTENT_LENGTH, request_size as u64)
.body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
/// Sets the *request* property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: ReserveIdsRequest) -> ProjectReserveIdCall<'a> {
self._request = new_value;
self
}
/// Required. The ID of the project against which to make the request.
///
/// Sets the *project id* path property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project_id(mut self, new_value: &str) -> ProjectReserveIdCall<'a> {
self._project_id = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
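///
/// A minimal sketch of wiring in a delegate, reusing the `hub` and `req` from the
/// example above. `client::DefaultDelegate` is the no-op implementation this crate
/// itself falls back to; the module path is assumed here.
///
/// ```ignore
/// // Any type implementing `client::Delegate` works in place of the default.
/// let mut dlg = client::DefaultDelegate;
/// let result = hub.projects().reserve_ids(req, "projectId")
///     .delegate(&mut dlg)
///     .doit().await;
/// ```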
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectReserveIdCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
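///
/// For instance, a hedged sketch of disabling pretty-printed responses via the
/// `prettyPrint` parameter listed above (reusing `hub` and `req` from the example
/// at the top of this builder's docs):
///
/// ```ignore
/// let result = hub.projects().reserve_ids(req, "projectId")
///     .param("prettyPrint", "false")
///     .doit().await;
/// ```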
pub fn param<T>(mut self, name: T, value: T) -> ProjectReserveIdCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead of the
/// default `Scope` variant, `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, though a read-write scope will do as well.
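///
/// A short sketch, assuming the URL below is the Datastore-specific OAuth scope
/// accepted by this API (and reusing `hub` and `req` from the example above):
///
/// ```ignore
/// let result = hub.projects().reserve_ids(req, "projectId")
///     .add_scope("https://www.googleapis.com/auth/datastore")
///     .doit().await;
/// ```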
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectReserveIdCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Rolls back a transaction.
///
/// A builder for the *rollback* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_datastore1 as datastore1;
/// use datastore1::api::RollbackRequest;
/// # async fn dox() {
/// # use std::default::Default;
/// # use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = RollbackRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().rollback(req, "projectId")
/// .doit().await;
/// # }
/// ```
pub struct ProjectRollbackCall<'a>
where {
hub: &'a Datastore<>,
_request: RollbackRequest,
_project_id: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectRollbackCall<'a> {}
impl<'a> ProjectRollbackCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, RollbackResponse)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "datastore.projects.rollback",
http_method: hyper::Method::POST });
let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
params.push(("projectId", self._project_id.to_string()));
for &field in ["alt", "projectId"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}:rollback";
if self._scopes.len() == 0 {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{projectId}", "projectId")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["projectId"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
let mut request_value_reader =
{
let mut value = json::value::to_value(&self._request).expect("serde to work");
client::remove_json_null_values(&mut value);
let mut dst = io::Cursor::new(Vec::with_capacity(128));
json::to_writer(&mut dst, &value).unwrap();
dst
};
let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
.header(CONTENT_LENGTH, request_size as u64)
.body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
///
/// Sets the *request* property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: RollbackRequest) -> ProjectRollbackCall<'a> {
self._request = new_value;
self
}
/// Required. The ID of the project against which to make the request.
///
/// Sets the *project id* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project_id(mut self, new_value: &str) -> ProjectRollbackCall<'a> {
self._project_id = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
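    ///
    /// A rough sketch (assumes some `MyDelegate` type implementing
    /// `client::Delegate`; the type is hypothetical):
    ///
    /// ```text
    /// let mut delegate = MyDelegate::default();
    /// let result = hub.projects().rollback(req, "projectId")
    ///     .delegate(&mut delegate)
    ///     .doit().await;
    /// ```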
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectRollbackCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> ProjectRollbackCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRollbackCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
/// Queries for entities.
///
/// A builder for the *runQuery* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate google_datastore1 as datastore1;
/// use datastore1::api::RunQueryRequest;
/// # async fn dox() {
/// # use std::default::Default;
/// # use datastore1::{Datastore, oauth2, hyper, hyper_rustls};
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = oauth2::InstalledFlowAuthenticator::builder(
/// # secret,
/// # oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = Datastore::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = RunQueryRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().run_query(req, "projectId")
/// .doit().await;
/// # }
/// ```
pub struct ProjectRunQueryCall<'a>
where {
hub: &'a Datastore<>,
_request: RunQueryRequest,
_project_id: String,
_delegate: Option<&'a mut dyn client::Delegate>,
_additional_params: HashMap<String, String>,
_scopes: BTreeMap<String, ()>
}
impl<'a> client::CallBuilder for ProjectRunQueryCall<'a> {}
impl<'a> ProjectRunQueryCall<'a> {
    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, RunQueryResponse)> {
use std::io::{Read, Seek};
use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
use client::ToParts;
let mut dd = client::DefaultDelegate;
let mut dlg: &mut dyn client::Delegate = match self._delegate {
Some(d) => d,
None => &mut dd
};
dlg.begin(client::MethodInfo { id: "datastore.projects.runQuery",
http_method: hyper::Method::POST });
let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
params.push(("projectId", self._project_id.to_string()));
for &field in ["alt", "projectId"].iter() {
if self._additional_params.contains_key(field) {
dlg.finished(false);
return Err(client::Error::FieldClash(field));
}
}
for (name, value) in self._additional_params.iter() {
params.push((&name, value.clone()));
}
params.push(("alt", "json".to_string()));
let mut url = self.hub._base_url.clone() + "v1/projects/{projectId}:runQuery";
if self._scopes.len() == 0 {
self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
}
for &(find_this, param_name) in [("{projectId}", "projectId")].iter() {
let mut replace_with: Option<&str> = None;
for &(name, ref value) in params.iter() {
if name == param_name {
replace_with = Some(value);
break;
}
}
url = url.replace(find_this, replace_with.expect("to find substitution value in params"));
}
{
let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
for param_name in ["projectId"].iter() {
if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
indices_for_removal.push(index);
}
}
for &index in indices_for_removal.iter() {
params.remove(index);
}
}
let url = url::Url::parse_with_params(&url, params).unwrap();
let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
let mut request_value_reader =
{
let mut value = json::value::to_value(&self._request).expect("serde to work");
client::remove_json_null_values(&mut value);
let mut dst = io::Cursor::new(Vec::with_capacity(128));
json::to_writer(&mut dst, &value).unwrap();
dst
};
let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
loop {
let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
Ok(token) => token.clone(),
Err(err) => {
match dlg.token(&err) {
Some(token) => token,
None => {
dlg.finished(false);
return Err(client::Error::MissingToken(err))
}
}
}
};
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let client = &self.hub.client;
dlg.pre_request();
let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
.header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
let request = req_builder
.header(CONTENT_TYPE, format!("{}", json_mime_type.to_string()))
.header(CONTENT_LENGTH, request_size as u64)
.body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
client.request(request.unwrap()).await
};
match req_result {
Err(err) => {
if let client::Retry::After(d) = dlg.http_error(&err) {
sleep(d);
continue;
}
dlg.finished(false);
return Err(client::Error::HttpError(err))
}
Ok(mut res) => {
if !res.status().is_success() {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
let (parts, _) = res.into_parts();
let body = hyper::Body::from(res_body_string.clone());
let restored_response = hyper::Response::from_parts(parts, body);
let server_response = json::from_str::<serde_json::Value>(&res_body_string).ok();
if let client::Retry::After(d) = dlg.http_failure(&restored_response, server_response.clone()) {
sleep(d);
continue;
}
dlg.finished(false);
return match server_response {
Some(error_value) => Err(client::Error::BadRequest(error_value)),
None => Err(client::Error::Failure(restored_response)),
}
}
let result_value = {
let res_body_string = client::get_body_as_string(res.body_mut()).await;
match json::from_str(&res_body_string) {
Ok(decoded) => (res, decoded),
Err(err) => {
dlg.response_json_decode_error(&res_body_string, &err);
return Err(client::Error::JsonDecodeError(res_body_string, err));
}
}
};
dlg.finished(true);
return Ok(result_value)
}
}
}
}
///
/// Sets the *request* property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: RunQueryRequest) -> ProjectRunQueryCall<'a> {
self._request = new_value;
self
}
/// Required. The ID of the project against which to make the request.
///
/// Sets the *project id* path property to the given value.
///
    /// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn project_id(mut self, new_value: &str) -> ProjectRunQueryCall<'a> {
self._project_id = new_value.to_string();
self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectRunQueryCall<'a> {
self._delegate = Some(new_value);
self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> ProjectRunQueryCall<'a>
where T: AsRef<str> {
self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
self
}
/// Identifies the authorization scope for the method you are building.
///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
/// `Scope::CloudPlatform`.
///
/// The `scope` will be added to a set of scopes. This is important as one can maintain access
/// tokens for more than one scope.
/// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
/// function for details).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectRunQueryCall<'a>
where T: Into<Option<S>>,
S: AsRef<str> {
match scope.into() {
Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
None => None,
};
self
}
}
| 45.587484 | 967 | 0.595035 |
d792f654d071695f7480881297169acfd903a261 | 1,737 | //! Advent of Code - Day 21 'Fractal Art' Solution
use bytecount;
use error::Result;
use itertools::Itertools;
use pathfinding::matrix::Matrix;
use std::collections::HashMap;
use std::io::BufRead;
use utils::PrivateTryFromUsize;
/// Find the solution for Advent of Code 2017
pub fn find_solution<T: BufRead>(reader: T, second_star: bool) -> Result<u32> {
let subst = reader
.lines()
.filter_map(|l| l.ok())
.flat_map(|line| {
            let (k, v) = line.trim().split(" => ").map(matrix).next_tuple().expect("each line must be a `pattern => replacement` rule");
iproduct!(vec![k.clone(), k.flipped_ud(), k.flipped_lr()], 0..4).map(move |(m, i)| (m.rotated_cw(i), v.clone()))
})
.collect::<HashMap<_, _>>();
let mut sharps = (0..).scan(matrix(".#./..#/###"), |grid, _| {
let pt = 2 + (grid.rows % 2);
let b = grid.rows / pt;
let mut new_grid = Matrix::new_square(grid.rows + b, b'?');
for (c, l) in iproduct!(0..b, 0..b) {
let new = &subst[&grid.slice(l * pt..l * pt + pt, c * pt..c * pt + pt)];
new_grid.set_slice(&(l * (pt + 1), c * (pt + 1)), new);
}
*grid = new_grid;
Some(bytecount::count(grid.as_ref(), b'#'))
});
    // Part one asks for 5 iterations (`nth(4)`), part two for 18 (`nth(17)`).
    if second_star {
        Ok(u32::private_try_from(sharps.nth(17).unwrap_or(0))?)
    } else {
        Ok(u32::private_try_from(sharps.nth(4).unwrap_or(0))?)
    }
}
/// Make a matrix of bytes for a rule.
fn matrix(i: &str) -> Matrix<u8> {
Matrix::square_from_vec(i.bytes().filter(|&c| c != b'/').collect())
}
#[cfg(test)]
mod one_star {
#[test]
fn solution() {
assert!(true);
}
}
#[cfg(test)]
mod two_star {
#[test]
fn solution() {
assert!(true);
}
}
| 29.440678 | 124 | 0.54289 |
fc7a9a07720592577d5866e2d61fe722b63f3757 | 4,439 | extern crate bigdecimal;
extern crate chrono;
extern crate serde_json;
extern crate xero;
use bigdecimal::BigDecimal;
use chrono::NaiveDate;
use serde_json as json;
use xero::accounting::*;
use xero::encoding::XmlSerializable;
fn _xml(example: &'static str) -> Option<String> {
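    // Fixture strings below start with a newline for readability;
    // strip that leading newline before comparing.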
    if example.starts_with('\n') {
Some(String::from(&example[1..]))
} else {
Some(String::from(example))
}
}
fn _json(example: &'static str) -> Option<String> {
Some(String::from(example))
}
#[test]
fn serialize_item_details() {
let item = ItemDetails{
unit_price: Some(BigDecimal::from(0).with_scale(4)),
account_code: None,
cogs_account_code: None,
tax_type: None
};
assert_eq!(item.to_xml().ok(), _xml("<UnitPrice>0.0000</UnitPrice>"));
assert_eq!(json::to_string(&item).ok(), _json(r#"{"UnitPrice":"0.0000"}"#));
}
#[test]
fn serialized_payment_params() {
let payment_params = PaymentParams{
invoice: None,
credit_note: None,
prepayment: None,
overpayment: None,
account: None,
date: NaiveDate::from_ymd(2009, 08, 30),
amount: BigDecimal::from(0).with_scale(4),
reference: None,
is_reconciled: None,
status: None,
payment_type: None
};
assert_eq!(payment_params.to_xml().ok(), _xml("
<Date>2009-08-30</Date>
<Amount>0.0000</Amount>"));
assert_eq!(json::to_string_pretty(&payment_params).ok(), _json(r#"{
"Date": "2009-08-30",
"Amount": "0.0000"
}"#));
}
#[test]
fn dserialized_payment() {
let data = r#"{
"PaymentID": "payment-id",
"Amount": 0.0000,
"PaymentType": "ACCRECPAYMENT",
"Status": "AUTHORISED",
"IsReconciled": true
}"#;
let payment: Payment = json::from_str(&data).unwrap();
assert_eq!(payment, Payment{
payment_id: String::from("payment-id"),
amount: BigDecimal::from(0).with_scale(4),
payment_type: PaymentType::AccountsReceivable,
status: PaymentStatus::Authorised,
is_reconciled: true,
invoice: None
});
}
#[test]
fn serialize_invoice_params() {
let mut invoice = InvoiceParams::default();
invoice.contact = ContactIdParams{contact_id: "eaa28f49-6028-4b6e-bb12-d8f6278073fc"};
assert_eq!(invoice.to_xml().ok(), _xml("
<Type>ACCREC</Type>
<Contact>
<ContactID>eaa28f49-6028-4b6e-bb12-d8f6278073fc</ContactID>
</Contact>
<LineItems />"));
assert_eq!(json::to_string_pretty(&invoice).ok(), _json(r#"{
"Type": "ACCREC",
"Contact": {
"ContactID": "eaa28f49-6028-4b6e-bb12-d8f6278073fc"
},
"LineItems": []
}"#));
let invoice = InvoiceParams{
invoice_type: InvoiceType::AccountsReceivable,
contact: ContactIdParams{contact_id: "eaa28f49-6028-4b6e-bb12-d8f6278073fc"},
date: Some(NaiveDate::from_ymd(2009, 08, 30)),
due_date: Some(NaiveDate::from_ymd(2009, 09, 20)),
invoice_number: Some("0010"),
reference: Some("Ref:ABC"),
url: Some("https://twitter.com/SuperTransparentInvoices/status/865425833631993856"),
status: Some(InvoiceStatus::Authorised),
sent_to_contact: Some(true),
line_amount_types: Some(LineAmountType::Exclusive),
line_items: vec![
LineItemParams{
item_code: None,
description: "Consulting services as agreed",
quantity: Some(5.0),
unit_amount: Some(BigDecimal::from(0).with_scale(4)),
line_amount: Some(BigDecimal::from(0).with_scale(4)),
tax_amount: None,
account_code: Some("200"),
discount_rate: None,
}
],
};
assert_eq!(invoice.to_xml().ok(), _xml("
<Type>ACCREC</Type>
<Contact>
<ContactID>eaa28f49-6028-4b6e-bb12-d8f6278073fc</ContactID>
</Contact>
<Date>2009-08-30</Date>
<DueDate>2009-09-20</DueDate>
<InvoiceNumber>0010</InvoiceNumber>
<Reference>Ref:ABC</Reference>
<Url>https://twitter.com/SuperTransparentInvoices/status/865425833631993856</Url>
<Status>AUTHORISED</Status>
<SentToContact>true</SentToContact>
<LineAmountTypes>Exclusive</LineAmountTypes>
<LineItems>
<LineItem>
<Description>Consulting services as agreed</Description>
<Quantity>5</Quantity>
<UnitAmount>0.0000</UnitAmount>
<LineAmount>0.0000</LineAmount>
<AccountCode>200</AccountCode>
</LineItem>
</LineItems>"));
}
| 29.397351 | 92 | 0.635503 |
f5c2eab94117422d7a53f438ae0a54da372eeaea | 1,229 | // Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.
// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
//! Block oriented views onto rlp.
#[macro_use]
mod view_rlp;
mod block;
mod body;
mod header;
mod transaction;
pub use self::view_rlp::ViewRlp;
pub use self::block::BlockView;
pub use self::body::BodyView;
pub use self::header::HeaderView;
pub use self::transaction::TransactionView;
#[cfg(test)]
mod tests {
use super::HeaderView;
#[test]
#[should_panic]
fn should_include_file_line_number_in_panic_for_invalid_rlp() {
let _ = view!(HeaderView, &[]).parent_hash();
}
}
| 29.261905 | 75 | 0.746135 |
394c5d2db1a4fdcca2eb2e38615453a936364c4f | 1,284 | use reqwest::{Client, Error, Response};
/// Returns the raw transaction data (trytes) of a specific
/// transaction. These trytes can then be easily converted
/// into the actual transaction object. See utility functions
/// for more details.
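///
/// A rough usage sketch (crate-internal; the node URI and the deserialization
/// step are assumptions for illustration):
///
/// ```text
/// let response = get_trytes(&client, "https://node.example:14265", &hashes).await?;
/// let parsed: GetTrytesResponse = response.json().await?;
/// ```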
pub(crate) async fn get_trytes(
client: &Client,
uri: &str,
hashes: &[String],
) -> Result<Response, Error> {
let body = json!({
"command": "getTrytes",
"hashes": hashes,
});
client
.post(uri)
.header("ContentType", "application/json")
.header("X-IOTA-API-Version", "1")
.body(body.to_string())
.send()
.await
}
/// This is a typed representation of the JSON response
#[derive(Clone, Serialize, Default, Deserialize, Debug)]
pub struct GetTrytesResponse {
/// Any errors that occurred
error: Option<String>,
/// Trytes if found
trytes: Option<Vec<String>>,
}
impl GetTrytesResponse {
/// Returns the error attribute
pub fn error(&self) -> &Option<String> {
&self.error
}
/// Returns the trytes attribute
pub fn trytes(&self) -> &Option<Vec<String>> {
&self.trytes
}
    /// Takes ownership of the trytes attribute
pub fn take_trytes(self) -> Option<Vec<String>> {
self.trytes
}
}
| 26.204082 | 61 | 0.619159 |
bb00fadc39c95c062ab61c9ccc3b04794797e9ef | 3,462 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use middle::ty::{self, Ty};
use middle::ty_relate::{self, Relate, TypeRelation, RelateResult};
use util::ppaux::Repr;
/// A type "A" *matches* "B" if the fresh types in B could be
/// substituted with values so as to make it equal to A. Matching is
/// intended to be used only on freshened types, and it basically
/// indicates if the non-freshened versions of A and B could have been
/// unified.
///
/// It is only an approximation. If it yields false, unification would
/// definitely fail, but a true result doesn't mean unification would
/// succeed. This is because we don't track the "side-constraints" on
/// type variables, nor do we track if the same freshened type appears
/// more than once. To some extent these approximations could be
/// fixed, given effort.
///
/// Like subtyping, matching is really a binary relation, so the only
/// important thing about the result is Ok/Err. Also, matching never
/// affects any type variables or unification state.
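///
/// For example (informally): `Vec<u32>` matches the freshened type `Vec<$0>`,
/// since the fresh variable `$0` could be substituted with `u32`; but
/// `Vec<u32>` does not match `Vec<i32>`.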
pub struct Match<'a, 'tcx: 'a> {
tcx: &'a ty::ctxt<'tcx>
}
impl<'a, 'tcx> Match<'a, 'tcx> {
pub fn new(tcx: &'a ty::ctxt<'tcx>) -> Match<'a, 'tcx> {
Match { tcx: tcx }
}
}
impl<'a, 'tcx> TypeRelation<'a, 'tcx> for Match<'a, 'tcx> {
fn tag(&self) -> &'static str { "Match" }
fn tcx(&self) -> &'a ty::ctxt<'tcx> { self.tcx }
fn a_is_expected(&self) -> bool { true } // irrelevant
fn relate_with_variance<T:Relate<'a,'tcx>>(&mut self,
_: ty::Variance,
a: &T,
b: &T)
-> RelateResult<'tcx, T>
{
self.relate(a, b)
}
fn regions(&mut self, a: ty::Region, b: ty::Region) -> RelateResult<'tcx, ty::Region> {
debug!("{}.regions({}, {})",
self.tag(),
a.repr(self.tcx()),
b.repr(self.tcx()));
Ok(a)
}
fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
debug!("{}.tys({}, {})", self.tag(),
a.repr(self.tcx()), b.repr(self.tcx()));
if a == b { return Ok(a); }
match (&a.sty, &b.sty) {
(_, &ty::ty_infer(ty::FreshTy(_))) |
(_, &ty::ty_infer(ty::FreshIntTy(_))) => {
Ok(a)
}
(&ty::ty_infer(_), _) |
(_, &ty::ty_infer(_)) => {
Err(ty::terr_sorts(ty_relate::expected_found(self, &a, &b)))
}
(&ty::ty_err, _) | (_, &ty::ty_err) => {
Ok(self.tcx().types.err)
}
_ => {
ty_relate::super_relate_tys(self, a, b)
}
}
}
fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
-> RelateResult<'tcx, ty::Binder<T>>
where T: Relate<'a,'tcx>
{
Ok(ty::Binder(try!(self.relate(a.skip_binder(), b.skip_binder()))))
}
}
| 36.0625 | 91 | 0.541017 |
29059be7165db3eddcac086b3c53333a7348b188 | 12,116 | use super::setup;
use crate::common::{
jcli::JCli, jormungandr::ConfigurationBuilder, startup, transaction_utils::TransactionHash,
};
use chain_core::property::FromStr;
use chain_crypto::{Ed25519, PublicKey, Signature, Verification};
use chain_impl_mockchain::chaintypes::ConsensusVersion;
use chain_impl_mockchain::{
block::Header,
key::Hash,
testing::{
builders::{GenesisPraosBlockBuilder, StakePoolBuilder},
TestGen,
},
};
use chain_time::{Epoch, TimeEra};
use jormungandr_lib::interfaces::InitialUTxO;
use jormungandr_testing_utils::testing::node::grpc::client::MockClientError;
use rand::Rng;
use std::time::Duration;
const CHAIN_GROWTH_TIMEOUT: Duration = Duration::from_secs(60);
// check that affix is a long enough (at least half the size) prefix of word
fn is_long_prefix<T: PartialEq>(word: &[T], affix: &[T]) -> bool {
if word.len() < affix.len() || affix.len() * 2 < word.len() {
return false;
}
affix.iter().zip(word.iter()).all(|(x, y)| x == y)
}
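
// A few illustrative cases for the helper above (a hypothetical test, not
// part of the original suite):
#[test]
fn is_long_prefix_examples() {
    assert!(is_long_prefix(&[1, 2, 3, 4], &[1, 2])); // exactly half the word
    assert!(!is_long_prefix(&[1, 2, 3, 4], &[1])); // shorter than half the word
    assert!(!is_long_prefix(&[1, 2], &[1, 2, 3])); // affix longer than the word
}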
// L1001 Handshake sanity
#[test]
pub fn handshake_sanity() {
let setup = setup::client::default();
let mut auth_nonce = [0u8; 32];
rand::thread_rng().fill(&mut auth_nonce[..]);
let handshake_response = setup.client.handshake(&auth_nonce);
assert_eq!(
*setup.config.genesis_block_hash(),
hex::encode(handshake_response.block0),
"Genesis Block"
);
assert_eq!(handshake_response.version, 1, "Protocol version");
let public_key =
PublicKey::<Ed25519>::from_binary(&handshake_response.node_id).expect("invalid node ID");
let signature = Signature::<[u8], Ed25519>::from_binary(&handshake_response.signature)
.expect("invalid signature");
assert_eq!(
signature.verify(&public_key, &auth_nonce),
Verification::Success,
);
}
// L1006 Tip request
#[test]
pub fn tip_request() {
let setup =
setup::client::bootstrap(ConfigurationBuilder::new().with_slot_duration(9).to_owned());
setup
.client
.wait_for_chain_length(1.into(), CHAIN_GROWTH_TIMEOUT);
let tip_header = setup.client.tip();
let block_hashes = setup.server.logger.get_created_blocks_hashes();
// TODO: this could fail if the server produces another block
assert_eq!(*block_hashes.last().unwrap(), tip_header.hash());
}
// L1009 GetHeaders correct hash
#[test]
pub fn get_headers_correct_hash() {
let setup = setup::client::default();
std::thread::sleep(Duration::from_secs(10)); // wait for the server to produce some blocks
let block_hashes = setup.server.logger.get_created_blocks_hashes();
let headers: Vec<Header> = setup
.client
.headers(&block_hashes)
.expect("unexpected error returned");
let headers_hashes: Vec<Hash> = headers.iter().map(|x| x.hash()).collect();
assert!(
is_long_prefix(&block_hashes, &headers_hashes),
"server blocks: {:?} | client blocks: {:?}",
block_hashes,
headers_hashes,
);
}
// L1010 GetHeaders incorrect hash
#[test]
pub fn get_headers_incorrect_hash() {
let setup = setup::client::default();
let fake_hash: Hash = TestGen::hash();
assert_eq!(
MockClientError::InvalidRequest(format!(
"not found (block {} is not known to this node)",
fake_hash.to_string()
)),
setup.client.headers(&[fake_hash]).err().unwrap(),
"wrong error"
);
}
// L1011 GetBlocks correct hash
#[test]
pub fn get_blocks_correct_hash() {
let setup = setup::client::default();
let tip = setup.client.tip();
assert!(setup.client.get_blocks(&[tip.hash()]).is_ok());
}
// L1012 GetBlocks incorrect hash
#[test]
pub fn get_blocks_incorrect_hash() {
let setup = setup::client::default();
let fake_hash: Hash = TestGen::hash();
assert_eq!(
MockClientError::InvalidRequest(format!(
"not found (block {} is not known to this node)",
fake_hash.to_string()
)),
setup.client.headers(&[fake_hash]).err().unwrap(),
"wrong error"
);
}
// L1013 PullBlocksToTip correct hash
#[test]
pub fn pull_blocks_to_tip_correct_hash() {
let setup = setup::client::default();
std::thread::sleep(Duration::from_secs(10)); // wait for the server to produce some blocks
let blocks = setup
.client
.pull_blocks_to_tip(Hash::from_str(setup.config.genesis_block_hash()).unwrap())
.unwrap();
let blocks_hashes: Vec<Hash> = blocks.iter().map(|x| x.header.hash()).collect();
let block_hashes_from_logs = setup.server.logger.get_created_blocks_hashes();
assert!(
is_long_prefix(&block_hashes_from_logs, &blocks_hashes),
"server blocks: {:?} | client blocks: {:?}",
block_hashes_from_logs,
blocks_hashes
);
}
#[test]
pub fn pull_range_invalid_params() {
let setup = setup::client::default();
std::thread::sleep(Duration::from_secs(10)); // wait for the server to produce some blocks
let gen_hash = Hash::from_str(setup.config.genesis_block_hash()).unwrap();
let client = setup.client;
let tip_hash = client.tip().hash();
let fake_hash = TestGen::hash();
let error = MockClientError::InvalidRequest(
"not found (Could not find a known block in `from`)".into(),
);
let invalid_params: [(&[Hash], Hash); 3] = [
(&[], tip_hash),
(&[fake_hash], tip_hash),
(&[gen_hash], fake_hash),
];
for (from, to) in invalid_params.iter() {
assert_eq!(error, client.pull_headers(from, *to).err().unwrap());
assert_eq!(error, client.pull_blocks(from, *to).err().unwrap());
}
assert_eq!(error, client.pull_blocks_to_tip(fake_hash).err().unwrap());
}
// L1018 Pull headers correct hash
#[test]
pub fn pull_headers_correct_hash() {
let setup = setup::client::default();
std::thread::sleep(Duration::from_secs(10)); // wait for the server to produce some blocks
let tip_header = setup.client.tip();
let headers = setup
.client
.pull_headers(&[setup.client.get_genesis_block_hash()], tip_header.hash())
.unwrap();
let hashes: Vec<Hash> = headers.iter().map(|x| x.hash()).collect();
let hashes_from_logs = setup.server.logger.get_created_blocks_hashes();
assert!(
is_long_prefix(&hashes_from_logs, &hashes),
"server blocks: {:?} | client blocks: {:?}",
hashes_from_logs,
hashes,
);
}
// L1020 Push headers incorrect header
#[test]
pub fn push_headers() {
let setup = setup::client::default();
let tip_header = setup.client.tip();
let stake_pool = StakePoolBuilder::new().build();
let time_era = TimeEra::new(
0u64.into(),
Epoch(0u32),
setup
.config
.block0_configuration()
.blockchain_configuration
.slots_per_epoch
.into(),
);
let block = GenesisPraosBlockBuilder::new()
.with_parent(&tip_header)
.build(&stake_pool, &time_era);
assert!(setup.client.push_headers(block.header).is_ok());
}
// L1020 Push headers incorrect header
#[test]
pub fn upload_block_incompatible_protocol() {
let setup = setup::client::default();
let tip_header = setup.client.tip();
let stake_pool = StakePoolBuilder::new().build();
let time_era = TimeEra::new(
0u64.into(),
Epoch(0u32),
setup
.config
.block0_configuration()
.blockchain_configuration
.slots_per_epoch
.into(),
);
let block = GenesisPraosBlockBuilder::new()
.with_parent(&tip_header)
.build(&stake_pool, &time_era);
assert_eq!(
MockClientError::InvalidRequest(
"invalid request data (The block header verification failed: The block Version is incompatible with LeaderSelection.)".into()
),
        setup.client.upload_blocks(block).err().unwrap()
);
}
// L1020 Push headers incorrect header
#[test]
pub fn upload_block_nonexisting_stake_pool() {
let setup = setup::client::bootstrap(
ConfigurationBuilder::new()
.with_slot_duration(1)
.with_block0_consensus(ConsensusVersion::GenesisPraos)
.to_owned(),
);
let tip_header = setup.client.tip();
let stake_pool = StakePoolBuilder::new().build();
let time_era = TimeEra::new(
0u64.into(),
Epoch(0u32),
setup
.config
.block0_configuration()
.blockchain_configuration
.slots_per_epoch
.into(),
);
let block = GenesisPraosBlockBuilder::new()
.with_parent(&tip_header)
.build(&stake_pool, &time_era);
assert_eq!(
MockClientError::InvalidRequest(
"invalid request data (The block header verification failed: Invalid block message)"
.into()
),
        setup.client.upload_blocks(block).err().unwrap()
);
}
// L1020 Get fragments
#[test]
pub fn get_fragments() {
let mut sender = startup::create_new_account_address();
let receiver = startup::create_new_account_address();
let config = ConfigurationBuilder::new()
.with_slot_duration(4)
.with_funds(vec![InitialUTxO {
address: sender.address(),
value: 100.into(),
}])
.to_owned();
let setup = setup::client::bootstrap(config);
let output_value = 1u64;
let jcli: JCli = Default::default();
let transaction = sender
.transaction_to(
&setup.server.genesis_block_hash(),
&setup.server.fees(),
receiver.address(),
output_value.into(),
)
.unwrap()
.encode();
let fragment_id = jcli
.fragment_sender(&setup.server)
.send(&transaction)
.assert_in_block();
println!("{:?}", setup.client.get_fragments(vec![fragment_id]));
}
// L1021 PullBlocks correct hashes
#[test]
pub fn pull_blocks_correct_hashes_all_blocks() {
let setup = setup::client::default();
std::thread::sleep(Duration::from_secs(10)); // wait for the server to produce some blocks
let genesis_block_hash = Hash::from_str(setup.config.genesis_block_hash()).unwrap();
let blocks = setup
.client
.pull_blocks(&[genesis_block_hash], setup.client.tip().id())
.unwrap();
let blocks_hashes: Vec<Hash> = blocks.iter().map(|x| x.header.hash()).collect();
let block_hashes_from_logs = setup.server.logger.get_created_blocks_hashes();
assert!(
is_long_prefix(&block_hashes_from_logs, &blocks_hashes),
"server blocks: {:?} | client blocks: {:?}",
block_hashes_from_logs,
blocks_hashes
);
}
// L1022 PullBlocks correct hashes
#[test]
pub fn pull_blocks_correct_hashes_partial() {
let setup = setup::client::default();
setup
.client
.wait_for_chain_length(10.into(), CHAIN_GROWTH_TIMEOUT);
let block_hashes_from_logs = setup.server.logger.get_created_blocks_hashes();
let start = 2;
let end = 8;
let expected_hashes = block_hashes_from_logs[start..end].to_vec();
let blocks = setup
.client
.pull_blocks(
&[expected_hashes[0]],
expected_hashes.last().copied().unwrap(),
)
.unwrap();
let blocks_hashes: Vec<Hash> = blocks.iter().map(|x| x.header.hash()).collect();
assert_eq!(&expected_hashes[1..], &blocks_hashes);
}
// L1023 PullBlocks to and from in wrong order
#[test]
pub fn pull_blocks_hashes_wrong_order() {
let setup = setup::client::default();
setup
.client
.wait_for_chain_length(10.into(), CHAIN_GROWTH_TIMEOUT);
let block_hashes_from_logs = setup.server.logger.get_created_blocks_hashes();
let start = 2;
let end = 8;
let expected_hashes = block_hashes_from_logs[start..end].to_vec();
let result = setup.client.pull_blocks(
&[expected_hashes.last().copied().unwrap()],
expected_hashes[0],
);
assert!(result.is_err());
}
| 30.29 | 138 | 0.636761 |
14b07e22f2ea1b48f1b5fbf448b9d0e9f67ce327 | 2,583 | use std::{
future::Future,
sync::{Arc, Mutex},
task::{Poll, Waker},
};
use crate::{Client, RPCResponse, RPCResult, SeqHandler, SeqRead, SerializedCommand};
impl Client {
/// Asyncrounously sends a request and waits for a response.
pub(crate) fn request<'a, R: RPCResponse>(
&'a self,
name: &'static str,
body: Vec<u8>,
) -> RPCRequest<'a, R> {
RPCRequest {
client: self,
state: Arc::new(Mutex::new(RequestState::Unsent(SerializedCommand {
name,
body,
}))),
}
}
}
pub struct RPCRequest<'a, R: RPCResponse> {
client: &'a Client,
state: Arc<Mutex<RequestState<R>>>,
}
enum RequestState<R: RPCResponse> {
Unsent(SerializedCommand),
Pending(Waker),
Ready(RPCResult<R>),
Invalid,
}
impl<'a, T: RPCResponse> RPCRequest<'a, T> {
/// Send this request, but ignore the response
pub fn send_ignored(self) {
match std::mem::replace(&mut *self.state.lock().unwrap(), RequestState::Invalid) {
RequestState::Unsent(cmd) => {
self.client.send_command(cmd, None);
}
_ => {
                panic!("send_ignored called on a request that was already sent")
}
}
}
}
impl<'a, T: RPCResponse> Future for RPCRequest<'a, T> {
type Output = RPCResult<T>;
fn poll(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Self::Output> {
let mut state = self.state.lock().unwrap();
match std::mem::replace(&mut *state, RequestState::Invalid) {
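            // First poll: take the serialized command, record the waker, and
            // hand the command to the client together with this shared state
            // so the response handler can wake the task.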
RequestState::Unsent(cmd) => {
*state = RequestState::Pending(cx.waker().clone());
self.client.send_command(cmd, Some(self.state.clone()));
return Poll::Pending;
}
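            // Polled again before the response arrived: refresh the stored waker.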
RequestState::Pending(_) => {
*state = RequestState::Pending(cx.waker().clone());
return Poll::Pending;
}
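            // The response handler stored the result and woke the task; yield it.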
RequestState::Ready(response) => {
return Poll::Ready(response);
}
RequestState::Invalid => {
                panic!("RPCRequest polled after it already completed")
}
}
}
}
impl<T> SeqHandler for Mutex<RequestState<T>>
where
T: RPCResponse,
{
fn handle(&self, res: RPCResult<SeqRead>) {
let res = res.and_then(T::read_from);
let ready = RequestState::Ready(res);
match std::mem::replace(&mut *self.lock().unwrap(), ready) {
RequestState::Pending(waker) => waker.wake(),
            _ => panic!("received a response for a request that was not pending"),
}
}
}
| 27.189474 | 90 | 0.524584 |
f79607bd0b9939675c348ae3a8b74d34b05e0268 | 2,433 | use crate::errors::*;
use crate::types::*;
use uuid::Uuid;
/// Returns one of the available Telegram Passport elements
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct GetPassportElement {
#[doc(hidden)]
#[serde(rename(serialize = "@extra", deserialize = "@extra"))]
extra: Option<String>,
#[serde(rename(serialize = "@client_id", deserialize = "@client_id"))]
client_id: Option<i32>,
/// Telegram Passport element type
#[serde(rename(serialize = "type", deserialize = "type"))]
#[serde(skip_serializing_if = "PassportElementType::_is_default")]
type_: PassportElementType,
/// Password of the current user
password: String,
#[serde(rename(serialize = "@type"))]
td_type: String,
}
impl RObject for GetPassportElement {
#[doc(hidden)]
fn extra(&self) -> Option<&str> {
self.extra.as_deref()
}
#[doc(hidden)]
fn client_id(&self) -> Option<i32> {
self.client_id
}
}
impl TDPassportElement for GetPassportElement {}
impl RFunction for GetPassportElement {}
impl GetPassportElement {
pub fn from_json<S: AsRef<str>>(json: S) -> RTDResult<Self> {
Ok(serde_json::from_str(json.as_ref())?)
}
pub fn builder() -> RTDGetPassportElementBuilder {
let mut inner = GetPassportElement::default();
inner.extra = Some(Uuid::new_v4().to_string());
inner.td_type = "getPassportElement".to_string();
RTDGetPassportElementBuilder { inner }
}
pub fn type_(&self) -> &PassportElementType {
&self.type_
}
pub fn password(&self) -> &String {
&self.password
}
}
#[doc(hidden)]
pub struct RTDGetPassportElementBuilder {
inner: GetPassportElement,
}
impl RTDGetPassportElementBuilder {
pub fn build(&self) -> GetPassportElement {
self.inner.clone()
}
pub fn type_<T: AsRef<PassportElementType>>(&mut self, type_: T) -> &mut Self {
self.inner.type_ = type_.as_ref().clone();
self
}
pub fn password<T: AsRef<str>>(&mut self, password: T) -> &mut Self {
self.inner.password = password.as_ref().to_string();
self
}
}
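
// Illustrative builder usage (a sketch; the element-type value and password
// are placeholder assumptions, not values from the TDLib documentation):
//
//     let request = GetPassportElement::builder()
//         .type_(PassportElementType::default())
//         .password("2fa-password")
//         .build();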
impl AsRef<GetPassportElement> for GetPassportElement {
fn as_ref(&self) -> &GetPassportElement {
self
}
}
impl AsRef<GetPassportElement> for RTDGetPassportElementBuilder {
fn as_ref(&self) -> &GetPassportElement {
&self.inner
}
}
| 25.882979 | 83 | 0.642828 |
79e0dda57ed7287646758cb0451436a2dee2e0bc | 1,789 | #[macro_use]
extern crate lazy_static;
pub mod utils;
use assert_cmd::prelude::*;
use std::env;
use std::fs::File;
use std::io::Write;
use std::process::Command;
use std::str;
use std::sync::Mutex;
lazy_static! {
static ref BUILD_LOCK: Mutex<u8> = Mutex::new(0);
}
macro_rules! settings {
( $f:expr, $x:expr ) => {
let file_path = utils::fixture_path($f).join("wrangler.toml");
let mut file = File::create(file_path).unwrap();
let content = format!(
r#"
name = "test"
zone_id = ""
account_id = ""
workers_dev = true
{}
"#,
$x
);
file.write_all(content.as_bytes()).unwrap();
};
}
#[test]
fn it_can_preview_js_project() {
let fixture = "simple_js";
utils::create_temporary_copy(fixture);
settings! {fixture, r#"
type = "javascript"
"#};
preview(fixture);
utils::cleanup(fixture);
}
#[test]
fn it_can_preview_webpack_project() {
let fixture = "webpack_simple_js";
utils::create_temporary_copy(fixture);
settings! {fixture, r#"
type = "webpack"
"#};
preview(fixture);
utils::cleanup(fixture);
}
#[test]
fn it_can_preview_rust_project() {
let fixture = "simple_rust";
utils::create_temporary_copy(fixture);
settings! {fixture, r#"
type = "rust"
"#};
preview(fixture);
utils::cleanup(fixture);
}
fn preview(fixture: &str) {
// Lock to avoid having concurrent builds
let _g = BUILD_LOCK.lock().unwrap();
env::remove_var("CF_ACCOUNT_ID");
let mut preview = Command::cargo_bin(env!("CARGO_PKG_NAME")).unwrap();
preview.current_dir(utils::fixture_path(fixture));
preview.arg("preview").arg("--headless").assert().success();
}
| 22.935897 | 74 | 0.599217 |
f75c80f8c6f8a00b59ff631671d4738e7b3aeec8 | 2,945 | use std::collections::HashSet;
use std::env;
use std::fs::File;
use std::io;
use std::io::BufRead;
#[macro_use]
extern crate text_io;
const FABRIC_SIZE: usize = 1000;
fn main() {
let args = env::args().collect::<Vec<String>>();
if args.len() < 3 {
panic!("Not enough arguments")
}
let input_filename = &args[2];
let input_lines = read_input(input_filename).unwrap();
let result = match args[1].as_str() {
"a" => {
println!("Calling Part A");
part_a(input_lines)
}
"b" => {
println!("Calling Part B");
part_b(input_lines)
}
_ => panic!("Expecting a or b as 1st argument"),
};
println!("{}", result);
}
fn read_input(filename: &str) -> Result<io::BufReader<File>, io::Error> {
let file = File::open(filename)?;
let buf_reader = io::BufReader::new(file);
Ok(buf_reader)
}
fn part_a(input_lines: io::BufReader<File>) -> usize {
let mut fabric_map: [[usize; FABRIC_SIZE]; FABRIC_SIZE] = [[0; FABRIC_SIZE]; FABRIC_SIZE];
for line in input_lines.lines() {
let line = line.unwrap();
let new_claim = Claim::from_str(&line);
new_claim.mark_map(&mut fabric_map);
}
fabric_map
.iter()
.map(|arr| arr.iter().filter(|&&x| x > 1).count())
        .sum()
}
fn part_b(input_lines: io::BufReader<File>) -> usize {
let mut fabric_map: [[usize; FABRIC_SIZE]; FABRIC_SIZE] = [[0; FABRIC_SIZE]; FABRIC_SIZE];
let mut claims = HashSet::new();
for line in input_lines.lines() {
let line = line.unwrap();
let new_claim = Claim::from_str(&line);
new_claim.mark_map(&mut fabric_map);
claims.insert(new_claim);
}
'main_outer: for claim in claims {
for i in claim.left..claim.left + claim.width {
for j in claim.top..claim.top + claim.height {
if fabric_map[i][j] > 1 {
continue 'main_outer;
}
}
}
return claim.id;
}
1
}
#[derive(Debug, PartialEq, Eq, Hash)]
struct Claim {
id: usize,
left: usize,
top: usize,
width: usize,
height: usize,
}
impl Claim {
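    /// Parses a claim line of the form `#123 @ 3,2: 5x4`
    /// (id, left offset, top offset, width, height).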
pub fn from_str(input_line: &str) -> Self {
let id: usize;
let left: usize;
let top: usize;
let width: usize;
let height: usize;
scan!(input_line.bytes() => "#{} @ {},{}: {}x{}", id, left, top, width, height);
Claim {
id,
left,
top,
width,
height,
}
}
pub fn mark_map(&self, fabric_map: &mut [[usize; FABRIC_SIZE]; FABRIC_SIZE]) {
for i in self.left..self.left + self.width {
for j in self.top..self.top + self.height {
fabric_map[i][j] += 1;
}
}
}
}
| 26.061947 | 94 | 0.536163 |
0e928345d931f7ff28da2d4a04ba9d7378d18887 | 16,096 | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![crate_type = "bin"]
#![feature(box_syntax)]
#![feature(dynamic_lib)]
#![feature(libc)]
#![feature(path_ext)]
#![feature(rustc_private)]
#![feature(slice_splits)]
#![feature(str_char)]
#![feature(test)]
#![feature(vec_push_all)]
#![feature(path_components_peek)]
#![deny(warnings)]
extern crate libc;
extern crate test;
extern crate getopts;
#[macro_use]
extern crate log;
use std::env;
use std::fs;
use std::path::{Path, PathBuf};
use getopts::{optopt, optflag, reqopt};
use common::Config;
use common::{Pretty, DebugInfoGdb, DebugInfoLldb};
use util::logv;
pub mod procsrv;
pub mod util;
pub mod header;
pub mod runtest;
pub mod common;
pub mod errors;
mod raise_fd_limit;
pub fn main() {
let config = parse_config(env::args().collect());
if config.valgrind_path.is_none() && config.force_valgrind {
panic!("Can't find Valgrind to run Valgrind tests");
}
log_config(&config);
run_tests(&config);
}
pub fn parse_config(args: Vec<String> ) -> Config {
let groups : Vec<getopts::OptGroup> =
vec!(reqopt("", "compile-lib-path", "path to host shared libraries", "PATH"),
reqopt("", "run-lib-path", "path to target shared libraries", "PATH"),
reqopt("", "rustc-path", "path to rustc to use for compiling", "PATH"),
reqopt("", "rustdoc-path", "path to rustdoc to use for compiling", "PATH"),
reqopt("", "python", "path to python to use for doc tests", "PATH"),
optopt("", "valgrind-path", "path to Valgrind executable for Valgrind tests", "PROGRAM"),
optflag("", "force-valgrind", "fail if Valgrind tests cannot be run under Valgrind"),
optopt("", "llvm-bin-path", "path to directory holding llvm binaries", "DIR"),
reqopt("", "src-base", "directory to scan for test files", "PATH"),
reqopt("", "build-base", "directory to deposit test outputs", "PATH"),
reqopt("", "aux-base", "directory to find auxiliary test files", "PATH"),
reqopt("", "stage-id", "the target-stage identifier", "stageN-TARGET"),
reqopt("", "mode", "which sort of compile tests to run",
"(compile-fail|parse-fail|run-fail|run-pass|run-pass-valgrind|pretty|debug-info)"),
optflag("", "ignored", "run tests marked as ignored"),
optopt("", "runtool", "supervisor program to run tests under \
(eg. emulator, valgrind)", "PROGRAM"),
optopt("", "host-rustcflags", "flags to pass to rustc for host", "FLAGS"),
optopt("", "target-rustcflags", "flags to pass to rustc for target", "FLAGS"),
optflag("", "verbose", "run tests verbosely, showing all output"),
optopt("", "logfile", "file to log test execution to", "FILE"),
optopt("", "target", "the target to build for", "TARGET"),
optopt("", "host", "the host to build for", "HOST"),
optopt("", "gdb-version", "the version of GDB used", "VERSION STRING"),
optopt("", "lldb-version", "the version of LLDB used", "VERSION STRING"),
optopt("", "android-cross-path", "Android NDK standalone path", "PATH"),
optopt("", "adb-path", "path to the android debugger", "PATH"),
optopt("", "adb-test-dir", "path to tests for the android debugger", "PATH"),
optopt("", "lldb-python-dir", "directory containing LLDB's python module", "PATH"),
optflag("h", "help", "show this message"));
let (argv0, args_) = args.split_first().unwrap();
    if args.len() > 1 && (args[1] == "-h" || args[1] == "--help") {
let message = format!("Usage: {} [OPTIONS] [TESTNAME...]", argv0);
println!("{}", getopts::usage(&message, &groups));
println!("");
panic!()
}
let matches =
&match getopts::getopts(args_, &groups) {
Ok(m) => m,
Err(f) => panic!("{:?}", f)
};
if matches.opt_present("h") || matches.opt_present("help") {
let message = format!("Usage: {} [OPTIONS] [TESTNAME...]", argv0);
println!("{}", getopts::usage(&message, &groups));
println!("");
panic!()
}
fn opt_path(m: &getopts::Matches, nm: &str) -> PathBuf {
match m.opt_str(nm) {
Some(s) => PathBuf::from(&s),
None => panic!("no option (=path) found for {}", nm),
}
}
let filter = if !matches.free.is_empty() {
Some(matches.free[0].clone())
} else {
None
};
Config {
compile_lib_path: matches.opt_str("compile-lib-path").unwrap(),
run_lib_path: matches.opt_str("run-lib-path").unwrap(),
rustc_path: opt_path(matches, "rustc-path"),
rustdoc_path: opt_path(matches, "rustdoc-path"),
python: matches.opt_str("python").unwrap(),
valgrind_path: matches.opt_str("valgrind-path"),
force_valgrind: matches.opt_present("force-valgrind"),
llvm_bin_path: matches.opt_str("llvm-bin-path").map(|s| PathBuf::from(&s)),
src_base: opt_path(matches, "src-base"),
build_base: opt_path(matches, "build-base"),
aux_base: opt_path(matches, "aux-base"),
stage_id: matches.opt_str("stage-id").unwrap(),
mode: matches.opt_str("mode").unwrap().parse().ok().expect("invalid mode"),
run_ignored: matches.opt_present("ignored"),
filter: filter,
logfile: matches.opt_str("logfile").map(|s| PathBuf::from(&s)),
runtool: matches.opt_str("runtool"),
host_rustcflags: matches.opt_str("host-rustcflags"),
target_rustcflags: matches.opt_str("target-rustcflags"),
target: opt_str2(matches.opt_str("target")),
host: opt_str2(matches.opt_str("host")),
gdb_version: extract_gdb_version(matches.opt_str("gdb-version")),
lldb_version: extract_lldb_version(matches.opt_str("lldb-version")),
android_cross_path: opt_path(matches, "android-cross-path"),
adb_path: opt_str2(matches.opt_str("adb-path")),
adb_test_dir: format!("{}/{}",
opt_str2(matches.opt_str("adb-test-dir")),
opt_str2(matches.opt_str("target"))),
adb_device_status:
opt_str2(matches.opt_str("target")).contains("android") &&
"(none)" != opt_str2(matches.opt_str("adb-test-dir")) &&
!opt_str2(matches.opt_str("adb-test-dir")).is_empty(),
lldb_python_dir: matches.opt_str("lldb-python-dir"),
verbose: matches.opt_present("verbose"),
}
}
pub fn log_config(config: &Config) {
let c = config;
logv(c, format!("configuration:"));
logv(c, format!("compile_lib_path: {:?}", config.compile_lib_path));
logv(c, format!("run_lib_path: {:?}", config.run_lib_path));
logv(c, format!("rustc_path: {:?}", config.rustc_path.display()));
logv(c, format!("rustdoc_path: {:?}", config.rustdoc_path.display()));
logv(c, format!("src_base: {:?}", config.src_base.display()));
logv(c, format!("build_base: {:?}", config.build_base.display()));
logv(c, format!("stage_id: {}", config.stage_id));
logv(c, format!("mode: {}", config.mode));
logv(c, format!("run_ignored: {}", config.run_ignored));
logv(c, format!("filter: {}",
opt_str(&config.filter
.as_ref()
.map(|re| re.to_owned()))));
logv(c, format!("runtool: {}", opt_str(&config.runtool)));
logv(c, format!("host-rustcflags: {}",
opt_str(&config.host_rustcflags)));
logv(c, format!("target-rustcflags: {}",
opt_str(&config.target_rustcflags)));
logv(c, format!("target: {}", config.target));
logv(c, format!("host: {}", config.host));
logv(c, format!("android-cross-path: {:?}",
config.android_cross_path.display()));
logv(c, format!("adb_path: {:?}", config.adb_path));
logv(c, format!("adb_test_dir: {:?}", config.adb_test_dir));
logv(c, format!("adb_device_status: {}",
config.adb_device_status));
logv(c, format!("verbose: {}", config.verbose));
logv(c, format!("\n"));
}
pub fn opt_str<'a>(maybestr: &'a Option<String>) -> &'a str {
match *maybestr {
None => "(none)",
Some(ref s) => s,
}
}
pub fn opt_str2(maybestr: Option<String>) -> String {
match maybestr {
None => "(none)".to_owned(),
Some(s) => s,
}
}
pub fn run_tests(config: &Config) {
if config.target.contains("android") {
if let DebugInfoGdb = config.mode {
println!("{} debug-info test uses tcp 5039 port.\
please reserve it", config.target);
}
// android debug-info test uses remote debugger
// so, we test 1 thread at once.
        // also trying to isolate problems with adb_run_wrapper.sh looping indefinitely
env::set_var("RUST_TEST_THREADS","1");
}
match config.mode {
DebugInfoLldb => {
// Some older versions of LLDB seem to have problems with multiple
// instances running in parallel, so only run one test thread at a
// time.
env::set_var("RUST_TEST_THREADS", "1");
}
_ => { /* proceed */ }
}
let opts = test_opts(config);
let tests = make_tests(config);
// sadly osx needs some file descriptor limits raised for running tests in
// parallel (especially when we have lots and lots of child processes).
// For context, see #8904
unsafe { raise_fd_limit::raise_fd_limit(); }
// Prevent issue #21352 UAC blocking .exe containing 'patch' etc. on Windows
// If #11207 is resolved (adding manifest to .exe) this becomes unnecessary
env::set_var("__COMPAT_LAYER", "RunAsInvoker");
let res = test::run_tests_console(&opts, tests.into_iter().collect());
match res {
Ok(true) => {}
Ok(false) => panic!("Some tests failed"),
Err(e) => {
println!("I/O failure during tests: {:?}", e);
}
}
}
pub fn test_opts(config: &Config) -> test::TestOpts {
test::TestOpts {
filter: match config.filter {
None => None,
Some(ref filter) => Some(filter.clone()),
},
run_ignored: config.run_ignored,
logfile: config.logfile.clone(),
run_tests: true,
bench_benchmarks: true,
nocapture: env::var("RUST_TEST_NOCAPTURE").is_ok(),
color: test::AutoColor,
}
}
pub fn make_tests(config: &Config) -> Vec<test::TestDescAndFn> {
debug!("making tests from {:?}",
config.src_base.display());
let mut tests = Vec::new();
let dirs = fs::read_dir(&config.src_base).unwrap();
for file in dirs {
let file = file.unwrap().path();
debug!("inspecting file {:?}", file.display());
if is_test(config, &file) {
tests.push(make_test(config, &file))
}
}
tests
}
pub fn is_test(config: &Config, testfile: &Path) -> bool {
// Pretty-printer does not work with .rc files yet
let valid_extensions =
match config.mode {
Pretty => vec!(".rs".to_owned()),
_ => vec!(".rc".to_owned(), ".rs".to_owned())
};
let invalid_prefixes = vec!(".".to_owned(), "#".to_owned(), "~".to_owned());
let name = testfile.file_name().unwrap().to_str().unwrap();
let mut valid = false;
for ext in &valid_extensions {
if name.ends_with(ext) {
valid = true;
}
}
for pre in &invalid_prefixes {
if name.starts_with(pre) {
valid = false;
}
}
return valid;
}
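// Illustrative sketch of the rules above (hypothetical file names, not part
// of the original harness): in Pretty mode "foo.rs" is accepted and "foo.rc"
// is rejected, while names starting with '.', '#' or '~' (e.g. ".foo.rs") are
// always rejected.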
pub fn make_test(config: &Config, testfile: &Path) -> test::TestDescAndFn
{
test::TestDescAndFn {
desc: test::TestDesc {
name: make_test_name(config, testfile),
ignore: header::is_test_ignored(config, testfile),
should_panic: test::ShouldPanic::No,
},
testfn: make_test_closure(config, &testfile),
}
}
pub fn make_test_name(config: &Config, testfile: &Path) -> test::TestName {
// Try to elide redundant long paths
fn shorten(path: &Path) -> String {
let filename = path.file_name().unwrap().to_str();
let p = path.parent().unwrap();
let dir = p.file_name().unwrap().to_str();
format!("{}/{}", dir.unwrap_or(""), filename.unwrap_or(""))
}
test::DynTestName(format!("[{}] {}", config.mode, shorten(testfile)))
}
pub fn make_test_closure(config: &Config, testfile: &Path) -> test::TestFn {
let config = (*config).clone();
let testfile = testfile.to_path_buf();
test::DynTestFn(Box::new(move || {
runtest::run(config, &testfile)
}))
}
fn extract_gdb_version(full_version_line: Option<String>) -> Option<String> {
match full_version_line {
Some(ref full_version_line)
if !full_version_line.trim().is_empty() => {
let full_version_line = full_version_line.trim();
// used to be a regex "(^|[^0-9])([0-9]\.[0-9]+)"
for (pos, c) in full_version_line.char_indices() {
if !c.is_digit(10) { continue }
if pos + 2 >= full_version_line.len() { continue }
if full_version_line.char_at(pos + 1) != '.' { continue }
if !full_version_line.char_at(pos + 2).is_digit(10) { continue }
if pos > 0 && full_version_line.char_at_reverse(pos).is_digit(10) {
continue
}
let mut end = pos + 3;
while end < full_version_line.len() &&
full_version_line.char_at(end).is_digit(10) {
end += 1;
}
return Some(full_version_line[pos..end].to_owned());
}
println!("Could not extract GDB version from line '{}'",
full_version_line);
None
},
_ => None
}
}
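// A minimal sketch of the behaviour above (hypothetical banner string, for
// illustration only):
//
//     extract_gdb_version(Some("GNU gdb (GDB) 7.6".to_owned()))
//         == Some("7.6".to_owned())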
fn extract_lldb_version(full_version_line: Option<String>) -> Option<String> {
// Extract the major LLDB version from the given version string.
// LLDB version strings are different for Apple and non-Apple platforms.
// At the moment, this function only supports the Apple variant, which looks
// like this:
//
// LLDB-179.5 (older versions)
// lldb-300.2.51 (new versions)
//
// We are only interested in the major version number, so this function
// will return `Some("179")` and `Some("300")` respectively.
if let Some(ref full_version_line) = full_version_line {
if !full_version_line.trim().is_empty() {
let full_version_line = full_version_line.trim();
for (pos, l) in full_version_line.char_indices() {
if l != 'l' && l != 'L' { continue }
if pos + 5 >= full_version_line.len() { continue }
let l = full_version_line.char_at(pos + 1);
if l != 'l' && l != 'L' { continue }
let d = full_version_line.char_at(pos + 2);
if d != 'd' && d != 'D' { continue }
let b = full_version_line.char_at(pos + 3);
if b != 'b' && b != 'B' { continue }
let dash = full_version_line.char_at(pos + 4);
if dash != '-' { continue }
let vers = full_version_line[pos + 5..].chars().take_while(|c| {
c.is_digit(10)
}).collect::<String>();
if !vers.is_empty() { return Some(vers) }
}
println!("Could not extract LLDB version from line '{}'",
full_version_line);
}
}
None
}
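// Sketch of the Apple-style parsing described above (illustrative input, not
// an exhaustive test):
//
//     extract_lldb_version(Some("lldb-300.2.51".to_owned()))
//         == Some("300".to_owned())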
| 38.879227 | 100 | 0.57915 |
08d328e5c1983deb46e5ddec75d95db3a2cfdffa | 279 | mod lib;
use structopt::StructOpt;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let args = lib::Args::from_args();
let mut driver = dynamixel_driver::DynamixelDriver::new(&args.port)?;
driver.write_id(2, 1).await.unwrap();
Ok(())
}
| 25.363636 | 73 | 0.637993 |
4be20f844ae46bd4fa7321d1f070d2d8779c3101 | 6,467 | use crate::{agents::api, components::toast_container::ToastContainer, model::Config, pages::*};
use log::trace;
use yew::prelude::*;
use yew_router::{prelude::*, switch::Permissive, Switch};
#[derive(Debug, Switch, Clone)]
pub enum AppRoute {
#[to = "/!"]
Index,
#[to = "/favorites"]
Favorites,
#[to = "/songs"]
Songs,
#[to = "/artists"]
Artists,
#[to = "/artist/{artist_id}"]
Artist(u64),
#[to = "/queue"]
Queue,
#[to = "/player"]
Player,
#[to = "/page-not-found"]
NotFound(Permissive<String>),
}
#[allow(dead_code)]
pub struct Model {
link: ComponentLink<Self>,
router_agent: Box<dyn Bridge<RouteAgent>>,
current_route: Option<String>,
api_agent: Box<dyn Bridge<api::ApiAgent>>,
config: Option<Config>,
}
pub enum Msg {
UpdateHeader(String),
ApiResponse(api::Response),
}
impl Component for Model {
type Message = Msg;
type Properties = ();
fn create(_: Self::Properties, link: ComponentLink<Self>) -> Self {
let callback = link.callback(|route: Route| Msg::UpdateHeader(route.route));
let router_agent = RouteAgent::bridge(callback);
let callback = link.callback(Msg::ApiResponse);
let api_agent = api::ApiAgent::bridge(callback);
Model {
link,
router_agent,
current_route: None,
api_agent,
config: None,
}
}
fn mounted(&mut self) -> ShouldRender {
self.api_agent.send(api::Request::Config);
false
}
fn update(&mut self, msg: Self::Message) -> ShouldRender {
match msg {
Msg::UpdateHeader(route) => {
self.current_route = Some(route);
}
Msg::ApiResponse(response) => {
if let api::Response::Success(api::ResponseData::Config(config)) = response {
self.config = Some(config);
}
}
}
true
}
fn view(&self) -> Html {
html! {
<div>
<ToastContainer />
{ self.view_header() }
<main class="content" role="main">
{ self.view_page() }
</main>
</div>
}
}
}
impl Model {
fn view_header(&self) -> Html {
let current_route = self.current_route.clone().unwrap_or_else(|| "/".into());
let player_active = if let Some(config) = &self.config {
config.use_web_player
} else {
false
};
trace!("Current route is: {}", current_route);
html! {
<div class="header">
<img src="/logo.png" class="header__logo" width="64" />
<nav class="header__navigation">
<RouterAnchor<AppRoute> route=AppRoute::Index
classes={ if current_route=="/" { "header__navigation-item--active" } else { "header__navigation-item" }}>
{ "Home" }</RouterAnchor<AppRoute>>
<RouterAnchor<AppRoute> route=AppRoute::Favorites
classes={ if current_route=="/favorites" { "header__navigation-item--active" } else { "header__navigation-item" }}>
{ "Favorites" }</RouterAnchor<AppRoute>>
<RouterAnchor<AppRoute> route=AppRoute::Songs
classes={ if current_route=="/songs" { "header__navigation-item--active" } else { "header__navigation-item" }}>
{ "Songs" }</RouterAnchor<AppRoute>>
<RouterAnchor<AppRoute> route=AppRoute::Artists
classes={ if current_route=="/artists" { "header__navigation-item--active" } else { "header__navigation-item" }}>
{ "Artists" }</RouterAnchor<AppRoute>>
<RouterAnchor<AppRoute> route=AppRoute::Queue
classes={ if current_route=="/queue" { "header__navigation-item--active" } else { "header__navigation-item" }}>
{ "Queue" }</RouterAnchor<AppRoute>>
{
if player_active {
html! {
<RouterAnchor<AppRoute> route=AppRoute::Player
classes={ if current_route=="/player" { "header__navigation-item--active" } else { "header__navigation-item" }}>
{ "Player" }</RouterAnchor<AppRoute>>
}
} else {
html! {}
}
}
</nav>
</div>
}
}
fn view_page(&self) -> Html {
if let Some(config) = &self.config {
let port_ws = config.port_ws;
let fullscreen = config.player.fullscreen;
let scale = config.player.scale;
let disable_background = config.player.disable_background;
html! {
<Router<AppRoute, ()>
render = Router::render(move |switch: AppRoute| {
match switch {
AppRoute::Index => html!{<IndexPage />},
AppRoute::Favorites => html! {<SongsPage favorites_only=true/>},
AppRoute::Songs => html! {<SongsPage favorites_only=false/>},
AppRoute::Artist(id) => html!{<ArtistPage artist_id=id />},
AppRoute::Artists => html!{<ArtistsPage />},
AppRoute::Queue => html!{<QueuePage />},
AppRoute::Player => html!{<PlayerPage port_ws=port_ws fullscreen=fullscreen
scale=scale disable_background=disable_background/>},
AppRoute::NotFound(Permissive(None)) => html!{"Page not found"},
AppRoute::NotFound(Permissive(Some(missed_route))) => html!{format!("Page '{}' not found", missed_route)},
_ => html!{"Page not found"},
}
})
redirect = Router::redirect(|route: Route| {
AppRoute::NotFound(Permissive(Some(route.route)))
})
/>
}
} else {
html! {}
}
}
}
| 37.381503 | 148 | 0.491573 |
237bfc2d2c089655bb051904107e3977d2db5f58 | 32,226 | //! # Task
//!
//! `task` contains the implementation of Task
//
// Shell-Core
// Developed by Christian Visintin
//
// MIT License
// Copyright (c) 2020 Christian Visintin
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
use super::process::Process;
use super::{Redirection, Task, TaskError, TaskErrorCode, TaskRelation};
use crate::{FileRedirectionType, UnixSignal};
use std::fs::OpenOptions;
use std::io::Write;
impl Task {
/// ## new
///
/// Instantiate a new unrelated Task. To chain tasks, use new_pipeline()
pub fn new(command: Vec<String>, stdout_redir: Redirection, stderr_redir: Redirection) -> Task {
Task {
command: command,
stdout_redirection: stdout_redir,
stderr_redirection: stderr_redir,
process: None,
relation: TaskRelation::Unrelated,
next: None,
exit_code: None,
}
}
/// ## new_pipeline
///
/// Add to Task a new task
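    ///
    /// Illustrative sketch (hypothetical commands): starting from `echo foo`,
    /// calling `new_pipeline(vec!["echo".into(), "bar".into()],
    /// Redirection::Stdout, Redirection::Stderr, TaskRelation::And)` appends
    /// `echo bar` at the end of the chain and records `And` on the task that
    /// precedes it.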
pub fn new_pipeline(
&mut self,
next_command: Vec<String>,
stdout_redir: Redirection,
stderr_redir: Redirection,
relation: TaskRelation,
) {
        //If next is None, set next to the new Task; otherwise forward the new task down the chain
match &mut self.next {
None => self.next = {
//Set current relation to relation
self.relation = relation;
Some(Box::new(Task::new(next_command, stdout_redir, stderr_redir)))
},
Some(task) => task.new_pipeline(next_command, stdout_redir, stderr_redir, relation)
}
}
/// ### truncate
///
/// Truncate Task pipeline at a certain index. The provided index will be of the last element kept in the pipeline
/// Returns the broken relation
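    /// (illustrative sketch: for a chain `a && b || c`, `truncate(0)` keeps
    /// only `a`, resets its relation to `Unrelated`, and returns the broken
    /// `TaskRelation::And`)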
pub(crate) fn truncate(&mut self, index: usize) -> TaskRelation {
let t: &mut Task = self;
let mut i: usize = 0;
loop {
if t.next.is_none() {
return TaskRelation::Unrelated;
}
            //Shadow `t` here: the borrow checker rejects reassigning `t` to a
            //borrow of its own `next` field, so each iteration rebinds it instead.
let t: &mut Task = match i {
0 => t,
_ => t.next.as_mut().unwrap()
};
if i == index {
t.next = None; //Set next to None
let last_relation: TaskRelation = t.relation.clone();
t.relation = TaskRelation::Unrelated; //Set relation to unrelated
return last_relation;
}
            //NOTE: reassigning `t` directly, as below, is rejected by the
            //borrow checker, hence the shadowing above:
            /*
            t = &mut t.next.as_mut().unwrap();
            */
i = i + 1;
}
}
/// ## start
///
/// Start process
    /// NOTE: if the relation is Pipe, the next command is executed first.
    /// In a pipeline the processes are started in sequence from the last one to the first.
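    /// (e.g. in `a | b`, `b` is spawned before `a` so that `a`'s piped output
    /// has a running consumer; an illustrative note, not an exhaustive contract)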
pub fn start(&mut self) -> Result<(), TaskError> {
if self.process.is_some() {
return Err(TaskError::new(TaskErrorCode::AlreadyRunning, String::from("Could not start process since it is already running")))
}
if self.relation == TaskRelation::Pipe {
//Start next process
if self.next.is_some() {
if let Err(_) = self.next.as_mut().unwrap().start() {
return Err(TaskError::new(TaskErrorCode::BrokenPipe, String::from("Failed to start next process in the pipeline")));
}
}
}
//After starting the pipe, execute this process
self.process = match Process::exec(&self.command) {
Ok(p) => Some(p),
Err(_) => {
return Err(TaskError::new(
TaskErrorCode::CouldNotStartTask,
format!("Could not start process {}", self.command[0].clone()),
))
}
};
//Return OK
Ok(())
}
/// read
///
/// Read or redirect command output
pub fn read(&mut self) -> Result<(Option<String>, Option<String>), TaskError> {
match &mut self.process {
None => Err(TaskError::new(
TaskErrorCode::ProcessTerminated,
String::from("Process is not running"),
)),
Some(p) => {
match p.read() {
Ok((stdout, stderr)) => {
let mut res_stdout: String = String::new();
let mut res_stderr: String = String::new();
//Check redirections for stdout
if let Err(err) = self.redirect_output(self.stdout_redirection.clone(), stdout, &mut res_stdout, &mut res_stderr) {
return Err(err);
}
//Check redirections fdr stderr
if let Err(err) = self.redirect_output(self.stderr_redirection.clone(), stderr, &mut res_stdout, &mut res_stderr) {
return Err(err);
}
let res_stdout: Option<String> = match res_stdout.len() {
0 => None,
_ => Some(res_stdout),
};
let res_stderr: Option<String> = match res_stderr.len() {
0 => None,
_ => Some(res_stderr),
};
Ok((res_stdout, res_stderr))
}
Err(e) => Err(TaskError::new(
TaskErrorCode::IoError,
format!("Could not read from process: {}", e),
)),
}
}
}
}
/// ### write
///
/// Write to process stdin
pub fn write(&mut self, input: String) -> Result<(), TaskError> {
match &mut self.process {
None => Err(TaskError::new(
TaskErrorCode::ProcessTerminated,
String::from("Process is not running"),
)),
Some(p) => match p.write(input) {
Ok(()) => Ok(()),
Err(err) => Err(TaskError::new(
TaskErrorCode::IoError,
format!("Could not write to process stdin {}", err),
)),
},
}
}
/// ### kill
///
/// Kill running task
pub fn kill(&mut self) -> Result<(), TaskError> {
match &mut self.process {
None => Err(TaskError::new(
TaskErrorCode::ProcessTerminated,
String::from("Process is not running"),
)),
Some(p) => match p.kill() {
Ok(()) => Ok(()),
Err(()) => Err(TaskError::new(
TaskErrorCode::KillError,
String::from("It was not possible to kill process"),
)),
},
}
}
/// ### raise
///
/// Raise a signal on the process
pub fn raise(&mut self, signal: UnixSignal) -> Result<(), TaskError> {
match &mut self.process {
None => Err(TaskError::new(
TaskErrorCode::ProcessTerminated,
String::from("Process is not running"),
)),
Some(p) => match p.raise(signal) {
Ok(()) => Ok(()),
Err(()) => Err(TaskError::new(
TaskErrorCode::KillError,
String::from("It was not possible to send signal to process"),
)),
},
}
}
/// ### is_running
///
/// Returns whether the process is running. If the process has terminated, the exitcode will be set
pub fn is_running(&mut self) -> bool {
match &mut self.process {
Some(p) => match p.is_running() {
true => true,
false => {
self.exit_code = p.exit_status.clone();
false
}
},
None => false,
}
}
/// ### get_exitcode
///
/// Return task's exitcode
pub fn get_exitcode(&self) -> Option<u8> {
self.exit_code.clone()
}
/// ### redirect_output
///
/// Handle output redirections in a single method
/// NOTE: This method redirects output to pipes too
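    /// (sketch of the dispatch below: with `Redirection::Stdout` and a `Pipe`
    /// relation the text is written to the next task's stdin; otherwise it is
    /// appended to the stdout/stderr buffers returned by `read`)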
fn redirect_output(&mut self, redirection: Redirection, output: Option<String>, stdout: &mut String, stderr: &mut String) -> Result<(), TaskError> {
match redirection {
Redirection::Stdout => {
if output.is_some() {
//If relation is Pipe, write output to NEXT TASK, otherwise push to stdout string
if self.relation == TaskRelation::Pipe {
match self.next.as_mut().unwrap().write(output.unwrap()) {
Ok(()) => return Ok(()),
Err(err) => return Err(TaskError::new(TaskErrorCode::BrokenPipe, err.message))
}
} else {
stdout.push_str(&output.unwrap());
}
}
},
Redirection::Stderr => {
if output.is_some() {
stderr.push_str(&output.unwrap());
}
}
Redirection::File(file, file_mode) => {
if output.is_some() {
return self.redirect_to_file(file, file_mode, output.unwrap());
}
}
}
Ok(())
}
/// ### redirect_to_file
///
/// Redirect a certain output to a certain file
fn redirect_to_file(&self, file: String, file_mode: FileRedirectionType, out: String) -> Result<(), TaskError> {
        match OpenOptions::new()
            .create(true)
            .write(true)
            .append(file_mode == FileRedirectionType::Append)
            .truncate(file_mode == FileRedirectionType::Truncate)
            .open(file.as_str())
        {
Ok(mut f) => {
if let Err(e) = write!(f, "{}", out) {
Err(TaskError::new(
TaskErrorCode::IoError,
format!("Could not write to file {}: {}", file, e),
))
} else {
Ok(())
}
}
Err(e) => Err(TaskError::new(
TaskErrorCode::IoError,
format!("Could not open file {}: {}", file, e),
)),
}
}
}
//@! Clone trait for Task
impl Clone for Task {
fn clone(&self) -> Task {
Task {
command: self.command.clone(),
process: None,
stdout_redirection: self.stdout_redirection.clone(),
stderr_redirection: self.stderr_redirection.clone(),
relation: self.relation,
exit_code: None,
next: match &self.next {
None => None,
Some(task) => Some(task.clone())
}
}
}
}
//@! Module Test
#[cfg(test)]
mod tests {
use super::*;
use std::fs::File;
use std::io::Read;
use std::thread::sleep;
use std::time::Duration;
#[test]
fn test_task_new() {
let command: Vec<String> = vec![String::from("echo"), String::from("foobar")];
let task: Task = Task::new(command, Redirection::Stdout, Redirection::Stderr);
//Verify constructor
assert!(task.exit_code.is_none());
assert!(task.process.is_none());
assert!(task.next.is_none());
assert_eq!(task.relation, TaskRelation::Unrelated);
assert_eq!(task.stderr_redirection, Redirection::Stderr);
assert_eq!(task.stdout_redirection, Redirection::Stdout);
assert_eq!(task.command[0], String::from("echo"));
assert_eq!(task.command[1], String::from("foobar"));
}
#[test]
fn test_task_truncate() {
let command: Vec<String> = vec![String::from("echo"), String::from("foo")];
let mut task: Task = Task::new(command, Redirection::Stdout, Redirection::Stderr);
//Add pipe
let command: Vec<String> = vec![String::from("echo"), String::from("bar")];
task.new_pipeline(
command,
Redirection::Stdout,
Redirection::Stderr,
TaskRelation::And,
);
//Add pipe
let command: Vec<String> = vec![String::from("echo"), String::from("pippo")];
task.new_pipeline(
command,
Redirection::Stdout,
Redirection::Stderr,
TaskRelation::Or,
);
assert_eq!(task.relation, TaskRelation::And);
//Verify next is something
assert!(task.next.is_some());
//Reset next
assert_eq!(task.truncate(1), TaskRelation::Or);
assert_eq!(task.relation, TaskRelation::And);
assert!(task.next.is_some());
//Let's try to truncate again
assert_eq!(task.truncate(2), TaskRelation::Unrelated);
//Verify truncation
let task: Task = *task.next.unwrap();
assert_eq!(task.relation, TaskRelation::Unrelated);
assert!(task.next.is_none());
}
#[test]
fn test_task_start_run() {
let command: Vec<String> = vec![String::from("echo"), String::from("foobar")];
let mut task: Task = Task::new(command, Redirection::Stdout, Redirection::Stderr);
//Start process
assert!(task.start().is_ok());
//Wait 100ms
sleep(Duration::from_millis(100));
//Read stdout/stderr
let (stdout, stderr) = task.read().unwrap();
//Verify stdout
assert_eq!(stdout.unwrap(), String::from("foobar\n"));
//Verify stderr
assert!(stderr.is_none());
//Process should not be running anymore
assert!(!task.is_running());
//Get exitcode
assert_eq!(task.get_exitcode().unwrap(), 0);
}
#[test]
fn test_task_start_failed() {
let command: Vec<String> = vec![String::from("thiscommandfails")];
let mut task: Task = Task::new(command, Redirection::Stdout, Redirection::Stderr);
//Start process
assert_eq!(
task.start().err().unwrap().code,
TaskErrorCode::CouldNotStartTask
);
}
#[test]
fn test_task_run_twice() {
let command: Vec<String> = vec![String::from("cat")];
let mut task: Task = Task::new(command, Redirection::Stdout, Redirection::Stderr);
//Start process
assert!(task.start().is_ok());
//Wait 100ms
sleep(Duration::from_millis(100));
        //Try to start the process a second time
assert_eq!(task.start().err().unwrap().code, TaskErrorCode::AlreadyRunning);
//Process should be still running
assert!(task.is_running());
//Kill process
assert!(task.kill().is_ok());
//Verify process terminated
assert!(!task.is_running());
//Exit code should be 9
assert_eq!(task.get_exitcode().unwrap(), 9);
}
#[test]
fn test_task_write() {
let command: Vec<String> = vec![String::from("cat")];
let mut task: Task = Task::new(command, Redirection::Stdout, Redirection::Stderr);
//Start process
assert!(task.start().is_ok());
//Write to process
assert!(task.write(String::from("hi there!\n")).is_ok());
//Wait 100ms
sleep(Duration::from_millis(100));
//Read process output
let (stdout, stderr) = task.read().unwrap();
//Verify stdout
assert_eq!(stdout.unwrap(), String::from("hi there!\n"));
//Verify stderr
assert!(stderr.is_none());
//Process should be still running
assert!(task.is_running());
//Kill process
assert!(task.kill().is_ok());
//Verify process terminated
assert!(!task.is_running());
//Try to write when the process has already terminated
assert!(task.write(String::from("hi there!\n")).is_err());
//Exit code should be 9
assert_eq!(task.get_exitcode().unwrap(), 9);
}
#[test]
fn test_task_kill() {
let command: Vec<String> = vec![String::from("yes")];
let mut task: Task = Task::new(command, Redirection::Stdout, Redirection::Stderr);
//Start process
assert!(task.start().is_ok());
//Wait 100ms
sleep(Duration::from_millis(100));
//Process should be still running
assert!(task.is_running());
        //Send SIGINT to the process
assert!(task.raise(UnixSignal::Sigint).is_ok());
//Verify process terminated
assert!(!task.is_running());
        //Exit code should be 2 (SIGINT)
assert_eq!(task.get_exitcode().unwrap(), 2);
}
#[test]
fn test_task_pipeline_simple() {
let command: Vec<String> = vec![String::from("echo"), String::from("foo")];
let mut task: Task = Task::new(command, Redirection::Stdout, Redirection::Stderr);
//Add pipe
let command: Vec<String> = vec![String::from("echo"), String::from("bar")];
task.new_pipeline(
command,
Redirection::Stdout,
Redirection::Stderr,
TaskRelation::And,
);
assert_eq!(task.relation, TaskRelation::And);
//Verify next is something
assert!(task.next.is_some());
//Start process
assert!(task.start().is_ok());
//Wait 100ms
sleep(Duration::from_millis(100));
//Read stdout/stderr
let (stdout, stderr) = task.read().unwrap();
//Verify stdout
assert_eq!(stdout.unwrap(), String::from("foo\n"));
//Verify stderr
assert!(stderr.is_none());
//Process should not be running anymore
assert!(!task.is_running());
//Get exitcode
assert_eq!(task.get_exitcode().unwrap(), 0);
//Start next process
let mut task: Task = *task.next.unwrap();
//Verify next of second process is None
assert_eq!(task.relation, TaskRelation::Unrelated);
//Start second process
assert!(task.start().is_ok());
//Wait 100ms
sleep(Duration::from_millis(100));
//Read stdout/stderr
let (stdout, stderr) = task.read().unwrap();
//Verify stdout
assert_eq!(stdout.unwrap(), String::from("bar\n"));
//Verify stderr
assert!(stderr.is_none());
//Process should not be running anymore
assert!(!task.is_running());
//Get exitcode
assert_eq!(task.get_exitcode().unwrap(), 0);
}
#[test]
fn test_task_pipeline_with_3_tasks() {
let command: Vec<String> = vec![String::from("echo"), String::from("foo")];
let mut task: Task = Task::new(command, Redirection::Stdout, Redirection::Stderr);
//Add pipe
let command: Vec<String> = vec![String::from("echo"), String::from("bar")];
task.new_pipeline(
command,
Redirection::Stdout,
Redirection::Stderr,
TaskRelation::And,
);
let command: Vec<String> = vec![String::from("echo"), String::from("pippo")];
task.new_pipeline(
command,
Redirection::Stdout,
Redirection::Stderr,
TaskRelation::And,
);
assert_eq!(task.relation, TaskRelation::And);
//Verify next is something
assert!(task.next.is_some());
//Start process
assert!(task.start().is_ok());
//Wait 100ms
sleep(Duration::from_millis(100));
//Read stdout/stderr
let (stdout, stderr) = task.read().unwrap();
//Verify stdout
assert_eq!(stdout.unwrap(), String::from("foo\n"));
//Verify stderr
assert!(stderr.is_none());
//Process should not be running anymore
assert!(!task.is_running());
//Get exitcode
assert_eq!(task.get_exitcode().unwrap(), 0);
//@! Start SECOND process
let mut task: Task = *task.next.unwrap();
//Verify next of second process is None
assert_eq!(task.relation, TaskRelation::And);
//Start second process
assert!(task.start().is_ok());
//Wait 100ms
sleep(Duration::from_millis(100));
//Read stdout/stderr
let (stdout, stderr) = task.read().unwrap();
//Verify stdout
assert_eq!(stdout.unwrap(), String::from("bar\n"));
//Verify stderr
assert!(stderr.is_none());
//Process should not be running anymore
assert!(!task.is_running());
//Get exitcode
assert_eq!(task.get_exitcode().unwrap(), 0);
//@! Start THIRD process
let mut task: Task = *task.next.unwrap();
//Verify next of second process is None
assert_eq!(task.relation, TaskRelation::Unrelated);
//Start second process
assert!(task.start().is_ok());
//Wait 100ms
sleep(Duration::from_millis(100));
//Read stdout/stderr
let (stdout, stderr) = task.read().unwrap();
//Verify stdout
assert_eq!(stdout.unwrap(), String::from("pippo\n"));
//Verify stderr
assert!(stderr.is_none());
//Process should not be running anymore
assert!(!task.is_running());
//Get exitcode
assert_eq!(task.get_exitcode().unwrap(), 0);
}
#[test]
fn test_task_pipeline_with_pipe_mode() {
let command: Vec<String> = vec![String::from("echo"), String::from("foobar")];
let mut task: Task = Task::new(command, Redirection::Stdout, Redirection::Stderr);
//Add pipe
let command: Vec<String> = vec![String::from("head"), String::from("-n"), String::from("1")];
task.new_pipeline(
command,
Redirection::Stdout,
Redirection::Stderr,
TaskRelation::Pipe,
);
assert_eq!(task.relation, TaskRelation::Pipe);
//Verify next is something
assert!(task.next.is_some());
//Start process
assert!(task.start().is_ok());
//Wait 100ms
sleep(Duration::from_millis(100));
        //Read stdout/stderr; output should be redirected to the head process, so should be None
let (stdout, stderr) = task.read().unwrap();
assert!(stdout.is_none());
assert!(stderr.is_none());
//Process should not be running anymore
assert!(!task.is_running());
//Get exitcode
assert_eq!(task.get_exitcode().unwrap(), 0);
let mut task: Task = *task.next.unwrap();
//Verify next task output is foobar
let (stdout, _stderr) = task.read().unwrap();
assert_eq!(stdout.unwrap(), String::from("foobar\n"));
//Wait 500ms
sleep(Duration::from_millis(500));
//2nd Process should not be running anymore
assert!(!task.is_running());
//Get exitcode
assert_eq!(task.get_exitcode().unwrap(), 0);
}
#[test]
fn test_task_pipeline_with_pipe_mode_brokenpipe() {
let command: Vec<String> = vec![String::from("echo"), String::from("foo")];
let mut task: Task = Task::new(command, Redirection::Stdout, Redirection::Stderr);
//Add pipe
let command: Vec<String> = vec![String::from("echo"), String::from("bar")];
task.new_pipeline(
command,
Redirection::Stdout,
Redirection::Stderr,
TaskRelation::Pipe,
);
assert_eq!(task.relation, TaskRelation::Pipe);
//Verify next is something
assert!(task.next.is_some());
//Start process
assert!(task.start().is_ok());
//Wait 500ms
sleep(Duration::from_millis(500));
//@! Since the second process has terminated before the first, it'll return broken pipe
assert_eq!(task.read().err().unwrap().code, TaskErrorCode::BrokenPipe);
//Process should not be running anymore
assert!(!task.is_running());
//Get exitcode
assert_eq!(task.get_exitcode().unwrap(), 0);
}
#[test]
fn test_task_pipeline_with_pipe_mode_failed() {
let command: Vec<String> = vec![String::from("echo"), String::from("foo")];
let mut task: Task = Task::new(command, Redirection::Stdout, Redirection::Stderr);
//Add pipe
let command: Vec<String> = vec![String::from("THISCOMMANDDOESNOTEXIST")];
task.new_pipeline(
command,
Redirection::Stdout,
Redirection::Stderr,
TaskRelation::Pipe,
);
assert_eq!(task.relation, TaskRelation::Pipe);
//Verify next is something
assert!(task.next.is_some());
//Start process
assert_eq!(task.start().err().unwrap().code, TaskErrorCode::BrokenPipe);
//Wait 100ms
sleep(Duration::from_millis(100));
//Process should not be running
assert!(!task.is_running());
}
#[test]
fn test_task_redirect_stdout_to_file() {
let command: Vec<String> = vec![String::from("echo"), String::from("foobar")];
let mut tmpfile = create_tmpfile();
let tmpfile_path: String = String::from(tmpfile.path().to_str().unwrap());
let mut task: Task = Task::new(
command,
Redirection::File(tmpfile_path, FileRedirectionType::Append),
Redirection::Stderr,
);
//Start second process
assert!(task.start().is_ok());
//Wait 100ms
sleep(Duration::from_millis(100));
//Read stdout/stderr
let (stdout, stderr) = task.read().unwrap();
        //Stdout and stderr should both be None (since output has been redirected to the file)
assert!(stdout.is_none());
assert!(stderr.is_none());
//Process should not be running anymore
assert!(!task.is_running());
//Get exitcode
assert_eq!(task.get_exitcode().unwrap(), 0);
//Read file
let output: String = read_file(&mut tmpfile);
assert_eq!(output, String::from("foobar\n"));
}
#[test]
fn test_task_stderr() {
let command: Vec<String> = vec![
String::from("ping"),
String::from("8.8.8.8.8.8.8.8.8.8.8.8.8.8.8"),
];
let mut task: Task = Task::new(command, Redirection::Stdout, Redirection::Stderr);
//Start second process
assert!(task.start().is_ok());
//Wait 100ms
sleep(Duration::from_millis(100));
//Read stdout/stderr
let (stdout, stderr) = task.read().unwrap();
        //Stdout should be None; ping's error output goes to stderr
assert!(stdout.is_none());
assert!(stderr.is_some());
assert!(stderr.unwrap().len() > 4); //Should at least be ping:
sleep(Duration::from_millis(100));
//Process should not be running anymore
assert!(!task.is_running());
//Get exitcode
assert!(task.get_exitcode().unwrap() != 0); //Exitcode won't be 0
}
#[test]
fn test_task_redirect_stderr_to_file() {
let command: Vec<String> = vec![
String::from("ping"),
String::from("8.8.8.8.8.8.8.8.8.8.8.8.8.8.8"),
];
let mut tmpfile = create_tmpfile();
let tmpfile_path: String = String::from(tmpfile.path().to_str().unwrap());
let mut task: Task = Task::new(
command,
Redirection::Stdout,
Redirection::File(tmpfile_path, FileRedirectionType::Append),
);
//Start second process
assert!(task.start().is_ok());
//Wait 100ms
sleep(Duration::from_millis(100));
//Read stdout/stderr
let (stdout, stderr) = task.read().unwrap();
        //Stdout and stderr should both be None (since stderr has been redirected to the file)
assert!(stdout.is_none());
assert!(stderr.is_none());
//Process should not be running anymore
sleep(Duration::from_millis(100));
assert!(!task.is_running());
//Get exitcode
assert!(task.get_exitcode().unwrap() != 0); //Exitcode won't be 0
//Read file
let output: String = read_file(&mut tmpfile);
println!("Stderr output: {}", output);
assert!(output.len() > 4); //Should at least be ping:
}
#[test]
fn test_task_kill_not_running() {
let command: Vec<String> = vec![String::from("yes")];
let mut task: Task = Task::new(command, Redirection::Stdout, Redirection::Stderr);
//OOps, I forgot to start process
//Process should be still running
assert!(!task.is_running());
//Kill process
assert_eq!(
task.raise(UnixSignal::Sigint).err().unwrap().code,
TaskErrorCode::ProcessTerminated
);
assert_eq!(
task.kill().err().unwrap().code,
TaskErrorCode::ProcessTerminated
);
        //Exit code should be None since the process was never started
assert!(task.get_exitcode().is_none());
}
#[test]
fn test_task_clone() {
let command: Vec<String> = vec![String::from("echo"), String::from("foo")];
let mut task: Task = Task::new(command, Redirection::Stdout, Redirection::Stderr);
//Add pipe
let command: Vec<String> = vec![String::from("echo"), String::from("bar")];
task.new_pipeline(
command,
Redirection::Stdout,
Redirection::Stderr,
TaskRelation::And,
);
//Run task just to see if is cloned successfully
//Start process
assert!(task.start().is_ok());
//Wait 100ms
sleep(Duration::from_millis(100));
//Read stdout/stderr
let (stdout, stderr) = task.read().unwrap();
//Verify stdout
assert_eq!(stdout.unwrap(), String::from("foo\n"));
//Verify stderr
assert!(stderr.is_none());
//Process should not be running anymore
assert!(!task.is_running());
//Get exitcode
assert_eq!(task.get_exitcode().unwrap(), 0);
//Clone task
let clone: Task = task.clone();
//Verify clone
assert!(clone.exit_code.is_none());
assert!(clone.process.is_none());
assert!(clone.next.is_some());
assert_eq!(clone.relation, TaskRelation::And);
assert_eq!(clone.stderr_redirection, Redirection::Stderr);
assert_eq!(clone.stdout_redirection, Redirection::Stdout);
assert_eq!(clone.command[0], String::from("echo"));
assert_eq!(clone.command[1], String::from("foo"));
}
fn create_tmpfile() -> tempfile::NamedTempFile {
tempfile::NamedTempFile::new().unwrap()
}
fn read_file(tmpfile: &mut tempfile::NamedTempFile) -> String {
let file: &mut File = tmpfile.as_file_mut();
let mut out: String = String::new();
assert!(file.read_to_string(&mut out).is_ok());
out
}
}
| 37.691228 | 181 | 0.549773 |
5b8a5b184411d3c8a0cbcffd24780c2fc3fe8394 | 2,144 | //! Send a transfer to an address.
//!
//! Run with:
//!
//! ```
//! cargo run --example send_transfers
//! ```
use anyhow::Result;
use iota::{
transaction::bundled::{Address, BundledTransactionField},
client::Transfer,
crypto::ternary::Kerl,
signing::ternary::{TernarySeed, Seed},
ternary::{T1B1Buf, TryteBuf},
};
use iota_conversion::Trinary;
#[smol_potat::main]
async fn main() -> Result<()> {
// Prepare a vector of transfers
let mut transfers = Vec::new();
// Push the transfer to vector.
transfers.push(Transfer {
// Address is 81 trytes.
address: Address::from_inner_unchecked(
TryteBuf::try_from_str(
"RVORZ9SIIP9RCYMREUIXXVPQIPHVCNPQ9HZWYKFWYWZRE9JQKG9REPKIASHUUECPSQO9JT9XNMVKWYGVA",
)
.unwrap()
.as_trits()
.encode(),
),
// We are using a zero balance seed so we make a zero value transfer here
value: 0,
message: None,
tag: None,
});
// Create a client instance
iota::Client::add_node("https://nodes.comnet.thetangle.org")?;
// Call send_transfers api
// Below is just a dummy seed which just serves as an example.
// If you want to replace your own. It probably should be a seed with balance on comnet/devnet.
let res = iota::Client::send_transfers(Some(
&TernarySeed::<Kerl>::from_buf(
TryteBuf::try_from_str(
"RVORZ9SIIP9RCYMREUIXXVPQIPHVCNPQ9HZWYKFWYWZRE9JQKG9REPKIASHUUECPSQO9JT9XNMVKWYGVA",
)
.unwrap()
.as_trits()
.encode::<T1B1Buf>(),
)
.unwrap(),
))
// Input the transfers
.transfers(transfers)
// We are sending to comnet, so mwm should be 10. It's 14 by default if you don't call this.
.min_weight_magnitude(10)
// Sending to the node and receive the response
.send()
.await?;
// The response of send_transfers is vector of Transaction type. We choose the first one and see what is its bundle hash
println!("{:?}", res[0].bundle().to_inner().as_i8_slice().trytes());
Ok(())
}
| 31.072464 | 124 | 0.614739 |
eb0f6da2a04921675a15fe3bba210a8e9e534e6e | 761 | #[cfg(test)]
mod test {
use crate::BizActivity;
use rbatis::crud::{CRUDMut, CRUD};
use rbatis::rbatis::Rbatis;
#[tokio::test]
pub async fn test_use_driver_wrapper() {
fast_log::init_log("requests.log", 1000, log::Level::Info, None, true);
let rb = Rbatis::new();
rb.link("mysql://root:123456@localhost:3306/test")
.await
.unwrap();
let name = "test";
let w = rb
.new_wrapper()
.r#in("delete_flag", &[0, 1])
.and()
.ne("delete_flag", -1)
.do_if(!name.is_empty(), |w| w.and().like("name", name));
let r: Vec<BizActivity> = rb.fetch_list_by_wrapper(w).await.unwrap();
println!("done:{:?}", r);
}
}
| 29.269231 | 79 | 0.51774 |
0ee03bc4c6e00902d0026a78e23b8be1c63a0a5d | 21,085 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
///////////////////////////////////////////////////////////////////////////
// # Type combining
//
// There are four type combiners: equate, sub, lub, and glb. Each
// implements the trait `Combine` and contains methods for combining
// two instances of various things and yielding a new instance. These
// combiner methods always yield a `Result<T>`. There is a lot of
// common code for these operations, implemented as default methods on
// the `Combine` trait.
//
// Each operation may have side-effects on the inference context,
// though these can be unrolled using snapshots. On success, the
// LUB/GLB operations return the appropriate bound. The Eq and Sub
// operations generally return the first operand.
//
// ## Contravariance
//
// When you are relating two things which have a contravariant
// relationship, you should use `contratys()` or `contraregions()`,
// rather than inverting the order of arguments! This is necessary
// because the order of arguments is not relevant for LUB and GLB. It
// is also useful to track which value is the "expected" value in
// terms of error reporting.
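//
// For instance (an illustrative sketch, not a binding rule): when relating
// the parameter types of two `fn` types, which are contravariant, a combiner
// is expected to flip the relation through a contravariant helper rather
// than swap `a` and `b`, so that "expected" vs. "found" stays correct in
// error messages.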
use super::equate::Equate;
use super::glb::Glb;
use super::{InferCtxt, MiscVariable, TypeTrace};
use super::lub::Lub;
use super::sub::Sub;
use super::type_variable::TypeVariableValue;
use hir::def_id::DefId;
use ty::{IntType, UintType};
use ty::{self, Ty, TyCtxt};
use ty::error::TypeError;
use ty::relate::{self, Relate, RelateResult, TypeRelation};
use ty::subst::Substs;
use traits::{Obligation, PredicateObligations};
use syntax::ast;
use syntax_pos::Span;
#[derive(Clone)]
pub struct CombineFields<'infcx, 'gcx: 'infcx+'tcx, 'tcx: 'infcx> {
pub infcx: &'infcx InferCtxt<'infcx, 'gcx, 'tcx>,
pub trace: TypeTrace<'tcx>,
pub cause: Option<ty::relate::Cause>,
pub param_env: ty::ParamEnv<'tcx>,
pub obligations: PredicateObligations<'tcx>,
}
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
pub enum RelationDir {
SubtypeOf, SupertypeOf, EqTo
}
impl<'infcx, 'gcx, 'tcx> InferCtxt<'infcx, 'gcx, 'tcx> {
pub fn super_combine_tys<R>(&self,
relation: &mut R,
a: Ty<'tcx>,
b: Ty<'tcx>)
-> RelateResult<'tcx, Ty<'tcx>>
where R: TypeRelation<'infcx, 'gcx, 'tcx>
{
let a_is_expected = relation.a_is_expected();
match (&a.sty, &b.sty) {
// Relate integral variables to other types
(&ty::Infer(ty::IntVar(a_id)), &ty::Infer(ty::IntVar(b_id))) => {
self.int_unification_table
.borrow_mut()
.unify_var_var(a_id, b_id)
.map_err(|e| int_unification_error(a_is_expected, e))?;
Ok(a)
}
(&ty::Infer(ty::IntVar(v_id)), &ty::Int(v)) => {
self.unify_integral_variable(a_is_expected, v_id, IntType(v))
}
(&ty::Int(v), &ty::Infer(ty::IntVar(v_id))) => {
self.unify_integral_variable(!a_is_expected, v_id, IntType(v))
}
(&ty::Infer(ty::IntVar(v_id)), &ty::Uint(v)) => {
self.unify_integral_variable(a_is_expected, v_id, UintType(v))
}
(&ty::Uint(v), &ty::Infer(ty::IntVar(v_id))) => {
self.unify_integral_variable(!a_is_expected, v_id, UintType(v))
}
// Relate floating-point variables to other types
(&ty::Infer(ty::FloatVar(a_id)), &ty::Infer(ty::FloatVar(b_id))) => {
self.float_unification_table
.borrow_mut()
.unify_var_var(a_id, b_id)
.map_err(|e| float_unification_error(relation.a_is_expected(), e))?;
Ok(a)
}
(&ty::Infer(ty::FloatVar(v_id)), &ty::Float(v)) => {
self.unify_float_variable(a_is_expected, v_id, v)
}
(&ty::Float(v), &ty::Infer(ty::FloatVar(v_id))) => {
self.unify_float_variable(!a_is_expected, v_id, v)
}
// All other cases of inference are errors
(&ty::Infer(_), _) |
(_, &ty::Infer(_)) => {
Err(TypeError::Sorts(ty::relate::expected_found(relation, &a, &b)))
}
_ => {
ty::relate::super_relate_tys(relation, a, b)
}
}
}
fn unify_integral_variable(&self,
vid_is_expected: bool,
vid: ty::IntVid,
val: ty::IntVarValue)
-> RelateResult<'tcx, Ty<'tcx>>
{
self.int_unification_table
.borrow_mut()
.unify_var_value(vid, Some(val))
.map_err(|e| int_unification_error(vid_is_expected, e))?;
match val {
IntType(v) => Ok(self.tcx.mk_mach_int(v)),
UintType(v) => Ok(self.tcx.mk_mach_uint(v)),
}
}
fn unify_float_variable(&self,
vid_is_expected: bool,
vid: ty::FloatVid,
val: ast::FloatTy)
-> RelateResult<'tcx, Ty<'tcx>>
{
self.float_unification_table
.borrow_mut()
.unify_var_value(vid, Some(ty::FloatVarValue(val)))
.map_err(|e| float_unification_error(vid_is_expected, e))?;
Ok(self.tcx.mk_mach_float(val))
}
}
impl<'infcx, 'gcx, 'tcx> CombineFields<'infcx, 'gcx, 'tcx> {
pub fn tcx(&self) -> TyCtxt<'infcx, 'gcx, 'tcx> {
self.infcx.tcx
}
pub fn equate<'a>(&'a mut self, a_is_expected: bool) -> Equate<'a, 'infcx, 'gcx, 'tcx> {
Equate::new(self, a_is_expected)
}
pub fn sub<'a>(&'a mut self, a_is_expected: bool) -> Sub<'a, 'infcx, 'gcx, 'tcx> {
Sub::new(self, a_is_expected)
}
pub fn lub<'a>(&'a mut self, a_is_expected: bool) -> Lub<'a, 'infcx, 'gcx, 'tcx> {
Lub::new(self, a_is_expected)
}
pub fn glb<'a>(&'a mut self, a_is_expected: bool) -> Glb<'a, 'infcx, 'gcx, 'tcx> {
Glb::new(self, a_is_expected)
}
/// Here dir is either EqTo, SubtypeOf, or SupertypeOf. The
/// idea is that we should ensure that the type `a_ty` is equal
/// to, a subtype of, or a supertype of (respectively) the type
/// to which `b_vid` is bound.
///
/// Since `b_vid` has not yet been instantiated with a type, we
/// will first instantiate `b_vid` with a *generalized* version
/// of `a_ty`. Generalization introduces other inference
/// variables wherever subtyping could occur.
pub fn instantiate(&mut self,
a_ty: Ty<'tcx>,
dir: RelationDir,
b_vid: ty::TyVid,
a_is_expected: bool)
-> RelateResult<'tcx, ()>
{
use self::RelationDir::*;
// Get the actual variable that b_vid has been inferred to
debug_assert!(self.infcx.type_variables.borrow_mut().probe(b_vid).is_unknown());
debug!("instantiate(a_ty={:?} dir={:?} b_vid={:?})", a_ty, dir, b_vid);
// Generalize type of `a_ty` appropriately depending on the
// direction. As an example, assume:
//
// - `a_ty == &'x ?1`, where `'x` is some free region and `?1` is an
// inference variable,
// - and `dir` == `SubtypeOf`.
//
// Then the generalized form `b_ty` would be `&'?2 ?3`, where
// `'?2` and `?3` are fresh region/type inference
// variables. (Down below, we will relate `a_ty <: b_ty`,
// adding constraints like `'x: '?2` and `?1 <: ?3`.)
let Generalization { ty: b_ty, needs_wf } = self.generalize(a_ty, b_vid, dir)?;
debug!("instantiate(a_ty={:?}, dir={:?}, b_vid={:?}, generalized b_ty={:?})",
a_ty, dir, b_vid, b_ty);
self.infcx.type_variables.borrow_mut().instantiate(b_vid, b_ty);
if needs_wf {
self.obligations.push(Obligation::new(self.trace.cause.clone(),
self.param_env,
ty::Predicate::WellFormed(b_ty)));
}
// Finally, relate `b_ty` to `a_ty`, as described in previous comment.
//
// FIXME(#16847): This code is non-ideal because all these subtype
// relations wind up attributed to the same spans. We need
// to associate causes/spans with each of the relations in
// the stack to get this right.
match dir {
EqTo => self.equate(a_is_expected).relate(&a_ty, &b_ty),
SubtypeOf => self.sub(a_is_expected).relate(&a_ty, &b_ty),
SupertypeOf => self.sub(a_is_expected).relate_with_variance(
ty::Contravariant, &a_ty, &b_ty),
}?;
Ok(())
}
/// Attempts to generalize `ty` for the type variable `for_vid`.
    /// This checks for a cycle -- that is, whether the type `ty`
    /// references `for_vid`. The `dir` is the "direction" for which we
    /// are performing the generalization (i.e., are we producing a type
/// that can be used as a supertype etc).
///
/// Preconditions:
///
/// - `for_vid` is a "root vid"
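    ///
    /// Illustrative sketch: generalizing `&'x ?1` *for* `?1` itself trips the
    /// occurs check and fails with `TypeError::CyclicTy`, whereas generalizing
    /// it for an unrelated variable yields `&'?2 ?3` with fresh inference
    /// variables (subject to the ambient variance rules below).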
fn generalize(&self,
ty: Ty<'tcx>,
for_vid: ty::TyVid,
dir: RelationDir)
-> RelateResult<'tcx, Generalization<'tcx>>
{
debug!("generalize(ty={:?}, for_vid={:?}, dir={:?}", ty, for_vid, dir);
// Determine the ambient variance within which `ty` appears.
// The surrounding equation is:
//
// ty [op] ty2
//
// where `op` is either `==`, `<:`, or `:>`. This maps quite
// naturally.
let ambient_variance = match dir {
RelationDir::EqTo => ty::Invariant,
RelationDir::SubtypeOf => ty::Covariant,
RelationDir::SupertypeOf => ty::Contravariant,
};
let mut generalize = Generalizer {
infcx: self.infcx,
span: self.trace.cause.span,
for_vid_sub_root: self.infcx.type_variables.borrow_mut().sub_root_var(for_vid),
ambient_variance,
needs_wf: false,
root_ty: ty,
};
let ty = match generalize.relate(&ty, &ty) {
Ok(ty) => ty,
Err(e) => {
debug!("generalize: failure {:?}", e);
return Err(e);
}
};
let needs_wf = generalize.needs_wf;
debug!("generalize: success {{ {:?}, {:?} }}", ty, needs_wf);
Ok(Generalization { ty, needs_wf })
}
}
struct Generalizer<'cx, 'gcx: 'cx+'tcx, 'tcx: 'cx> {
infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>,
/// Span, used when creating new type variables and things.
span: Span,
/// The vid of the type variable that is in the process of being
/// instantiated; if we find this within the type we are folding,
/// that means we would have created a cyclic type.
for_vid_sub_root: ty::TyVid,
/// Track the variance as we descend into the type.
ambient_variance: ty::Variance,
/// See the field `needs_wf` in `Generalization`.
needs_wf: bool,
/// The root type that we are generalizing. Used when reporting cycles.
root_ty: Ty<'tcx>,
}
/// Result from a generalization operation. This includes
/// not only the generalized type, but also a bool flag
/// indicating whether further WF checks are needed.
struct Generalization<'tcx> {
ty: Ty<'tcx>,
/// If true, then the generalized type may not be well-formed,
/// even if the source type is well-formed, so we should add an
/// additional check to enforce that it is. This arises in
/// particular around 'bivariant' type parameters that are only
/// constrained by a where-clause. As an example, imagine a type:
///
/// struct Foo<A, B> where A: Iterator<Item=B> {
/// data: A
/// }
///
/// here, `A` will be covariant, but `B` is
/// unconstrained. However, whatever it is, for `Foo` to be WF, it
/// must be equal to `A::Item`. If we have an input `Foo<?A, ?B>`,
/// then after generalization we will wind up with a type like
/// `Foo<?C, ?D>`. When we enforce that `Foo<?A, ?B> <: Foo<?C,
/// ?D>` (or `>:`), we will wind up with the requirement that `?A
/// <: ?C`, but no particular relationship between `?B` and `?D`
/// (after all, we do not know the variance of the normalized form
/// of `A::Item` with respect to `A`). If we do nothing else, this
/// may mean that `?D` goes unconstrained (as in #41677). So, in
/// this scenario where we create a new type variable in a
/// bivariant context, we set the `needs_wf` flag to true. This
/// will force the calling code to check that `WF(Foo<?C, ?D>)`
/// holds, which in turn implies that `?C::Item == ?D`. So once
/// `?C` is constrained, that should suffice to restrict `?D`.
needs_wf: bool,
}
impl<'cx, 'gcx, 'tcx> TypeRelation<'cx, 'gcx, 'tcx> for Generalizer<'cx, 'gcx, 'tcx> {
fn tcx(&self) -> TyCtxt<'cx, 'gcx, 'tcx> {
self.infcx.tcx
}
fn tag(&self) -> &'static str {
"Generalizer"
}
fn a_is_expected(&self) -> bool {
true
}
fn binders<T>(&mut self, a: &ty::Binder<T>, b: &ty::Binder<T>)
-> RelateResult<'tcx, ty::Binder<T>>
where T: Relate<'tcx>
{
Ok(ty::Binder::bind(self.relate(a.skip_binder(), b.skip_binder())?))
}
fn relate_item_substs(&mut self,
item_def_id: DefId,
a_subst: &'tcx Substs<'tcx>,
b_subst: &'tcx Substs<'tcx>)
-> RelateResult<'tcx, &'tcx Substs<'tcx>>
{
if self.ambient_variance == ty::Variance::Invariant {
// Avoid fetching the variance if we are in an invariant
// context; no need, and it can induce dependency cycles
// (e.g. #41849).
relate::relate_substs(self, None, a_subst, b_subst)
} else {
let opt_variances = self.tcx().variances_of(item_def_id);
relate::relate_substs(self, Some(&opt_variances), a_subst, b_subst)
}
}
fn relate_with_variance<T: Relate<'tcx>>(&mut self,
variance: ty::Variance,
a: &T,
b: &T)
-> RelateResult<'tcx, T>
{
let old_ambient_variance = self.ambient_variance;
self.ambient_variance = self.ambient_variance.xform(variance);
let result = self.relate(a, b);
self.ambient_variance = old_ambient_variance;
result
}
fn tys(&mut self, t: Ty<'tcx>, t2: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
assert_eq!(t, t2); // we are abusing TypeRelation here; both LHS and RHS ought to be ==
        // Check to see whether the type we are generalizing references
// any other type variable related to `vid` via
// subtyping. This is basically our "occurs check", preventing
// us from creating infinitely sized types.
match t.sty {
ty::Infer(ty::TyVar(vid)) => {
let mut variables = self.infcx.type_variables.borrow_mut();
let vid = variables.root_var(vid);
let sub_vid = variables.sub_root_var(vid);
if sub_vid == self.for_vid_sub_root {
// If sub-roots are equal, then `for_vid` and
// `vid` are related via subtyping.
return Err(TypeError::CyclicTy(self.root_ty));
} else {
match variables.probe(vid) {
TypeVariableValue::Known { value: u } => {
drop(variables);
self.relate(&u, &u)
}
TypeVariableValue::Unknown { universe } => {
match self.ambient_variance {
// Invariant: no need to make a fresh type variable.
ty::Invariant => return Ok(t),
// Bivariant: make a fresh var, but we
// may need a WF predicate. See
// comment on `needs_wf` field for
// more info.
ty::Bivariant => self.needs_wf = true,
// Co/contravariant: this will be
// sufficiently constrained later on.
ty::Covariant | ty::Contravariant => (),
}
let origin = *variables.var_origin(vid);
let new_var_id = variables.new_var(universe, false, origin);
let u = self.tcx().mk_var(new_var_id);
debug!("generalize: replacing original vid={:?} with new={:?}",
vid, u);
return Ok(u);
}
}
}
}
ty::Infer(ty::IntVar(_)) |
ty::Infer(ty::FloatVar(_)) => {
// No matter what mode we are in,
// integer/floating-point types must be equal to be
// relatable.
Ok(t)
}
_ => {
relate::super_relate_tys(self, t, t)
}
}
}
fn regions(&mut self, r: ty::Region<'tcx>, r2: ty::Region<'tcx>)
-> RelateResult<'tcx, ty::Region<'tcx>> {
assert_eq!(r, r2); // we are abusing TypeRelation here; both LHS and RHS ought to be ==
match *r {
// Never make variables for regions bound within the type itself,
// nor for erased regions.
ty::ReLateBound(..) |
ty::ReErased => {
return Ok(r);
}
// Always make a fresh region variable for placeholder
// regions; the higher-ranked decision procedures rely on
// this.
ty::RePlaceholder(..) => { }
// For anything else, we make a region variable, unless we
// are *equating*, in which case it's just wasteful.
ty::ReEmpty |
ty::ReStatic |
ty::ReScope(..) |
ty::ReVar(..) |
ty::ReEarlyBound(..) |
ty::ReFree(..) => {
match self.ambient_variance {
ty::Invariant => return Ok(r),
ty::Bivariant | ty::Covariant | ty::Contravariant => (),
}
}
ty::ReCanonical(..) |
ty::ReClosureBound(..) => {
span_bug!(
self.span,
"encountered unexpected ReClosureBound: {:?}",
r,
);
}
}
// FIXME: This is non-ideal because we don't give a
// very descriptive origin for this region variable.
Ok(self.infcx.next_region_var(MiscVariable(self.span)))
}
}
pub trait RelateResultCompare<'tcx, T> {
fn compare<F>(&self, t: T, f: F) -> RelateResult<'tcx, T> where
F: FnOnce() -> TypeError<'tcx>;
}
impl<'tcx, T:Clone + PartialEq> RelateResultCompare<'tcx, T> for RelateResult<'tcx, T> {
fn compare<F>(&self, t: T, f: F) -> RelateResult<'tcx, T> where
F: FnOnce() -> TypeError<'tcx>,
{
self.clone().and_then(|s| {
if s == t {
self.clone()
} else {
Err(f())
}
})
}
}
fn int_unification_error<'tcx>(a_is_expected: bool, v: (ty::IntVarValue, ty::IntVarValue))
-> TypeError<'tcx>
{
let (a, b) = v;
TypeError::IntMismatch(ty::relate::expected_found_bool(a_is_expected, &a, &b))
}
fn float_unification_error<'tcx>(a_is_expected: bool,
v: (ty::FloatVarValue, ty::FloatVarValue))
-> TypeError<'tcx>
{
let (ty::FloatVarValue(a), ty::FloatVarValue(b)) = v;
TypeError::FloatMismatch(ty::relate::expected_found_bool(a_is_expected, &a, &b))
}
| 39.264432 | 95 | 0.530235 |
ab3f83d34d59013b9a1f65104bb7d564f66b3bbb | 19,069 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
More runtime type reflection
*/
#[allow(missing_doc)];
use cast::transmute;
use char;
use container::Container;
use intrinsic;
use intrinsic::{TyDesc, TyVisitor, visit_tydesc};
use intrinsic::Opaque;
use io::{Writer, WriterUtil};
use iterator::IteratorUtil;
use libc::c_void;
use managed;
use ptr;
use reflect;
use reflect::{MovePtr, align};
use str::StrSlice;
use to_str::ToStr;
use vec::raw::{VecRepr, SliceRepr};
use vec;
use vec::{OwnedVector, UnboxedVecRepr};
#[cfg(test)] use io;
pub use managed::raw::BoxRepr;
/// Helpers
trait EscapedCharWriter {
fn write_escaped_char(&self, ch: char);
}
impl EscapedCharWriter for @Writer {
fn write_escaped_char(&self, ch: char) {
match ch {
'\t' => self.write_str("\\t"),
'\r' => self.write_str("\\r"),
'\n' => self.write_str("\\n"),
'\\' => self.write_str("\\\\"),
'\'' => self.write_str("\\'"),
'"' => self.write_str("\\\""),
'\x20'..'\x7e' => self.write_char(ch),
_ => {
// FIXME #4423: This is inefficient because it requires a
// malloc.
self.write_str(char::escape_unicode(ch))
}
}
}
}
/// Representations
trait Repr {
fn write_repr(&self, writer: @Writer);
}
impl Repr for () {
fn write_repr(&self, writer: @Writer) { writer.write_str("()"); }
}
impl Repr for bool {
fn write_repr(&self, writer: @Writer) {
writer.write_str(if *self { "true" } else { "false" })
}
}
impl Repr for int {
fn write_repr(&self, writer: @Writer) { writer.write_int(*self); }
}
impl Repr for i8 {
fn write_repr(&self, writer: @Writer) { writer.write_int(*self as int); }
}
impl Repr for i16 {
fn write_repr(&self, writer: @Writer) { writer.write_int(*self as int); }
}
impl Repr for i32 {
fn write_repr(&self, writer: @Writer) { writer.write_int(*self as int); }
}
impl Repr for i64 {
// FIXME #4424: This can lose precision.
fn write_repr(&self, writer: @Writer) { writer.write_int(*self as int); }
}
impl Repr for uint {
fn write_repr(&self, writer: @Writer) { writer.write_uint(*self); }
}
impl Repr for u8 {
fn write_repr(&self, writer: @Writer) {
writer.write_uint(*self as uint);
}
}
impl Repr for u16 {
fn write_repr(&self, writer: @Writer) {
writer.write_uint(*self as uint);
}
}
impl Repr for u32 {
fn write_repr(&self, writer: @Writer) {
writer.write_uint(*self as uint);
}
}
impl Repr for u64 {
// FIXME #4424: This can lose precision.
fn write_repr(&self, writer: @Writer) {
writer.write_uint(*self as uint);
}
}
impl Repr for float {
// FIXME #4423: This mallocs.
fn write_repr(&self, writer: @Writer) { writer.write_str(self.to_str()); }
}
impl Repr for f32 {
    // FIXME #4423: This mallocs.
fn write_repr(&self, writer: @Writer) { writer.write_str(self.to_str()); }
}
impl Repr for f64 {
// FIXME #4423: This mallocs.
fn write_repr(&self, writer: @Writer) { writer.write_str(self.to_str()); }
}
impl Repr for char {
fn write_repr(&self, writer: @Writer) { writer.write_char(*self); }
}
// New implementation using reflect::MovePtr
enum VariantState {
SearchingFor(int),
Matched,
AlreadyFound
}
pub struct ReprVisitor {
ptr: @mut *c_void,
ptr_stk: @mut ~[*c_void],
var_stk: @mut ~[VariantState],
writer: @Writer
}
pub fn ReprVisitor(ptr: *c_void, writer: @Writer) -> ReprVisitor {
ReprVisitor {
ptr: @mut ptr,
ptr_stk: @mut ~[],
var_stk: @mut ~[],
writer: writer,
}
}
impl MovePtr for ReprVisitor {
#[inline]
fn move_ptr(&self, adjustment: &fn(*c_void) -> *c_void) {
*self.ptr = adjustment(*self.ptr);
}
fn push_ptr(&self) {
self.ptr_stk.push(*self.ptr);
}
fn pop_ptr(&self) {
*self.ptr = self.ptr_stk.pop();
}
}
impl ReprVisitor {
// Various helpers for the TyVisitor impl
#[inline]
pub fn get<T>(&self, f: &fn(&T)) -> bool {
unsafe {
f(transmute::<*c_void,&T>(*self.ptr));
}
true
}
#[inline]
pub fn visit_inner(&self, inner: *TyDesc) -> bool {
self.visit_ptr_inner(*self.ptr, inner)
}
#[inline]
pub fn visit_ptr_inner(&self, ptr: *c_void, inner: *TyDesc) -> bool {
unsafe {
let u = ReprVisitor(ptr, self.writer);
let v = reflect::MovePtrAdaptor(u);
visit_tydesc(inner, @v as @TyVisitor);
true
}
}
#[inline]
pub fn write<T:Repr>(&self) -> bool {
do self.get |v:&T| {
v.write_repr(self.writer);
}
}
pub fn write_escaped_slice(&self, slice: &str) {
self.writer.write_char('"');
for slice.iter().advance |ch| {
self.writer.write_escaped_char(ch);
}
self.writer.write_char('"');
}
pub fn write_mut_qualifier(&self, mtbl: uint) {
if mtbl == 0 {
self.writer.write_str("mut ");
} else if mtbl == 1 {
// skip, this is ast::m_imm
} else {
assert_eq!(mtbl, 2);
self.writer.write_str("const ");
}
}
pub fn write_vec_range(&self,
mtbl: uint,
ptr: *u8,
len: uint,
inner: *TyDesc)
-> bool {
let mut p = ptr;
let end = ptr::offset(p, len);
let (sz, al) = unsafe { ((*inner).size, (*inner).align) };
self.writer.write_char('[');
let mut first = true;
while (p as uint) < (end as uint) {
if first {
first = false;
} else {
self.writer.write_str(", ");
}
self.write_mut_qualifier(mtbl);
self.visit_ptr_inner(p as *c_void, inner);
p = align(ptr::offset(p, sz) as uint, al) as *u8;
}
self.writer.write_char(']');
true
}
pub fn write_unboxed_vec_repr(&self,
mtbl: uint,
v: &UnboxedVecRepr,
inner: *TyDesc)
-> bool {
self.write_vec_range(mtbl, ptr::to_unsafe_ptr(&v.data),
v.fill, inner)
}
}
impl TyVisitor for ReprVisitor {
fn visit_bot(&self) -> bool {
self.writer.write_str("!");
true
}
fn visit_nil(&self) -> bool { self.write::<()>() }
fn visit_bool(&self) -> bool { self.write::<bool>() }
fn visit_int(&self) -> bool { self.write::<int>() }
fn visit_i8(&self) -> bool { self.write::<i8>() }
fn visit_i16(&self) -> bool { self.write::<i16>() }
fn visit_i32(&self) -> bool { self.write::<i32>() }
fn visit_i64(&self) -> bool { self.write::<i64>() }
fn visit_uint(&self) -> bool { self.write::<uint>() }
fn visit_u8(&self) -> bool { self.write::<u8>() }
fn visit_u16(&self) -> bool { self.write::<u16>() }
fn visit_u32(&self) -> bool { self.write::<u32>() }
fn visit_u64(&self) -> bool { self.write::<u64>() }
fn visit_float(&self) -> bool { self.write::<float>() }
fn visit_f32(&self) -> bool { self.write::<f32>() }
fn visit_f64(&self) -> bool { self.write::<f64>() }
fn visit_char(&self) -> bool {
do self.get::<char> |&ch| {
self.writer.write_char('\'');
self.writer.write_escaped_char(ch);
self.writer.write_char('\'');
}
}
// Type no longer exists, vestigial function.
fn visit_str(&self) -> bool { fail!(); }
fn visit_estr_box(&self) -> bool {
do self.get::<@str> |s| {
self.writer.write_char('@');
self.write_escaped_slice(*s);
}
}
fn visit_estr_uniq(&self) -> bool {
do self.get::<~str> |s| {
self.writer.write_char('~');
self.write_escaped_slice(*s);
}
}
fn visit_estr_slice(&self) -> bool {
do self.get::<&str> |s| {
self.write_escaped_slice(*s);
}
}
// Type no longer exists, vestigial function.
fn visit_estr_fixed(&self, _n: uint, _sz: uint,
_align: uint) -> bool { fail!(); }
fn visit_box(&self, mtbl: uint, inner: *TyDesc) -> bool {
self.writer.write_char('@');
self.write_mut_qualifier(mtbl);
do self.get::<&managed::raw::BoxRepr> |b| {
let p = ptr::to_unsafe_ptr(&b.data) as *c_void;
self.visit_ptr_inner(p, inner);
}
}
fn visit_uniq(&self, mtbl: uint, inner: *TyDesc) -> bool {
self.writer.write_char('~');
self.write_mut_qualifier(mtbl);
do self.get::<&managed::raw::BoxRepr> |b| {
let p = ptr::to_unsafe_ptr(&b.data) as *c_void;
self.visit_ptr_inner(p, inner);
}
}
fn visit_ptr(&self, _mtbl: uint, _inner: *TyDesc) -> bool {
do self.get::<*c_void> |p| {
self.writer.write_str(fmt!("(0x%x as *())",
*p as uint));
}
}
fn visit_rptr(&self, mtbl: uint, inner: *TyDesc) -> bool {
self.writer.write_char('&');
self.write_mut_qualifier(mtbl);
do self.get::<*c_void> |p| {
self.visit_ptr_inner(*p, inner);
}
}
// Type no longer exists, vestigial function.
fn visit_vec(&self, _mtbl: uint, _inner: *TyDesc) -> bool { fail!(); }
fn visit_unboxed_vec(&self, mtbl: uint, inner: *TyDesc) -> bool {
do self.get::<vec::UnboxedVecRepr> |b| {
self.write_unboxed_vec_repr(mtbl, b, inner);
}
}
fn visit_evec_box(&self, mtbl: uint, inner: *TyDesc) -> bool {
do self.get::<&VecRepr> |b| {
self.writer.write_char('@');
self.write_unboxed_vec_repr(mtbl, &b.unboxed, inner);
}
}
fn visit_evec_uniq(&self, mtbl: uint, inner: *TyDesc) -> bool {
do self.get::<&VecRepr> |b| {
self.writer.write_char('~');
self.write_unboxed_vec_repr(mtbl, &b.unboxed, inner);
}
}
fn visit_evec_slice(&self, mtbl: uint, inner: *TyDesc) -> bool {
do self.get::<SliceRepr> |s| {
self.writer.write_char('&');
self.write_vec_range(mtbl, s.data, s.len, inner);
}
}
fn visit_evec_fixed(&self, _n: uint, sz: uint, _align: uint,
mtbl: uint, inner: *TyDesc) -> bool {
do self.get::<u8> |b| {
self.write_vec_range(mtbl, ptr::to_unsafe_ptr(b), sz, inner);
}
}
fn visit_enter_rec(&self, _n_fields: uint,
_sz: uint, _align: uint) -> bool {
self.writer.write_char('{');
true
}
fn visit_rec_field(&self, i: uint, name: &str,
mtbl: uint, inner: *TyDesc) -> bool {
if i != 0 {
self.writer.write_str(", ");
}
self.write_mut_qualifier(mtbl);
self.writer.write_str(name);
self.writer.write_str(": ");
self.visit_inner(inner);
true
}
fn visit_leave_rec(&self, _n_fields: uint,
_sz: uint, _align: uint) -> bool {
self.writer.write_char('}');
true
}
fn visit_enter_class(&self, _n_fields: uint,
_sz: uint, _align: uint) -> bool {
self.writer.write_char('{');
true
}
fn visit_class_field(&self, i: uint, name: &str,
mtbl: uint, inner: *TyDesc) -> bool {
if i != 0 {
self.writer.write_str(", ");
}
self.write_mut_qualifier(mtbl);
self.writer.write_str(name);
self.writer.write_str(": ");
self.visit_inner(inner);
true
}
fn visit_leave_class(&self, _n_fields: uint,
_sz: uint, _align: uint) -> bool {
self.writer.write_char('}');
true
}
fn visit_enter_tup(&self, _n_fields: uint,
_sz: uint, _align: uint) -> bool {
self.writer.write_char('(');
true
}
fn visit_tup_field(&self, i: uint, inner: *TyDesc) -> bool {
if i != 0 {
self.writer.write_str(", ");
}
self.visit_inner(inner);
true
}
    fn visit_leave_tup(&self, n_fields: uint,
                       _sz: uint, _align: uint) -> bool {
        if n_fields == 1 {
self.writer.write_char(',');
}
self.writer.write_char(')');
true
}
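    // Reading aid (inferred from the state machine below): enums are visited
    // variant-by-variant. visit_enter_enum pushes SearchingFor(disr) with the
    // value's actual discriminant; the matching variant flips the state to
    // Matched and has its name and fields written, while every later variant
    // observes AlreadyFound and writes nothing.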
fn visit_enter_enum(&self,
_n_variants: uint,
get_disr: extern unsafe fn(ptr: *Opaque) -> int,
_sz: uint,
_align: uint) -> bool {
let var_stk: &mut ~[VariantState] = self.var_stk;
let disr = unsafe {
get_disr(transmute(*self.ptr))
};
var_stk.push(SearchingFor(disr));
true
}
fn visit_enter_enum_variant(&self, _variant: uint,
disr_val: int,
n_fields: uint,
name: &str) -> bool {
let mut write = false;
match self.var_stk.pop() {
SearchingFor(sought) => {
if disr_val == sought {
self.var_stk.push(Matched);
write = true;
} else {
self.var_stk.push(SearchingFor(sought));
}
}
Matched | AlreadyFound => {
self.var_stk.push(AlreadyFound);
}
}
if write {
self.writer.write_str(name);
if n_fields > 0 {
self.writer.write_char('(');
}
}
true
}
fn visit_enum_variant_field(&self,
i: uint,
_offset: uint,
inner: *TyDesc)
-> bool {
match self.var_stk[self.var_stk.len() - 1] {
Matched => {
if i != 0 {
self.writer.write_str(", ");
}
if ! self.visit_inner(inner) {
return false;
}
}
_ => ()
}
true
}
fn visit_leave_enum_variant(&self, _variant: uint,
_disr_val: int,
n_fields: uint,
_name: &str) -> bool {
match self.var_stk[self.var_stk.len() - 1] {
Matched => {
if n_fields > 0 {
self.writer.write_char(')');
}
}
_ => ()
}
true
}
fn visit_leave_enum(&self,
_n_variants: uint,
_get_disr: extern unsafe fn(ptr: *Opaque) -> int,
_sz: uint,
_align: uint)
-> bool {
let var_stk: &mut ~[VariantState] = self.var_stk;
match var_stk.pop() {
SearchingFor(*) => fail!("enum value matched no variant"),
_ => true
}
}
fn visit_enter_fn(&self, _purity: uint, _proto: uint,
_n_inputs: uint, _retstyle: uint) -> bool { true }
fn visit_fn_input(&self, _i: uint, _mode: uint, _inner: *TyDesc) -> bool {
true
}
fn visit_fn_output(&self, _retstyle: uint, _inner: *TyDesc) -> bool {
true
}
fn visit_leave_fn(&self, _purity: uint, _proto: uint,
_n_inputs: uint, _retstyle: uint) -> bool { true }
fn visit_trait(&self) -> bool { true }
fn visit_var(&self) -> bool { true }
fn visit_var_integral(&self) -> bool { true }
fn visit_param(&self, _i: uint) -> bool { true }
fn visit_self(&self) -> bool { true }
fn visit_type(&self) -> bool { true }
fn visit_opaque_box(&self) -> bool {
self.writer.write_char('@');
do self.get::<&managed::raw::BoxRepr> |b| {
let p = ptr::to_unsafe_ptr(&b.data) as *c_void;
self.visit_ptr_inner(p, b.header.type_desc);
}
}
// Type no longer exists, vestigial function.
fn visit_constr(&self, _inner: *TyDesc) -> bool { fail!(); }
fn visit_closure_ptr(&self, _ck: uint) -> bool { true }
}
pub fn write_repr<T>(writer: @Writer, object: &T) {
unsafe {
let ptr = ptr::to_unsafe_ptr(object) as *c_void;
let tydesc = intrinsic::get_tydesc::<T>();
let u = ReprVisitor(ptr, writer);
let v = reflect::MovePtrAdaptor(u);
visit_tydesc(tydesc, @v as @TyVisitor)
}
}
#[cfg(test)]
struct P {a: int, b: float}
#[test]
fn test_repr() {
fn exact_test<T>(t: &T, e:&str) {
let s : &str = io::with_str_writer(|w| write_repr(w, t));
if s != e {
error!("expected '%s', got '%s'",
e, s);
}
assert_eq!(s, e);
}
exact_test(&10, "10");
exact_test(&true, "true");
exact_test(&false, "false");
exact_test(&1.234, "1.234");
exact_test(&(&"hello"), "\"hello\"");
exact_test(&(@"hello"), "@\"hello\"");
exact_test(&(~"he\u10f3llo"), "~\"he\\u10f3llo\"");
// FIXME #4210: the mut fields are a bit off here.
exact_test(&(@10), "@10");
exact_test(&(@mut 10), "@10");
exact_test(&(~10), "~10");
exact_test(&(&10), "&10");
let mut x = 10;
exact_test(&(&mut x), "&mut 10");
exact_test(&(1,), "(1,)");
exact_test(&(@[1,2,3,4,5,6,7,8]),
"@[1, 2, 3, 4, 5, 6, 7, 8]");
exact_test(&(@[1u8,2u8,3u8,4u8]),
"@[1, 2, 3, 4]");
exact_test(&(@["hi", "there"]),
"@[\"hi\", \"there\"]");
exact_test(&(~["hi", "there"]),
"~[\"hi\", \"there\"]");
exact_test(&(&["hi", "there"]),
"&[\"hi\", \"there\"]");
exact_test(&(P{a:10, b:1.234}),
"{a: 10, b: 1.234}");
exact_test(&(@P{a:10, b:1.234}),
"@{a: 10, b: 1.234}");
exact_test(&(~P{a:10, b:1.234}),
"~{a: 10, b: 1.234}");
exact_test(&(10_u8, ~"hello"),
"(10, ~\"hello\")");
exact_test(&(10_u16, ~"hello"),
"(10, ~\"hello\")");
exact_test(&(10_u32, ~"hello"),
"(10, ~\"hello\")");
exact_test(&(10_u64, ~"hello"),
"(10, ~\"hello\")");
}
| 29.427469 | 78 | 0.503645 |
01f0f901d61c773e084e41a786b80745b4ccdad0 | 6,784 | use std::fmt;
use std::iter::FusedIterator;
use std::mem::ManuallyDrop;
use std::ops::{Index, IndexMut};
use std::ptr::NonNull;
pub struct GslVec(NonNull<gsl_sys::gsl_vector>);
impl GslVec {
pub fn new(len: usize) -> Self {
let ptr = unsafe { gsl_sys::gsl_vector_calloc(len) };
let ptr = NonNull::new(ptr).expect("out of memory");
Self(ptr)
}
pub fn len(&self) -> usize {
self.as_ref().size
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
pub fn get(&self, index: usize) -> Option<&f64> {
if index < self.len() {
Some(unsafe { &*gsl_sys::gsl_vector_const_ptr(self.as_raw(), index) })
} else {
None
}
}
    /// Requires `&mut self`: handing out `&mut f64` from a shared reference
    /// would allow aliased mutable access through the raw GSL pointer.
    pub fn get_mut(&mut self, index: usize) -> Option<&mut f64> {
        if index < self.len() {
            Some(unsafe { &mut *gsl_sys::gsl_vector_ptr(self.as_raw(), index) })
        } else {
            None
        }
    }
pub fn fill(&mut self, x: f64) {
unsafe { gsl_sys::gsl_vector_set_all(self.as_raw(), x) }
}
pub fn iter(&self) -> GslVecIter<'_> {
GslVecIter { vec: self, curr: 0 }
}
pub fn as_slice(&self) -> &[f64] {
unsafe { as_slice(self.as_raw()) }
}
pub fn as_mut_slice(&mut self) -> &mut [f64] {
unsafe { as_mut_slice(self.as_raw()) }
}
pub fn as_raw(&self) -> *mut gsl_sys::gsl_vector {
self.0.as_ptr()
}
pub fn into_raw(v: Self) -> *mut gsl_sys::gsl_vector {
ManuallyDrop::new(v).as_raw()
}
/// # Safety
///
/// Given pointer must be a valid allocation of GSL vector produced either
/// by [`into_raw`](Self::into_raw) or by an external call to
/// `gsl_vector_alloc` or `gsl_vector_calloc`.
pub unsafe fn from_raw(raw: *mut gsl_sys::gsl_vector) -> Self {
let ptr = NonNull::new(raw).expect("invalid pointer");
Self(ptr)
}
fn as_ref(&self) -> &gsl_sys::gsl_vector {
unsafe { self.0.as_ref() }
}
}
pub(crate) unsafe fn as_slice<'a>(ptr: *const gsl_sys::gsl_vector) -> &'a [f64] {
std::slice::from_raw_parts((*ptr).data, (*ptr).size)
}
pub(crate) unsafe fn as_mut_slice<'a>(ptr: *mut gsl_sys::gsl_vector) -> &'a mut [f64] {
std::slice::from_raw_parts_mut((*ptr).data, (*ptr).size)
}
impl Drop for GslVec {
fn drop(&mut self) {
unsafe { gsl_sys::gsl_vector_free(self.as_raw()) }
}
}
impl From<&[f64]> for GslVec {
fn from(s: &[f64]) -> Self {
let ptr = unsafe { gsl_sys::gsl_vector_alloc(s.len()) };
let ptr = NonNull::new(ptr).expect("out of memory");
for (i, v) in s.iter().copied().enumerate() {
unsafe {
gsl_sys::gsl_vector_set(ptr.as_ptr(), i, v);
}
}
Self(ptr)
}
}
impl From<GslVec> for Vec<f64> {
fn from(v: GslVec) -> Self {
v.iter().copied().collect()
}
}
impl From<Vec<f64>> for GslVec {
fn from(v: Vec<f64>) -> Self {
Self::from(v.as_slice())
}
}
impl Index<usize> for GslVec {
type Output = f64;
fn index(&self, index: usize) -> &Self::Output {
self.get(index).expect("index out of bounds")
}
}
impl IndexMut<usize> for GslVec {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
self.get_mut(index).expect("index out of bounds")
}
}
impl Clone for GslVec {
fn clone(&self) -> Self {
let cloned = Self::new(self.len());
unsafe {
gsl_sys::gsl_vector_memcpy(cloned.as_raw(), self.as_raw());
}
cloned
}
}
impl PartialEq for GslVec {
fn eq(&self, other: &Self) -> bool {
unsafe { gsl_sys::gsl_vector_equal(self.as_raw(), other.as_raw()) == 1 }
}
}
impl Eq for GslVec {}
impl fmt::Debug for GslVec {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "GslVec([")?;
let mut it = self.iter();
if let Some(v) = it.next() {
write!(f, "{}", v)?;
}
for v in it {
write!(f, ", {}", v)?;
}
write!(f, "])")
}
}
// SAFETY: GslVec follows standard rules for aliasing. Thus, Sync invariants are
// checked by Rust borrow checker. Send is satisfied, because GslVec does not
// implement Copy and its Clone implementation does not share a single
// allocation.
unsafe impl Send for GslVec {}
unsafe impl Sync for GslVec {}
pub struct GslVecIter<'a> {
vec: &'a GslVec,
curr: usize,
}
impl GslVecIter<'_> {
fn remaining(&self) -> usize {
self.vec.len() - self.curr
}
fn consume(&mut self) {
self.curr = self.vec.len()
}
}
impl<'a> Iterator for GslVecIter<'a> {
type Item = &'a f64;
fn next(&mut self) -> Option<Self::Item> {
let curr = self.curr;
self.curr = std::cmp::min(curr + 1, self.vec.len());
self.vec.get(curr)
}
fn size_hint(&self) -> (usize, Option<usize>) {
let r = self.remaining();
(r, Some(r))
}
fn count(mut self) -> usize
where
Self: Sized,
{
let r = self.remaining();
self.consume();
r
}
fn last(mut self) -> Option<Self::Item>
where
Self: Sized,
{
if !self.vec.is_empty() {
let last = &self.vec[self.vec.len() - 1];
self.consume();
Some(last)
} else {
None
}
}
fn nth(&mut self, n: usize) -> Option<Self::Item> {
self.curr += n;
self.vec.get(self.curr)
}
}
impl ExactSizeIterator for GslVecIter<'_> {}
impl FusedIterator for GslVecIter<'_> {}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn basic() {
let mut x = GslVec::new(3);
assert_eq!(x.len(), 3);
assert_eq!(x[0], 0.0);
x[0] = 3.0;
assert_eq!(x[0], 3.0);
}
#[test]
fn slice() {
let x = GslVec::from(&[3.0, 2.0, 1.0][..]);
assert_eq!(x.as_slice(), &[3.0, 2.0, 1.0]);
}
#[test]
fn fill() {
let mut x = GslVec::new(3);
x.fill(3.0);
assert_eq!(x.as_slice(), &[3.0, 3.0, 3.0]);
}
#[test]
fn iter() {
let x = GslVec::from(&[3.0, 2.0, 1.0][..]);
let mut it = x.iter();
assert_eq!(it.next(), Some(&3.0));
assert_eq!(it.next(), Some(&2.0));
assert_eq!(it.next(), Some(&1.0));
assert_eq!(it.next(), None);
}
#[test]
fn clone() {
let x = GslVec::from(&[3.0, 2.0, 1.0][..]);
let y = x.clone();
assert_ne!(x.as_raw(), y.as_raw());
}
#[test]
fn equality() {
let x = GslVec::from(&[3.0, 2.0, 1.0][..]);
let mut y = x.clone();
assert_eq!(x, y);
y[0] = 42.0;
assert_ne!(x, y);
}
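    // A minimal sketch of the raw-pointer contract documented on `from_raw`:
    // `into_raw` releases ownership without freeing, and `from_raw` may then
    // reclaim that same allocation exactly once.
    #[test]
    fn raw_roundtrip() {
        let x = GslVec::from(&[3.0, 2.0, 1.0][..]);
        let raw = GslVec::into_raw(x);
        // SAFETY: `raw` came from `into_raw` and is reclaimed exactly once.
        let y = unsafe { GslVec::from_raw(raw) };
        assert_eq!(y.as_slice(), &[3.0, 2.0, 1.0]);
    }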
}
| 23.07483 | 87 | 0.521079 |
29f6e550178eabf84e361dc0754f1c7d8624a1ce | 8,842 | use std::collections::VecDeque;
#[derive(Debug)]
enum StepResult {
Continue,
InputRequired,
Halted,
}
#[derive(Debug, PartialEq)]
pub enum YieldReason {
InputRequired,
Halted,
}
#[derive(Debug)]
pub struct IntCodeEmulator {
ram: Vec<i64>,
pointer: usize,
base: i64,
stdin: VecDeque<i64>,
stdout: VecDeque<i64>,
}
impl IntCodeEmulator {
pub fn new(program: Vec<i64>) -> IntCodeEmulator {
IntCodeEmulator {
ram: program,
pointer: 0,
base: 0,
stdin: VecDeque::new(),
stdout: VecDeque::new(),
}
}
pub fn from_input(input: &str) -> IntCodeEmulator {
let program = IntCodeEmulator::parse_input(input);
IntCodeEmulator::new(program)
}
pub fn parse_input(input: &str) -> Vec<i64> {
input
.trim()
.split(',')
.map(|l| l.parse().expect("Unable to parse input"))
.collect()
}
pub fn ram(&self) -> &Vec<i64> {
&self.ram
}
pub fn stdin(&mut self) -> &mut VecDeque<i64> {
&mut self.stdin
}
pub fn stdout(&mut self) -> &mut VecDeque<i64> {
&mut self.stdout
}
pub fn execute(&mut self) {
loop {
match self.step() {
StepResult::Continue => continue,
StepResult::InputRequired => panic!("Input required but none received"),
StepResult::Halted => break,
}
}
}
pub fn execute_until_yield(&mut self) -> YieldReason {
loop {
match self.step() {
StepResult::Continue => continue,
StepResult::InputRequired => return YieldReason::InputRequired,
StepResult::Halted => return YieldReason::Halted,
}
}
}
fn step(&mut self) -> StepResult {
let program = &mut self.ram;
let base = &mut self.base;
let instruction = Instruction::parse(program, self.pointer);
let steps = instruction.steps();
match instruction {
Instruction::Add(left, right, dest) => dest.write(
program,
*base,
left.read(program, *base) + right.read(program, *base),
),
Instruction::Multiply(left, right, dest) => dest.write(
program,
*base,
left.read(program, *base) * right.read(program, *base),
),
Instruction::Input(dest) => {
let input = match self.stdin.pop_front() {
None => return StepResult::InputRequired,
Some(v) => v,
};
dest.write(program, *base, input);
}
Instruction::Output(src) => {
self.stdout.push_back(src.read(program, *base));
}
Instruction::JumpTrue(condition, dest) => {
if condition.read(program, *base) != 0 {
self.pointer = dest.read(program, *base) as usize;
return StepResult::Continue;
}
}
Instruction::JumpFalse(condition, dest) => {
if condition.read(program, *base) == 0 {
self.pointer = dest.read(program, *base) as usize;
return StepResult::Continue;
}
}
Instruction::LessThan(left, right, dest) => {
dest.write(
program,
*base,
if left.read(program, *base) < right.read(program, *base) {
1
} else {
0
},
);
}
Instruction::Equals(left, right, dest) => {
dest.write(
program,
*base,
if left.read(program, *base) == right.read(program, *base) {
1
} else {
0
},
);
}
Instruction::AdjustBase(offset) => {
*base += offset.read(program, *base);
}
Instruction::Halt => return StepResult::Halted,
}
self.pointer += steps;
StepResult::Continue
}
}
#[derive(Debug)]
enum Instruction {
Add(ReadValue, ReadValue, WriteValue),
Multiply(ReadValue, ReadValue, WriteValue),
Input(WriteValue),
Output(ReadValue),
JumpTrue(ReadValue, ReadValue),
JumpFalse(ReadValue, ReadValue),
LessThan(ReadValue, ReadValue, WriteValue),
Equals(ReadValue, ReadValue, WriteValue),
AdjustBase(ReadValue),
Halt,
}
impl Instruction {
fn parse(program: &[i64], pointer: usize) -> Instruction {
let opcode = program[pointer];
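        // The two low decimal digits select the instruction; each higher
        // decimal digit is the mode of one parameter. E.g. opcode 1002
        // decodes as instruction 02 (multiply) with mode1 = 0 (position),
        // mode2 = 1 (immediate), and mode3 = 0 (position).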
let mode1 = (opcode / 100) % 10;
let mode2 = (opcode / 1000) % 10;
let mode3 = (opcode / 10000) % 10;
match opcode % 100 {
1 => Instruction::Add(
ReadValue::new(program[pointer + 1], mode1),
ReadValue::new(program[pointer + 2], mode2),
WriteValue::new(program[pointer + 3], mode3),
),
2 => Instruction::Multiply(
ReadValue::new(program[pointer + 1], mode1),
ReadValue::new(program[pointer + 2], mode2),
WriteValue::new(program[pointer + 3], mode3),
),
3 => Instruction::Input(WriteValue::new(program[pointer + 1], mode1)),
4 => Instruction::Output(ReadValue::new(program[pointer + 1], mode1)),
5 => Instruction::JumpTrue(
ReadValue::new(program[pointer + 1], mode1),
ReadValue::new(program[pointer + 2], mode2),
),
6 => Instruction::JumpFalse(
ReadValue::new(program[pointer + 1], mode1),
ReadValue::new(program[pointer + 2], mode2),
),
7 => Instruction::LessThan(
ReadValue::new(program[pointer + 1], mode1),
ReadValue::new(program[pointer + 2], mode2),
WriteValue::new(program[pointer + 3], mode3),
),
8 => Instruction::Equals(
ReadValue::new(program[pointer + 1], mode1),
ReadValue::new(program[pointer + 2], mode2),
WriteValue::new(program[pointer + 3], mode3),
),
9 => Instruction::AdjustBase(ReadValue::new(program[pointer + 1], mode1)),
99 => Instruction::Halt,
_ => panic!("Unknown opcode {} at pointer {}", opcode, pointer),
}
}
fn steps(&self) -> usize {
match self {
Instruction::Add(..) => 4,
Instruction::Multiply(..) => 4,
Instruction::Input(..) => 2,
Instruction::Output(..) => 2,
Instruction::JumpTrue(..) => 3,
Instruction::JumpFalse(..) => 3,
Instruction::LessThan(..) => 4,
Instruction::Equals(..) => 4,
Instruction::AdjustBase(..) => 2,
Instruction::Halt => 0,
}
}
}
#[derive(Debug)]
enum ReadValue {
Position(i64),
Immediate(i64),
Relative(i64),
}
impl ReadValue {
fn new(value: i64, mode: i64) -> ReadValue {
match mode {
0 => ReadValue::Position(value),
1 => ReadValue::Immediate(value),
2 => ReadValue::Relative(value),
_ => panic!("Unsupported read mode: {:?}", mode),
}
}
fn read(&self, program: &[i64], base: i64) -> i64 {
let position = match *self {
ReadValue::Position(position) => position as usize,
ReadValue::Relative(position) => (position + base) as usize,
ReadValue::Immediate(value) => return value,
};
*program.get(position).unwrap_or(&0)
}
}
#[derive(Debug)]
enum WriteValue {
Position(i64),
Relative(i64),
}
impl WriteValue {
fn new(value: i64, mode: i64) -> WriteValue {
match mode {
0 => WriteValue::Position(value),
2 => WriteValue::Relative(value),
_ => panic!("Unsupported write mode: {:?}", mode),
}
}
fn write(&self, program: &mut Vec<i64>, base: i64, value: i64) {
let position: usize = match *self {
WriteValue::Position(position) => position as usize,
WriteValue::Relative(position) => (position + base) as usize,
};
if position >= program.len() {
            // memory can grow dynamically if we try to access a non-existent index
program.resize_with(position + 1, Default::default);
}
program[position] = value;
}
}
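#[cfg(test)]
mod tests {
    use super::*;

    // A minimal sketch with the classic sample program 1002,4,3,4,33: it
    // multiplies ram[4] (33) by the immediate 3, writes 99 back to ram[4],
    // and then halts on that freshly written opcode.
    #[test]
    fn parameter_modes() {
        let mut emu = IntCodeEmulator::from_input("1002,4,3,4,33");
        emu.execute();
        assert_eq!(emu.ram()[4], 99);
    }

    // 3,0,4,0,99 reads one value into ram[0], echoes it to stdout, and halts.
    #[test]
    fn stdin_to_stdout() {
        let mut emu = IntCodeEmulator::from_input("3,0,4,0,99");
        emu.stdin().push_back(42);
        assert_eq!(emu.execute_until_yield(), YieldReason::Halted);
        assert_eq!(emu.stdout().pop_front(), Some(42));
    }
}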
| 29.972881 | 88 | 0.49525 |
621b5ee001e9ccfa560d8fe4b7a96f9f065f7707 | 9,804 | //! DER decoder.
use crate::{
asn1::*, Choice, Decodable, DecodeValue, Error, ErrorKind, Length, Result, Tag, TagMode,
TagNumber, Tagged,
};
use core::convert::{TryFrom, TryInto};
/// DER decoder.
#[derive(Debug)]
pub struct Decoder<'a> {
/// Byte slice being decoded.
///
/// In the event an error was previously encountered this will be set to
/// `None` to prevent further decoding while in a bad state.
bytes: Option<&'a [u8]>,
/// Position within the decoded slice.
position: Length,
}
impl<'a> Decoder<'a> {
/// Create a new decoder for the given byte slice.
pub fn new(bytes: &'a [u8]) -> Self {
Self {
bytes: Some(bytes),
position: Length::ZERO,
}
}
/// Decode a value which impls the [`Decodable`] trait.
pub fn decode<T: Decodable<'a>>(&mut self) -> Result<T> {
if self.is_failed() {
return Err(self.error(ErrorKind::Failed));
}
T::decode(self).map_err(|e| {
self.bytes.take();
e.nested(self.position)
})
}
/// Return an error with the given [`ErrorKind`], annotating it with
/// context about where the error occurred.
pub fn error(&mut self, kind: ErrorKind) -> Error {
self.bytes.take();
kind.at(self.position)
}
/// Return an error for an invalid value with the given tag.
pub fn value_error(&mut self, tag: Tag) -> Error {
self.error(tag.value_error().kind())
}
/// Did the decoding operation fail due to an error?
pub fn is_failed(&self) -> bool {
self.bytes.is_none()
}
/// Get the position within the buffer.
pub fn position(&self) -> Length {
self.position
}
/// Peek at the next byte in the decoder without modifying the cursor.
pub fn peek(&self) -> Option<u8> {
self.remaining()
.ok()
.and_then(|bytes| bytes.get(0).cloned())
}
/// Finish decoding, returning the given value if there is no
/// remaining data, or an error otherwise
pub fn finish<T>(self, value: T) -> Result<T> {
if self.is_failed() {
Err(ErrorKind::Failed.at(self.position))
} else if !self.is_finished() {
Err(ErrorKind::TrailingData {
decoded: self.position,
remaining: self.remaining_len()?,
}
.at(self.position))
} else {
Ok(value)
}
}
/// Have we decoded all of the bytes in this [`Decoder`]?
///
/// Returns `false` if we're not finished decoding or if a fatal error
/// has occurred.
pub fn is_finished(&self) -> bool {
self.remaining().map(|rem| rem.is_empty()).unwrap_or(false)
}
/// Attempt to decode an ASN.1 `ANY` value.
pub fn any(&mut self) -> Result<Any<'a>> {
self.decode()
}
/// Attempt to decode an `OPTIONAL` ASN.1 `ANY` value.
pub fn any_optional(&mut self) -> Result<Option<Any<'a>>> {
self.decode()
}
/// Attempt to decode ASN.1 `INTEGER` as `i8`
pub fn int8(&mut self) -> Result<i8> {
self.decode()
}
/// Attempt to decode ASN.1 `INTEGER` as `i16`
pub fn int16(&mut self) -> Result<i16> {
self.decode()
}
/// Attempt to decode unsigned ASN.1 `INTEGER` as `u8`
pub fn uint8(&mut self) -> Result<u8> {
self.decode()
}
/// Attempt to decode unsigned ASN.1 `INTEGER` as `u16`
pub fn uint16(&mut self) -> Result<u16> {
self.decode()
}
/// Attempt to decode an ASN.1 `INTEGER` as a [`UIntBytes`].
#[cfg(feature = "bigint")]
#[cfg_attr(docsrs, doc(cfg(feature = "bigint")))]
pub fn uint_bytes(&mut self) -> Result<UIntBytes<'a>> {
self.decode()
}
/// Attempt to decode an ASN.1 `BIT STRING`.
pub fn bit_string(&mut self) -> Result<BitString<'a>> {
self.decode()
}
/// Attempt to decode an ASN.1 `CONTEXT-SPECIFIC` field with the
/// provided [`TagNumber`].
pub fn context_specific<T>(
&mut self,
tag_number: TagNumber,
tag_mode: TagMode,
) -> Result<Option<T>>
where
T: DecodeValue<'a> + Tagged,
{
Ok(match tag_mode {
TagMode::Explicit => ContextSpecific::<T>::decode_explicit(self, tag_number)?,
TagMode::Implicit => ContextSpecific::<T>::decode_implicit(self, tag_number)?,
}
.map(|field| field.value))
}
/// Attempt to decode an ASN.1 `GeneralizedTime`.
pub fn generalized_time(&mut self) -> Result<GeneralizedTime> {
self.decode()
}
/// Attempt to decode an ASN.1 `IA5String`.
pub fn ia5_string(&mut self) -> Result<Ia5String<'a>> {
self.decode()
}
/// Attempt to decode an ASN.1 `NULL` value.
pub fn null(&mut self) -> Result<Null> {
self.decode()
}
/// Attempt to decode an ASN.1 `OCTET STRING`.
pub fn octet_string(&mut self) -> Result<OctetString<'a>> {
self.decode()
}
/// Attempt to decode an ASN.1 `OBJECT IDENTIFIER`.
#[cfg(feature = "oid")]
#[cfg_attr(docsrs, doc(cfg(feature = "oid")))]
pub fn oid(&mut self) -> Result<ObjectIdentifier> {
self.decode()
}
/// Attempt to decode an ASN.1 `OPTIONAL` value.
pub fn optional<T: Choice<'a>>(&mut self) -> Result<Option<T>> {
self.decode()
}
/// Attempt to decode an ASN.1 `PrintableString`.
pub fn printable_string(&mut self) -> Result<PrintableString<'a>> {
self.decode()
}
/// Attempt to decode an ASN.1 `UTCTime`.
pub fn utc_time(&mut self) -> Result<UtcTime> {
self.decode()
}
/// Attempt to decode an ASN.1 `UTF8String`.
pub fn utf8_string(&mut self) -> Result<Utf8String<'a>> {
self.decode()
}
/// Attempt to decode an ASN.1 `SEQUENCE`, creating a new nested
/// [`Decoder`] and calling the provided argument with it.
pub fn sequence<F, T>(&mut self, f: F) -> Result<T>
where
F: FnOnce(&mut Decoder<'a>) -> Result<T>,
{
Tag::try_from(self.byte()?)?.assert_eq(Tag::Sequence)?;
let len = Length::decode(self)?;
self.decode_nested(len, f)
}
/// Decode a single byte, updating the internal cursor.
pub(crate) fn byte(&mut self) -> Result<u8> {
match self.bytes(1u8)? {
[byte] => Ok(*byte),
_ => Err(self.error(ErrorKind::Truncated)),
}
}
/// Obtain a slice of bytes of the given length from the current cursor
/// position, or return an error if we have insufficient data.
pub(crate) fn bytes(&mut self, len: impl TryInto<Length>) -> Result<&'a [u8]> {
if self.is_failed() {
return Err(self.error(ErrorKind::Failed));
}
let len = len
.try_into()
.map_err(|_| self.error(ErrorKind::Overflow))?;
let result = self
.remaining()?
.get(..len.try_into()?)
.ok_or_else(|| self.error(ErrorKind::Truncated))?;
self.position = (self.position + len)?;
Ok(result)
}
/// Get the number of bytes still remaining in the buffer.
pub(crate) fn remaining_len(&self) -> Result<Length> {
self.remaining()?.len().try_into()
}
/// Create a nested decoder which operates over the provided [`Length`].
///
/// The nested decoder is passed to the provided callback function which is
/// expected to decode a value of type `T` with it.
fn decode_nested<F, T>(&mut self, length: Length, f: F) -> Result<T>
where
F: FnOnce(&mut Self) -> Result<T>,
{
let start_pos = self.position();
let end_pos = (start_pos + length)?;
let bytes = match self.bytes {
Some(slice) => slice
.get(..end_pos.try_into()?)
.ok_or(ErrorKind::Truncated)?,
None => return Err(self.error(ErrorKind::Failed)),
};
let mut nested_decoder = Self {
bytes: Some(bytes),
position: start_pos,
};
self.position = end_pos;
let result = f(&mut nested_decoder)?;
nested_decoder.finish(result)
}
/// Obtain the remaining bytes in this decoder from the current cursor
/// position.
fn remaining(&self) -> Result<&'a [u8]> {
let pos = usize::try_from(self.position)?;
self.bytes
.and_then(|b| b.get(pos..))
.ok_or_else(|| ErrorKind::Truncated.at(self.position))
}
}
impl<'a> From<&'a [u8]> for Decoder<'a> {
fn from(bytes: &'a [u8]) -> Decoder<'a> {
Decoder::new(bytes)
}
}
#[cfg(test)]
mod tests {
use super::Decoder;
use crate::{Decodable, ErrorKind, Length};
#[test]
fn truncated_message() {
let mut decoder = Decoder::new(&[]);
let err = bool::decode(&mut decoder).err().unwrap();
assert_eq!(ErrorKind::Truncated, err.kind());
assert_eq!(Some(Length::ZERO), err.position());
}
#[test]
fn invalid_field_length() {
let mut decoder = Decoder::new(&[0x02, 0x01]);
let err = i8::decode(&mut decoder).err().unwrap();
assert_eq!(ErrorKind::Truncated, err.kind());
assert_eq!(Some(Length::from(2u8)), err.position());
}
#[test]
fn trailing_data() {
let mut decoder = Decoder::new(&[0x02, 0x01, 0x2A, 0x00]);
let x = decoder.decode().unwrap();
assert_eq!(42i8, x);
let err = decoder.finish(x).err().unwrap();
assert_eq!(
ErrorKind::TrailingData {
decoded: 3u8.into(),
remaining: 1u8.into()
},
err.kind()
);
assert_eq!(Some(Length::from(3u8)), err.position());
}
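    // A minimal sketch of nested decoding via `sequence` (which drives
    // `decode_nested` internally): the bytes are the DER encoding of
    // SEQUENCE { INTEGER 1, INTEGER 2 }, i.e. 30 06 02 01 01 02 01 02.
    #[test]
    fn nested_sequence() {
        let mut decoder = Decoder::new(&[0x30, 0x06, 0x02, 0x01, 0x01, 0x02, 0x01, 0x02]);
        let (a, b) = decoder
            .sequence(|nested| {
                let a = nested.int8()?;
                let b = nested.int8()?;
                Ok((a, b))
            })
            .unwrap();
        assert_eq!((a, b), (1, 2));
    }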
}
| 29.890244 | 92 | 0.56171 |
696b1be208e5b84af3d4506fe2179a6b10954a30 | 2,025 | use cairo::{Context, Format, ImageSurface};
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use piet::{ImageFormat, RenderContext};
use piet_cairo::CairoRenderContext;
use std::convert::{TryFrom, TryInto};
fn fill_random(data: &mut [u8]) {
    // A simple LCG with the "glibc / ANSI C, CodeWarrior, ..." parameter set from Wikipedia; see
    // https://en.wikipedia.org/w/index.php?title=Linear_congruential_generator&oldid=1028647893#Parameters_in_common_use
let mut state: u32 = 123456789;
let m: u32 = 1 << 31;
let a: u32 = 1103515245;
let c: u32 = 12345;
let mut next_number = || {
state = (a * state + c) % m;
// Take a higher byte since it is more random than the low bytes
(state >> 16) as u8
};
data.iter_mut().for_each(|b| *b = next_number());
}
pub fn bench_make_image(c: &mut Criterion) {
let formats = [
(1, ImageFormat::Grayscale),
(3, ImageFormat::Rgb),
(4, ImageFormat::RgbaSeparate),
(4, ImageFormat::RgbaPremul),
];
for &(bpp, format) in formats.iter() {
let (name, width, height) = ("2160p", 3840, 2160);
let bytes = width * height * bpp;
let mut data = vec![0; usize::try_from(bytes).expect("Should fit into usize")];
fill_random(&mut data[..]);
c.bench_function(&format!("make_image_{}_{:?}", name, format), |b| {
let unused_surface =
ImageSurface::create(Format::ARgb32, 1, 1).expect("Can't create surface");
let cr = Context::new(&unused_surface).unwrap();
let mut piet_context = CairoRenderContext::new(&cr);
let width = black_box(width.try_into().unwrap());
let height = black_box(height.try_into().unwrap());
let data = black_box(&data);
let format = black_box(format);
b.iter(|| piet_context.make_image(width, height, data, format));
});
}
}
criterion_group!(benches, bench_make_image);
criterion_main!(benches);
| 36.160714 | 121 | 0.615802 |
d9bfdb0c5053eca56df119a2db7c32a709955417 | 221 | //! Various finishers for world generation, such as grass, snow, and trees.
mod clumped;
mod single;
mod snow;
pub use clumped::ClumpedFoliageFinisher;
pub use single::SingleFoliageFinisher;
pub use snow::SnowFinisher;
| 22.1 | 75 | 0.782805 |
64ec6511ef117cb3fdc5f4a3a781bb38b0e0f178 | 2,580 | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// TODO Follow 2018 idioms
#![allow(elided_lifetimes_in_paths)]
//! Crate wlan-common hosts common libraries
//! to be used for WLAN SME, MLME, and binaries written in Rust.
#![cfg_attr(feature = "benchmark", feature(test))]
pub mod appendable;
pub mod big_endian;
pub mod bss;
pub mod buffer_reader;
pub mod buffer_writer;
pub mod channel;
pub mod data_writer;
#[allow(unused)]
pub mod energy;
pub mod error;
pub mod format;
pub mod hasher;
pub mod ie;
pub mod mac;
pub mod mgmt_writer;
pub mod organization;
pub mod sequence;
#[allow(unused)]
pub mod stats;
pub mod test_utils;
pub mod tim;
pub mod time;
pub mod unaligned_view;
pub mod wmm;
use {
channel::{Cbw, Phy},
fidl_fuchsia_wlan_sme as fidl_sme,
};
use std::fmt;
pub use time::TimeUnit;
#[derive(Clone, Debug, Default, PartialEq)]
pub struct RadioConfig {
pub phy: Option<Phy>,
pub cbw: Option<Cbw>,
pub primary_chan: Option<u8>,
}
impl RadioConfig {
pub fn new(phy: Phy, cbw: Cbw, primary_chan: u8) -> Self {
RadioConfig { phy: Some(phy), cbw: Some(cbw), primary_chan: Some(primary_chan) }
}
pub fn to_fidl(&self) -> fidl_sme::RadioConfig {
let (cbw, _) = self.cbw.or(Some(Cbw::Cbw20)).unwrap().to_fidl();
fidl_sme::RadioConfig {
override_phy: self.phy.is_some(),
phy: self.phy.or(Some(Phy::Ht)).unwrap().to_fidl(),
override_cbw: self.cbw.is_some(),
cbw,
override_primary_chan: self.primary_chan.is_some(),
primary_chan: self.primary_chan.unwrap_or(0),
}
}
pub fn from_fidl(radio_cfg: fidl_sme::RadioConfig) -> Self {
RadioConfig {
phy: if radio_cfg.override_phy { Some(Phy::from_fidl(radio_cfg.phy)) } else { None },
cbw: if radio_cfg.override_cbw { Some(Cbw::from_fidl(radio_cfg.cbw, 0)) } else { None },
primary_chan: if radio_cfg.override_primary_chan {
Some(radio_cfg.primary_chan)
} else {
None
},
}
}
}
#[derive(Copy, Clone)]
pub enum StationMode {
Client,
Ap,
Mesh,
}
impl fmt::Display for StationMode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
StationMode::Client => f.write_str("client"),
StationMode::Ap => f.write_str("AP"),
StationMode::Mesh => f.write_str("mesh"),
}
}
}
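#[cfg(test)]
mod tests {
    use super::*;

    // A minimal round-trip sketch: `to_fidl` lowers each `Some` field to an
    // override flag plus value, and `from_fidl` reverses that mapping. The
    // concrete Phy/Cbw/channel values here are only illustrative.
    #[test]
    fn radio_config_fidl_roundtrip() {
        let radio_cfg = RadioConfig::new(Phy::Ht, Cbw::Cbw20, 11);
        let converted = RadioConfig::from_fidl(radio_cfg.to_fidl());
        assert_eq!(converted, radio_cfg);
    }
}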
| 26.326531 | 100 | 0.631395 |
11025f3d43d9fb9ee26b076c03fdc32f9ee9a5aa | 747 | use actix::prelude::*;
use diesel::prelude::*;
use crate::db::DbExecutor;
use crate::prelude::*;
use crate::utils::{
auth::{Auth, GenerateAuth},
jwt::CanDecodeJwt,
};
impl Message for GenerateAuth {
type Result = Result<Auth>;
}
impl Handler<GenerateAuth> for DbExecutor {
type Result = Result<Auth>;
fn handle(&mut self, msg: GenerateAuth, _: &mut Self::Context) -> Self::Result {
use crate::schema::users::dsl::*;
let claims = msg.token.decode_jwt()?.claims;
let conn = &self.0.get()?;
match users.find(claims.id).first(conn) {
Ok(user) => Ok(Auth {
user,
token: msg.token,
}),
Err(e) => Err(e.into()),
}
}
} | 24.096774 | 84 | 0.552878 |
8a5b5918447120e214840a0dfa62d3dd9a3b99f8 | 274,312 | // Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
// #[PerformanceCriticalPath]
//! This module contains TiKV's transaction layer. It lowers high-level, transactional
//! commands to low-level (raw key-value) interactions with persistent storage.
//!
//! This module is further split into layers: [`txn`](txn) lowers transactional commands to
//! key-value operations on an MVCC abstraction. [`mvcc`](mvcc) is our MVCC implementation.
//! [`kv`](kv) is an abstraction layer over persistent storage.
//!
//! Other responsibilities of this module are managing latches (see [`latch`](txn::latch)), deadlock
//! and wait handling (see [`lock_manager`](lock_manager)), scheduling command execution (see
//! [`txn::scheduler`](txn::scheduler)), and handling commands from the raw and versioned APIs (in
//! the [`Storage`](Storage) struct).
//!
//! For more information about TiKV's transactions, see the [sig-txn docs](https://github.com/tikv/sig-transaction/tree/master/doc).
//!
//! Some important types are:
//!
//! * the [`Engine`](kv::Engine) trait and related traits, which abstracts over underlying storage,
//! * the [`MvccTxn`](mvcc::txn::MvccTxn) struct, which is the primary object in the MVCC
//! implementation,
//! * the commands in the [`commands`](txn::commands) module, which are how each command is implemented,
//! * the [`Storage`](Storage) struct, which is the primary entry point for this module.
//!
//! Related code:
//!
//! * the [`kv`](crate::server::service::kv) module, which is the interface for TiKV's APIs,
//! * the [`lock_manager`](crate::server::lock_manager), which takes part in lock and deadlock
//! management,
//! * [`gc_worker`](crate::server::gc_worker), which drives garbage collection of old values,
//! * the [`txn_types](::txn_types) crate, some important types for this module's interface,
//! * the [`kvproto`](::kvproto) crate, which defines TiKV's protobuf API and includes some
//! documentation of the commands implemented here,
//! * the [`test_storage`](::test_storage) crate, integration tests for this module,
//! * the [`engine_traits`](::engine_traits) crate, more detail of the engine abstraction.
pub mod config;
pub mod errors;
pub mod kv;
pub mod lock_manager;
pub(crate) mod metrics;
pub mod mvcc;
pub mod raw;
pub mod txn;
mod read_pool;
mod types;
use self::kv::SnapContext;
pub use self::{
errors::{get_error_kind_from_header, get_tag_from_header, Error, ErrorHeaderKind, ErrorInner},
kv::{
CfStatistics, Cursor, CursorBuilder, Engine, FlowStatistics, FlowStatsReporter, Iterator,
PerfStatisticsDelta, PerfStatisticsInstant, RocksEngine, ScanMode, Snapshot,
StageLatencyStats, Statistics, TestEngineBuilder,
},
raw::{RawEncodeSnapshot, RawStore},
read_pool::{build_read_pool, build_read_pool_for_test},
txn::{Latches, Lock as LatchLock, ProcessResult, Scanner, SnapshotStore, Store},
types::{PessimisticLockRes, PrewriteResult, SecondaryLocksStatus, StorageCallback, TxnStatus},
};
use crate::read_pool::{ReadPool, ReadPoolHandle};
use crate::storage::metrics::CommandKind;
use crate::storage::mvcc::MvccReader;
use crate::storage::txn::commands::{RawAtomicStore, RawCompareAndSwap};
use crate::storage::txn::flow_controller::FlowController;
use crate::server::lock_manager::waiter_manager;
use crate::storage::{
config::Config,
kv::{with_tls_engine, Modify, WriteData},
lock_manager::{DummyLockManager, LockManager},
metrics::*,
mvcc::PointGetterBuilder,
txn::{commands::TypedCommand, scheduler::Scheduler as TxnScheduler, Command},
types::StorageCallbackType,
};
use concurrency_manager::ConcurrencyManager;
use engine_traits::{
key_prefix,
raw_value::{ttl_to_expire_ts, RawValue},
CfName, CF_DEFAULT, CF_LOCK, CF_WRITE, DATA_CFS,
};
use futures::prelude::*;
use kvproto::kvrpcpb::ApiVersion;
use kvproto::kvrpcpb::{
ChecksumAlgorithm, CommandPri, Context, GetRequest, IsolationLevel, KeyRange, LockInfo,
RawGetRequest,
};
use kvproto::pdpb::QueryKind;
use raftstore::store::{util::build_key_range, TxnExt};
use raftstore::store::{ReadStats, WriteStats};
use rand::prelude::*;
use resource_metering::{FutureExt, ResourceTagFactory};
use std::{
borrow::Cow,
iter,
sync::{
atomic::{self, AtomicBool},
Arc,
},
};
use tikv_kv::SnapshotExt;
use tikv_util::time::{duration_to_ms, Instant, ThreadReadId};
use txn_types::{Key, KvPair, Lock, OldValues, RawMutation, TimeStamp, TsSet, Value};
pub type Result<T> = std::result::Result<T, Error>;
pub type Callback<T> = Box<dyn FnOnce(Result<T>) + Send>;
/// [`Storage`](Storage) implements transactional KV APIs and raw KV APIs on a given [`Engine`].
/// An [`Engine`] provides low level KV functionality. [`Engine`] has multiple implementations.
/// When a TiKV server is running, a [`RaftKv`](crate::server::raftkv::RaftKv) will be the
/// underlying [`Engine`] of [`Storage`]. The other two types of engines are for test purpose.
///
/// [`Storage`] is reference counted and cloning [`Storage`] will just increase the reference counter.
/// Storage resources (i.e. threads, engine) will be released when all references are dropped.
///
/// Notice that read and write methods may not be performed over full data in most cases, i.e. when
/// the underlying engine is [`RaftKv`](crate::server::raftkv::RaftKv),
/// which limits data access in the range of a single region
/// according to the specified `ctx` parameter. However,
/// [`unsafe_destroy_range`](crate::server::gc_worker::GcTask::UnsafeDestroyRange) is the only exception.
/// It's always performed on the whole TiKV.
///
/// Operations of [`Storage`](Storage) can be divided into two types: MVCC operations and raw operations.
/// MVCC operations uses MVCC keys, which usually consist of several physical keys in different
/// CFs. In default CF and write CF, the key will be memcomparable-encoded and append the timestamp
/// to it, so that multiple versions can be saved at the same time.
/// Raw operations use raw keys, which are saved directly to the engine without memcomparable-
/// encoding and appending timestamp.
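///
/// As a rough illustration of the difference: a transactional write of user
/// key `k` at timestamp `ts` lands in the write CF keyed by the
/// memcomparable encoding of `k` with `ts` appended, while a raw put of `k`
/// stores the bytes of `k` directly.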
pub struct Storage<E: Engine, L: LockManager> {
// TODO: Too many Arcs, would be slow when clone.
engine: E,
sched: TxnScheduler<E, L>,
/// The thread pool used to run most read operations.
read_pool: ReadPoolHandle,
concurrency_manager: ConcurrencyManager,
/// How many strong references. Thread pool and workers will be stopped
/// once there are no more references.
// TODO: This should be implemented in thread pool and worker.
refs: Arc<atomic::AtomicUsize>,
// Fields below are storage configurations.
max_key_size: usize,
resource_tag_factory: ResourceTagFactory,
api_version: ApiVersion,
}
impl<E: Engine, L: LockManager> Clone for Storage<E, L> {
#[inline]
fn clone(&self) -> Self {
let refs = self.refs.fetch_add(1, atomic::Ordering::SeqCst);
trace!(
"Storage referenced"; "original_ref" => refs
);
Self {
engine: self.engine.clone(),
sched: self.sched.clone(),
read_pool: self.read_pool.clone(),
refs: self.refs.clone(),
max_key_size: self.max_key_size,
concurrency_manager: self.concurrency_manager.clone(),
api_version: self.api_version,
resource_tag_factory: self.resource_tag_factory.clone(),
}
}
}
impl<E: Engine, L: LockManager> Drop for Storage<E, L> {
#[inline]
fn drop(&mut self) {
let refs = self.refs.fetch_sub(1, atomic::Ordering::SeqCst);
trace!(
"Storage de-referenced"; "original_ref" => refs
);
if refs != 1 {
return;
}
info!("Storage stopped.");
}
}
macro_rules! check_key_size {
($key_iter: expr, $max_key_size: expr, $callback: ident) => {
for k in $key_iter {
let key_size = k.len();
if key_size > $max_key_size {
$callback(Err(Error::from(ErrorInner::KeyTooLarge {
size: key_size,
limit: $max_key_size,
})));
return Ok(());
}
}
};
}
impl<E: Engine, L: LockManager> Storage<E, L> {
/// Create a `Storage` from given engine.
pub fn from_engine<R: FlowStatsReporter>(
engine: E,
config: &Config,
read_pool: ReadPoolHandle,
lock_mgr: L,
concurrency_manager: ConcurrencyManager,
dynamic_switches: DynamicConfigs,
flow_controller: Arc<FlowController>,
reporter: R,
resource_tag_factory: ResourceTagFactory,
) -> Result<Self> {
let sched = TxnScheduler::new(
engine.clone(),
lock_mgr,
concurrency_manager.clone(),
config,
dynamic_switches,
flow_controller,
reporter,
resource_tag_factory.clone(),
);
info!("Storage started.");
Ok(Storage {
engine,
sched,
read_pool,
concurrency_manager,
refs: Arc::new(atomic::AtomicUsize::new(1)),
max_key_size: config.max_key_size,
api_version: config.api_version(),
resource_tag_factory,
})
}
/// Get the underlying `Engine` of the `Storage`.
pub fn get_engine(&self) -> E {
self.engine.clone()
}
pub fn get_concurrency_manager(&self) -> ConcurrencyManager {
self.concurrency_manager.clone()
}
pub fn dump_wait_for_entries(&self, cb: waiter_manager::Callback) {
self.sched.dump_wait_for_entries(cb);
}
/// Get a snapshot of `engine`.
fn snapshot(
engine: &E,
ctx: SnapContext<'_>,
) -> impl std::future::Future<Output = Result<E::Snap>> {
kv::snapshot(engine, ctx)
.map_err(txn::Error::from)
.map_err(Error::from)
}
#[cfg(test)]
pub fn get_snapshot(&self) -> E::Snap {
self.engine.snapshot(Default::default()).unwrap()
}
pub fn release_snapshot(&self) {
self.engine.release_snapshot();
}
pub fn get_readpool_queue_per_worker(&self) -> usize {
self.read_pool.get_queue_size_per_worker()
}
pub fn get_normal_pool_size(&self) -> usize {
self.read_pool.get_normal_pool_size()
}
#[inline]
fn with_tls_engine<F, R>(f: F) -> R
where
F: FnOnce(&E) -> R,
{
// Safety: the read pools ensure that a TLS engine exists.
unsafe { with_tls_engine(f) }
}
/// Check the given raw kv CF name. If the given cf is empty, CF_DEFAULT will be returned.
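    ///
    /// For example, an empty `cf` maps to `CF_DEFAULT` under every API
    /// version; `"lock"` and `"write"` are accepted only under V1/V1ttl, and
    /// any non-empty `cf` under V2 is rejected as deprecated.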
fn rawkv_cf(cf: &str, api_version: ApiVersion) -> Result<CfName> {
match api_version {
ApiVersion::V1 | ApiVersion::V1ttl => {
// In API V1, the possible cfs are CF_DEFAULT, CF_LOCK and CF_WRITE.
if cf.is_empty() {
return Ok(CF_DEFAULT);
}
for c in [CF_DEFAULT, CF_LOCK, CF_WRITE] {
if cf == c {
return Ok(c);
}
}
Err(Error::from(ErrorInner::InvalidCf(cf.to_owned())))
}
ApiVersion::V2 => {
// API V2 doesn't allow raw requests from explicitly specifying a `cf`.
if cf.is_empty() {
return Ok(CF_DEFAULT);
}
Err(Error::from(ErrorInner::CfDeprecated(cf.to_owned())))
}
}
}
/// Check if key range is valid
///
/// - If `reverse` is true, `end_key` is less than `start_key`. `end_key` is the lower bound.
/// - If `reverse` is false, `end_key` is greater than `start_key`. `end_key` is the upper bound.
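    ///
    /// For instance, forward ranges [("a", "c"), ("c", "e")] are valid, while
    /// [("c", "a")] is not; with `reverse == true` the comparison flips, so
    /// [("e", "c"), ("c", "a")] is the valid shape.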
fn check_key_ranges(ranges: &[KeyRange], reverse: bool) -> bool {
let ranges_len = ranges.len();
for i in 0..ranges_len {
let start_key = ranges[i].get_start_key();
let mut end_key = ranges[i].get_end_key();
if end_key.is_empty() && i + 1 != ranges_len {
end_key = ranges[i + 1].get_start_key();
}
if !end_key.is_empty()
&& (!reverse && start_key >= end_key || reverse && start_key <= end_key)
{
return false;
}
}
true
}
    /// Check whether the command is a raw kv command.
#[inline]
fn is_raw_command(cmd: CommandKind) -> bool {
matches!(
cmd,
CommandKind::raw_batch_get_command
| CommandKind::raw_get
| CommandKind::raw_batch_get
| CommandKind::raw_scan
| CommandKind::raw_batch_scan
| CommandKind::raw_put
| CommandKind::raw_batch_put
| CommandKind::raw_delete
| CommandKind::raw_delete_range
| CommandKind::raw_batch_delete
| CommandKind::raw_get_key_ttl
| CommandKind::raw_compare_and_swap
| CommandKind::raw_atomic_store
| CommandKind::raw_checksum
)
}
    /// Check whether the command is a transactional kv command.
#[inline]
fn is_txn_command(cmd: CommandKind) -> bool {
!Self::is_raw_command(cmd)
}
/// Check api version.
///
/// When config.api_version = V1: accept request of V1 only.
/// When config.api_version = V2: accept the following:
/// * Request of V1 from TiDB, for compatibility.
/// * Request of V2 with legal prefix.
/// See the following for detail:
/// * rfc: https://github.com/tikv/rfcs/blob/master/text/0069-api-v2.md.
/// * proto: https://github.com/pingcap/kvproto/blob/master/proto/kvrpcpb.proto, enum APIVersion.
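    ///
    /// For example, with the storage at V2: a raw command sent as V2 must
    /// pass `key_prefix::is_raw_key` for every key, a transactional command
    /// sent as V2 must pass `key_prefix::is_txn_key`, and a transactional
    /// command sent as V1 is only accepted when every key passes
    /// `key_prefix::is_tidb_key` (the TiDB compatibility carve-out).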
fn check_api_version(
storage_api_version: ApiVersion,
req_api_version: ApiVersion,
cmd: CommandKind,
keys: impl IntoIterator<Item = impl AsRef<[u8]>>,
) -> Result<()> {
match (storage_api_version, req_api_version) {
(ApiVersion::V1, ApiVersion::V1) => {}
(ApiVersion::V1ttl, ApiVersion::V1) if Self::is_raw_command(cmd) => {
// storage api_version = V1ttl, allow RawKV request only.
}
(ApiVersion::V2, ApiVersion::V1) if Self::is_txn_command(cmd) => {
// For compatibility, accept TiDB request only.
for key in keys {
if !key_prefix::is_tidb_key(key.as_ref()) {
return Err(ErrorInner::invalid_key_prefix(cmd, key.as_ref()).into());
}
}
}
(ApiVersion::V2, ApiVersion::V2) if Self::is_raw_command(cmd) => {
for key in keys {
if !key_prefix::is_raw_key(key.as_ref()) {
return Err(ErrorInner::invalid_key_prefix(cmd, key.as_ref()).into());
}
}
}
(ApiVersion::V2, ApiVersion::V2) if Self::is_txn_command(cmd) => {
for key in keys {
if !key_prefix::is_txn_key(key.as_ref()) {
return Err(ErrorInner::invalid_key_prefix(cmd, key.as_ref()).into());
}
}
}
_ => {
return Err(Error::from(ErrorInner::ApiVersionNotMatched {
cmd,
storage_api_version,
req_api_version,
}));
}
}
Ok(())
}
/// Get value of the given key from a snapshot.
///
/// Only writes that are committed before `start_ts` are visible.
pub fn get(
&self,
mut ctx: Context,
raw_key: Vec<u8>,
start_ts: TimeStamp,
) -> impl Future<Output = Result<(Option<Value>, KvGetStatistics)>> {
let stage_begin_ts = Instant::now_coarse();
let key: Key = Key::from_raw(&raw_key);
const CMD: CommandKind = CommandKind::get;
let priority = ctx.get_priority();
let priority_tag = get_priority_tag(priority);
let resource_tag = self.resource_tag_factory.new_tag(&ctx);
let concurrency_manager = self.concurrency_manager.clone();
let api_version = self.api_version;
let res = self.read_pool.spawn_handle(
async move {
let stage_scheduled_ts = Instant::now_coarse();
tls_collect_query(
ctx.get_region_id(),
ctx.get_peer(),
key.as_encoded(),
key.as_encoded(),
false,
QueryKind::Get,
);
KV_COMMAND_COUNTER_VEC_STATIC.get(CMD).inc();
SCHED_COMMANDS_PRI_COUNTER_VEC_STATIC
.get(priority_tag)
.inc();
Self::check_api_version(api_version, ctx.api_version, CMD, [&raw_key])?;
let command_duration = tikv_util::time::Instant::now_coarse();
// The bypass_locks and access_locks set will be checked at most once.
// `TsSet::vec` is more efficient here.
let bypass_locks = TsSet::vec_from_u64s(ctx.take_resolved_locks());
let access_locks = TsSet::vec_from_u64s(ctx.take_committed_locks());
let snap_ctx = prepare_snap_ctx(
&ctx,
iter::once(&key),
start_ts,
&bypass_locks,
&concurrency_manager,
CMD,
)?;
let snapshot =
Self::with_tls_engine(|engine| Self::snapshot(engine, snap_ctx)).await?;
{
let begin_instant = Instant::now_coarse();
let stage_snap_recv_ts = begin_instant;
let mut statistics = Statistics::default();
let perf_statistics = PerfStatisticsInstant::new();
let snap_store = SnapshotStore::new(
snapshot,
start_ts,
ctx.get_isolation_level(),
!ctx.get_not_fill_cache(),
bypass_locks,
access_locks,
false,
);
let result = snap_store
.get(&key, &mut statistics)
// map storage::txn::Error -> storage::Error
.map_err(Error::from)
.map(|r| {
KV_COMMAND_KEYREAD_HISTOGRAM_STATIC.get(CMD).observe(1_f64);
r
});
let delta = perf_statistics.delta();
metrics::tls_collect_scan_details(CMD, &statistics);
metrics::tls_collect_read_flow(ctx.get_region_id(), &statistics);
metrics::tls_collect_perf_stats(CMD, &delta);
SCHED_PROCESSING_READ_HISTOGRAM_STATIC
.get(CMD)
.observe(begin_instant.saturating_elapsed_secs());
SCHED_HISTOGRAM_VEC_STATIC
.get(CMD)
.observe(command_duration.saturating_elapsed_secs());
let stage_finished_ts = Instant::now_coarse();
let schedule_wait_time =
stage_scheduled_ts.saturating_duration_since(stage_begin_ts);
let snapshot_wait_time =
stage_snap_recv_ts.saturating_duration_since(stage_scheduled_ts);
let wait_wall_time =
stage_snap_recv_ts.saturating_duration_since(stage_begin_ts);
let process_wall_time =
stage_finished_ts.saturating_duration_since(stage_snap_recv_ts);
let latency_stats = StageLatencyStats {
schedule_wait_time_ms: duration_to_ms(schedule_wait_time),
snapshot_wait_time_ms: duration_to_ms(snapshot_wait_time),
wait_wall_time_ms: duration_to_ms(wait_wall_time),
process_wall_time_ms: duration_to_ms(process_wall_time),
};
Ok((
result?,
KvGetStatistics {
stats: statistics,
perf_stats: delta,
latency_stats,
},
))
}
}
.in_resource_metering_tag(resource_tag),
priority,
thread_rng().next_u64(),
);
async move {
res.map_err(|_| Error::from(ErrorInner::SchedTooBusy))
.await?
}
}
/// Get values of a set of keys with separate context from a snapshot, return a list of `Result`s.
///
/// Only writes that are committed before their respective `start_ts` are visible.
pub fn batch_get_command<
P: 'static + ResponseBatchConsumer<(Option<Vec<u8>>, Statistics, PerfStatisticsDelta)>,
>(
&self,
requests: Vec<GetRequest>,
ids: Vec<u64>,
consumer: P,
begin_instant: tikv_util::time::Instant,
) -> impl Future<Output = Result<()>> {
const CMD: CommandKind = CommandKind::batch_get_command;
// all requests in a batch have the same region, epoch, term, replica_read
let priority = requests[0].get_context().get_priority();
let concurrency_manager = self.concurrency_manager.clone();
let api_version = self.api_version;
// The resource tags of these batched requests are not the same, and it is quite expensive
        // to distinguish them, so we pick a random one of them as a representative.
let rand_index = rand::thread_rng().gen_range(0, requests.len());
let resource_tag = self
.resource_tag_factory
.new_tag(requests[rand_index].get_context());
let res = self.read_pool.spawn_handle(
async move {
KV_COMMAND_COUNTER_VEC_STATIC.get(CMD).inc();
KV_COMMAND_KEYREAD_HISTOGRAM_STATIC
.get(CMD)
.observe(requests.len() as f64);
let command_duration = tikv_util::time::Instant::now_coarse();
let read_id = Some(ThreadReadId::new());
let mut statistics = Statistics::default();
let mut req_snaps = vec![];
for (mut req, id) in requests.into_iter().zip(ids) {
let mut ctx = req.take_context();
let region_id = ctx.get_region_id();
let peer = ctx.get_peer();
let raw_key = req.get_key();
let key = Key::from_raw(raw_key);
tls_collect_query(
region_id,
peer,
key.as_encoded(),
key.as_encoded(),
false,
QueryKind::Get,
);
Self::check_api_version(api_version, ctx.api_version, CMD, [raw_key])?;
let start_ts = req.get_version().into();
let isolation_level = ctx.get_isolation_level();
let fill_cache = !ctx.get_not_fill_cache();
let bypass_locks = TsSet::vec_from_u64s(ctx.take_resolved_locks());
let access_locks = TsSet::vec_from_u64s(ctx.take_committed_locks());
let region_id = ctx.get_region_id();
let snap_ctx = match prepare_snap_ctx(
&ctx,
iter::once(&key),
start_ts,
&bypass_locks,
&concurrency_manager,
CMD,
) {
Ok(mut snap_ctx) => {
snap_ctx.read_id = if ctx.get_stale_read() {
None
} else {
read_id.clone()
};
snap_ctx
}
Err(e) => {
consumer.consume(id, Err(e), begin_instant);
continue;
}
};
let snap = Self::with_tls_engine(|engine| Self::snapshot(engine, snap_ctx));
req_snaps.push((
snap,
key,
start_ts,
isolation_level,
fill_cache,
bypass_locks,
access_locks,
region_id,
id,
));
}
Self::with_tls_engine(|engine| engine.release_snapshot());
for req_snap in req_snaps {
let (
snap,
key,
start_ts,
isolation_level,
fill_cache,
bypass_locks,
access_locks,
region_id,
id,
) = req_snap;
match snap.await {
Ok(snapshot) => {
match PointGetterBuilder::new(snapshot, start_ts)
.fill_cache(fill_cache)
.isolation_level(isolation_level)
.multi(false)
.bypass_locks(bypass_locks)
.access_locks(access_locks)
.build()
{
Ok(mut point_getter) => {
let perf_statistics = PerfStatisticsInstant::new();
let v = point_getter.get(&key);
let stat = point_getter.take_statistics();
let delta = perf_statistics.delta();
metrics::tls_collect_read_flow(region_id, &stat);
metrics::tls_collect_perf_stats(CMD, &delta);
statistics.add(&stat);
consumer.consume(
id,
v.map_err(|e| Error::from(txn::Error::from(e)))
.map(|v| (v, stat, delta)),
begin_instant,
);
}
Err(e) => {
consumer.consume(
id,
Err(Error::from(txn::Error::from(e))),
begin_instant,
);
}
}
}
Err(e) => {
consumer.consume(id, Err(e), begin_instant);
}
}
}
metrics::tls_collect_scan_details(CMD, &statistics);
SCHED_HISTOGRAM_VEC_STATIC
.get(CMD)
.observe(command_duration.saturating_elapsed_secs());
Ok(())
}
.in_resource_metering_tag(resource_tag),
priority,
thread_rng().next_u64(),
);
async move {
res.map_err(|_| Error::from(ErrorInner::SchedTooBusy))
.await?
}
}
/// Get values of a set of keys in a batch from the snapshot.
///
/// Only writes that are committed before `start_ts` are visible.
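    ///
    /// A minimal sketch (not a doctest); `storage` is assumed to be built elsewhere:
    ///
    /// ```ignore
    /// let (pairs, stats) = block_on(storage.batch_get(
    ///     Context::default(),
    ///     vec![b"a".to_vec(), b"b".to_vec()],
    ///     5.into(), // start_ts
    /// ))?;
    /// ```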
pub fn batch_get(
&self,
mut ctx: Context,
raw_keys: Vec<Vec<u8>>,
start_ts: TimeStamp,
) -> impl Future<Output = Result<(Vec<Result<KvPair>>, KvGetStatistics)>> {
let stage_begin_ts = Instant::now_coarse();
const CMD: CommandKind = CommandKind::batch_get;
let priority = ctx.get_priority();
let priority_tag = get_priority_tag(priority);
let resource_tag = self.resource_tag_factory.new_tag(&ctx);
let concurrency_manager = self.concurrency_manager.clone();
let api_version = self.api_version;
let keys: Vec<Key> = raw_keys.iter().map(|x| Key::from_raw(x)).collect();
let res = self.read_pool.spawn_handle(
async move {
let stage_scheduled_ts = Instant::now_coarse();
let mut key_ranges = vec![];
for key in &keys {
key_ranges.push(build_key_range(key.as_encoded(), key.as_encoded(), false));
}
tls_collect_query_batch(
ctx.get_region_id(),
ctx.get_peer(),
key_ranges,
QueryKind::Get,
);
KV_COMMAND_COUNTER_VEC_STATIC.get(CMD).inc();
SCHED_COMMANDS_PRI_COUNTER_VEC_STATIC
.get(priority_tag)
.inc();
Self::check_api_version(api_version, ctx.api_version, CMD, &raw_keys)?;
let command_duration = tikv_util::time::Instant::now_coarse();
let bypass_locks = TsSet::from_u64s(ctx.take_resolved_locks());
let access_locks = TsSet::from_u64s(ctx.take_committed_locks());
let snap_ctx = prepare_snap_ctx(
&ctx,
&keys,
start_ts,
&bypass_locks,
&concurrency_manager,
CMD,
)?;
let snapshot =
Self::with_tls_engine(|engine| Self::snapshot(engine, snap_ctx)).await?;
{
let begin_instant = Instant::now_coarse();
let stage_snap_recv_ts = begin_instant;
let mut statistics = Statistics::default();
let perf_statistics = PerfStatisticsInstant::new();
let snap_store = SnapshotStore::new(
snapshot,
start_ts,
ctx.get_isolation_level(),
!ctx.get_not_fill_cache(),
bypass_locks,
access_locks,
false,
);
let result = snap_store
.batch_get(&keys, &mut statistics)
.map_err(Error::from)
.map(|v| {
let kv_pairs: Vec<_> = v
.into_iter()
.zip(keys)
.filter(|&(ref v, ref _k)| {
!(v.is_ok() && v.as_ref().unwrap().is_none())
})
.map(|(v, k)| match v {
Ok(Some(x)) => Ok((k.into_raw().unwrap(), x)),
Err(e) => Err(Error::from(e)),
_ => unreachable!(),
})
.collect();
KV_COMMAND_KEYREAD_HISTOGRAM_STATIC
.get(CMD)
.observe(kv_pairs.len() as f64);
kv_pairs
});
let delta = perf_statistics.delta();
metrics::tls_collect_scan_details(CMD, &statistics);
metrics::tls_collect_read_flow(ctx.get_region_id(), &statistics);
metrics::tls_collect_perf_stats(CMD, &delta);
SCHED_PROCESSING_READ_HISTOGRAM_STATIC
.get(CMD)
.observe(begin_instant.saturating_elapsed_secs());
SCHED_HISTOGRAM_VEC_STATIC
.get(CMD)
.observe(command_duration.saturating_elapsed_secs());
let stage_finished_ts = Instant::now_coarse();
let schedule_wait_time =
stage_scheduled_ts.saturating_duration_since(stage_begin_ts);
let snapshot_wait_time =
stage_snap_recv_ts.saturating_duration_since(stage_scheduled_ts);
let wait_wall_time =
stage_snap_recv_ts.saturating_duration_since(stage_begin_ts);
let process_wall_time =
stage_finished_ts.saturating_duration_since(stage_snap_recv_ts);
let latency_stats = StageLatencyStats {
schedule_wait_time_ms: duration_to_ms(schedule_wait_time),
snapshot_wait_time_ms: duration_to_ms(snapshot_wait_time),
wait_wall_time_ms: duration_to_ms(wait_wall_time),
process_wall_time_ms: duration_to_ms(process_wall_time),
};
Ok((
result?,
KvGetStatistics {
stats: statistics,
perf_stats: delta,
latency_stats,
},
))
}
}
.in_resource_metering_tag(resource_tag),
priority,
thread_rng().next_u64(),
);
async move {
res.map_err(|_| Error::from(ErrorInner::SchedTooBusy))
.await?
}
}
    /// Scan keys in [`start_key`, `end_key`) up to `limit` keys from the snapshot.
    /// If `reverse_scan` is true, it scans [`end_key`, `start_key`) in descending order.
    /// If `end_key` is `None`, the upper bound (or the lower bound, for a reverse scan) is unbounded.
///
/// Only writes committed before `start_ts` are visible.
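    ///
    /// A minimal sketch (not a doctest); `storage` is assumed:
    ///
    /// ```ignore
    /// // Forward scan of at most 10 pairs in ["a", "z") as of ts 5.
    /// let pairs = block_on(storage.scan(
    ///     Context::default(),
    ///     b"a".to_vec(),
    ///     Some(b"z".to_vec()),
    ///     10,       // limit
    ///     0,        // sample_step (0 = no sampling)
    ///     5.into(), // start_ts
    ///     false,    // key_only
    ///     false,    // reverse_scan
    /// ))?;
    /// ```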
pub fn scan(
&self,
mut ctx: Context,
raw_start_key: Vec<u8>,
raw_end_key: Option<Vec<u8>>,
limit: usize,
sample_step: usize,
start_ts: TimeStamp,
key_only: bool,
reverse_scan: bool,
) -> impl Future<Output = Result<Vec<Result<KvPair>>>> {
let (start_key, end_key) = (
Key::from_raw(&raw_start_key),
raw_end_key.as_ref().map(|x| Key::from_raw(&x[..])),
);
const CMD: CommandKind = CommandKind::scan;
let priority = ctx.get_priority();
let priority_tag = get_priority_tag(priority);
let resource_tag = self.resource_tag_factory.new_tag(&ctx);
let concurrency_manager = self.concurrency_manager.clone();
let api_version = self.api_version;
let res = self.read_pool.spawn_handle(
async move {
{
let end_key = match &end_key {
Some(k) => k.as_encoded().as_slice(),
None => &[],
};
tls_collect_query(
ctx.get_region_id(),
ctx.get_peer(),
start_key.as_encoded(),
end_key,
reverse_scan,
QueryKind::Scan,
);
}
KV_COMMAND_COUNTER_VEC_STATIC.get(CMD).inc();
SCHED_COMMANDS_PRI_COUNTER_VEC_STATIC
.get(priority_tag)
.inc();
let raw_keys = Some(raw_start_key)
.into_iter()
.chain(raw_end_key.into_iter());
Self::check_api_version(api_version, ctx.api_version, CMD, raw_keys)?;
let (mut start_key, mut end_key) = (Some(start_key), end_key);
if reverse_scan {
std::mem::swap(&mut start_key, &mut end_key);
}
let command_duration = tikv_util::time::Instant::now_coarse();
let bypass_locks = TsSet::from_u64s(ctx.take_resolved_locks());
let access_locks = TsSet::from_u64s(ctx.take_committed_locks());
// Update max_ts and check the in-memory lock table before getting the snapshot
if !ctx.get_stale_read() {
concurrency_manager.update_max_ts(start_ts);
}
if ctx.get_isolation_level() == IsolationLevel::Si {
let begin_instant = Instant::now();
concurrency_manager
.read_range_check(start_key.as_ref(), end_key.as_ref(), |key, lock| {
Lock::check_ts_conflict(
Cow::Borrowed(lock),
key,
start_ts,
&bypass_locks,
)
})
.map_err(|e| {
CHECK_MEM_LOCK_DURATION_HISTOGRAM_VEC
.get(CMD)
.locked
.observe(begin_instant.saturating_elapsed().as_secs_f64());
txn::Error::from_mvcc(e)
})?;
CHECK_MEM_LOCK_DURATION_HISTOGRAM_VEC
.get(CMD)
.unlocked
.observe(begin_instant.saturating_elapsed().as_secs_f64());
}
let mut snap_ctx = SnapContext {
pb_ctx: &ctx,
start_ts,
..Default::default()
};
if need_check_locks_in_replica_read(&ctx) {
let mut key_range = KeyRange::default();
if let Some(start_key) = &start_key {
key_range.set_start_key(start_key.as_encoded().to_vec());
}
if let Some(end_key) = &end_key {
key_range.set_end_key(end_key.as_encoded().to_vec());
}
snap_ctx.key_ranges = vec![key_range];
}
let snapshot =
Self::with_tls_engine(|engine| Self::snapshot(engine, snap_ctx)).await?;
{
let begin_instant = Instant::now_coarse();
let perf_statistics = PerfStatisticsInstant::new();
let snap_store = SnapshotStore::new(
snapshot,
start_ts,
ctx.get_isolation_level(),
!ctx.get_not_fill_cache(),
bypass_locks,
access_locks,
false,
);
let mut scanner =
snap_store.scanner(reverse_scan, key_only, false, start_key, end_key)?;
let res = scanner.scan(limit, sample_step);
let statistics = scanner.take_statistics();
let delta = perf_statistics.delta();
metrics::tls_collect_scan_details(CMD, &statistics);
metrics::tls_collect_read_flow(ctx.get_region_id(), &statistics);
metrics::tls_collect_perf_stats(CMD, &delta);
SCHED_PROCESSING_READ_HISTOGRAM_STATIC
.get(CMD)
.observe(begin_instant.saturating_elapsed_secs());
SCHED_HISTOGRAM_VEC_STATIC
.get(CMD)
.observe(command_duration.saturating_elapsed_secs());
res.map_err(Error::from).map(|results| {
KV_COMMAND_KEYREAD_HISTOGRAM_STATIC
.get(CMD)
.observe(results.len() as f64);
results
.into_iter()
.map(|x| x.map_err(Error::from))
.collect()
})
}
}
.in_resource_metering_tag(resource_tag),
priority,
thread_rng().next_u64(),
);
async move {
res.map_err(|_| Error::from(ErrorInner::SchedTooBusy))
.await?
}
}
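    /// Scan the locks in [`start_key`, `end_key`) whose timestamp is not greater
    /// than `max_ts`, returning at most `limit` of their `LockInfo`s. A `None`
    /// bound means unbounded on that side.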
pub fn scan_lock(
&self,
mut ctx: Context,
max_ts: TimeStamp,
raw_start_key: Option<Vec<u8>>,
raw_end_key: Option<Vec<u8>>,
limit: usize,
) -> impl Future<Output = Result<Vec<LockInfo>>> {
let (start_key, end_key) = (
raw_start_key.as_ref().map(|x| Key::from_raw(&x[..])),
raw_end_key.as_ref().map(|x| Key::from_raw(&x[..])),
);
const CMD: CommandKind = CommandKind::scan_lock;
let priority = ctx.get_priority();
let priority_tag = get_priority_tag(priority);
let resource_tag = self.resource_tag_factory.new_tag(&ctx);
let concurrency_manager = self.concurrency_manager.clone();
let api_version = self.api_version;
// Do not allow replica read for scan_lock.
ctx.set_replica_read(false);
let res = self.read_pool.spawn_handle(
async move {
if let Some(start_key) = &start_key {
let end_key = match &end_key {
Some(k) => k.as_encoded().as_slice(),
None => &[],
};
tls_collect_query(
ctx.get_region_id(),
ctx.get_peer(),
start_key.as_encoded(),
end_key,
false,
QueryKind::Scan,
);
}
KV_COMMAND_COUNTER_VEC_STATIC.get(CMD).inc();
SCHED_COMMANDS_PRI_COUNTER_VEC_STATIC
.get(priority_tag)
.inc();
let raw_keys = raw_start_key.into_iter().chain(raw_end_key.into_iter());
Self::check_api_version(api_version, ctx.api_version, CMD, raw_keys)?;
let command_duration = tikv_util::time::Instant::now_coarse();
concurrency_manager.update_max_ts(max_ts);
let begin_instant = Instant::now();
// TODO: Though it's very unlikely to find a conflicting memory lock here, it's not
// a good idea to return an error to the client, making the GC fail. A better
// approach is to wait for these locks to be unlocked.
concurrency_manager.read_range_check(
start_key.as_ref(),
end_key.as_ref(),
|key, lock| {
// `Lock::check_ts_conflict` can't be used here, because LockType::Lock
// can't be ignored in this case.
if lock.ts <= max_ts {
CHECK_MEM_LOCK_DURATION_HISTOGRAM_VEC
.get(CMD)
.locked
.observe(begin_instant.saturating_elapsed().as_secs_f64());
Err(txn::Error::from_mvcc(mvcc::ErrorInner::KeyIsLocked(
lock.clone().into_lock_info(key.to_raw()?),
)))
} else {
Ok(())
}
},
)?;
CHECK_MEM_LOCK_DURATION_HISTOGRAM_VEC
.get(CMD)
.unlocked
.observe(begin_instant.saturating_elapsed().as_secs_f64());
let snap_ctx = SnapContext {
pb_ctx: &ctx,
..Default::default()
};
let snapshot =
Self::with_tls_engine(|engine| Self::snapshot(engine, snap_ctx)).await?;
{
let begin_instant = Instant::now_coarse();
let mut statistics = Statistics::default();
let perf_statistics = PerfStatisticsInstant::new();
let mut reader = MvccReader::new(
snapshot,
Some(ScanMode::Forward),
!ctx.get_not_fill_cache(),
);
let result = reader
.scan_locks(
start_key.as_ref(),
end_key.as_ref(),
|lock| lock.ts <= max_ts,
limit,
)
.map_err(txn::Error::from);
statistics.add(&reader.statistics);
let (kv_pairs, _) = result?;
let mut locks = Vec::with_capacity(kv_pairs.len());
for (key, lock) in kv_pairs {
let lock_info =
lock.into_lock_info(key.into_raw().map_err(txn::Error::from)?);
locks.push(lock_info);
}
let delta = perf_statistics.delta();
metrics::tls_collect_scan_details(CMD, &statistics);
metrics::tls_collect_read_flow(ctx.get_region_id(), &statistics);
metrics::tls_collect_perf_stats(CMD, &delta);
SCHED_PROCESSING_READ_HISTOGRAM_STATIC
.get(CMD)
.observe(begin_instant.saturating_elapsed_secs());
SCHED_HISTOGRAM_VEC_STATIC
.get(CMD)
.observe(command_duration.saturating_elapsed_secs());
Ok(locks)
}
}
.in_resource_metering_tag(resource_tag),
priority,
thread_rng().next_u64(),
);
async move {
res.map_err(|_| Error::from(ErrorInner::SchedTooBusy))
.await?
}
}
    // The entry point of the storage scheduler. Commands that need to access keys
    // serially (not only transaction commands) are dispatched through here.
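    //
    // A minimal sketch (not a doctest) of scheduling a command, mirroring the tests
    // below; `storage` and the channel are assumed:
    //
    //     let (tx, rx) = channel();
    //     storage.sched_txn_command(
    //         commands::Commit::new(vec![Key::from_raw(b"x")], 100.into(), 101.into(), Context::default()),
    //         expect_ok_callback(tx, 0),
    //     )?;
    //     rx.recv().unwrap();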
pub fn sched_txn_command<T: StorageCallbackType>(
&self,
cmd: TypedCommand<T>,
callback: Callback<T>,
) -> Result<()> {
use crate::storage::txn::commands::{
AcquirePessimisticLock, Prewrite, PrewritePessimistic,
};
let cmd: Command = cmd.into();
match &cmd {
Command::Prewrite(Prewrite { mutations, .. }) => {
let keys = mutations.iter().map(|m| m.key().as_encoded());
Self::check_api_version(
self.api_version,
cmd.ctx().api_version,
CommandKind::prewrite,
keys.clone(),
)?;
check_key_size!(keys, self.max_key_size, callback);
}
Command::PrewritePessimistic(PrewritePessimistic { mutations, .. }) => {
let keys = mutations.iter().map(|(m, _)| m.key().as_encoded());
Self::check_api_version(
self.api_version,
cmd.ctx().api_version,
CommandKind::prewrite,
keys.clone(),
)?;
check_key_size!(keys, self.max_key_size, callback);
}
Command::AcquirePessimisticLock(AcquirePessimisticLock { keys, .. }) => {
let keys = keys.iter().map(|k| k.0.as_encoded());
Self::check_api_version(
self.api_version,
cmd.ctx().api_version,
CommandKind::prewrite,
keys.clone(),
)?;
check_key_size!(keys, self.max_key_size, callback);
}
_ => {}
}
fail_point!("storage_drop_message", |_| Ok(()));
cmd.incr_cmd_metric();
self.sched.run_cmd(cmd, T::callback(callback));
Ok(())
}
/// Delete all keys in the range [`start_key`, `end_key`).
///
/// All keys in the range will be deleted permanently regardless of their timestamps.
/// This means that deleted keys will not be retrievable by specifying an older timestamp.
/// If `notify_only` is set, the data will not be immediately deleted, but the operation will
/// still be replicated via Raft. This is used to notify that the data will be deleted by
/// [`unsafe_destroy_range`](crate::server::gc_worker::GcTask::UnsafeDestroyRange) soon.
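    ///
    /// A minimal sketch (not a doctest); `storage` and the channel are assumed:
    ///
    /// ```ignore
    /// let (tx, rx) = channel();
    /// storage.delete_range(
    ///     Context::default(),
    ///     b"a".to_vec(),
    ///     b"z".to_vec(),
    ///     false, // notify_only: delete the data immediately
    ///     Box::new(move |res| tx.send(res).unwrap()),
    /// )?;
    /// rx.recv().unwrap().unwrap();
    /// ```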
pub fn delete_range(
&self,
ctx: Context,
raw_start_key: Vec<u8>,
raw_end_key: Vec<u8>,
notify_only: bool,
callback: Callback<()>,
) -> Result<()> {
let (start_key, end_key) = (Key::from_raw(&raw_start_key), Key::from_raw(&raw_end_key));
Self::check_api_version(
self.api_version,
ctx.api_version,
CommandKind::delete_range,
[raw_start_key, raw_end_key],
)?;
let mut modifies = Vec::with_capacity(DATA_CFS.len());
for cf in DATA_CFS {
modifies.push(Modify::DeleteRange(
cf,
start_key.clone(),
end_key.clone(),
notify_only,
));
}
let mut batch = WriteData::from_modifies(modifies);
batch.set_allowed_on_disk_almost_full();
self.engine.async_write(
&ctx,
batch,
Box::new(|res| callback(res.map_err(Error::from))),
)?;
KV_COMMAND_COUNTER_VEC_STATIC.delete_range.inc();
Ok(())
}
/// Get the value of a raw key.
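    ///
    /// A minimal sketch (not a doctest); `storage` is assumed, and `"default"` is
    /// used as the column family name for illustration:
    ///
    /// ```ignore
    /// let value = block_on(storage.raw_get(
    ///     Context::default(),
    ///     "default".to_string(),
    ///     b"k".to_vec(),
    /// ))?;
    /// ```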
pub fn raw_get(
&self,
ctx: Context,
cf: String,
key: Vec<u8>,
) -> impl Future<Output = Result<Option<Vec<u8>>>> {
const CMD: CommandKind = CommandKind::raw_get;
let priority = ctx.get_priority();
let priority_tag = get_priority_tag(priority);
let resource_tag = self.resource_tag_factory.new_tag(&ctx);
let api_version = self.api_version;
let res = self.read_pool.spawn_handle(
async move {
tls_collect_query(
ctx.get_region_id(),
ctx.get_peer(),
&key,
&key,
false,
QueryKind::Get,
);
KV_COMMAND_COUNTER_VEC_STATIC.get(CMD).inc();
SCHED_COMMANDS_PRI_COUNTER_VEC_STATIC
.get(priority_tag)
.inc();
Self::check_api_version(api_version, ctx.api_version, CMD, [&key])?;
let command_duration = tikv_util::time::Instant::now_coarse();
let snap_ctx = SnapContext {
pb_ctx: &ctx,
..Default::default()
};
let snapshot =
Self::with_tls_engine(|engine| Self::snapshot(engine, snap_ctx)).await?;
let store = RawStore::new(snapshot, api_version);
let cf = Self::rawkv_cf(&cf, api_version)?;
{
let begin_instant = Instant::now_coarse();
let mut stats = Statistics::default();
let r = store.raw_get_key_value(cf, &Key::from_encoded(key), &mut stats);
KV_COMMAND_KEYREAD_HISTOGRAM_STATIC.get(CMD).observe(1_f64);
tls_collect_read_flow(ctx.get_region_id(), &stats);
SCHED_PROCESSING_READ_HISTOGRAM_STATIC
.get(CMD)
.observe(begin_instant.saturating_elapsed_secs());
SCHED_HISTOGRAM_VEC_STATIC
.get(CMD)
.observe(command_duration.saturating_elapsed_secs());
r
}
}
.in_resource_metering_tag(resource_tag),
priority,
thread_rng().next_u64(),
);
async move {
res.map_err(|_| Error::from(ErrorInner::SchedTooBusy))
.await?
}
}
    /// Get the values of a set of raw keys; each result is delivered to `consumer` individually.
pub fn raw_batch_get_command<P: 'static + ResponseBatchConsumer<Option<Vec<u8>>>>(
&self,
gets: Vec<RawGetRequest>,
ids: Vec<u64>,
consumer: P,
) -> impl Future<Output = Result<()>> {
const CMD: CommandKind = CommandKind::raw_batch_get_command;
// all requests in a batch have the same region, epoch, term, replica_read
let priority = gets[0].get_context().get_priority();
let priority_tag = get_priority_tag(priority);
let api_version = self.api_version;
        // The resource tags of these batched requests are not the same, and it is quite expensive
        // to distinguish them, so we pick a random one of them as a representative.
let rand_index = rand::thread_rng().gen_range(0, gets.len());
let resource_tag = self
.resource_tag_factory
.new_tag(gets[rand_index].get_context());
let res = self.read_pool.spawn_handle(
async move {
for get in &gets {
let key = get.key.to_owned();
let region_id = get.get_context().get_region_id();
let peer = get.get_context().get_peer();
tls_collect_query(region_id, peer, &key, &key, false, QueryKind::Get);
}
KV_COMMAND_COUNTER_VEC_STATIC.get(CMD).inc();
SCHED_COMMANDS_PRI_COUNTER_VEC_STATIC
.get(priority_tag)
.inc();
KV_COMMAND_KEYREAD_HISTOGRAM_STATIC
.get(CMD)
.observe(gets.len() as f64);
for get in &gets {
Self::check_api_version(
api_version,
get.get_context().api_version,
CMD,
[get.get_key()],
)?;
}
let command_duration = tikv_util::time::Instant::now_coarse();
let read_id = Some(ThreadReadId::new());
let mut snaps = vec![];
for (req, id) in gets.into_iter().zip(ids) {
let snap_ctx = SnapContext {
pb_ctx: req.get_context(),
read_id: read_id.clone(),
..Default::default()
};
let snap = Self::with_tls_engine(|engine| Self::snapshot(engine, snap_ctx));
snaps.push((id, req, snap));
}
Self::with_tls_engine(|engine| engine.release_snapshot());
let begin_instant = Instant::now_coarse();
for (id, mut req, snap) in snaps {
let ctx = req.take_context();
let cf = req.take_cf();
let key = req.take_key();
match snap.await {
Ok(snapshot) => {
let mut stats = Statistics::default();
let store = RawStore::new(snapshot, api_version);
match Self::rawkv_cf(&cf, api_version) {
Ok(cf) => {
consumer.consume(
id,
store.raw_get_key_value(
cf,
&Key::from_encoded(key),
&mut stats,
),
begin_instant,
);
tls_collect_read_flow(ctx.get_region_id(), &stats);
}
Err(e) => {
consumer.consume(id, Err(e), begin_instant);
}
}
}
Err(e) => {
consumer.consume(id, Err(e), begin_instant);
}
}
}
SCHED_PROCESSING_READ_HISTOGRAM_STATIC
.get(CMD)
.observe(begin_instant.saturating_elapsed_secs());
SCHED_HISTOGRAM_VEC_STATIC
.get(CMD)
.observe(command_duration.saturating_elapsed_secs());
Ok(())
}
.in_resource_metering_tag(resource_tag),
priority,
thread_rng().next_u64(),
);
async move {
res.map_err(|_| Error::from(ErrorInner::SchedTooBusy))
.await?
}
}
/// Get the values of some raw keys in a batch.
pub fn raw_batch_get(
&self,
ctx: Context,
cf: String,
keys: Vec<Vec<u8>>,
) -> impl Future<Output = Result<Vec<Result<KvPair>>>> {
const CMD: CommandKind = CommandKind::raw_batch_get;
let priority = ctx.get_priority();
let priority_tag = get_priority_tag(priority);
let resource_tag = self.resource_tag_factory.new_tag(&ctx);
let api_version = self.api_version;
let res = self.read_pool.spawn_handle(
async move {
let mut key_ranges = vec![];
for key in &keys {
key_ranges.push(build_key_range(key, key, false));
}
tls_collect_query_batch(
ctx.get_region_id(),
ctx.get_peer(),
key_ranges,
QueryKind::Get,
);
KV_COMMAND_COUNTER_VEC_STATIC.get(CMD).inc();
SCHED_COMMANDS_PRI_COUNTER_VEC_STATIC
.get(priority_tag)
.inc();
Self::check_api_version(api_version, ctx.api_version, CMD, &keys)?;
let command_duration = tikv_util::time::Instant::now_coarse();
let snap_ctx = SnapContext {
pb_ctx: &ctx,
..Default::default()
};
let snapshot =
Self::with_tls_engine(|engine| Self::snapshot(engine, snap_ctx)).await?;
let store = RawStore::new(snapshot, api_version);
{
let begin_instant = Instant::now_coarse();
let cf = Self::rawkv_cf(&cf, api_version)?;
// no scan_count for this kind of op.
let mut stats = Statistics::default();
let result: Vec<Result<KvPair>> = keys
.into_iter()
.map(Key::from_encoded)
.map(|k| {
let v = store.raw_get_key_value(cf, &k, &mut stats);
(k, v)
})
.filter(|&(_, ref v)| !(v.is_ok() && v.as_ref().unwrap().is_none()))
.map(|(k, v)| match v {
Ok(v) => Ok((k.into_encoded(), v.unwrap())),
Err(v) => Err(v),
})
.collect();
KV_COMMAND_KEYREAD_HISTOGRAM_STATIC
.get(CMD)
.observe(stats.data.flow_stats.read_keys as f64);
tls_collect_read_flow(ctx.get_region_id(), &stats);
SCHED_PROCESSING_READ_HISTOGRAM_STATIC
.get(CMD)
.observe(begin_instant.saturating_elapsed_secs());
SCHED_HISTOGRAM_VEC_STATIC
.get(CMD)
.observe(command_duration.saturating_elapsed_secs());
Ok(result)
}
}
.in_resource_metering_tag(resource_tag),
priority,
thread_rng().next_u64(),
);
async move {
res.map_err(|_| Error::from(ErrorInner::SchedTooBusy))
.await?
}
}
    /// Write a raw key/value pair to the storage.
pub fn raw_put(
&self,
ctx: Context,
cf: String,
key: Vec<u8>,
value: Vec<u8>,
ttl: u64,
callback: Callback<()>,
) -> Result<()> {
const CMD: CommandKind = CommandKind::raw_put;
let api_version = self.api_version;
Self::check_api_version(api_version, ctx.api_version, CMD, [&key])?;
check_key_size!(Some(&key).into_iter(), self.max_key_size, callback);
if self.api_version == ApiVersion::V1 && ttl != 0 {
return Err(Error::from(ErrorInner::TTLNotEnabled));
}
let raw_value = RawValue {
user_value: value,
expire_ts: ttl_to_expire_ts(ttl),
};
let m = Modify::Put(
Self::rawkv_cf(&cf, self.api_version)?,
Key::from_encoded(key),
raw_value.to_bytes(self.api_version),
);
let mut batch = WriteData::from_modifies(vec![m]);
batch.set_allowed_on_disk_almost_full();
self.engine.async_write(
&ctx,
batch,
Box::new(|res| callback(res.map_err(Error::from))),
)?;
KV_COMMAND_COUNTER_VEC_STATIC.raw_put.inc();
Ok(())
}
    /// Write some raw key/value pairs to the storage in a batch.
pub fn raw_batch_put(
&self,
ctx: Context,
cf: String,
pairs: Vec<KvPair>,
ttls: Vec<u64>,
callback: Callback<()>,
) -> Result<()> {
Self::check_api_version(
self.api_version,
ctx.api_version,
CommandKind::raw_batch_put,
pairs.iter().map(|(ref k, _)| k),
)?;
let cf = Self::rawkv_cf(&cf, self.api_version)?;
check_key_size!(
pairs.iter().map(|(ref k, _)| k),
self.max_key_size,
callback
);
if self.api_version == ApiVersion::V1 {
if ttls.iter().any(|&x| x != 0) {
return Err(Error::from(ErrorInner::TTLNotEnabled));
}
} else if ttls.len() != pairs.len() {
return Err(Error::from(ErrorInner::TTLsLenNotEqualsToPairs));
}
let modifies = pairs
.into_iter()
.zip(ttls)
.map(|((k, v), ttl)| {
let raw_value = RawValue {
user_value: v,
expire_ts: ttl_to_expire_ts(ttl),
};
Modify::Put(
cf,
Key::from_encoded(k),
raw_value.to_bytes(self.api_version),
)
})
.collect();
let mut batch = WriteData::from_modifies(modifies);
batch.set_allowed_on_disk_almost_full();
self.engine.async_write(
&ctx,
batch,
Box::new(|res| callback(res.map_err(Error::from))),
)?;
KV_COMMAND_COUNTER_VEC_STATIC.raw_batch_put.inc();
Ok(())
}
/// Delete a raw key from the storage.
pub fn raw_delete(
&self,
ctx: Context,
cf: String,
key: Vec<u8>,
callback: Callback<()>,
) -> Result<()> {
Self::check_api_version(
self.api_version,
ctx.api_version,
CommandKind::raw_delete,
[&key],
)?;
check_key_size!(Some(&key).into_iter(), self.max_key_size, callback);
let mut batch = WriteData::from_modifies(vec![Modify::Delete(
Self::rawkv_cf(&cf, self.api_version)?,
Key::from_encoded(key),
)]);
batch.set_allowed_on_disk_almost_full();
self.engine.async_write(
&ctx,
batch,
Box::new(|res| callback(res.map_err(Error::from))),
)?;
KV_COMMAND_COUNTER_VEC_STATIC.raw_delete.inc();
Ok(())
}
/// Delete all raw keys in [`start_key`, `end_key`).
pub fn raw_delete_range(
&self,
ctx: Context,
cf: String,
start_key: Vec<u8>,
end_key: Vec<u8>,
callback: Callback<()>,
) -> Result<()> {
let keys = [&start_key, &end_key];
check_key_size!(keys.iter(), self.max_key_size, callback);
Self::check_api_version(
self.api_version,
ctx.api_version,
CommandKind::raw_delete_range,
keys,
)?;
let cf = Self::rawkv_cf(&cf, self.api_version)?;
let start_key = Key::from_encoded(start_key);
let end_key = Key::from_encoded(end_key);
let mut batch =
WriteData::from_modifies(vec![Modify::DeleteRange(cf, start_key, end_key, false)]);
batch.set_allowed_on_disk_almost_full();
self.engine.async_write(
&ctx,
batch,
Box::new(|res| callback(res.map_err(Error::from))),
)?;
KV_COMMAND_COUNTER_VEC_STATIC.raw_delete_range.inc();
Ok(())
}
/// Delete some raw keys in a batch.
pub fn raw_batch_delete(
&self,
ctx: Context,
cf: String,
keys: Vec<Vec<u8>>,
callback: Callback<()>,
) -> Result<()> {
Self::check_api_version(
self.api_version,
ctx.api_version,
CommandKind::raw_batch_delete,
&keys,
)?;
let cf = Self::rawkv_cf(&cf, self.api_version)?;
check_key_size!(keys.iter(), self.max_key_size, callback);
let modifies = keys
.into_iter()
.map(|k| Modify::Delete(cf, Key::from_encoded(k)))
.collect();
let mut batch = WriteData::from_modifies(modifies);
batch.set_allowed_on_disk_almost_full();
self.engine.async_write(
&ctx,
batch,
Box::new(|res| callback(res.map_err(Error::from))),
)?;
KV_COMMAND_COUNTER_VEC_STATIC.raw_batch_delete.inc();
Ok(())
}
/// Scan raw keys in a range.
///
    /// If `reverse_scan` is false, the range is [`start_key`, `end_key`); otherwise, the range is
    /// [`end_key`, `start_key`) and the scan starts from `start_key` and goes backwards. If `end_key`
    /// is `None`, the range is unbounded.
    ///
    /// This function scans at most `limit` keys.
    ///
    /// If `key_only` is true, the values corresponding to the keys will not be read out; only the
    /// scanned keys are returned.
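    ///
    /// A minimal sketch (not a doctest); `storage` is assumed, and `"default"` is used as the
    /// column family name for illustration:
    ///
    /// ```ignore
    /// // At most 10 keys (values skipped) in ["a", "z"), scanned forward.
    /// let pairs = block_on(storage.raw_scan(
    ///     Context::default(),
    ///     "default".to_string(),
    ///     b"a".to_vec(),
    ///     Some(b"z".to_vec()),
    ///     10,    // limit
    ///     true,  // key_only
    ///     false, // reverse_scan
    /// ))?;
    /// ```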
pub fn raw_scan(
&self,
ctx: Context,
cf: String,
start_key: Vec<u8>,
end_key: Option<Vec<u8>>,
limit: usize,
key_only: bool,
reverse_scan: bool,
) -> impl Future<Output = Result<Vec<Result<KvPair>>>> {
const CMD: CommandKind = CommandKind::raw_scan;
let priority = ctx.get_priority();
let priority_tag = get_priority_tag(priority);
let resource_tag = self.resource_tag_factory.new_tag(&ctx);
let api_version = self.api_version;
let res = self.read_pool.spawn_handle(
async move {
{
tls_collect_query(
ctx.get_region_id(),
ctx.get_peer(),
&start_key,
end_key.as_ref().unwrap_or(&vec![]),
reverse_scan,
QueryKind::Scan,
);
}
KV_COMMAND_COUNTER_VEC_STATIC.get(CMD).inc();
SCHED_COMMANDS_PRI_COUNTER_VEC_STATIC
.get(priority_tag)
.inc();
Self::check_api_version(
api_version,
ctx.api_version,
CMD,
                    [&start_key, end_key.as_ref().unwrap_or(&vec![])], // API V2 prohibits unbounded ranges; an empty key is treated as an invalid prefix.
)?;
let command_duration = tikv_util::time::Instant::now_coarse();
let snap_ctx = SnapContext {
pb_ctx: &ctx,
..Default::default()
};
let snapshot =
Self::with_tls_engine(|engine| Self::snapshot(engine, snap_ctx)).await?;
let cf = Self::rawkv_cf(&cf, api_version)?;
{
let store = RawStore::new(snapshot, api_version);
let begin_instant = Instant::now_coarse();
let start_key = Key::from_encoded(start_key);
let end_key = end_key.map(Key::from_encoded);
let mut statistics = Statistics::default();
let result = if reverse_scan {
store
.reverse_raw_scan(
cf,
&start_key,
end_key.as_ref(),
limit,
&mut statistics,
key_only,
)
.await
} else {
store
.forward_raw_scan(
cf,
&start_key,
end_key.as_ref(),
limit,
&mut statistics,
key_only,
)
.await
}
.map_err(Error::from);
metrics::tls_collect_read_flow(ctx.get_region_id(), &statistics);
KV_COMMAND_KEYREAD_HISTOGRAM_STATIC
.get(CMD)
.observe(statistics.data.flow_stats.read_keys as f64);
metrics::tls_collect_scan_details(CMD, &statistics);
SCHED_PROCESSING_READ_HISTOGRAM_STATIC
.get(CMD)
.observe(begin_instant.saturating_elapsed_secs());
SCHED_HISTOGRAM_VEC_STATIC
.get(CMD)
.observe(command_duration.saturating_elapsed_secs());
result
}
}
.in_resource_metering_tag(resource_tag),
priority,
thread_rng().next_u64(),
);
async move {
res.map_err(|_| Error::from(ErrorInner::SchedTooBusy))
.await?
}
}
/// Scan raw keys in multiple ranges in a batch.
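    ///
    /// Within a batch, a range with an empty `end_key` is bounded by the start key
    /// of the next range, and the last range with an empty `end_key` is unbounded
    /// (see the loop over `ranges` below).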
pub fn raw_batch_scan(
&self,
ctx: Context,
cf: String,
mut ranges: Vec<KeyRange>,
each_limit: usize,
key_only: bool,
reverse_scan: bool,
) -> impl Future<Output = Result<Vec<Result<KvPair>>>> {
const CMD: CommandKind = CommandKind::raw_batch_scan;
let priority = ctx.get_priority();
let priority_tag = get_priority_tag(priority);
let resource_tag = self.resource_tag_factory.new_tag(&ctx);
let api_version = self.api_version;
let res = self.read_pool.spawn_handle(
async move {
KV_COMMAND_COUNTER_VEC_STATIC.get(CMD).inc();
SCHED_COMMANDS_PRI_COUNTER_VEC_STATIC
.get(priority_tag)
.inc();
let keys = ranges
.iter()
.flat_map(|range| [range.get_start_key(), range.get_end_key()]);
Self::check_api_version(api_version, ctx.api_version, CMD, keys)?;
let command_duration = tikv_util::time::Instant::now_coarse();
let snap_ctx = SnapContext {
pb_ctx: &ctx,
..Default::default()
};
let snapshot =
Self::with_tls_engine(|engine| Self::snapshot(engine, snap_ctx)).await?;
let cf = Self::rawkv_cf(&cf, api_version)?;
{
let store = RawStore::new(snapshot, api_version);
let begin_instant = Instant::now();
let mut statistics = Statistics::default();
if !Self::check_key_ranges(&ranges, reverse_scan) {
return Err(box_err!("Invalid KeyRanges"));
};
let mut result = Vec::new();
let mut key_ranges = vec![];
for range in &ranges {
key_ranges.push(build_key_range(
&range.start_key,
&range.end_key,
reverse_scan,
));
}
let ranges_len = ranges.len();
for i in 0..ranges_len {
let start_key = Key::from_encoded(ranges[i].take_start_key());
let end_key = ranges[i].take_end_key();
let end_key = if end_key.is_empty() {
if i + 1 == ranges_len {
None
} else {
Some(Key::from_encoded_slice(ranges[i + 1].get_start_key()))
}
} else {
Some(Key::from_encoded(end_key))
};
let pairs: Vec<Result<KvPair>> = if reverse_scan {
store
.reverse_raw_scan(
cf,
&start_key,
end_key.as_ref(),
each_limit,
&mut statistics,
key_only,
)
.await
} else {
store
.forward_raw_scan(
cf,
&start_key,
end_key.as_ref(),
each_limit,
&mut statistics,
key_only,
)
.await
}?;
result.extend(pairs.into_iter());
}
tls_collect_query_batch(
ctx.get_region_id(),
ctx.get_peer(),
key_ranges,
QueryKind::Scan,
);
metrics::tls_collect_read_flow(ctx.get_region_id(), &statistics);
KV_COMMAND_KEYREAD_HISTOGRAM_STATIC
.get(CMD)
.observe(statistics.data.flow_stats.read_keys as f64);
metrics::tls_collect_scan_details(CMD, &statistics);
SCHED_PROCESSING_READ_HISTOGRAM_STATIC
.get(CMD)
.observe(begin_instant.saturating_elapsed_secs());
SCHED_HISTOGRAM_VEC_STATIC
.get(CMD)
.observe(command_duration.saturating_elapsed_secs());
Ok(result)
}
}
.in_resource_metering_tag(resource_tag),
priority,
thread_rng().next_u64(),
);
async move {
res.map_err(|_| Error::from(ErrorInner::SchedTooBusy))
.await?
}
}
    /// Get the remaining TTL of a raw key.
pub fn raw_get_key_ttl(
&self,
ctx: Context,
cf: String,
key: Vec<u8>,
) -> impl Future<Output = Result<Option<u64>>> {
const CMD: CommandKind = CommandKind::raw_get_key_ttl;
let priority = ctx.get_priority();
let priority_tag = get_priority_tag(priority);
let resource_tag = self.resource_tag_factory.new_tag(&ctx);
let api_version = self.api_version;
let res = self.read_pool.spawn_handle(
async move {
tls_collect_query(
ctx.get_region_id(),
ctx.get_peer(),
&key,
&key,
false,
QueryKind::Get,
);
KV_COMMAND_COUNTER_VEC_STATIC.get(CMD).inc();
SCHED_COMMANDS_PRI_COUNTER_VEC_STATIC
.get(priority_tag)
.inc();
Self::check_api_version(api_version, ctx.api_version, CMD, [&key])?;
let command_duration = tikv_util::time::Instant::now_coarse();
let snap_ctx = SnapContext {
pb_ctx: &ctx,
..Default::default()
};
let snapshot =
Self::with_tls_engine(|engine| Self::snapshot(engine, snap_ctx)).await?;
let store = RawStore::new(snapshot, api_version);
let cf = Self::rawkv_cf(&cf, api_version)?;
{
let begin_instant = Instant::now_coarse();
let mut stats = Statistics::default();
let r = store.raw_get_key_ttl(cf, &Key::from_encoded(key), &mut stats);
KV_COMMAND_KEYREAD_HISTOGRAM_STATIC.get(CMD).observe(1_f64);
tls_collect_read_flow(ctx.get_region_id(), &stats);
SCHED_PROCESSING_READ_HISTOGRAM_STATIC
.get(CMD)
.observe(begin_instant.saturating_elapsed_secs());
SCHED_HISTOGRAM_VEC_STATIC
.get(CMD)
.observe(command_duration.saturating_elapsed_secs());
r
}
}
.in_resource_metering_tag(resource_tag),
priority,
thread_rng().next_u64(),
);
async move {
res.map_err(|_| Error::from(ErrorInner::SchedTooBusy))
.await?
}
}
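    /// Atomically compare the current value of `key` with `previous_value` and, if
    /// they match, write `value` with the given `ttl`. The callback receives the
    /// previous value together with a flag telling whether the swap took place.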
pub fn raw_compare_and_swap_atomic(
&self,
ctx: Context,
cf: String,
key: Vec<u8>,
previous_value: Option<Vec<u8>>,
value: Vec<u8>,
ttl: u64,
cb: Callback<(Option<Value>, bool)>,
) -> Result<()> {
Self::check_api_version(
self.api_version,
ctx.api_version,
CommandKind::raw_compare_and_swap,
[&key],
)?;
let cf = Self::rawkv_cf(&cf, self.api_version)?;
if self.api_version == ApiVersion::V1 && ttl != 0 {
return Err(Error::from(ErrorInner::TTLNotEnabled));
}
let cmd = RawCompareAndSwap::new(
cf,
Key::from_encoded(key),
previous_value,
value,
ttl,
self.api_version,
ctx,
);
self.sched_txn_command(cmd, cb)
}
pub fn raw_batch_put_atomic(
&self,
ctx: Context,
cf: String,
pairs: Vec<KvPair>,
ttls: Vec<u64>,
callback: Callback<()>,
) -> Result<()> {
Self::check_api_version(
self.api_version,
ctx.api_version,
CommandKind::raw_atomic_store,
pairs.iter().map(|(ref k, _)| k),
)?;
let cf = Self::rawkv_cf(&cf, self.api_version)?;
let mutations = match self.api_version {
ApiVersion::V1 => {
if ttls.iter().any(|&x| x != 0) {
return Err(Error::from(ErrorInner::TTLNotEnabled));
}
pairs
.into_iter()
.map(|(k, v)| RawMutation::Put {
key: Key::from_encoded(k),
value: v,
ttl: 0,
})
.collect()
}
ApiVersion::V1ttl | ApiVersion::V2 => {
if ttls.len() != pairs.len() {
return Err(Error::from(ErrorInner::TTLsLenNotEqualsToPairs));
}
pairs
.iter()
.zip(ttls)
.map(|((k, v), ttl)| RawMutation::Put {
key: Key::from_encoded(k.to_vec()),
value: v.to_vec(),
ttl,
})
.collect()
}
};
let cmd = RawAtomicStore::new(cf, mutations, self.api_version, ctx);
self.sched_txn_command(cmd, callback)
}
pub fn raw_batch_delete_atomic(
&self,
ctx: Context,
cf: String,
keys: Vec<Vec<u8>>,
callback: Callback<()>,
) -> Result<()> {
Self::check_api_version(
self.api_version,
ctx.api_version,
CommandKind::raw_atomic_store,
&keys,
)?;
let cf = Self::rawkv_cf(&cf, self.api_version)?;
        let mutations = keys
.into_iter()
.map(|k| RawMutation::Delete {
key: Key::from_encoded(k),
})
.collect();
        let cmd = RawAtomicStore::new(cf, mutations, self.api_version, ctx);
self.sched_txn_command(cmd, callback)
}
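    /// Compute the checksum of the raw keys in `ranges`, yielding a triple of
    /// `u64`s (checksum, total kvs, total bytes — per the raw checksum convention,
    /// an assumption here). Only `Crc64Xor` is supported, and only under API V2.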
pub fn raw_checksum(
&self,
ctx: Context,
algorithm: ChecksumAlgorithm,
ranges: Vec<KeyRange>,
) -> impl Future<Output = Result<(u64, u64, u64)>> {
const CMD: CommandKind = CommandKind::raw_checksum;
let priority = ctx.get_priority();
let priority_tag = get_priority_tag(priority);
let resource_tag = self.resource_tag_factory.new_tag(&ctx);
let api_version = self.api_version;
let res = self.read_pool.spawn_handle(
async move {
KV_COMMAND_COUNTER_VEC_STATIC.get(CMD).inc();
SCHED_COMMANDS_PRI_COUNTER_VEC_STATIC
.get(priority_tag)
.inc();
if let ApiVersion::V1 | ApiVersion::V1ttl = api_version {
return Err(box_err!("raw_checksum is only available in API V2"));
}
if algorithm != ChecksumAlgorithm::Crc64Xor {
return Err(box_err!("unknown checksum algorithm {:?}", algorithm));
}
let keys = ranges
.iter()
.flat_map(|range| [range.get_start_key(), range.get_end_key()]);
Self::check_api_version(api_version, ctx.api_version, CMD, keys)?;
let command_duration = tikv_util::time::Instant::now_coarse();
let snap_ctx = SnapContext {
pb_ctx: &ctx,
..Default::default()
};
let snapshot =
Self::with_tls_engine(|engine| Self::snapshot(engine, snap_ctx)).await?;
let begin_instant = tikv_util::time::Instant::now_coarse();
            // raw_checksum is only available in API V2, where TTL must be enabled.
let ret = raw::raw_checksum_ranges(
RawEncodeSnapshot::from_snapshot(snapshot, api_version),
ranges,
)
.await;
SCHED_PROCESSING_READ_HISTOGRAM_STATIC
.get(CMD)
.observe(begin_instant.saturating_elapsed().as_secs_f64());
SCHED_HISTOGRAM_VEC_STATIC
.get(CMD)
.observe(command_duration.saturating_elapsed().as_secs_f64());
ret
}
.in_resource_metering_tag(resource_tag),
priority,
thread_rng().next_u64(),
);
async move {
res.map_err(|_| Error::from(ErrorInner::SchedTooBusy))
.await?
}
}
}
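/// Switches that can be flipped at runtime; they are shared with the scheduler as
/// atomics, so no restart is needed to change them.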
pub struct DynamicConfigs {
pub pipelined_pessimistic_lock: Arc<AtomicBool>,
pub in_memory_pessimistic_lock: Arc<AtomicBool>,
}
fn get_priority_tag(priority: CommandPri) -> CommandPriority {
match priority {
CommandPri::Low => CommandPriority::low,
CommandPri::Normal => CommandPriority::normal,
CommandPri::High => CommandPriority::high,
}
}
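/// Prepare a `SnapContext` for a read at `start_ts`: advance `max_ts`, check the
/// in-memory lock table under `Si` isolation, and attach point key ranges when a
/// replica read needs lock checking.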
fn prepare_snap_ctx<'a>(
pb_ctx: &'a Context,
keys: impl IntoIterator<Item = &'a Key> + Clone,
start_ts: TimeStamp,
bypass_locks: &'a TsSet,
concurrency_manager: &ConcurrencyManager,
cmd: CommandKind,
) -> Result<SnapContext<'a>> {
// Update max_ts and check the in-memory lock table before getting the snapshot
if !pb_ctx.get_stale_read() {
concurrency_manager.update_max_ts(start_ts);
}
fail_point!("before-storage-check-memory-locks");
let isolation_level = pb_ctx.get_isolation_level();
if isolation_level == IsolationLevel::Si {
let begin_instant = Instant::now();
for key in keys.clone() {
concurrency_manager
.read_key_check(key, |lock| {
                    // No need to check access_locks because they are committed, which means they
                    // can't be in the in-memory lock table.
Lock::check_ts_conflict(Cow::Borrowed(lock), key, start_ts, bypass_locks)
})
.map_err(|e| {
CHECK_MEM_LOCK_DURATION_HISTOGRAM_VEC
.get(cmd)
.locked
.observe(begin_instant.saturating_elapsed().as_secs_f64());
txn::Error::from_mvcc(e)
})?;
}
CHECK_MEM_LOCK_DURATION_HISTOGRAM_VEC
.get(cmd)
.unlocked
.observe(begin_instant.saturating_elapsed().as_secs_f64());
}
let mut snap_ctx = SnapContext {
pb_ctx,
start_ts,
..Default::default()
};
if need_check_locks_in_replica_read(pb_ctx) {
snap_ctx.key_ranges = keys
.into_iter()
.map(|k| point_key_range(k.clone()))
.collect();
}
Ok(snap_ctx)
}
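/// A replica read under `Si` isolation has to check memory locks on the leader, so
/// the key ranges must be attached to the snapshot request.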
pub fn need_check_locks_in_replica_read(ctx: &Context) -> bool {
ctx.get_replica_read() && ctx.get_isolation_level() == IsolationLevel::Si
}
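/// Build a `KeyRange` covering exactly one encoded key: the end key is the start
/// key with a `0` byte appended, i.e. the smallest key strictly greater than it.
///
/// A minimal sketch (not a doctest):
///
/// ```ignore
/// let r = point_key_range(Key::from_raw(b"k"));
/// assert!(r.get_end_key() > r.get_start_key());
/// ```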
pub fn point_key_range(key: Key) -> KeyRange {
let mut end_key = key.as_encoded().to_vec();
end_key.push(0);
let end_key = Key::from_encoded(end_key);
let mut key_range = KeyRange::default();
key_range.set_start_key(key.into_encoded());
key_range.set_end_key(end_key.into_encoded());
key_range
}
/// A builder to build a temporary `Storage<E>`.
///
/// Only used for test purposes.
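///
/// A minimal sketch (not a doctest) of building one, mirroring the tests below:
///
/// ```ignore
/// let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
///     .build()
///     .unwrap();
/// ```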
#[must_use]
pub struct TestStorageBuilder<E: Engine, L: LockManager> {
engine: E,
config: Config,
pipelined_pessimistic_lock: Arc<AtomicBool>,
in_memory_pessimistic_lock: Arc<AtomicBool>,
lock_mgr: L,
resource_tag_factory: ResourceTagFactory,
}
impl TestStorageBuilder<RocksEngine, DummyLockManager> {
    /// Create a builder for a `Storage<RocksEngine>`.
pub fn new(lock_mgr: DummyLockManager, api_version: ApiVersion) -> Self {
let engine = TestEngineBuilder::new()
.api_version(api_version)
.build()
.unwrap();
Self::from_engine_and_lock_mgr(engine, lock_mgr, api_version)
}
}
/// An `Engine` with `TxnExt`. It is used for test purposes.
#[derive(Clone)]
pub struct TxnTestEngine<E: Engine> {
engine: E,
txn_ext: Arc<TxnExt>,
}
impl<E: Engine> Engine for TxnTestEngine<E> {
type Snap = TxnTestSnapshot<E::Snap>;
type Local = E::Local;
fn kv_engine(&self) -> Self::Local {
self.engine.kv_engine()
}
fn snapshot_on_kv_engine(
&self,
start_key: &[u8],
end_key: &[u8],
) -> tikv_kv::Result<Self::Snap> {
let snapshot = self.engine.snapshot_on_kv_engine(start_key, end_key)?;
Ok(TxnTestSnapshot {
snapshot,
txn_ext: self.txn_ext.clone(),
})
}
fn modify_on_kv_engine(&self, modifies: Vec<Modify>) -> tikv_kv::Result<()> {
self.engine.modify_on_kv_engine(modifies)
}
fn async_snapshot(
&self,
ctx: SnapContext<'_>,
cb: tikv_kv::Callback<Self::Snap>,
) -> tikv_kv::Result<()> {
let txn_ext = self.txn_ext.clone();
self.engine.async_snapshot(
ctx,
Box::new(move |snapshot| {
cb(snapshot.map(|snapshot| TxnTestSnapshot { snapshot, txn_ext }))
}),
)
}
fn async_write(
&self,
ctx: &Context,
batch: WriteData,
write_cb: tikv_kv::Callback<()>,
) -> tikv_kv::Result<()> {
self.engine.async_write(ctx, batch, write_cb)
}
}
#[derive(Clone)]
pub struct TxnTestSnapshot<S: Snapshot> {
snapshot: S,
txn_ext: Arc<TxnExt>,
}
impl<S: Snapshot> Snapshot for TxnTestSnapshot<S> {
type Iter = S::Iter;
type Ext<'a> = TxnTestSnapshotExt<'a>;
fn get(&self, key: &Key) -> tikv_kv::Result<Option<Value>> {
self.snapshot.get(key)
}
fn get_cf(&self, cf: CfName, key: &Key) -> tikv_kv::Result<Option<Value>> {
self.snapshot.get_cf(cf, key)
}
fn get_cf_opt(
&self,
opts: engine_traits::ReadOptions,
cf: CfName,
key: &Key,
) -> tikv_kv::Result<Option<Value>> {
self.snapshot.get_cf_opt(opts, cf, key)
}
fn iter(&self, iter_opt: engine_traits::IterOptions) -> tikv_kv::Result<Self::Iter> {
self.snapshot.iter(iter_opt)
}
fn iter_cf(
&self,
cf: CfName,
iter_opt: engine_traits::IterOptions,
) -> tikv_kv::Result<Self::Iter> {
self.snapshot.iter_cf(cf, iter_opt)
}
fn ext(&self) -> Self::Ext<'_> {
TxnTestSnapshotExt(&self.txn_ext)
}
}
pub struct TxnTestSnapshotExt<'a>(&'a Arc<TxnExt>);
impl<'a> SnapshotExt for TxnTestSnapshotExt<'a> {
fn get_txn_ext(&self) -> Option<&Arc<TxnExt>> {
Some(self.0)
}
}
#[derive(Clone)]
struct DummyReporter;
impl FlowStatsReporter for DummyReporter {
fn report_read_stats(&self, _read_stats: ReadStats) {}
fn report_write_stats(&self, _write_stats: WriteStats) {}
}
impl<E: Engine, L: LockManager> TestStorageBuilder<E, L> {
pub fn from_engine_and_lock_mgr(engine: E, lock_mgr: L, api_version: ApiVersion) -> Self {
let mut config = Config::default();
config.set_api_version(api_version);
Self {
engine,
config,
pipelined_pessimistic_lock: Arc::new(AtomicBool::new(false)),
in_memory_pessimistic_lock: Arc::new(AtomicBool::new(false)),
lock_mgr,
resource_tag_factory: ResourceTagFactory::new_for_test(),
}
}
/// Customize the config of the `Storage`.
///
/// By default, `Config::default()` will be used.
pub fn config(mut self, config: Config) -> Self {
self.config = config;
self
}
pub fn pipelined_pessimistic_lock(self, enabled: bool) -> Self {
self.pipelined_pessimistic_lock
.store(enabled, atomic::Ordering::Relaxed);
self
}
pub fn async_apply_prewrite(mut self, enabled: bool) -> Self {
self.config.enable_async_apply_prewrite = enabled;
self
}
pub fn in_memory_pessimistic_lock(self, enabled: bool) -> Self {
self.in_memory_pessimistic_lock
.store(enabled, atomic::Ordering::Relaxed);
self
}
pub fn set_api_version(mut self, api_version: ApiVersion) -> Self {
self.config.set_api_version(api_version);
self
}
pub fn set_resource_tag_factory(mut self, resource_tag_factory: ResourceTagFactory) -> Self {
self.resource_tag_factory = resource_tag_factory;
self
}
/// Build a `Storage<E>`.
pub fn build(self) -> Result<Storage<E, L>> {
let read_pool = build_read_pool_for_test(
&crate::config::StorageReadPoolConfig::default_for_test(),
self.engine.clone(),
);
Storage::from_engine(
self.engine,
&self.config,
ReadPool::from(read_pool).handle(),
self.lock_mgr,
ConcurrencyManager::new(1.into()),
DynamicConfigs {
pipelined_pessimistic_lock: self.pipelined_pessimistic_lock,
in_memory_pessimistic_lock: self.in_memory_pessimistic_lock,
},
Arc::new(FlowController::empty()),
DummyReporter,
self.resource_tag_factory,
)
}
pub fn build_for_txn(self, txn_ext: Arc<TxnExt>) -> Result<Storage<TxnTestEngine<E>, L>> {
let engine = TxnTestEngine {
engine: self.engine,
txn_ext,
};
let read_pool = build_read_pool_for_test(
&crate::config::StorageReadPoolConfig::default_for_test(),
engine.clone(),
);
Storage::from_engine(
engine,
&self.config,
ReadPool::from(read_pool).handle(),
self.lock_mgr,
ConcurrencyManager::new(1.into()),
DynamicConfigs {
pipelined_pessimistic_lock: self.pipelined_pessimistic_lock,
in_memory_pessimistic_lock: self.in_memory_pessimistic_lock,
},
Arc::new(FlowController::empty()),
DummyReporter,
ResourceTagFactory::new_for_test(),
)
}
}
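/// A sink for the per-request results of the batched read paths
/// (`batch_get_command` and `raw_batch_get_command`); `id` identifies the request
/// a result belongs to.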
pub trait ResponseBatchConsumer<ConsumeResponse: Sized>: Send {
fn consume(&self, id: u64, res: Result<ConsumeResponse>, begin: Instant);
}
pub mod test_util {
use super::*;
use crate::storage::txn::commands;
use std::sync::Mutex;
use std::{
fmt::Debug,
sync::mpsc::{channel, Sender},
};
pub fn expect_none(x: Option<Value>) {
assert_eq!(x, None);
}
pub fn expect_value(v: Vec<u8>, x: Option<Value>) {
assert_eq!(x.unwrap(), v);
}
pub fn expect_multi_values(v: Vec<Option<KvPair>>, x: Vec<Result<KvPair>>) {
let x: Vec<Option<KvPair>> = x.into_iter().map(Result::ok).collect();
assert_eq!(x, v);
}
pub fn expect_error<T, F>(err_matcher: F, x: Result<T>)
where
F: FnOnce(Error) + Send + 'static,
{
match x {
Err(e) => err_matcher(e),
_ => panic!("expect result to be an error"),
}
}
pub fn expect_ok_callback<T: Debug>(done: Sender<i32>, id: i32) -> Callback<T> {
Box::new(move |x: Result<T>| {
x.unwrap();
done.send(id).unwrap();
})
}
pub fn expect_fail_callback<T, F>(done: Sender<i32>, id: i32, err_matcher: F) -> Callback<T>
where
F: FnOnce(Error) + Send + 'static,
{
Box::new(move |x: Result<T>| {
expect_error(err_matcher, x);
done.send(id).unwrap();
})
}
pub fn expect_too_busy_callback<T>(done: Sender<i32>, id: i32) -> Callback<T> {
Box::new(move |x: Result<T>| {
expect_error(
|err| match err {
Error(box ErrorInner::SchedTooBusy) => {}
e => panic!("unexpected error chain: {:?}, expect too busy", e),
},
x,
);
done.send(id).unwrap();
})
}
pub fn expect_value_callback<T: PartialEq + Debug + Send + 'static>(
done: Sender<i32>,
id: i32,
value: T,
) -> Callback<T> {
Box::new(move |x: Result<T>| {
assert_eq!(x.unwrap(), value);
done.send(id).unwrap();
})
}
pub fn expect_pessimistic_lock_res_callback(
done: Sender<i32>,
pessimistic_lock_res: PessimisticLockRes,
) -> Callback<Result<PessimisticLockRes>> {
Box::new(move |res: Result<Result<PessimisticLockRes>>| {
assert_eq!(res.unwrap().unwrap(), pessimistic_lock_res);
done.send(0).unwrap();
})
}
pub fn expect_secondary_locks_status_callback(
done: Sender<i32>,
secondary_locks_status: SecondaryLocksStatus,
) -> Callback<SecondaryLocksStatus> {
Box::new(move |res: Result<SecondaryLocksStatus>| {
assert_eq!(res.unwrap(), secondary_locks_status);
done.send(0).unwrap();
})
}
type PessimisticLockCommand = TypedCommand<Result<PessimisticLockRes>>;
pub fn new_acquire_pessimistic_lock_command(
keys: Vec<(Key, bool)>,
start_ts: impl Into<TimeStamp>,
for_update_ts: impl Into<TimeStamp>,
return_values: bool,
check_existence: bool,
) -> PessimisticLockCommand {
let primary = keys[0].0.clone().to_raw().unwrap();
let for_update_ts: TimeStamp = for_update_ts.into();
commands::AcquirePessimisticLock::new(
keys,
primary,
start_ts.into(),
3000,
false,
for_update_ts,
None,
return_values,
for_update_ts.next(),
OldValues::default(),
check_existence,
Context::default(),
)
}
pub fn delete_pessimistic_lock<E: Engine, L: LockManager>(
storage: &Storage<E, L>,
key: Key,
start_ts: u64,
for_update_ts: u64,
) {
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::PessimisticRollback::new(
vec![key],
start_ts.into(),
for_update_ts.into(),
Context::default(),
),
expect_ok_callback(tx, 0),
)
.unwrap();
rx.recv().unwrap();
}
pub struct GetResult {
id: u64,
res: Result<Option<Vec<u8>>>,
}
#[derive(Clone)]
pub struct GetConsumer {
pub data: Arc<Mutex<Vec<GetResult>>>,
}
impl GetConsumer {
pub fn new() -> Self {
Self {
data: Arc::new(Mutex::new(vec![])),
}
}
pub fn take_data(self) -> Vec<Result<Option<Vec<u8>>>> {
let mut data = self.data.lock().unwrap();
let mut results = std::mem::take(&mut *data);
results.sort_by_key(|k| k.id);
results.into_iter().map(|v| v.res).collect()
}
}
impl Default for GetConsumer {
fn default() -> Self {
Self::new()
}
}
impl ResponseBatchConsumer<(Option<Vec<u8>>, Statistics, PerfStatisticsDelta)> for GetConsumer {
fn consume(
&self,
id: u64,
res: Result<(Option<Vec<u8>>, Statistics, PerfStatisticsDelta)>,
_: tikv_util::time::Instant,
) {
self.data.lock().unwrap().push(GetResult {
id,
res: res.map(|(v, ..)| v),
});
}
}
impl ResponseBatchConsumer<Option<Vec<u8>>> for GetConsumer {
fn consume(&self, id: u64, res: Result<Option<Vec<u8>>>, _: tikv_util::time::Instant) {
self.data.lock().unwrap().push(GetResult { id, res });
}
}
}
/// All statistics related to KvGet/KvBatchGet.
#[derive(Debug, Default, Clone)]
pub struct KvGetStatistics {
pub stats: Statistics,
pub perf_stats: PerfStatisticsDelta,
pub latency_stats: StageLatencyStats,
}
#[cfg(test)]
mod tests {
use super::{
mvcc::tests::{must_unlocked, must_written},
test_util::*,
*,
};
use crate::config::TitanDBConfig;
use crate::storage::kv::{ExpectedWrite, MockEngineBuilder};
use crate::storage::lock_manager::DiagnosticContext;
use crate::storage::mvcc::LockType;
use crate::storage::txn::commands::{AcquirePessimisticLock, Prewrite};
use crate::storage::txn::tests::must_rollback;
use crate::storage::{
config::BlockCacheConfig,
kv::{Error as KvError, ErrorInner as EngineErrorInner},
lock_manager::{Lock, WaitTimeout},
mvcc::{Error as MvccError, ErrorInner as MvccErrorInner},
txn::{commands, Error as TxnError, ErrorInner as TxnErrorInner},
};
use collections::HashMap;
use engine_rocks::raw_util::CFOptions;
use engine_traits::{raw_value::ttl_current_ts, ALL_CFS, CF_LOCK, CF_RAFT, CF_WRITE};
use error_code::ErrorCodeExt;
use errors::extract_key_error;
use futures::executor::block_on;
use kvproto::kvrpcpb::{AssertionLevel, CommandPri, Op};
use std::{
sync::{
atomic::{AtomicBool, Ordering},
mpsc::{channel, Sender},
Arc,
},
time::Duration,
};
use tikv_util::config::ReadableSize;
use txn_types::{Mutation, PessimisticLock, WriteType};
#[test]
fn test_prewrite_blocks_read() {
use kvproto::kvrpcpb::ExtraOp;
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.build()
.unwrap();
// We have to do the prewrite manually so that the mem locks don't get released.
let snapshot = storage.engine.snapshot(Default::default()).unwrap();
let mutations = vec![Mutation::make_put(Key::from_raw(b"x"), b"z".to_vec())];
let mut cmd = commands::Prewrite::with_defaults(mutations, vec![1, 2, 3], 10.into());
if let Command::Prewrite(p) = &mut cmd.cmd {
p.secondary_keys = Some(vec![]);
}
let wr = cmd
.cmd
.process_write(
snapshot,
commands::WriteContext {
lock_mgr: &DummyLockManager {},
concurrency_manager: storage.concurrency_manager.clone(),
extra_op: ExtraOp::Noop,
statistics: &mut Statistics::default(),
async_apply_prewrite: false,
},
)
.unwrap();
assert_eq!(wr.lock_guards.len(), 1);
let result = block_on(storage.get(Context::default(), b"x".to_vec(), 100.into()));
assert!(matches!(
result,
Err(Error(box ErrorInner::Txn(txn::Error(
box txn::ErrorInner::Mvcc(mvcc::Error(box mvcc::ErrorInner::KeyIsLocked { .. }))
))))
));
}
#[test]
fn test_get_put() {
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.build()
.unwrap();
let (tx, rx) = channel();
expect_none(
block_on(storage.get(Context::default(), b"x".to_vec(), 100.into()))
.unwrap()
.0,
);
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![Mutation::make_put(Key::from_raw(b"x"), b"100".to_vec())],
b"x".to_vec(),
100.into(),
),
expect_ok_callback(tx.clone(), 1),
)
.unwrap();
rx.recv().unwrap();
expect_error(
|e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::KeyIsLocked { .. },
))))) => (),
e => panic!("unexpected error chain: {:?}", e),
},
block_on(storage.get(Context::default(), b"x".to_vec(), 101.into())),
);
storage
.sched_txn_command(
commands::Commit::new(
vec![Key::from_raw(b"x")],
100.into(),
101.into(),
Context::default(),
),
expect_ok_callback(tx, 3),
)
.unwrap();
rx.recv().unwrap();
expect_none(
block_on(storage.get(Context::default(), b"x".to_vec(), 100.into()))
.unwrap()
.0,
);
expect_value(
b"100".to_vec(),
block_on(storage.get(Context::default(), b"x".to_vec(), 101.into()))
.unwrap()
.0,
);
}
#[test]
fn test_cf_error() {
// New engine lacks normal column families.
let engine = TestEngineBuilder::new().cfs(["foo"]).build().unwrap();
let storage = TestStorageBuilder::<_, DummyLockManager>::from_engine_and_lock_mgr(
engine,
DummyLockManager {},
ApiVersion::V1,
)
.build()
.unwrap();
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![
Mutation::make_put(Key::from_raw(b"a"), b"aa".to_vec()),
Mutation::make_put(Key::from_raw(b"b"), b"bb".to_vec()),
Mutation::make_put(Key::from_raw(b"c"), b"cc".to_vec()),
],
b"a".to_vec(),
1.into(),
),
expect_fail_callback(tx, 0, |e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::Kv(KvError(box EngineErrorInner::Request(..))),
))))) => {}
e => panic!("unexpected error chain: {:?}", e),
}),
)
.unwrap();
rx.recv().unwrap();
expect_error(
|e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::Kv(KvError(box EngineErrorInner::Request(..))),
))))) => (),
e => panic!("unexpected error chain: {:?}", e),
},
block_on(storage.get(Context::default(), b"x".to_vec(), 1.into())),
);
expect_error(
|e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::Kv(KvError(box EngineErrorInner::Request(..))),
))))) => (),
e => panic!("unexpected error chain: {:?}", e),
},
block_on(storage.scan(
Context::default(),
b"x".to_vec(),
None,
1000,
0,
1.into(),
false,
false,
)),
);
expect_error(
|e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::Kv(KvError(box EngineErrorInner::Request(..))),
))))) => (),
e => panic!("unexpected error chain: {:?}", e),
},
block_on(storage.batch_get(
Context::default(),
vec![b"c".to_vec(), b"d".to_vec()],
1.into(),
)),
);
let consumer = GetConsumer::new();
block_on(storage.batch_get_command(
vec![create_get_request(b"c", 1), create_get_request(b"d", 1)],
vec![1, 2],
consumer.clone(),
Instant::now(),
))
.unwrap();
let data = consumer.take_data();
for v in data {
expect_error(
|e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::Kv(KvError(box EngineErrorInner::Request(..))),
))))) => {}
e => panic!("unexpected error chain: {:?}", e),
},
v,
);
}
}
#[test]
fn test_scan() {
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.build()
.unwrap();
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![
Mutation::make_put(Key::from_raw(b"a"), b"aa".to_vec()),
Mutation::make_put(Key::from_raw(b"b"), b"bb".to_vec()),
Mutation::make_put(Key::from_raw(b"c"), b"cc".to_vec()),
],
b"a".to_vec(),
1.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
// Forward
expect_multi_values(
vec![None, None, None],
block_on(storage.scan(
Context::default(),
b"\x00".to_vec(),
None,
1000,
0,
5.into(),
false,
false,
))
.unwrap(),
);
// Backward
expect_multi_values(
vec![None, None, None],
block_on(storage.scan(
Context::default(),
b"\xff".to_vec(),
None,
1000,
0,
5.into(),
false,
true,
))
.unwrap(),
);
// Forward with bound
expect_multi_values(
vec![None, None],
block_on(storage.scan(
Context::default(),
b"\x00".to_vec(),
Some(b"c".to_vec()),
1000,
0,
5.into(),
false,
false,
))
.unwrap(),
);
// Backward with bound
expect_multi_values(
vec![None, None],
block_on(storage.scan(
Context::default(),
b"\xff".to_vec(),
Some(b"b".to_vec()),
1000,
0,
5.into(),
false,
true,
))
.unwrap(),
);
// Forward with limit
expect_multi_values(
vec![None, None],
block_on(storage.scan(
Context::default(),
b"\x00".to_vec(),
None,
2,
0,
5.into(),
false,
false,
))
.unwrap(),
);
// Backward with limit
expect_multi_values(
vec![None, None],
block_on(storage.scan(
Context::default(),
b"\xff".to_vec(),
None,
2,
0,
5.into(),
false,
true,
))
.unwrap(),
);
storage
.sched_txn_command(
commands::Commit::new(
vec![
Key::from_raw(b"a"),
Key::from_raw(b"b"),
Key::from_raw(b"c"),
],
1.into(),
2.into(),
Context::default(),
),
expect_ok_callback(tx, 1),
)
.unwrap();
rx.recv().unwrap();
// Forward
expect_multi_values(
vec![
Some((b"a".to_vec(), b"aa".to_vec())),
Some((b"b".to_vec(), b"bb".to_vec())),
Some((b"c".to_vec(), b"cc".to_vec())),
],
block_on(storage.scan(
Context::default(),
b"\x00".to_vec(),
None,
1000,
0,
5.into(),
false,
false,
))
.unwrap(),
);
// Backward
expect_multi_values(
vec![
Some((b"c".to_vec(), b"cc".to_vec())),
Some((b"b".to_vec(), b"bb".to_vec())),
Some((b"a".to_vec(), b"aa".to_vec())),
],
block_on(storage.scan(
Context::default(),
b"\xff".to_vec(),
None,
1000,
0,
5.into(),
false,
true,
))
.unwrap(),
);
// Forward with sample step
expect_multi_values(
vec![
Some((b"a".to_vec(), b"aa".to_vec())),
Some((b"c".to_vec(), b"cc".to_vec())),
],
block_on(storage.scan(
Context::default(),
b"\x00".to_vec(),
None,
1000,
2,
5.into(),
false,
false,
))
.unwrap(),
);
// Backward with sample step
expect_multi_values(
vec![
Some((b"c".to_vec(), b"cc".to_vec())),
Some((b"a".to_vec(), b"aa".to_vec())),
],
block_on(storage.scan(
Context::default(),
b"\xff".to_vec(),
None,
1000,
2,
5.into(),
false,
true,
))
.unwrap(),
);
// Forward with sample step and limit
expect_multi_values(
vec![Some((b"a".to_vec(), b"aa".to_vec()))],
block_on(storage.scan(
Context::default(),
b"\x00".to_vec(),
None,
1,
2,
5.into(),
false,
false,
))
.unwrap(),
);
// Backward with sample step and limit
expect_multi_values(
vec![Some((b"c".to_vec(), b"cc".to_vec()))],
block_on(storage.scan(
Context::default(),
b"\xff".to_vec(),
None,
1,
2,
5.into(),
false,
true,
))
.unwrap(),
);
// Forward with bound
expect_multi_values(
vec![
Some((b"a".to_vec(), b"aa".to_vec())),
Some((b"b".to_vec(), b"bb".to_vec())),
],
block_on(storage.scan(
Context::default(),
b"\x00".to_vec(),
Some(b"c".to_vec()),
1000,
0,
5.into(),
false,
false,
))
.unwrap(),
);
// Backward with bound
expect_multi_values(
vec![
Some((b"c".to_vec(), b"cc".to_vec())),
Some((b"b".to_vec(), b"bb".to_vec())),
],
block_on(storage.scan(
Context::default(),
b"\xff".to_vec(),
Some(b"b".to_vec()),
1000,
0,
5.into(),
false,
true,
))
.unwrap(),
);
// Forward with limit
expect_multi_values(
vec![
Some((b"a".to_vec(), b"aa".to_vec())),
Some((b"b".to_vec(), b"bb".to_vec())),
],
block_on(storage.scan(
Context::default(),
b"\x00".to_vec(),
None,
2,
0,
5.into(),
false,
false,
))
.unwrap(),
);
// Backward with limit
expect_multi_values(
vec![
Some((b"c".to_vec(), b"cc".to_vec())),
Some((b"b".to_vec(), b"bb".to_vec())),
],
block_on(storage.scan(
Context::default(),
b"\xff".to_vec(),
None,
2,
0,
5.into(),
false,
true,
))
.unwrap(),
);
}
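    // Same scan matrix as `test_scan`, but with `key_only` set on a
    // Titan-enabled engine: committed entries come back with empty values.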
#[test]
fn test_scan_with_key_only() {
let db_config = crate::config::DbConfig {
titan: TitanDBConfig {
enabled: true,
..Default::default()
},
..Default::default()
};
let engine = {
let path = "".to_owned();
let cfs = ALL_CFS.to_vec();
let cfg_rocksdb = db_config;
let cache = BlockCacheConfig::default().build_shared_cache();
let cfs_opts = vec![
CFOptions::new(
CF_DEFAULT,
cfg_rocksdb
.defaultcf
.build_opt(&cache, None, ApiVersion::V1),
),
CFOptions::new(CF_LOCK, cfg_rocksdb.lockcf.build_opt(&cache)),
CFOptions::new(CF_WRITE, cfg_rocksdb.writecf.build_opt(&cache, None)),
CFOptions::new(CF_RAFT, cfg_rocksdb.raftcf.build_opt(&cache)),
];
RocksEngine::new(
&path,
&cfs,
Some(cfs_opts),
cache.is_some(),
None, /*io_rate_limiter*/
)
}
.unwrap();
let storage = TestStorageBuilder::<_, DummyLockManager>::from_engine_and_lock_mgr(
engine,
DummyLockManager {},
ApiVersion::V1,
)
.build()
.unwrap();
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![
Mutation::make_put(Key::from_raw(b"a"), b"aa".to_vec()),
Mutation::make_put(Key::from_raw(b"b"), b"bb".to_vec()),
Mutation::make_put(Key::from_raw(b"c"), b"cc".to_vec()),
],
b"a".to_vec(),
1.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
// Forward
expect_multi_values(
vec![None, None, None],
block_on(storage.scan(
Context::default(),
b"\x00".to_vec(),
None,
1000,
0,
5.into(),
true,
false,
))
.unwrap(),
);
// Backward
expect_multi_values(
vec![None, None, None],
block_on(storage.scan(
Context::default(),
b"\xff".to_vec(),
None,
1000,
0,
5.into(),
true,
true,
))
.unwrap(),
);
// Forward with bound
expect_multi_values(
vec![None, None],
block_on(storage.scan(
Context::default(),
b"\x00".to_vec(),
Some(b"c".to_vec()),
1000,
0,
5.into(),
true,
false,
))
.unwrap(),
);
// Backward with bound
expect_multi_values(
vec![None, None],
block_on(storage.scan(
Context::default(),
b"\xff".to_vec(),
Some(b"b".to_vec()),
1000,
0,
5.into(),
true,
true,
))
.unwrap(),
);
// Forward with limit
expect_multi_values(
vec![None, None],
block_on(storage.scan(
Context::default(),
b"\x00".to_vec(),
None,
2,
0,
5.into(),
true,
false,
))
.unwrap(),
);
// Backward with limit
expect_multi_values(
vec![None, None],
block_on(storage.scan(
Context::default(),
b"\xff".to_vec(),
None,
2,
0,
5.into(),
true,
true,
))
.unwrap(),
);
storage
.sched_txn_command(
commands::Commit::new(
vec![
Key::from_raw(b"a"),
Key::from_raw(b"b"),
Key::from_raw(b"c"),
],
1.into(),
2.into(),
Context::default(),
),
expect_ok_callback(tx, 1),
)
.unwrap();
rx.recv().unwrap();
// Forward
expect_multi_values(
vec![
Some((b"a".to_vec(), vec![])),
Some((b"b".to_vec(), vec![])),
Some((b"c".to_vec(), vec![])),
],
block_on(storage.scan(
Context::default(),
b"\x00".to_vec(),
None,
1000,
0,
5.into(),
true,
false,
))
.unwrap(),
);
// Backward
expect_multi_values(
vec![
Some((b"c".to_vec(), vec![])),
Some((b"b".to_vec(), vec![])),
Some((b"a".to_vec(), vec![])),
],
block_on(storage.scan(
Context::default(),
b"\xff".to_vec(),
None,
1000,
0,
5.into(),
true,
true,
))
.unwrap(),
);
// Forward with bound
expect_multi_values(
vec![Some((b"a".to_vec(), vec![])), Some((b"b".to_vec(), vec![]))],
block_on(storage.scan(
Context::default(),
b"\x00".to_vec(),
Some(b"c".to_vec()),
1000,
0,
5.into(),
true,
false,
))
.unwrap(),
);
// Backward with bound
expect_multi_values(
vec![Some((b"c".to_vec(), vec![])), Some((b"b".to_vec(), vec![]))],
block_on(storage.scan(
Context::default(),
b"\xff".to_vec(),
Some(b"b".to_vec()),
1000,
0,
5.into(),
true,
true,
))
.unwrap(),
);
// Forward with limit
expect_multi_values(
vec![Some((b"a".to_vec(), vec![])), Some((b"b".to_vec(), vec![]))],
block_on(storage.scan(
Context::default(),
b"\x00".to_vec(),
None,
2,
0,
5.into(),
true,
false,
))
.unwrap(),
);
// Backward with limit
expect_multi_values(
vec![Some((b"c".to_vec(), vec![])), Some((b"b".to_vec(), vec![]))],
block_on(storage.scan(
Context::default(),
b"\xff".to_vec(),
None,
2,
0,
5.into(),
true,
true,
))
.unwrap(),
);
}
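    // batch_get returns `None` results while the keys are only prewritten,
    // and the committed pairs (missing keys omitted) once the transaction
    // commits.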
#[test]
fn test_batch_get() {
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.build()
.unwrap();
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![
Mutation::make_put(Key::from_raw(b"a"), b"aa".to_vec()),
Mutation::make_put(Key::from_raw(b"b"), b"bb".to_vec()),
Mutation::make_put(Key::from_raw(b"c"), b"cc".to_vec()),
],
b"a".to_vec(),
1.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
expect_multi_values(
vec![None],
block_on(storage.batch_get(
Context::default(),
vec![b"c".to_vec(), b"d".to_vec()],
2.into(),
))
.unwrap()
.0,
);
storage
.sched_txn_command(
commands::Commit::new(
vec![
Key::from_raw(b"a"),
Key::from_raw(b"b"),
Key::from_raw(b"c"),
],
1.into(),
2.into(),
Context::default(),
),
expect_ok_callback(tx, 1),
)
.unwrap();
rx.recv().unwrap();
expect_multi_values(
vec![
Some((b"c".to_vec(), b"cc".to_vec())),
Some((b"a".to_vec(), b"aa".to_vec())),
Some((b"b".to_vec(), b"bb".to_vec())),
],
block_on(storage.batch_get(
Context::default(),
vec![b"c".to_vec(), b"x".to_vec(), b"a".to_vec(), b"b".to_vec()],
5.into(),
))
.unwrap()
.0,
);
}
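    // Builds a `GetRequest` for the `batch_get_command` tests below.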
fn create_get_request(key: &[u8], start_ts: u64) -> GetRequest {
let mut req = GetRequest::default();
req.set_key(key.to_owned());
req.set_version(start_ts);
req
}
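    // Like `test_batch_get`, but through `batch_get_command` with a
    // `GetConsumer`: a locked key yields a `KeyIsLocked` error before commit.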
#[test]
fn test_batch_get_command() {
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.build()
.unwrap();
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![
Mutation::make_put(Key::from_raw(b"a"), b"aa".to_vec()),
Mutation::make_put(Key::from_raw(b"b"), b"bb".to_vec()),
Mutation::make_put(Key::from_raw(b"c"), b"cc".to_vec()),
],
b"a".to_vec(),
1.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
let consumer = GetConsumer::new();
block_on(storage.batch_get_command(
vec![create_get_request(b"c", 2), create_get_request(b"d", 2)],
vec![1, 2],
consumer.clone(),
Instant::now(),
))
.unwrap();
let mut x = consumer.take_data();
expect_error(
|e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::KeyIsLocked(..),
))))) => {}
e => panic!("unexpected error chain: {:?}", e),
},
x.remove(0),
);
assert_eq!(x.remove(0).unwrap(), None);
storage
.sched_txn_command(
commands::Commit::new(
vec![
Key::from_raw(b"a"),
Key::from_raw(b"b"),
Key::from_raw(b"c"),
],
1.into(),
2.into(),
Context::default(),
),
expect_ok_callback(tx, 1),
)
.unwrap();
rx.recv().unwrap();
let consumer = GetConsumer::new();
block_on(storage.batch_get_command(
vec![
create_get_request(b"c", 5),
create_get_request(b"x", 5),
create_get_request(b"a", 5),
create_get_request(b"b", 5),
],
vec![1, 2, 3, 4],
consumer.clone(),
Instant::now(),
))
.unwrap();
let x: Vec<Option<Vec<u8>>> = consumer
.take_data()
.into_iter()
.map(|x| x.unwrap())
.collect();
assert_eq!(
x,
vec![
Some(b"cc".to_vec()),
None,
Some(b"aa".to_vec()),
Some(b"bb".to_vec())
]
);
}
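    // Two transactions prewrite and commit independently; reads at ts=120 see
    // both values, and a prewrite at the older ts=105 hits `WriteConflict`.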
#[test]
fn test_txn() {
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.build()
.unwrap();
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![Mutation::make_put(Key::from_raw(b"x"), b"100".to_vec())],
b"x".to_vec(),
100.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![Mutation::make_put(Key::from_raw(b"y"), b"101".to_vec())],
b"y".to_vec(),
101.into(),
),
expect_ok_callback(tx.clone(), 1),
)
.unwrap();
rx.recv().unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Commit::new(
vec![Key::from_raw(b"x")],
100.into(),
110.into(),
Context::default(),
),
expect_value_callback(tx.clone(), 2, TxnStatus::committed(110.into())),
)
.unwrap();
storage
.sched_txn_command(
commands::Commit::new(
vec![Key::from_raw(b"y")],
101.into(),
111.into(),
Context::default(),
),
expect_value_callback(tx.clone(), 3, TxnStatus::committed(111.into())),
)
.unwrap();
rx.recv().unwrap();
rx.recv().unwrap();
expect_value(
b"100".to_vec(),
block_on(storage.get(Context::default(), b"x".to_vec(), 120.into()))
.unwrap()
.0,
);
expect_value(
b"101".to_vec(),
block_on(storage.get(Context::default(), b"y".to_vec(), 120.into()))
.unwrap()
.0,
);
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![Mutation::make_put(Key::from_raw(b"x"), b"105".to_vec())],
b"x".to_vec(),
105.into(),
),
expect_fail_callback(tx, 6, |e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::WriteConflict { .. },
))))) => (),
e => panic!("unexpected error chain: {:?}", e),
}),
)
.unwrap();
rx.recv().unwrap();
}
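    // With `scheduler_pending_write_threshold` set to one byte, a prewrite
    // queued behind `Pause` is rejected as too busy; once the queue drains,
    // writes are accepted again.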
#[test]
fn test_sched_too_busy() {
let config = Config {
scheduler_pending_write_threshold: ReadableSize(1),
..Default::default()
};
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.config(config)
.build()
.unwrap();
let (tx, rx) = channel();
expect_none(
block_on(storage.get(Context::default(), b"x".to_vec(), 100.into()))
.unwrap()
.0,
);
storage
.sched_txn_command::<()>(
commands::Pause::new(vec![Key::from_raw(b"x")], 1000, Context::default()),
expect_ok_callback(tx.clone(), 1),
)
.unwrap();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![Mutation::make_put(Key::from_raw(b"y"), b"101".to_vec())],
b"y".to_vec(),
101.into(),
),
expect_too_busy_callback(tx.clone(), 2),
)
.unwrap();
rx.recv().unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![Mutation::make_put(Key::from_raw(b"z"), b"102".to_vec())],
b"y".to_vec(),
102.into(),
),
expect_ok_callback(tx, 3),
)
.unwrap();
rx.recv().unwrap();
}
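    // Cleanup rolls back an uncommitted lock and advances the concurrency
    // manager's max_ts to the transaction's start_ts.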
#[test]
fn test_cleanup() {
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.build()
.unwrap();
let cm = storage.concurrency_manager.clone();
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![Mutation::make_put(Key::from_raw(b"x"), b"100".to_vec())],
b"x".to_vec(),
100.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Cleanup::new(
Key::from_raw(b"x"),
100.into(),
TimeStamp::zero(),
Context::default(),
),
expect_ok_callback(tx, 1),
)
.unwrap();
rx.recv().unwrap();
assert_eq!(cm.max_ts(), 100.into());
expect_none(
block_on(storage.get(Context::default(), b"x".to_vec(), 105.into()))
.unwrap()
.0,
);
}
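    // Cleanup honors the lock TTL: before the lock expires it fails with
    // `KeyIsLocked`, and after expiry it rolls the lock back.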
#[test]
fn test_cleanup_check_ttl() {
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.build()
.unwrap();
let (tx, rx) = channel();
let ts = TimeStamp::compose;
storage
.sched_txn_command(
commands::Prewrite::with_lock_ttl(
vec![Mutation::make_put(Key::from_raw(b"x"), b"110".to_vec())],
b"x".to_vec(),
ts(110, 0),
100,
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Cleanup::new(
Key::from_raw(b"x"),
ts(110, 0),
ts(120, 0),
Context::default(),
),
expect_fail_callback(tx.clone(), 0, |e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::KeyIsLocked(info),
))))) => assert_eq!(info.get_lock_ttl(), 100),
e => panic!("unexpected error chain: {:?}", e),
}),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Cleanup::new(
Key::from_raw(b"x"),
ts(110, 0),
ts(220, 0),
Context::default(),
),
expect_ok_callback(tx, 0),
)
.unwrap();
rx.recv().unwrap();
expect_none(
block_on(storage.get(Context::default(), b"x".to_vec(), ts(230, 0)))
.unwrap()
.0,
);
}
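    // High-priority reads and writes go through the usual
    // get/prewrite/commit path and see the same snapshot-visibility rules.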
#[test]
fn test_high_priority_get_put() {
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.build()
.unwrap();
let (tx, rx) = channel();
let mut ctx = Context::default();
ctx.set_priority(CommandPri::High);
expect_none(
block_on(storage.get(ctx, b"x".to_vec(), 100.into()))
.unwrap()
.0,
);
let mut ctx = Context::default();
ctx.set_priority(CommandPri::High);
storage
.sched_txn_command(
commands::Prewrite::with_context(
vec![Mutation::make_put(Key::from_raw(b"x"), b"100".to_vec())],
b"x".to_vec(),
100.into(),
ctx,
),
expect_ok_callback(tx.clone(), 1),
)
.unwrap();
rx.recv().unwrap();
let mut ctx = Context::default();
ctx.set_priority(CommandPri::High);
storage
.sched_txn_command(
commands::Commit::new(vec![Key::from_raw(b"x")], 100.into(), 101.into(), ctx),
expect_ok_callback(tx, 2),
)
.unwrap();
rx.recv().unwrap();
let mut ctx = Context::default();
ctx.set_priority(CommandPri::High);
expect_none(
block_on(storage.get(ctx, b"x".to_vec(), 100.into()))
.unwrap()
.0,
);
let mut ctx = Context::default();
ctx.set_priority(CommandPri::High);
expect_value(
b"100".to_vec(),
block_on(storage.get(ctx, b"x".to_vec(), 101.into()))
.unwrap()
.0,
);
}
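    // With a single scheduler worker tied up by `Pause`, a high-priority Get
    // still completes before the paused command finishes.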
#[test]
fn test_high_priority_no_block() {
let config = Config {
scheduler_worker_pool_size: 1,
..Default::default()
};
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.config(config)
.build()
.unwrap();
let (tx, rx) = channel();
expect_none(
block_on(storage.get(Context::default(), b"x".to_vec(), 100.into()))
.unwrap()
.0,
);
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![Mutation::make_put(Key::from_raw(b"x"), b"100".to_vec())],
b"x".to_vec(),
100.into(),
),
expect_ok_callback(tx.clone(), 1),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Commit::new(
vec![Key::from_raw(b"x")],
100.into(),
101.into(),
Context::default(),
),
expect_ok_callback(tx.clone(), 2),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Pause::new(vec![Key::from_raw(b"y")], 1000, Context::default()),
expect_ok_callback(tx, 3),
)
.unwrap();
let mut ctx = Context::default();
ctx.set_priority(CommandPri::High);
expect_value(
b"100".to_vec(),
block_on(storage.get(ctx, b"x".to_vec(), 101.into()))
.unwrap()
.0,
);
        // A high-priority Get is not blocked by the Pause command.
assert_eq!(rx.recv().unwrap(), 3);
}
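    // delete_range removes committed keys in the end-exclusive range
    // [start_key, end_key).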
#[test]
fn test_delete_range() {
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.build()
.unwrap();
let (tx, rx) = channel();
        // Write x, y and z.
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![
Mutation::make_put(Key::from_raw(b"x"), b"100".to_vec()),
Mutation::make_put(Key::from_raw(b"y"), b"100".to_vec()),
Mutation::make_put(Key::from_raw(b"z"), b"100".to_vec()),
],
b"x".to_vec(),
100.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Commit::new(
vec![
Key::from_raw(b"x"),
Key::from_raw(b"y"),
Key::from_raw(b"z"),
],
100.into(),
101.into(),
Context::default(),
),
expect_ok_callback(tx.clone(), 1),
)
.unwrap();
rx.recv().unwrap();
expect_value(
b"100".to_vec(),
block_on(storage.get(Context::default(), b"x".to_vec(), 101.into()))
.unwrap()
.0,
);
expect_value(
b"100".to_vec(),
block_on(storage.get(Context::default(), b"y".to_vec(), 101.into()))
.unwrap()
.0,
);
expect_value(
b"100".to_vec(),
block_on(storage.get(Context::default(), b"z".to_vec(), 101.into()))
.unwrap()
.0,
);
// Delete range [x, z)
storage
.delete_range(
Context::default(),
b"x".to_vec(),
b"z".to_vec(),
false,
expect_ok_callback(tx.clone(), 5),
)
.unwrap();
rx.recv().unwrap();
expect_none(
block_on(storage.get(Context::default(), b"x".to_vec(), 101.into()))
.unwrap()
.0,
);
expect_none(
block_on(storage.get(Context::default(), b"y".to_vec(), 101.into()))
.unwrap()
.0,
);
expect_value(
b"100".to_vec(),
block_on(storage.get(Context::default(), b"z".to_vec(), 101.into()))
.unwrap()
.0,
);
storage
.delete_range(
Context::default(),
b"".to_vec(),
vec![255],
false,
expect_ok_callback(tx, 9),
)
.unwrap();
rx.recv().unwrap();
expect_none(
block_on(storage.get(Context::default(), b"z".to_vec(), 101.into()))
.unwrap()
.0,
);
}
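    // Raw delete-range, run under every API version.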
#[test]
fn test_raw_delete_range() {
test_raw_delete_range_impl(ApiVersion::V1);
test_raw_delete_range_impl(ApiVersion::V1ttl);
test_raw_delete_range_impl(ApiVersion::V2);
}
fn test_raw_delete_range_impl(api_version: ApiVersion) {
let storage = TestStorageBuilder::new(DummyLockManager {}, api_version)
.build()
.unwrap();
let (tx, rx) = channel();
let req_api_version = if api_version == ApiVersion::V1ttl {
ApiVersion::V1
} else {
api_version
};
let ctx = Context {
api_version: req_api_version,
..Default::default()
};
let test_data = [
(b"r\0a", b"001"),
(b"r\0b", b"002"),
(b"r\0c", b"003"),
(b"r\0d", b"004"),
(b"r\0e", b"005"),
];
// Write some key-value pairs to the db
for kv in &test_data {
storage
.raw_put(
ctx.clone(),
"".to_string(),
kv.0.to_vec(),
kv.1.to_vec(),
0,
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
}
expect_value(
b"004".to_vec(),
block_on(storage.raw_get(ctx.clone(), "".to_string(), b"r\0d".to_vec())).unwrap(),
);
// Delete ["d", "e")
storage
.raw_delete_range(
ctx.clone(),
"".to_string(),
b"r\0d".to_vec(),
b"r\0e".to_vec(),
expect_ok_callback(tx.clone(), 1),
)
.unwrap();
rx.recv().unwrap();
// Assert key "d" has gone
expect_value(
b"003".to_vec(),
block_on(storage.raw_get(ctx.clone(), "".to_string(), b"r\0c".to_vec())).unwrap(),
);
expect_none(
block_on(storage.raw_get(ctx.clone(), "".to_string(), b"r\0d".to_vec())).unwrap(),
);
expect_value(
b"005".to_vec(),
block_on(storage.raw_get(ctx.clone(), "".to_string(), b"r\0e".to_vec())).unwrap(),
);
// Delete ["aa", "ab")
storage
.raw_delete_range(
ctx.clone(),
"".to_string(),
b"r\0aa".to_vec(),
b"r\0ab".to_vec(),
expect_ok_callback(tx.clone(), 2),
)
.unwrap();
rx.recv().unwrap();
// Assert nothing happened
expect_value(
b"001".to_vec(),
block_on(storage.raw_get(ctx.clone(), "".to_string(), b"r\0a".to_vec())).unwrap(),
);
expect_value(
b"002".to_vec(),
block_on(storage.raw_get(ctx.clone(), "".to_string(), b"r\0b".to_vec())).unwrap(),
);
// Delete all
storage
.raw_delete_range(
ctx.clone(),
"".to_string(),
b"r\0a".to_vec(),
b"r\0z".to_vec(),
expect_ok_callback(tx, 3),
)
.unwrap();
rx.recv().unwrap();
        // Assert that no key remains
for kv in &test_data {
expect_none(
block_on(storage.raw_get(ctx.clone(), "".to_string(), kv.0.to_vec())).unwrap(),
);
}
rx.recv().unwrap();
}
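    // Raw batch put with per-key TTLs (honored on V1ttl and V2), verified
    // key by key with raw_get.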
#[test]
fn test_raw_batch_put() {
test_raw_batch_put_impl(ApiVersion::V1);
test_raw_batch_put_impl(ApiVersion::V1ttl);
test_raw_batch_put_impl(ApiVersion::V2);
}
fn test_raw_batch_put_impl(api_version: ApiVersion) {
let storage = TestStorageBuilder::new(DummyLockManager {}, api_version)
.build()
.unwrap();
let (tx, rx) = channel();
let req_api_version = if api_version == ApiVersion::V1ttl {
ApiVersion::V1
} else {
api_version
};
let ctx = Context {
api_version: req_api_version,
..Default::default()
};
let test_data = vec![
(b"r\0a".to_vec(), b"aa".to_vec(), 10),
(b"r\0b".to_vec(), b"bb".to_vec(), 20),
(b"r\0c".to_vec(), b"cc".to_vec(), 30),
(b"r\0d".to_vec(), b"dd".to_vec(), 0),
(b"r\0e".to_vec(), b"ee".to_vec(), 40),
];
let kvpairs = test_data
.clone()
.into_iter()
.map(|(key, value, _)| (key, value))
.collect();
let ttls = if let ApiVersion::V1ttl | ApiVersion::V2 = api_version {
test_data
.clone()
.into_iter()
.map(|(_, _, ttl)| ttl)
.collect()
} else {
vec![0; test_data.len()]
};
// Write key-value pairs in a batch
storage
.raw_batch_put(
ctx.clone(),
"".to_string(),
kvpairs,
ttls,
expect_ok_callback(tx, 0),
)
.unwrap();
rx.recv().unwrap();
// Verify pairs one by one
for (key, val, _) in &test_data {
expect_value(
val.to_vec(),
block_on(storage.raw_get(ctx.clone(), "".to_string(), key.to_vec())).unwrap(),
);
}
}
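    // Writes raw pairs one by one and reads them all back with
    // raw_batch_get.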
#[test]
fn test_raw_batch_get() {
test_raw_batch_get_impl(ApiVersion::V1);
test_raw_batch_get_impl(ApiVersion::V1ttl);
test_raw_batch_get_impl(ApiVersion::V2);
}
fn test_raw_batch_get_impl(api_version: ApiVersion) {
let storage = TestStorageBuilder::new(DummyLockManager {}, api_version)
.build()
.unwrap();
let (tx, rx) = channel();
let req_api_version = if api_version == ApiVersion::V1ttl {
ApiVersion::V1
} else {
api_version
};
let ctx = Context {
api_version: req_api_version,
..Default::default()
};
let test_data = vec![
(b"r\0a".to_vec(), b"aa".to_vec()),
(b"r\0b".to_vec(), b"bb".to_vec()),
(b"r\0c".to_vec(), b"cc".to_vec()),
(b"r\0d".to_vec(), b"dd".to_vec()),
(b"r\0e".to_vec(), b"ee".to_vec()),
];
// Write key-value pairs one by one
for &(ref key, ref value) in &test_data {
storage
.raw_put(
ctx.clone(),
"".to_string(),
key.clone(),
value.clone(),
0,
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
}
rx.recv().unwrap();
// Verify pairs in a batch
let keys = test_data.iter().map(|&(ref k, _)| k.clone()).collect();
let results = test_data.into_iter().map(|(k, v)| Some((k, v))).collect();
expect_multi_values(
results,
block_on(storage.raw_batch_get(ctx, "".to_string(), keys)).unwrap(),
);
}
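    // Like `test_raw_batch_get`, but through the command-based
    // `raw_batch_get_command` API with a `GetConsumer`.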
#[test]
fn test_batch_raw_get() {
test_batch_raw_get_impl(ApiVersion::V1);
test_batch_raw_get_impl(ApiVersion::V1ttl);
test_batch_raw_get_impl(ApiVersion::V2);
}
fn test_batch_raw_get_impl(api_version: ApiVersion) {
let storage = TestStorageBuilder::new(DummyLockManager {}, api_version)
.build()
.unwrap();
let (tx, rx) = channel();
let req_api_version = if api_version == ApiVersion::V1ttl {
ApiVersion::V1
} else {
api_version
};
let ctx = Context {
api_version: req_api_version,
..Default::default()
};
let test_data = vec![
(b"r\0a".to_vec(), b"aa".to_vec()),
(b"r\0b".to_vec(), b"bb".to_vec()),
(b"r\0c".to_vec(), b"cc".to_vec()),
(b"r\0d".to_vec(), b"dd".to_vec()),
(b"r\0e".to_vec(), b"ee".to_vec()),
];
// Write key-value pairs one by one
for &(ref key, ref value) in &test_data {
storage
.raw_put(
ctx.clone(),
"".to_string(),
key.clone(),
value.clone(),
0,
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
}
rx.recv().unwrap();
// Verify pairs in a batch
let mut ids = vec![];
let cmds = test_data
.iter()
.map(|&(ref k, _)| {
let mut req = RawGetRequest::default();
req.set_context(ctx.clone());
req.set_key(k.clone());
ids.push(ids.len() as u64);
req
})
.collect();
let results: Vec<Option<Vec<u8>>> = test_data.into_iter().map(|(_, v)| Some(v)).collect();
let consumer = GetConsumer::new();
block_on(storage.raw_batch_get_command(cmds, ids, consumer.clone())).unwrap();
let x: Vec<Option<Vec<u8>>> = consumer
.take_data()
.into_iter()
.map(|x| x.unwrap())
.collect();
assert_eq!(x, results);
}
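    // Raw batch delete: removes subsets of keys in batches and verifies the
    // survivors after each round.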
#[test]
fn test_raw_batch_delete() {
test_raw_batch_delete_impl(ApiVersion::V1);
test_raw_batch_delete_impl(ApiVersion::V1ttl);
test_raw_batch_delete_impl(ApiVersion::V2);
}
fn test_raw_batch_delete_impl(api_version: ApiVersion) {
let storage = TestStorageBuilder::new(DummyLockManager {}, api_version)
.build()
.unwrap();
let (tx, rx) = channel();
let req_api_version = if api_version == ApiVersion::V1ttl {
ApiVersion::V1
} else {
api_version
};
let ctx = Context {
api_version: req_api_version,
..Default::default()
};
let test_data = vec![
(b"r\0a".to_vec(), b"aa".to_vec()),
(b"r\0b".to_vec(), b"bb".to_vec()),
(b"r\0c".to_vec(), b"cc".to_vec()),
(b"r\0d".to_vec(), b"dd".to_vec()),
(b"r\0e".to_vec(), b"ee".to_vec()),
];
// Write key-value pairs in batch
storage
.raw_batch_put(
ctx.clone(),
"".to_string(),
test_data.clone(),
vec![0; test_data.len()],
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
// Verify pairs exist
let keys = test_data.iter().map(|&(ref k, _)| k.clone()).collect();
let results = test_data
.iter()
.map(|&(ref k, ref v)| Some((k.clone(), v.clone())))
.collect();
expect_multi_values(
results,
block_on(storage.raw_batch_get(ctx.clone(), "".to_string(), keys)).unwrap(),
);
// Delete ["b", "d"]
storage
.raw_batch_delete(
ctx.clone(),
"".to_string(),
vec![b"r\0b".to_vec(), b"r\0d".to_vec()],
expect_ok_callback(tx.clone(), 1),
)
.unwrap();
rx.recv().unwrap();
// Assert "b" and "d" are gone
expect_value(
b"aa".to_vec(),
block_on(storage.raw_get(ctx.clone(), "".to_string(), b"r\0a".to_vec())).unwrap(),
);
expect_none(
block_on(storage.raw_get(ctx.clone(), "".to_string(), b"r\0b".to_vec())).unwrap(),
);
expect_value(
b"cc".to_vec(),
block_on(storage.raw_get(ctx.clone(), "".to_string(), b"r\0c".to_vec())).unwrap(),
);
expect_none(
block_on(storage.raw_get(ctx.clone(), "".to_string(), b"r\0d".to_vec())).unwrap(),
);
expect_value(
b"ee".to_vec(),
block_on(storage.raw_get(ctx.clone(), "".to_string(), b"r\0e".to_vec())).unwrap(),
);
// Delete ["a", "c", "e"]
storage
.raw_batch_delete(
ctx.clone(),
"".to_string(),
vec![b"r\0a".to_vec(), b"r\0c".to_vec(), b"r\0e".to_vec()],
expect_ok_callback(tx, 2),
)
.unwrap();
rx.recv().unwrap();
// Assert no key remains
for (k, _) in test_data {
expect_none(block_on(storage.raw_get(ctx.clone(), "".to_string(), k)).unwrap());
}
}
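    // Raw scans in both directions with key-only mode, limits and explicit
    // end keys, under every API version.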
#[test]
fn test_raw_scan() {
test_raw_scan_impl(ApiVersion::V1);
test_raw_scan_impl(ApiVersion::V1ttl);
test_raw_scan_impl(ApiVersion::V2);
}
fn test_raw_scan_impl(api_version: ApiVersion) {
let (end_key, end_key_reverse_scan) = if let ApiVersion::V2 = api_version {
(Some(b"r\0z".to_vec()), Some(b"r\0\0".to_vec()))
} else {
(None, None)
};
let storage = TestStorageBuilder::new(DummyLockManager {}, api_version)
.build()
.unwrap();
let (tx, rx) = channel();
let req_api_version = if api_version == ApiVersion::V1ttl {
ApiVersion::V1
} else {
api_version
};
let ctx = Context {
api_version: req_api_version,
..Default::default()
};
let test_data = vec![
(b"r\0a".to_vec(), b"aa".to_vec()),
(b"r\0a1".to_vec(), b"aa11".to_vec()),
(b"r\0a2".to_vec(), b"aa22".to_vec()),
(b"r\0a3".to_vec(), b"aa33".to_vec()),
(b"r\0b".to_vec(), b"bb".to_vec()),
(b"r\0b1".to_vec(), b"bb11".to_vec()),
(b"r\0b2".to_vec(), b"bb22".to_vec()),
(b"r\0b3".to_vec(), b"bb33".to_vec()),
(b"r\0c".to_vec(), b"cc".to_vec()),
(b"r\0c1".to_vec(), b"cc11".to_vec()),
(b"r\0c2".to_vec(), b"cc22".to_vec()),
(b"r\0c3".to_vec(), b"cc33".to_vec()),
(b"r\0d".to_vec(), b"dd".to_vec()),
(b"r\0d1".to_vec(), b"dd11".to_vec()),
(b"r\0d2".to_vec(), b"dd22".to_vec()),
(b"r\0d3".to_vec(), b"dd33".to_vec()),
(b"r\0e".to_vec(), b"ee".to_vec()),
(b"r\0e1".to_vec(), b"ee11".to_vec()),
(b"r\0e2".to_vec(), b"ee22".to_vec()),
(b"r\0e3".to_vec(), b"ee33".to_vec()),
];
// Write key-value pairs in batch
storage
.raw_batch_put(
ctx.clone(),
"".to_string(),
test_data.clone(),
vec![0; test_data.len()],
expect_ok_callback(tx, 0),
)
.unwrap();
rx.recv().unwrap();
// Scan pairs with key only
let mut results: Vec<Option<KvPair>> = test_data
.iter()
.map(|&(ref k, _)| Some((k.clone(), vec![])))
.collect();
expect_multi_values(
results.clone(),
block_on(storage.raw_scan(
ctx.clone(),
"".to_string(),
b"r\0".to_vec(),
end_key.clone(),
20,
true,
false,
))
.unwrap(),
);
results = results.split_off(10);
expect_multi_values(
results,
block_on(storage.raw_scan(
ctx.clone(),
"".to_string(),
b"r\0c2".to_vec(),
end_key.clone(),
20,
true,
false,
))
.unwrap(),
);
let mut results: Vec<Option<KvPair>> = test_data
.clone()
.into_iter()
.map(|(k, v)| Some((k, v)))
.collect();
expect_multi_values(
results.clone(),
block_on(storage.raw_scan(
ctx.clone(),
"".to_string(),
b"r\0".to_vec(),
end_key.clone(),
20,
false,
false,
))
.unwrap(),
);
results = results.split_off(10);
expect_multi_values(
results,
block_on(storage.raw_scan(
ctx.clone(),
"".to_string(),
b"r\0c2".to_vec(),
end_key,
20,
false,
false,
))
.unwrap(),
);
let results: Vec<Option<KvPair>> = test_data
.clone()
.into_iter()
.map(|(k, v)| Some((k, v)))
.rev()
.collect();
expect_multi_values(
results,
block_on(storage.raw_scan(
ctx.clone(),
"".to_string(),
b"r\0z".to_vec(),
end_key_reverse_scan.clone(),
20,
false,
true,
))
.unwrap(),
);
let results: Vec<Option<KvPair>> = test_data
.clone()
.into_iter()
.map(|(k, v)| Some((k, v)))
.rev()
.take(5)
.collect();
expect_multi_values(
results,
block_on(storage.raw_scan(
ctx.clone(),
"".to_string(),
b"r\0z".to_vec(),
end_key_reverse_scan,
5,
false,
true,
))
.unwrap(),
);
// Scan with end_key
let results: Vec<Option<KvPair>> = test_data
.clone()
.into_iter()
.skip(6)
.take(4)
.map(|(k, v)| Some((k, v)))
.collect();
expect_multi_values(
results,
block_on(storage.raw_scan(
ctx.clone(),
"".to_string(),
b"r\0b2".to_vec(),
Some(b"r\0c2".to_vec()),
20,
false,
false,
))
.unwrap(),
);
let results: Vec<Option<KvPair>> = test_data
.clone()
.into_iter()
.skip(6)
.take(1)
.map(|(k, v)| Some((k, v)))
.collect();
expect_multi_values(
results,
block_on(storage.raw_scan(
ctx.clone(),
"".to_string(),
b"r\0b2".to_vec(),
Some(b"r\0b2\x00".to_vec()),
20,
false,
false,
))
.unwrap(),
);
// Reverse scan with end_key
let results: Vec<Option<KvPair>> = test_data
.clone()
.into_iter()
.rev()
.skip(10)
.take(4)
.map(|(k, v)| Some((k, v)))
.collect();
expect_multi_values(
results,
block_on(storage.raw_scan(
ctx.clone(),
"".to_string(),
b"r\0c2".to_vec(),
Some(b"r\0b2".to_vec()),
20,
false,
true,
))
.unwrap(),
);
let results: Vec<Option<KvPair>> = test_data
.into_iter()
.skip(6)
.take(1)
.map(|(k, v)| Some((k, v)))
.collect();
expect_multi_values(
results,
block_on(storage.raw_scan(
ctx.clone(),
"".to_string(),
b"r\0b2\x00".to_vec(),
Some(b"r\0b2".to_vec()),
20,
false,
true,
))
.unwrap(),
);
// End key tests. Confirm that lower/upper bound works correctly.
let results = vec![
(b"r\0c1".to_vec(), b"cc11".to_vec()),
(b"r\0c2".to_vec(), b"cc22".to_vec()),
(b"r\0c3".to_vec(), b"cc33".to_vec()),
(b"r\0d".to_vec(), b"dd".to_vec()),
(b"r\0d1".to_vec(), b"dd11".to_vec()),
(b"r\0d2".to_vec(), b"dd22".to_vec()),
]
.into_iter()
.map(|(k, v)| Some((k, v)));
expect_multi_values(
results.clone().collect(),
block_on(async {
storage
.raw_scan(
ctx.clone(),
"".to_string(),
b"r\0c1".to_vec(),
Some(b"r\0d3".to_vec()),
20,
false,
false,
)
.await
})
.unwrap(),
);
expect_multi_values(
results.rev().collect(),
block_on(async {
storage
.raw_scan(
ctx.clone(),
"".to_string(),
b"r\0d3".to_vec(),
Some(b"r\0c1".to_vec()),
20,
false,
true,
)
.await
})
.unwrap(),
);
}
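    // check_key_ranges validates that each range is well-formed and that the
    // ranges are ordered correctly for forward and reverse scans.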
#[test]
fn test_check_key_ranges() {
fn make_ranges(ranges: Vec<(Vec<u8>, Vec<u8>)>) -> Vec<KeyRange> {
ranges
.into_iter()
.map(|(s, e)| {
let mut range = KeyRange::default();
range.set_start_key(s);
if !e.is_empty() {
range.set_end_key(e);
}
range
})
.collect()
}
let ranges = make_ranges(vec![
(b"a".to_vec(), b"a3".to_vec()),
(b"b".to_vec(), b"b3".to_vec()),
(b"c".to_vec(), b"c3".to_vec()),
]);
        assert!(<Storage<RocksEngine, DummyLockManager>>::check_key_ranges(
            &ranges, false
        ));
let ranges = make_ranges(vec![
(b"a".to_vec(), vec![]),
(b"b".to_vec(), vec![]),
(b"c".to_vec(), vec![]),
]);
        assert!(<Storage<RocksEngine, DummyLockManager>>::check_key_ranges(
            &ranges, false
        ));
let ranges = make_ranges(vec![
(b"a3".to_vec(), b"a".to_vec()),
(b"b3".to_vec(), b"b".to_vec()),
(b"c3".to_vec(), b"c".to_vec()),
]);
        assert!(!<Storage<RocksEngine, DummyLockManager>>::check_key_ranges(
            &ranges, false
        ));
        // If end_key is omitted, the next range's start_key is used as the bound
        // instead, so false is returned here.
let ranges = make_ranges(vec![
(b"c".to_vec(), vec![]),
(b"b".to_vec(), vec![]),
(b"a".to_vec(), vec![]),
]);
        assert!(!<Storage<RocksEngine, DummyLockManager>>::check_key_ranges(
            &ranges, false
        ));
let ranges = make_ranges(vec![
(b"a3".to_vec(), b"a".to_vec()),
(b"b3".to_vec(), b"b".to_vec()),
(b"c3".to_vec(), b"c".to_vec()),
]);
        assert!(<Storage<RocksEngine, DummyLockManager>>::check_key_ranges(
            &ranges, true
        ));
let ranges = make_ranges(vec![
(b"c3".to_vec(), vec![]),
(b"b3".to_vec(), vec![]),
(b"a3".to_vec(), vec![]),
]);
        assert!(<Storage<RocksEngine, DummyLockManager>>::check_key_ranges(
            &ranges, true
        ));
let ranges = make_ranges(vec![
(b"a".to_vec(), b"a3".to_vec()),
(b"b".to_vec(), b"b3".to_vec()),
(b"c".to_vec(), b"c3".to_vec()),
]);
        assert!(!<Storage<RocksEngine, DummyLockManager>>::check_key_ranges(
            &ranges, true
        ));
let ranges = make_ranges(vec![
(b"a3".to_vec(), vec![]),
(b"b3".to_vec(), vec![]),
(b"c3".to_vec(), vec![]),
]);
        assert!(!<Storage<RocksEngine, DummyLockManager>>::check_key_ranges(
            &ranges, true
        ));
}
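    // Raw batch scan over multiple key ranges, forward and reverse, with and
    // without key-only mode and with per-range limits.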
#[test]
fn test_raw_batch_scan() {
test_raw_batch_scan_impl(ApiVersion::V1);
test_raw_batch_scan_impl(ApiVersion::V1ttl);
test_raw_batch_scan_impl(ApiVersion::V2);
}
fn test_raw_batch_scan_impl(api_version: ApiVersion) {
let make_ranges = |delimiters: Vec<Vec<u8>>| -> Vec<KeyRange> {
delimiters
.windows(2)
.map(|key_pair| {
let mut range = KeyRange::default();
range.set_start_key(key_pair[0].clone());
if let ApiVersion::V2 = api_version {
range.set_end_key(key_pair[1].clone());
};
range
})
.collect()
};
let storage = TestStorageBuilder::new(DummyLockManager {}, api_version)
.build()
.unwrap();
let (tx, rx) = channel();
let req_api_version = if api_version == ApiVersion::V1ttl {
ApiVersion::V1
} else {
api_version
};
let ctx = Context {
api_version: req_api_version,
..Default::default()
};
let test_data = vec![
(b"r\0a".to_vec(), b"aa".to_vec()),
(b"r\0a1".to_vec(), b"aa11".to_vec()),
(b"r\0a2".to_vec(), b"aa22".to_vec()),
(b"r\0a3".to_vec(), b"aa33".to_vec()),
(b"r\0b".to_vec(), b"bb".to_vec()),
(b"r\0b1".to_vec(), b"bb11".to_vec()),
(b"r\0b2".to_vec(), b"bb22".to_vec()),
(b"r\0b3".to_vec(), b"bb33".to_vec()),
(b"r\0c".to_vec(), b"cc".to_vec()),
(b"r\0c1".to_vec(), b"cc11".to_vec()),
(b"r\0c2".to_vec(), b"cc22".to_vec()),
(b"r\0c3".to_vec(), b"cc33".to_vec()),
(b"r\0d".to_vec(), b"dd".to_vec()),
(b"r\0d1".to_vec(), b"dd11".to_vec()),
(b"r\0d2".to_vec(), b"dd22".to_vec()),
(b"r\0d3".to_vec(), b"dd33".to_vec()),
(b"r\0e".to_vec(), b"ee".to_vec()),
(b"r\0e1".to_vec(), b"ee11".to_vec()),
(b"r\0e2".to_vec(), b"ee22".to_vec()),
(b"r\0e3".to_vec(), b"ee33".to_vec()),
];
// Write key-value pairs in batch
storage
.raw_batch_put(
ctx.clone(),
"".to_string(),
test_data.clone(),
vec![0; test_data.len()],
expect_ok_callback(tx, 0),
)
.unwrap();
rx.recv().unwrap();
// Verify pairs exist
let keys = test_data.iter().map(|&(ref k, _)| k.clone()).collect();
let results = test_data.into_iter().map(|(k, v)| Some((k, v))).collect();
expect_multi_values(
results,
block_on(storage.raw_batch_get(ctx.clone(), "".to_string(), keys)).unwrap(),
);
let results = vec![
Some((b"r\0a".to_vec(), b"aa".to_vec())),
Some((b"r\0a1".to_vec(), b"aa11".to_vec())),
Some((b"r\0a2".to_vec(), b"aa22".to_vec())),
Some((b"r\0a3".to_vec(), b"aa33".to_vec())),
Some((b"r\0b".to_vec(), b"bb".to_vec())),
Some((b"r\0b1".to_vec(), b"bb11".to_vec())),
Some((b"r\0b2".to_vec(), b"bb22".to_vec())),
Some((b"r\0b3".to_vec(), b"bb33".to_vec())),
Some((b"r\0c".to_vec(), b"cc".to_vec())),
Some((b"r\0c1".to_vec(), b"cc11".to_vec())),
Some((b"r\0c2".to_vec(), b"cc22".to_vec())),
Some((b"r\0c3".to_vec(), b"cc33".to_vec())),
Some((b"r\0d".to_vec(), b"dd".to_vec())),
];
let ranges: Vec<KeyRange> = make_ranges(vec![
b"r\0a".to_vec(),
b"r\0b".to_vec(),
b"r\0c".to_vec(),
b"r\0z".to_vec(),
]);
expect_multi_values(
results,
block_on(storage.raw_batch_scan(
ctx.clone(),
"".to_string(),
ranges.clone(),
5,
false,
false,
))
.unwrap(),
);
let results = vec![
Some((b"r\0a".to_vec(), vec![])),
Some((b"r\0a1".to_vec(), vec![])),
Some((b"r\0a2".to_vec(), vec![])),
Some((b"r\0a3".to_vec(), vec![])),
Some((b"r\0b".to_vec(), vec![])),
Some((b"r\0b1".to_vec(), vec![])),
Some((b"r\0b2".to_vec(), vec![])),
Some((b"r\0b3".to_vec(), vec![])),
Some((b"r\0c".to_vec(), vec![])),
Some((b"r\0c1".to_vec(), vec![])),
Some((b"r\0c2".to_vec(), vec![])),
Some((b"r\0c3".to_vec(), vec![])),
Some((b"r\0d".to_vec(), vec![])),
];
expect_multi_values(
results,
block_on(storage.raw_batch_scan(
ctx.clone(),
"".to_string(),
ranges.clone(),
5,
true,
false,
))
.unwrap(),
);
let results = vec![
Some((b"r\0a".to_vec(), b"aa".to_vec())),
Some((b"r\0a1".to_vec(), b"aa11".to_vec())),
Some((b"r\0a2".to_vec(), b"aa22".to_vec())),
Some((b"r\0b".to_vec(), b"bb".to_vec())),
Some((b"r\0b1".to_vec(), b"bb11".to_vec())),
Some((b"r\0b2".to_vec(), b"bb22".to_vec())),
Some((b"r\0c".to_vec(), b"cc".to_vec())),
Some((b"r\0c1".to_vec(), b"cc11".to_vec())),
Some((b"r\0c2".to_vec(), b"cc22".to_vec())),
];
expect_multi_values(
results,
block_on(storage.raw_batch_scan(
ctx.clone(),
"".to_string(),
ranges.clone(),
3,
false,
false,
))
.unwrap(),
);
let results = vec![
Some((b"r\0a".to_vec(), vec![])),
Some((b"r\0a1".to_vec(), vec![])),
Some((b"r\0a2".to_vec(), vec![])),
Some((b"r\0b".to_vec(), vec![])),
Some((b"r\0b1".to_vec(), vec![])),
Some((b"r\0b2".to_vec(), vec![])),
Some((b"r\0c".to_vec(), vec![])),
Some((b"r\0c1".to_vec(), vec![])),
Some((b"r\0c2".to_vec(), vec![])),
];
expect_multi_values(
results,
block_on(storage.raw_batch_scan(ctx.clone(), "".to_string(), ranges, 3, true, false))
.unwrap(),
);
let results = vec![
Some((b"r\0a2".to_vec(), b"aa22".to_vec())),
Some((b"r\0a1".to_vec(), b"aa11".to_vec())),
Some((b"r\0a".to_vec(), b"aa".to_vec())),
Some((b"r\0b2".to_vec(), b"bb22".to_vec())),
Some((b"r\0b1".to_vec(), b"bb11".to_vec())),
Some((b"r\0b".to_vec(), b"bb".to_vec())),
Some((b"r\0c2".to_vec(), b"cc22".to_vec())),
Some((b"r\0c1".to_vec(), b"cc11".to_vec())),
Some((b"r\0c".to_vec(), b"cc".to_vec())),
];
let ranges: Vec<KeyRange> = vec![
(b"r\0a3".to_vec(), b"r\0a".to_vec()),
(b"r\0b3".to_vec(), b"r\0b".to_vec()),
(b"r\0c3".to_vec(), b"r\0c".to_vec()),
]
.into_iter()
.map(|(s, e)| {
let mut range = KeyRange::default();
range.set_start_key(s);
range.set_end_key(e);
range
})
.collect();
expect_multi_values(
results,
block_on(storage.raw_batch_scan(ctx.clone(), "".to_string(), ranges, 5, false, true))
.unwrap(),
);
let results = vec![
Some((b"r\0c2".to_vec(), b"cc22".to_vec())),
Some((b"r\0c1".to_vec(), b"cc11".to_vec())),
Some((b"r\0b2".to_vec(), b"bb22".to_vec())),
Some((b"r\0b1".to_vec(), b"bb11".to_vec())),
Some((b"r\0a2".to_vec(), b"aa22".to_vec())),
Some((b"r\0a1".to_vec(), b"aa11".to_vec())),
];
let ranges: Vec<KeyRange> = make_ranges(vec![
b"r\0c3".to_vec(),
b"r\0b3".to_vec(),
b"r\0a3".to_vec(),
b"r\0".to_vec(),
]);
expect_multi_values(
results,
block_on(storage.raw_batch_scan(ctx.clone(), "".to_string(), ranges, 2, false, true))
.unwrap(),
);
let results = vec![
Some((b"r\0a2".to_vec(), vec![])),
Some((b"r\0a1".to_vec(), vec![])),
Some((b"r\0a".to_vec(), vec![])),
Some((b"r\0b2".to_vec(), vec![])),
Some((b"r\0b1".to_vec(), vec![])),
Some((b"r\0b".to_vec(), vec![])),
Some((b"r\0c2".to_vec(), vec![])),
Some((b"r\0c1".to_vec(), vec![])),
Some((b"r\0c".to_vec(), vec![])),
];
let ranges: Vec<KeyRange> = vec![
(b"r\0a3".to_vec(), b"r\0a".to_vec()),
(b"r\0b3".to_vec(), b"r\0b".to_vec()),
(b"r\0c3".to_vec(), b"r\0c".to_vec()),
]
.into_iter()
.map(|(s, e)| {
let mut range = KeyRange::default();
range.set_start_key(s);
range.set_end_key(e);
range
})
.collect();
expect_multi_values(
results,
block_on(storage.raw_batch_scan(ctx, "".to_string(), ranges, 5, true, true)).unwrap(),
);
}
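    // raw_get_key_ttl reports the remaining TTL of a raw key: 0 for keys
    // stored without a TTL, and huge TTLs clamped so the absolute expiry
    // still fits in a u64.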
#[test]
fn test_raw_get_key_ttl() {
test_raw_get_key_ttl_impl(ApiVersion::V1ttl);
test_raw_get_key_ttl_impl(ApiVersion::V2);
}
fn test_raw_get_key_ttl_impl(api_version: ApiVersion) {
let storage = TestStorageBuilder::new(DummyLockManager {}, api_version)
.build()
.unwrap();
let (tx, rx) = channel();
let req_api_version = if api_version == ApiVersion::V1ttl {
ApiVersion::V1
} else {
api_version
};
let ctx = Context {
api_version: req_api_version,
..Default::default()
};
let test_data = vec![
(b"r\0a".to_vec(), b"aa".to_vec(), 10),
(b"r\0b".to_vec(), b"bb".to_vec(), 20),
(b"r\0c".to_vec(), b"cc".to_vec(), 0),
(b"r\0d".to_vec(), b"dd".to_vec(), 10),
(b"r\0e".to_vec(), b"ee".to_vec(), 20),
(b"r\0f".to_vec(), b"ff".to_vec(), u64::MAX),
];
// Write key-value pairs one by one
for &(ref key, ref value, ttl) in &test_data {
storage
.raw_put(
ctx.clone(),
"".to_string(),
key.clone(),
value.clone(),
ttl,
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
}
rx.recv().unwrap();
for &(ref key, _, ttl) in &test_data {
let res = block_on(storage.raw_get_key_ttl(ctx.clone(), "".to_string(), key.clone()))
.unwrap();
if ttl != 0 {
if ttl > u64::MAX - ttl_current_ts() {
assert_eq!(res, Some(u64::MAX - ttl_current_ts()));
} else {
assert_eq!(res, Some(ttl));
}
} else {
assert_eq!(res, Some(0));
}
}
}
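    // scan_lock lists locks visible at a given max_ts, honoring start/end
    // key bounds and limits, and errors out when a conflicting memory lock
    // exists.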
#[test]
fn test_scan_lock() {
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.build()
.unwrap();
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![
Mutation::make_put(Key::from_raw(b"x"), b"foo".to_vec()),
Mutation::make_put(Key::from_raw(b"y"), b"foo".to_vec()),
Mutation::make_put(Key::from_raw(b"z"), b"foo".to_vec()),
],
b"x".to_vec(),
100.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Prewrite::new(
vec![
Mutation::make_put(Key::from_raw(b"a"), b"foo".to_vec()),
Mutation::make_put(Key::from_raw(b"b"), b"foo".to_vec()),
Mutation::make_put(Key::from_raw(b"c"), b"foo".to_vec()),
],
b"c".to_vec(),
101.into(),
123,
false,
3,
TimeStamp::default(),
TimeStamp::default(),
None,
false,
AssertionLevel::Off,
Context::default(),
),
expect_ok_callback(tx, 0),
)
.unwrap();
rx.recv().unwrap();
let (lock_a, lock_b, lock_c, lock_x, lock_y, lock_z) = (
{
let mut lock = LockInfo::default();
lock.set_primary_lock(b"c".to_vec());
lock.set_lock_version(101);
lock.set_key(b"a".to_vec());
lock.set_lock_ttl(123);
lock.set_txn_size(3);
lock
},
{
let mut lock = LockInfo::default();
lock.set_primary_lock(b"c".to_vec());
lock.set_lock_version(101);
lock.set_key(b"b".to_vec());
lock.set_lock_ttl(123);
lock.set_txn_size(3);
lock
},
{
let mut lock = LockInfo::default();
lock.set_primary_lock(b"c".to_vec());
lock.set_lock_version(101);
lock.set_key(b"c".to_vec());
lock.set_lock_ttl(123);
lock.set_txn_size(3);
lock
},
{
let mut lock = LockInfo::default();
lock.set_primary_lock(b"x".to_vec());
lock.set_lock_version(100);
lock.set_key(b"x".to_vec());
lock
},
{
let mut lock = LockInfo::default();
lock.set_primary_lock(b"x".to_vec());
lock.set_lock_version(100);
lock.set_key(b"y".to_vec());
lock
},
{
let mut lock = LockInfo::default();
lock.set_primary_lock(b"x".to_vec());
lock.set_lock_version(100);
lock.set_key(b"z".to_vec());
lock
},
);
let cm = storage.concurrency_manager.clone();
let res =
block_on(storage.scan_lock(Context::default(), 99.into(), None, None, 10)).unwrap();
assert_eq!(res, vec![]);
assert_eq!(cm.max_ts(), 99.into());
let res =
block_on(storage.scan_lock(Context::default(), 100.into(), None, None, 10)).unwrap();
assert_eq!(res, vec![lock_x.clone(), lock_y.clone(), lock_z.clone()]);
assert_eq!(cm.max_ts(), 100.into());
let res = block_on(storage.scan_lock(
Context::default(),
100.into(),
Some(b"a".to_vec()),
None,
10,
))
.unwrap();
assert_eq!(res, vec![lock_x.clone(), lock_y.clone(), lock_z.clone()]);
let res = block_on(storage.scan_lock(
Context::default(),
100.into(),
Some(b"y".to_vec()),
None,
10,
))
.unwrap();
assert_eq!(res, vec![lock_y.clone(), lock_z.clone()]);
let res =
block_on(storage.scan_lock(Context::default(), 101.into(), None, None, 10)).unwrap();
assert_eq!(
res,
vec![
lock_a.clone(),
lock_b.clone(),
lock_c.clone(),
lock_x.clone(),
lock_y.clone(),
lock_z.clone(),
]
);
assert_eq!(cm.max_ts(), 101.into());
let res =
block_on(storage.scan_lock(Context::default(), 101.into(), None, None, 4)).unwrap();
assert_eq!(
res,
vec![lock_a, lock_b.clone(), lock_c.clone(), lock_x.clone()]
);
let res = block_on(storage.scan_lock(
Context::default(),
101.into(),
Some(b"b".to_vec()),
None,
4,
))
.unwrap();
assert_eq!(
res,
vec![
lock_b.clone(),
lock_c.clone(),
lock_x.clone(),
lock_y.clone(),
]
);
let res = block_on(storage.scan_lock(
Context::default(),
101.into(),
Some(b"b".to_vec()),
None,
0,
))
.unwrap();
assert_eq!(
res,
vec![
lock_b.clone(),
lock_c.clone(),
lock_x.clone(),
lock_y.clone(),
lock_z
]
);
let res = block_on(storage.scan_lock(
Context::default(),
101.into(),
Some(b"b".to_vec()),
Some(b"c".to_vec()),
0,
))
.unwrap();
assert_eq!(res, vec![lock_b.clone()]);
let res = block_on(storage.scan_lock(
Context::default(),
101.into(),
Some(b"b".to_vec()),
Some(b"z".to_vec()),
4,
))
.unwrap();
assert_eq!(
res,
vec![
lock_b.clone(),
lock_c.clone(),
lock_x.clone(),
lock_y.clone()
]
);
let res = block_on(storage.scan_lock(
Context::default(),
101.into(),
Some(b"b".to_vec()),
Some(b"z".to_vec()),
3,
))
.unwrap();
assert_eq!(res, vec![lock_b.clone(), lock_c.clone(), lock_x.clone()]);
let mem_lock = |k: &[u8], ts: u64, lock_type| {
let key = Key::from_raw(k);
let guard = block_on(cm.lock_key(&key));
guard.with_lock(|lock| {
*lock = Some(txn_types::Lock::new(
lock_type,
k.to_vec(),
ts.into(),
100,
None,
0.into(),
1,
20.into(),
));
});
guard
};
let guard = mem_lock(b"z", 80, LockType::Put);
block_on(storage.scan_lock(Context::default(), 101.into(), None, None, 1)).unwrap_err();
let guard2 = mem_lock(b"a", 80, LockType::Put);
let res = block_on(storage.scan_lock(
Context::default(),
101.into(),
Some(b"b".to_vec()),
Some(b"z".to_vec()),
0,
))
.unwrap();
assert_eq!(
res,
vec![
lock_b.clone(),
lock_c.clone(),
lock_x.clone(),
lock_y.clone()
]
);
drop(guard);
drop(guard2);
// LockType::Lock can't be ignored by scan_lock
let guard = mem_lock(b"c", 80, LockType::Lock);
block_on(storage.scan_lock(
Context::default(),
101.into(),
Some(b"b".to_vec()),
Some(b"z".to_vec()),
1,
))
.unwrap_err();
drop(guard);
let guard = mem_lock(b"c", 102, LockType::Put);
let res = block_on(storage.scan_lock(
Context::default(),
101.into(),
Some(b"b".to_vec()),
Some(b"z".to_vec()),
0,
))
.unwrap();
assert_eq!(res, vec![lock_b, lock_c, lock_x, lock_y]);
drop(guard);
}
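    // Resolves (commits or rolls back) all locks of a transaction, with lock
    // counts chosen around multiples of RESOLVE_LOCK_BATCH_SIZE.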
#[test]
fn test_resolve_lock() {
use crate::storage::txn::RESOLVE_LOCK_BATCH_SIZE;
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.build()
.unwrap();
let (tx, rx) = channel();
// These locks (transaction ts=99) are not going to be resolved.
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![
Mutation::make_put(Key::from_raw(b"a"), b"foo".to_vec()),
Mutation::make_put(Key::from_raw(b"b"), b"foo".to_vec()),
Mutation::make_put(Key::from_raw(b"c"), b"foo".to_vec()),
],
b"c".to_vec(),
99.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
let (lock_a, lock_b, lock_c) = (
{
let mut lock = LockInfo::default();
lock.set_primary_lock(b"c".to_vec());
lock.set_lock_version(99);
lock.set_key(b"a".to_vec());
lock
},
{
let mut lock = LockInfo::default();
lock.set_primary_lock(b"c".to_vec());
lock.set_lock_version(99);
lock.set_key(b"b".to_vec());
lock
},
{
let mut lock = LockInfo::default();
lock.set_primary_lock(b"c".to_vec());
lock.set_lock_version(99);
lock.set_key(b"c".to_vec());
lock
},
);
        // We should be able to resolve all locks of a transaction regardless of
        // how its lock count compares to RESOLVE_LOCK_BATCH_SIZE.
let scanned_locks_coll = vec![
1,
RESOLVE_LOCK_BATCH_SIZE,
RESOLVE_LOCK_BATCH_SIZE - 1,
RESOLVE_LOCK_BATCH_SIZE + 1,
RESOLVE_LOCK_BATCH_SIZE * 2,
RESOLVE_LOCK_BATCH_SIZE * 2 - 1,
RESOLVE_LOCK_BATCH_SIZE * 2 + 1,
];
let is_rollback_coll = vec![
false, // commit
true, // rollback
];
let mut ts = 100.into();
for scanned_locks in scanned_locks_coll {
for is_rollback in &is_rollback_coll {
let mut mutations = vec![];
for i in 0..scanned_locks {
mutations.push(Mutation::make_put(
Key::from_raw(format!("x{:08}", i).as_bytes()),
b"foo".to_vec(),
));
}
storage
.sched_txn_command(
commands::Prewrite::with_defaults(mutations, b"x".to_vec(), ts),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
let mut txn_status = HashMap::default();
txn_status.insert(
ts,
if *is_rollback {
TimeStamp::zero() // rollback
} else {
(ts.into_inner() + 5).into() // commit, commit_ts = start_ts + 5
},
);
storage
.sched_txn_command(
commands::ResolveLockReadPhase::new(txn_status, None, Context::default()),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
// All locks should be resolved except for a, b and c.
let res =
block_on(storage.scan_lock(Context::default(), ts, None, None, 0)).unwrap();
assert_eq!(res, vec![lock_a.clone(), lock_b.clone(), lock_c.clone()]);
ts = (ts.into_inner() + 10).into();
}
}
}
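    // ResolveLockLite resolves only the given keys of a transaction, leaving
    // the other keys locked.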
#[test]
fn test_resolve_lock_lite() {
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.build()
.unwrap();
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![
Mutation::make_put(Key::from_raw(b"a"), b"foo".to_vec()),
Mutation::make_put(Key::from_raw(b"b"), b"foo".to_vec()),
Mutation::make_put(Key::from_raw(b"c"), b"foo".to_vec()),
],
b"c".to_vec(),
99.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
        // Roll back keys 'b' and 'c', leaving key 'a' still locked.
let resolve_keys = vec![Key::from_raw(b"b"), Key::from_raw(b"c")];
storage
.sched_txn_command(
commands::ResolveLockLite::new(
99.into(),
TimeStamp::zero(),
resolve_keys,
Context::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
// Check lock for key 'a'.
let lock_a = {
let mut lock = LockInfo::default();
lock.set_primary_lock(b"c".to_vec());
lock.set_lock_version(99);
lock.set_key(b"a".to_vec());
lock
};
let res =
block_on(storage.scan_lock(Context::default(), 99.into(), None, None, 0)).unwrap();
assert_eq!(res, vec![lock_a]);
// Resolve lock for key 'a'.
storage
.sched_txn_command(
commands::ResolveLockLite::new(
99.into(),
TimeStamp::zero(),
vec![Key::from_raw(b"a")],
Context::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![
Mutation::make_put(Key::from_raw(b"a"), b"foo".to_vec()),
Mutation::make_put(Key::from_raw(b"b"), b"foo".to_vec()),
Mutation::make_put(Key::from_raw(b"c"), b"foo".to_vec()),
],
b"c".to_vec(),
101.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
        // Commit keys 'b' and 'c', leaving key 'a' still locked.
let resolve_keys = vec![Key::from_raw(b"b"), Key::from_raw(b"c")];
storage
.sched_txn_command(
commands::ResolveLockLite::new(
101.into(),
102.into(),
resolve_keys,
Context::default(),
),
expect_ok_callback(tx, 0),
)
.unwrap();
rx.recv().unwrap();
// Check lock for key 'a'.
let lock_a = {
let mut lock = LockInfo::default();
lock.set_primary_lock(b"c".to_vec());
lock.set_lock_version(101);
lock.set_key(b"a".to_vec());
lock
};
let res =
block_on(storage.scan_lock(Context::default(), 101.into(), None, None, 0)).unwrap();
assert_eq!(res, vec![lock_a]);
}
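    // TxnHeartBeat extends a lock's TTL: it never shrinks the TTL, and it
    // fails with `TxnNotFound` when the lock is missing or mismatched.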
#[test]
fn test_txn_heart_beat() {
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.build()
.unwrap();
let (tx, rx) = channel();
let k = Key::from_raw(b"k");
let v = b"v".to_vec();
let uncommitted = TxnStatus::uncommitted;
// No lock.
storage
.sched_txn_command(
commands::TxnHeartBeat::new(k.clone(), 10.into(), 100, Context::default()),
expect_fail_callback(tx.clone(), 0, |e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::TxnNotFound { .. },
))))) => (),
e => panic!("unexpected error chain: {:?}", e),
}),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Prewrite::with_lock_ttl(
vec![Mutation::make_put(k.clone(), v.clone())],
b"k".to_vec(),
10.into(),
100,
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
let lock_with_ttl = |ttl| {
txn_types::Lock::new(
LockType::Put,
b"k".to_vec(),
10.into(),
ttl,
Some(v.clone()),
0.into(),
0,
0.into(),
)
};
        // `advise_ttl` = 90, which is less than the current TTL of 100. The lock's TTL remains 100.
storage
.sched_txn_command(
commands::TxnHeartBeat::new(k.clone(), 10.into(), 90, Context::default()),
expect_value_callback(tx.clone(), 0, uncommitted(lock_with_ttl(100), false)),
)
.unwrap();
rx.recv().unwrap();
        // `advise_ttl` = 110, which is greater than the current TTL. The lock's
        // TTL is updated to 110.
storage
.sched_txn_command(
commands::TxnHeartBeat::new(k.clone(), 10.into(), 110, Context::default()),
expect_value_callback(tx.clone(), 0, uncommitted(lock_with_ttl(110), false)),
)
.unwrap();
rx.recv().unwrap();
        // The lock does not match. Nothing happens except returning an error.
storage
.sched_txn_command(
commands::TxnHeartBeat::new(k, 11.into(), 150, Context::default()),
expect_fail_callback(tx, 0, |e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::TxnNotFound { .. },
))))) => (),
e => panic!("unexpected error chain: {:?}", e),
}),
)
.unwrap();
rx.recv().unwrap();
}
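    // CheckTxnStatus covers every transaction state: missing (with and
    // without rollback_if_not_exist), locked, committed and TTL-expired.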
#[test]
fn test_check_txn_status() {
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.build()
.unwrap();
let cm = storage.concurrency_manager.clone();
let (tx, rx) = channel();
let k = Key::from_raw(b"k");
let v = b"b".to_vec();
let ts = TimeStamp::compose;
use TxnStatus::*;
let uncommitted = TxnStatus::uncommitted;
let committed = TxnStatus::committed;
// No lock and no commit info. Gets an error.
storage
.sched_txn_command(
commands::CheckTxnStatus::new(
k.clone(),
ts(9, 0),
ts(9, 1),
ts(9, 1),
false,
false,
false,
Context::default(),
),
expect_fail_callback(tx.clone(), 0, |e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::TxnNotFound { .. },
))))) => (),
e => panic!("unexpected error chain: {:?}", e),
}),
)
.unwrap();
rx.recv().unwrap();
assert_eq!(cm.max_ts(), ts(9, 1));
        // No lock and no commit info. If rollback_if_not_exist is specified, the
        // key will be rolled back.
storage
.sched_txn_command(
commands::CheckTxnStatus::new(
k.clone(),
ts(9, 0),
ts(9, 1),
ts(9, 1),
true,
false,
false,
Context::default(),
),
expect_value_callback(tx.clone(), 0, LockNotExist),
)
.unwrap();
rx.recv().unwrap();
        // A rollback record has been written, so a later-arriving prewrite will fail.
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![Mutation::make_put(k.clone(), v.clone())],
k.as_encoded().to_vec(),
ts(9, 0),
),
expect_fail_callback(tx.clone(), 0, |e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::WriteConflict { .. },
))))) => (),
e => panic!("unexpected error chain: {:?}", e),
}),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Prewrite::new(
vec![Mutation::make_put(k.clone(), v.clone())],
b"k".to_vec(),
ts(10, 0),
100,
false,
3,
ts(10, 1),
TimeStamp::default(),
Some(vec![b"k1".to_vec(), b"k2".to_vec()]),
false,
AssertionLevel::Off,
Context::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
        // If the lock exists and has not expired, the lock's information is returned.
storage
.sched_txn_command(
commands::CheckTxnStatus::new(
k.clone(),
ts(10, 0),
0.into(),
0.into(),
true,
false,
false,
Context::default(),
),
expect_value_callback(
tx.clone(),
0,
uncommitted(
txn_types::Lock::new(
LockType::Put,
b"k".to_vec(),
ts(10, 0),
100,
Some(v.clone()),
0.into(),
3,
ts(10, 1),
)
.use_async_commit(vec![b"k1".to_vec(), b"k2".to_vec()]),
false,
),
),
)
.unwrap();
rx.recv().unwrap();
// TODO: Check the lock's min_commit_ts field.
storage
.sched_txn_command(
commands::Commit::new(vec![k.clone()], ts(10, 0), ts(20, 0), Context::default()),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
// If the transaction is committed, returns the commit_ts.
storage
.sched_txn_command(
commands::CheckTxnStatus::new(
k.clone(),
ts(10, 0),
ts(12, 0),
ts(15, 0),
true,
false,
false,
Context::default(),
),
expect_value_callback(tx.clone(), 0, committed(ts(20, 0))),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Prewrite::with_lock_ttl(
vec![Mutation::make_put(k.clone(), v)],
k.as_encoded().to_vec(),
ts(25, 0),
100,
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
        // If the lock has expired, clean it up.
storage
.sched_txn_command(
commands::CheckTxnStatus::new(
k.clone(),
ts(25, 0),
ts(126, 0),
ts(127, 0),
true,
false,
false,
Context::default(),
),
expect_value_callback(tx.clone(), 0, TtlExpire),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Commit::new(vec![k], ts(25, 0), ts(28, 0), Context::default()),
expect_fail_callback(tx, 0, |e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::TxnLockNotFound { .. },
))))) => (),
e => panic!("unexpected error chain: {:?}", e),
}),
)
.unwrap();
rx.recv().unwrap();
}
#[test]
fn test_check_secondary_locks() {
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.build()
.unwrap();
let cm = storage.concurrency_manager.clone();
let (tx, rx) = channel();
let k1 = Key::from_raw(b"k1");
let k2 = Key::from_raw(b"k2");
storage
.sched_txn_command(
commands::Prewrite::new(
vec![
Mutation::make_lock(k1.clone()),
Mutation::make_lock(k2.clone()),
],
b"k".to_vec(),
10.into(),
100,
false,
2,
TimeStamp::zero(),
TimeStamp::default(),
None,
false,
AssertionLevel::Off,
Context::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
// All locks exist
let mut lock1 = LockInfo::default();
lock1.set_primary_lock(b"k".to_vec());
lock1.set_lock_version(10);
lock1.set_key(b"k1".to_vec());
lock1.set_txn_size(2);
lock1.set_lock_ttl(100);
lock1.set_lock_type(Op::Lock);
let mut lock2 = lock1.clone();
lock2.set_key(b"k2".to_vec());
storage
.sched_txn_command(
commands::CheckSecondaryLocks::new(
vec![k1.clone(), k2.clone()],
10.into(),
Context::default(),
),
expect_secondary_locks_status_callback(
tx.clone(),
SecondaryLocksStatus::Locked(vec![lock1, lock2]),
),
)
.unwrap();
rx.recv().unwrap();
        // One of the locks is committed.
storage
.sched_txn_command(
commands::Commit::new(vec![k1.clone()], 10.into(), 20.into(), Context::default()),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::CheckSecondaryLocks::new(vec![k1, k2], 10.into(), Context::default()),
expect_secondary_locks_status_callback(
tx.clone(),
SecondaryLocksStatus::Committed(20.into()),
),
)
.unwrap();
rx.recv().unwrap();
assert_eq!(cm.max_ts(), 10.into());
// Some of the locks do not exist
let k3 = Key::from_raw(b"k3");
let k4 = Key::from_raw(b"k4");
storage
.sched_txn_command(
commands::Prewrite::new(
vec![Mutation::make_lock(k3.clone())],
b"k".to_vec(),
30.into(),
100,
false,
2,
TimeStamp::zero(),
TimeStamp::default(),
None,
false,
AssertionLevel::Off,
Context::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::CheckSecondaryLocks::new(vec![k3, k4], 10.into(), Context::default()),
expect_secondary_locks_status_callback(tx, SecondaryLocksStatus::RolledBack),
)
.unwrap();
rx.recv().unwrap();
}
fn test_pessimistic_lock_impl(pipelined_pessimistic_lock: bool) {
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.pipelined_pessimistic_lock(pipelined_pessimistic_lock)
.build()
.unwrap();
let cm = storage.concurrency_manager.clone();
let (tx, rx) = channel();
let (key, val) = (Key::from_raw(b"key"), b"val".to_vec());
let (key2, val2) = (Key::from_raw(b"key2"), b"val2".to_vec());
        // The key does not exist.
for &(return_values, check_existence) in
&[(false, false), (false, true), (true, false), (true, true)]
{
let pessimistic_lock_res = if return_values {
PessimisticLockRes::Values(vec![None])
} else if check_existence {
PessimisticLockRes::Existence(vec![false])
} else {
PessimisticLockRes::Empty
};
storage
.sched_txn_command(
new_acquire_pessimistic_lock_command(
vec![(key.clone(), false)],
10,
10,
return_values,
check_existence,
),
expect_pessimistic_lock_res_callback(tx.clone(), pessimistic_lock_res.clone()),
)
.unwrap();
rx.recv().unwrap();
if return_values || check_existence {
assert_eq!(cm.max_ts(), 10.into());
}
// Duplicated command
storage
.sched_txn_command(
new_acquire_pessimistic_lock_command(
vec![(key.clone(), false)],
10,
10,
return_values,
check_existence,
),
expect_pessimistic_lock_res_callback(tx.clone(), pessimistic_lock_res.clone()),
)
.unwrap();
rx.recv().unwrap();
delete_pessimistic_lock(&storage, key.clone(), 10, 10);
}
storage
.sched_txn_command(
new_acquire_pessimistic_lock_command(
vec![(key.clone(), false)],
10,
10,
false,
false,
),
expect_pessimistic_lock_res_callback(tx.clone(), PessimisticLockRes::Empty),
)
.unwrap();
rx.recv().unwrap();
// KeyIsLocked
for &(return_values, check_existence) in
&[(false, false), (false, true), (true, false), (true, true)]
{
storage
.sched_txn_command(
new_acquire_pessimistic_lock_command(
vec![(key.clone(), false)],
20,
20,
return_values,
check_existence,
),
expect_fail_callback(tx.clone(), 0, |e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(
mvcc::Error(box mvcc::ErrorInner::KeyIsLocked(_)),
)))) => (),
e => panic!("unexpected error chain: {:?}", e),
}),
)
.unwrap();
// The DummyLockManager consumes the Msg::WaitForLock.
rx.recv_timeout(Duration::from_millis(100)).unwrap_err();
}
        // max_ts needn't be updated when reading the value fails.
assert_eq!(cm.max_ts(), 10.into());
// Put key and key2.
storage
.sched_txn_command(
commands::PrewritePessimistic::new(
vec![
(Mutation::make_put(key.clone(), val.clone()), true),
(Mutation::make_put(key2.clone(), val2.clone()), false),
],
key.to_raw().unwrap(),
10.into(),
3000,
10.into(),
1,
TimeStamp::zero(),
TimeStamp::default(),
None,
false,
AssertionLevel::Off,
Context::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
storage
.sched_txn_command(
commands::Commit::new(
vec![key.clone(), key2.clone()],
10.into(),
20.into(),
Context::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
// WriteConflict
for &(return_values, check_existence) in
&[(false, false), (false, true), (true, false), (true, true)]
{
storage
.sched_txn_command(
new_acquire_pessimistic_lock_command(
vec![(key.clone(), false)],
15,
15,
return_values,
check_existence,
),
expect_fail_callback(tx.clone(), 0, |e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(
mvcc::Error(box mvcc::ErrorInner::WriteConflict { .. }),
)))) => (),
e => panic!("unexpected error chain: {:?}", e),
}),
)
.unwrap();
rx.recv().unwrap();
}
        // max_ts needn't be updated when reading the value fails.
assert_eq!(cm.max_ts(), 10.into());
// Return multiple values
for &(return_values, check_existence) in
&[(false, false), (false, true), (true, false), (true, true)]
{
let pessimistic_lock_res = if return_values {
PessimisticLockRes::Values(vec![Some(val.clone()), Some(val2.clone()), None])
} else if check_existence {
PessimisticLockRes::Existence(vec![true, true, false])
} else {
PessimisticLockRes::Empty
};
storage
.sched_txn_command(
new_acquire_pessimistic_lock_command(
vec![
(key.clone(), false),
(key2.clone(), false),
(Key::from_raw(b"key3"), false),
],
30,
30,
return_values,
check_existence,
),
expect_pessimistic_lock_res_callback(tx.clone(), pessimistic_lock_res),
)
.unwrap();
rx.recv().unwrap();
if return_values || check_existence {
assert_eq!(cm.max_ts(), 30.into());
}
delete_pessimistic_lock(&storage, key.clone(), 30, 30);
}
}
#[test]
fn test_pessimistic_lock() {
test_pessimistic_lock_impl(false);
test_pessimistic_lock_impl(true);
}
pub enum Msg {
WaitFor {
start_ts: TimeStamp,
cb: StorageCallback,
pr: ProcessResult,
lock: Lock,
is_first_lock: bool,
timeout: Option<WaitTimeout>,
diag_ctx: DiagnosticContext,
},
WakeUp {
lock_ts: TimeStamp,
hashes: Vec<u64>,
commit_ts: TimeStamp,
is_pessimistic_txn: bool,
},
}
    // `ProxyLockMgr` sends all msgs it receives to the `Sender`.
    // It's used to check whether we send the right messages to the lock manager.
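    // The tests below construct it with a channel sender and assert on the forwarded
    // `Msg` values.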
#[derive(Clone)]
pub struct ProxyLockMgr {
tx: Sender<Msg>,
has_waiter: Arc<AtomicBool>,
}
impl ProxyLockMgr {
pub fn new(tx: Sender<Msg>) -> Self {
Self {
tx,
has_waiter: Arc::new(AtomicBool::new(false)),
}
}
pub fn set_has_waiter(&mut self, has_waiter: bool) {
self.has_waiter.store(has_waiter, Ordering::Relaxed);
}
}
impl LockManager for ProxyLockMgr {
fn wait_for(
&self,
start_ts: TimeStamp,
cb: StorageCallback,
pr: ProcessResult,
lock: Lock,
is_first_lock: bool,
timeout: Option<WaitTimeout>,
diag_ctx: DiagnosticContext,
) {
self.tx
.send(Msg::WaitFor {
start_ts,
cb,
pr,
lock,
is_first_lock,
timeout,
diag_ctx,
})
.unwrap();
}
fn wake_up(
&self,
lock_ts: TimeStamp,
hashes: Vec<u64>,
commit_ts: TimeStamp,
is_pessimistic_txn: bool,
) {
self.tx
.send(Msg::WakeUp {
lock_ts,
hashes,
commit_ts,
is_pessimistic_txn,
})
.unwrap();
}
fn has_waiter(&self) -> bool {
self.has_waiter.load(Ordering::Relaxed)
}
fn dump_wait_for_entries(&self, _cb: waiter_manager::Callback) {
unimplemented!()
}
}
    // Test whether `Storage` sends the right wait-for-lock msgs to `LockManager`.
#[test]
fn validate_wait_for_lock_msg() {
let (msg_tx, msg_rx) = channel();
let storage = TestStorageBuilder::from_engine_and_lock_mgr(
TestEngineBuilder::new().build().unwrap(),
ProxyLockMgr::new(msg_tx),
ApiVersion::V1,
)
.build()
.unwrap();
let (k, v) = (b"k".to_vec(), b"v".to_vec());
let (tx, rx) = channel();
// Write lock-k.
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![Mutation::make_put(Key::from_raw(&k), v)],
k.clone(),
10.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
        // No wait-for msg has been sent yet.
assert!(msg_rx.try_recv().is_err());
// Meet lock-k.
storage
.sched_txn_command(
commands::AcquirePessimisticLock::new(
vec![(Key::from_raw(b"foo"), false), (Key::from_raw(&k), false)],
k.clone(),
20.into(),
3000,
true,
20.into(),
Some(WaitTimeout::Millis(100)),
false,
21.into(),
OldValues::default(),
false,
Context::default(),
),
expect_ok_callback(tx, 0),
)
.unwrap();
        // The transaction should be waiting for the lock to be released, so the cb won't be called.
rx.recv_timeout(Duration::from_millis(500)).unwrap_err();
let msg = msg_rx.try_recv().unwrap();
// Check msg validation.
match msg {
Msg::WaitFor {
start_ts,
pr,
lock,
is_first_lock,
timeout,
..
} => {
assert_eq!(start_ts, TimeStamp::new(20));
assert_eq!(
lock,
Lock {
ts: 10.into(),
hash: Key::from_raw(&k).gen_hash(),
}
);
                assert!(is_first_lock);
assert_eq!(timeout, Some(WaitTimeout::Millis(100)));
match pr {
ProcessResult::PessimisticLockRes { res } => match res {
Err(Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(
MvccError(box MvccErrorInner::KeyIsLocked(info)),
))))) => {
assert_eq!(info.get_key(), k.as_slice());
assert_eq!(info.get_primary_lock(), k.as_slice());
assert_eq!(info.get_lock_version(), 10);
}
_ => panic!("unexpected error"),
},
_ => panic!("unexpected process result"),
};
}
_ => panic!("unexpected msg"),
}
}
    // Test whether `Storage` sends the right wake-up msgs to `LockManager`.
#[test]
fn validate_wake_up_msg() {
fn assert_wake_up_msg_eq(
msg: Msg,
expected_lock_ts: TimeStamp,
expected_hashes: Vec<u64>,
expected_commit_ts: TimeStamp,
expected_is_pessimistic_txn: bool,
) {
match msg {
Msg::WakeUp {
lock_ts,
hashes,
commit_ts,
is_pessimistic_txn,
} => {
assert_eq!(lock_ts, expected_lock_ts);
assert_eq!(hashes, expected_hashes);
assert_eq!(commit_ts, expected_commit_ts);
assert_eq!(is_pessimistic_txn, expected_is_pessimistic_txn);
}
_ => panic!("unexpected msg"),
}
}
let (msg_tx, msg_rx) = channel();
let mut lock_mgr = ProxyLockMgr::new(msg_tx);
lock_mgr.set_has_waiter(true);
let storage = TestStorageBuilder::from_engine_and_lock_mgr(
TestEngineBuilder::new().build().unwrap(),
lock_mgr,
ApiVersion::V1,
)
.build()
.unwrap();
let (tx, rx) = channel();
let prewrite_locks = |keys: &[Key], ts: TimeStamp| {
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
keys.iter()
.map(|k| Mutation::make_put(k.clone(), b"v".to_vec()))
.collect(),
keys[0].to_raw().unwrap(),
ts,
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
};
let acquire_pessimistic_locks = |keys: &[Key], ts: TimeStamp| {
storage
.sched_txn_command(
new_acquire_pessimistic_lock_command(
keys.iter().map(|k| (k.clone(), false)).collect(),
ts,
ts,
false,
false,
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
};
let keys = vec![
Key::from_raw(b"a"),
Key::from_raw(b"b"),
Key::from_raw(b"c"),
];
let key_hashes: Vec<u64> = keys.iter().map(|k| k.gen_hash()).collect();
// Commit
prewrite_locks(&keys, 10.into());
        // If locks don't exist, hashes of released locks should be empty.
for empty_hashes in &[false, true] {
storage
.sched_txn_command(
commands::Commit::new(keys.clone(), 10.into(), 20.into(), Context::default()),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
let msg = msg_rx.recv().unwrap();
let hashes = if *empty_hashes {
Vec::new()
} else {
key_hashes.clone()
};
assert_wake_up_msg_eq(msg, 10.into(), hashes, 20.into(), false);
}
// Cleanup
for pessimistic in &[false, true] {
let mut ts = TimeStamp::new(30);
if *pessimistic {
ts.incr();
acquire_pessimistic_locks(&keys[..1], ts);
} else {
prewrite_locks(&keys[..1], ts);
}
for empty_hashes in &[false, true] {
storage
.sched_txn_command(
commands::Cleanup::new(
keys[0].clone(),
ts,
TimeStamp::max(),
Context::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
let msg = msg_rx.recv().unwrap();
let (hashes, pessimistic) = if *empty_hashes {
(Vec::new(), false)
} else {
(key_hashes[..1].to_vec(), *pessimistic)
};
assert_wake_up_msg_eq(msg, ts, hashes, 0.into(), pessimistic);
}
}
// Rollback
for pessimistic in &[false, true] {
let mut ts = TimeStamp::new(40);
if *pessimistic {
ts.incr();
acquire_pessimistic_locks(&keys, ts);
} else {
prewrite_locks(&keys, ts);
}
for empty_hashes in &[false, true] {
storage
.sched_txn_command(
commands::Rollback::new(keys.clone(), ts, Context::default()),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
let msg = msg_rx.recv().unwrap();
let (hashes, pessimistic) = if *empty_hashes {
(Vec::new(), false)
} else {
(key_hashes.clone(), *pessimistic)
};
assert_wake_up_msg_eq(msg, ts, hashes, 0.into(), pessimistic);
}
}
// PessimisticRollback
acquire_pessimistic_locks(&keys, 50.into());
for empty_hashes in &[false, true] {
storage
.sched_txn_command(
commands::PessimisticRollback::new(
keys.clone(),
50.into(),
50.into(),
Context::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
let msg = msg_rx.recv().unwrap();
let (hashes, pessimistic) = if *empty_hashes {
(Vec::new(), false)
} else {
(key_hashes.clone(), true)
};
assert_wake_up_msg_eq(msg, 50.into(), hashes, 0.into(), pessimistic);
}
// ResolveLockLite
for commit in &[false, true] {
let mut start_ts = TimeStamp::new(60);
let commit_ts = if *commit {
start_ts.incr();
start_ts.next()
} else {
TimeStamp::zero()
};
prewrite_locks(&keys, start_ts);
for empty_hashes in &[false, true] {
storage
.sched_txn_command(
commands::ResolveLockLite::new(
start_ts,
commit_ts,
keys.clone(),
Context::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
let msg = msg_rx.recv().unwrap();
let hashes = if *empty_hashes {
Vec::new()
} else {
key_hashes.clone()
};
assert_wake_up_msg_eq(msg, start_ts, hashes, commit_ts, false);
}
}
// ResolveLock
let mut txn_status = HashMap::default();
acquire_pessimistic_locks(&keys, 70.into());
// Rollback start_ts=70
txn_status.insert(TimeStamp::new(70), TimeStamp::zero());
let committed_keys = vec![
Key::from_raw(b"d"),
Key::from_raw(b"e"),
Key::from_raw(b"f"),
];
let committed_key_hashes: Vec<u64> = committed_keys.iter().map(|k| k.gen_hash()).collect();
// Commit start_ts=75
prewrite_locks(&committed_keys, 75.into());
txn_status.insert(TimeStamp::new(75), TimeStamp::new(76));
storage
.sched_txn_command(
commands::ResolveLockReadPhase::new(txn_status, None, Context::default()),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
let mut msg1 = msg_rx.recv().unwrap();
let mut msg2 = msg_rx.recv().unwrap();
match msg1 {
Msg::WakeUp { lock_ts, .. } => {
if lock_ts != TimeStamp::new(70) {
                    // Let msg1 be the msg of the rolled-back transaction.
std::mem::swap(&mut msg1, &mut msg2);
}
assert_wake_up_msg_eq(msg1, 70.into(), key_hashes, 0.into(), true);
assert_wake_up_msg_eq(msg2, 75.into(), committed_key_hashes, 76.into(), false);
}
_ => panic!("unexpect msg"),
}
// CheckTxnStatus
let key = Key::from_raw(b"k");
let start_ts = TimeStamp::compose(100, 0);
storage
.sched_txn_command(
commands::Prewrite::with_lock_ttl(
vec![Mutation::make_put(key.clone(), b"v".to_vec())],
key.to_raw().unwrap(),
start_ts,
100,
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
        // Not expired.
storage
.sched_txn_command(
commands::CheckTxnStatus::new(
key.clone(),
start_ts,
TimeStamp::compose(110, 0),
TimeStamp::compose(150, 0),
false,
false,
false,
Context::default(),
),
expect_value_callback(
tx.clone(),
0,
TxnStatus::uncommitted(
txn_types::Lock::new(
LockType::Put,
b"k".to_vec(),
start_ts,
100,
Some(b"v".to_vec()),
0.into(),
0,
0.into(),
),
false,
),
),
)
.unwrap();
rx.recv().unwrap();
// No msg
assert!(msg_rx.try_recv().is_err());
// Expired
storage
.sched_txn_command(
commands::CheckTxnStatus::new(
key.clone(),
start_ts,
TimeStamp::compose(110, 0),
TimeStamp::compose(201, 0),
false,
false,
false,
Context::default(),
),
expect_value_callback(tx.clone(), 0, TxnStatus::TtlExpire),
)
.unwrap();
rx.recv().unwrap();
assert_wake_up_msg_eq(
msg_rx.recv().unwrap(),
start_ts,
vec![key.gen_hash()],
0.into(),
false,
);
}
#[test]
fn test_check_memory_locks() {
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.build()
.unwrap();
let cm = storage.get_concurrency_manager();
let key = Key::from_raw(b"key");
let guard = block_on(cm.lock_key(&key));
guard.with_lock(|lock| {
*lock = Some(txn_types::Lock::new(
LockType::Put,
b"key".to_vec(),
10.into(),
100,
Some(vec![]),
0.into(),
1,
20.into(),
));
});
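        // The in-memory lock installed above should block reads at a later ts until the
        // request lists it among its resolved or committed locks.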
let mut ctx = Context::default();
ctx.set_isolation_level(IsolationLevel::Si);
// Test get
let key_error = extract_key_error(
&block_on(storage.get(ctx.clone(), b"key".to_vec(), 100.into())).unwrap_err(),
);
assert_eq!(key_error.get_locked().get_key(), b"key");
// Ignore memory locks in resolved or committed locks.
ctx.set_resolved_locks(vec![10]);
assert!(block_on(storage.get(ctx.clone(), b"key".to_vec(), 100.into())).is_ok());
ctx.take_resolved_locks();
// Test batch_get
let batch_get = |ctx| {
block_on(storage.batch_get(ctx, vec![b"a".to_vec(), b"key".to_vec()], 100.into()))
};
let key_error = extract_key_error(&batch_get(ctx.clone()).unwrap_err());
assert_eq!(key_error.get_locked().get_key(), b"key");
// Ignore memory locks in resolved locks.
ctx.set_resolved_locks(vec![10]);
assert!(batch_get(ctx.clone()).is_ok());
ctx.take_resolved_locks();
// Test scan
let scan = |ctx, start_key, end_key, reverse| {
block_on(storage.scan(ctx, start_key, end_key, 10, 0, 100.into(), false, reverse))
};
let key_error =
extract_key_error(&scan(ctx.clone(), b"a".to_vec(), None, false).unwrap_err());
assert_eq!(key_error.get_locked().get_key(), b"key");
ctx.set_resolved_locks(vec![10]);
assert!(scan(ctx.clone(), b"a".to_vec(), None, false).is_ok());
ctx.take_resolved_locks();
let key_error =
extract_key_error(&scan(ctx.clone(), b"\xff".to_vec(), None, true).unwrap_err());
assert_eq!(key_error.get_locked().get_key(), b"key");
ctx.set_resolved_locks(vec![10]);
assert!(scan(ctx.clone(), b"\xff".to_vec(), None, false).is_ok());
ctx.take_resolved_locks();
// Ignore memory locks in resolved or committed locks.
// Test batch_get_command
let mut req1 = GetRequest::default();
req1.set_context(ctx.clone());
req1.set_key(b"a".to_vec());
req1.set_version(50);
let mut req2 = GetRequest::default();
req2.set_context(ctx);
req2.set_key(b"key".to_vec());
req2.set_version(100);
let batch_get_command = |req2| {
let consumer = GetConsumer::new();
block_on(storage.batch_get_command(
vec![req1.clone(), req2],
vec![1, 2],
consumer.clone(),
Instant::now(),
))
.unwrap();
consumer.take_data()
};
let res = batch_get_command(req2.clone());
assert!(res[0].is_ok());
let key_error = extract_key_error(res[1].as_ref().unwrap_err());
assert_eq!(key_error.get_locked().get_key(), b"key");
// Ignore memory locks in resolved or committed locks.
req2.mut_context().set_resolved_locks(vec![10]);
let res = batch_get_command(req2.clone());
assert!(res[0].is_ok());
assert!(res[1].is_ok());
req2.mut_context().take_resolved_locks();
}
#[test]
fn test_read_access_locks() {
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.build()
.unwrap();
let (k1, v1) = (b"k1".to_vec(), b"v1".to_vec());
let (k2, v2) = (b"k2".to_vec(), b"v2".to_vec());
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::Prewrite::with_defaults(
vec![
Mutation::make_put(Key::from_raw(&k1), v1.clone()),
Mutation::make_put(Key::from_raw(&k2), v2.clone()),
],
k1.clone(),
100.into(),
),
expect_ok_callback(tx, 0),
)
.unwrap();
rx.recv().unwrap();
let mut ctx = Context::default();
ctx.set_isolation_level(IsolationLevel::Si);
ctx.set_committed_locks(vec![100]);
// get
assert_eq!(
block_on(storage.get(ctx.clone(), k1.clone(), 110.into()))
.unwrap()
.0,
Some(v1.clone())
);
// batch get
let res =
block_on(storage.batch_get(ctx.clone(), vec![k1.clone(), k2.clone()], 110.into()))
.unwrap()
.0;
if res[0].as_ref().unwrap().0 == k1 {
assert_eq!(&res[0].as_ref().unwrap().1, &v1);
assert_eq!(&res[1].as_ref().unwrap().1, &v2);
} else {
assert_eq!(&res[0].as_ref().unwrap().1, &v2);
assert_eq!(&res[1].as_ref().unwrap().1, &v1);
}
// batch get commands
let mut req = GetRequest::default();
req.set_context(ctx.clone());
req.set_key(k1.clone());
req.set_version(110);
let consumer = GetConsumer::new();
block_on(storage.batch_get_command(vec![req], vec![1], consumer.clone(), Instant::now()))
.unwrap();
let res = consumer.take_data();
assert_eq!(res.len(), 1);
assert_eq!(res[0].as_ref().unwrap(), &Some(v1.clone()));
// scan
for desc in &[false, true] {
let mut values = vec![
Some((k1.clone(), v1.clone())),
Some((k2.clone(), v2.clone())),
];
let mut key = b"\x00".to_vec();
if *desc {
key = b"\xff".to_vec();
values.reverse();
}
expect_multi_values(
values,
block_on(storage.scan(ctx.clone(), key, None, 1000, 0, 110.into(), false, *desc))
.unwrap(),
);
}
}
#[test]
fn test_async_commit_prewrite() {
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.build()
.unwrap();
let cm = storage.concurrency_manager.clone();
cm.update_max_ts(10.into());
// Optimistic prewrite
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::Prewrite::new(
vec![
Mutation::make_put(Key::from_raw(b"a"), b"v".to_vec()),
Mutation::make_put(Key::from_raw(b"b"), b"v".to_vec()),
Mutation::make_put(Key::from_raw(b"c"), b"v".to_vec()),
],
b"c".to_vec(),
100.into(),
1000,
false,
3,
TimeStamp::default(),
TimeStamp::default(),
Some(vec![b"a".to_vec(), b"b".to_vec()]),
false,
AssertionLevel::Off,
Context::default(),
),
Box::new(move |res| {
tx.send(res).unwrap();
}),
)
.unwrap();
let res = rx.recv().unwrap().unwrap();
assert_eq!(res.min_commit_ts, 101.into());
// Pessimistic prewrite
let (tx, rx) = channel();
storage
.sched_txn_command(
new_acquire_pessimistic_lock_command(
vec![(Key::from_raw(b"d"), false), (Key::from_raw(b"e"), false)],
200,
300,
false,
false,
),
expect_ok_callback(tx, 0),
)
.unwrap();
rx.recv().unwrap();
cm.update_max_ts(1000.into());
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::PrewritePessimistic::new(
vec![
(Mutation::make_put(Key::from_raw(b"d"), b"v".to_vec()), true),
(Mutation::make_put(Key::from_raw(b"e"), b"v".to_vec()), true),
],
b"d".to_vec(),
200.into(),
1000,
400.into(),
2,
401.into(),
TimeStamp::default(),
Some(vec![b"e".to_vec()]),
false,
AssertionLevel::Off,
Context::default(),
),
Box::new(move |res| {
tx.send(res).unwrap();
}),
)
.unwrap();
let res = rx.recv().unwrap().unwrap();
assert_eq!(res.min_commit_ts, 1001.into());
}
    // This is one of a series of tests for overlapped timestamps.
    // Overlapped ts means there are a rollback record and a commit record with the same ts.
    // In this test we check that if the rollback happens before the commit, the two records
    // do not have overlapped ts, which is the expected property.
#[test]
fn test_overlapped_ts_rollback_before_prewrite() {
let engine = TestEngineBuilder::new().build().unwrap();
let storage = TestStorageBuilder::<_, DummyLockManager>::from_engine_and_lock_mgr(
engine.clone(),
DummyLockManager {},
ApiVersion::V1,
)
.build()
.unwrap();
let (k1, v1) = (b"key1", b"v1");
let (k2, v2) = (b"key2", b"v2");
let key1 = Key::from_raw(k1);
let key2 = Key::from_raw(k2);
let value1 = v1.to_vec();
let value2 = v2.to_vec();
let (tx, rx) = channel();
// T1 acquires lock on k1, start_ts = 1, for_update_ts = 3
storage
.sched_txn_command(
commands::AcquirePessimisticLock::new(
vec![(key1.clone(), false)],
k1.to_vec(),
1.into(),
0,
true,
3.into(),
None,
false,
0.into(),
OldValues::default(),
false,
Default::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
// T2 acquires lock on k2, start_ts = 10, for_update_ts = 15
storage
.sched_txn_command(
commands::AcquirePessimisticLock::new(
vec![(key2.clone(), false)],
k2.to_vec(),
10.into(),
0,
true,
15.into(),
None,
false,
0.into(),
OldValues::default(),
false,
Default::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
// T2 pessimistically prewrites, start_ts = 10, lock ttl = 0
storage
.sched_txn_command(
commands::PrewritePessimistic::new(
vec![(Mutation::make_put(key2.clone(), value2.clone()), true)],
k2.to_vec(),
10.into(),
0,
15.into(),
1,
0.into(),
100.into(),
None,
false,
AssertionLevel::Off,
Default::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
// T3 checks T2, which rolls back key2 and pushes max_ts to 10
        // Use a large timestamp to make the lock expire so that key2 will be rolled back.
storage
.sched_txn_command(
commands::CheckTxnStatus::new(
key2.clone(),
10.into(),
((1 << 18) + 8).into(),
((1 << 18) + 8).into(),
true,
false,
false,
Default::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
must_unlocked(&engine, k2);
must_written(&engine, k2, 10, 10, WriteType::Rollback);
// T1 prewrites, start_ts = 1, for_update_ts = 3
storage
.sched_txn_command(
commands::PrewritePessimistic::new(
vec![
(Mutation::make_put(key1.clone(), value1), true),
(Mutation::make_put(key2.clone(), value2), false),
],
k1.to_vec(),
1.into(),
0,
3.into(),
2,
0.into(),
(1 << 19).into(),
Some(vec![k2.to_vec()]),
false,
AssertionLevel::Off,
Default::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
// T1.commit_ts must be pushed to be larger than T2.start_ts (if we resolve T1)
storage
.sched_txn_command(
commands::CheckSecondaryLocks::new(vec![key1, key2], 1.into(), Default::default()),
Box::new(move |res| {
let pr = res.unwrap();
match pr {
SecondaryLocksStatus::Locked(l) => {
let min_commit_ts = l
.iter()
.map(|lock_info| lock_info.min_commit_ts)
.max()
.unwrap();
tx.send(min_commit_ts as i32).unwrap();
}
_ => unreachable!(),
}
}),
)
.unwrap();
assert!(rx.recv().unwrap() > 10);
}
    // This test shows that the scheduler takes the `response_policy` in `WriteResult`
    // seriously, i.e., it calls the callback at the expected stage when writing to the engine.
#[test]
fn test_scheduler_response_policy() {
struct Case<T: 'static + StorageCallbackType + Send> {
expected_writes: Vec<ExpectedWrite>,
command: TypedCommand<T>,
pipelined_pessimistic_lock: bool,
}
impl<T: 'static + StorageCallbackType + Send> Case<T> {
fn run(self) {
let mut builder =
MockEngineBuilder::from_rocks_engine(TestEngineBuilder::new().build().unwrap());
for expected_write in self.expected_writes {
builder = builder.add_expected_write(expected_write)
}
let engine = builder.build();
let mut builder = TestStorageBuilder::from_engine_and_lock_mgr(
engine,
DummyLockManager {},
ApiVersion::V1,
);
builder.config.enable_async_apply_prewrite = true;
if self.pipelined_pessimistic_lock {
builder
.pipelined_pessimistic_lock
.store(true, Ordering::Relaxed);
}
let storage = builder.build().unwrap();
let (tx, rx) = channel();
storage
.sched_txn_command(
self.command,
Box::new(move |res| {
tx.send(res).unwrap();
}),
)
.unwrap();
rx.recv().unwrap().unwrap();
}
}
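        // Each case runs a single command against a `MockEngine` configured with
        // `ExpectedWrite`s, verifying at which stage (proposed/committed/applied) the
        // callbacks fire.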
let keys = [b"k1", b"k2"];
let values = [b"v1", b"v2"];
let mutations = vec![
Mutation::make_put(Key::from_raw(keys[0]), keys[0].to_vec()),
Mutation::make_put(Key::from_raw(keys[1]), values[1].to_vec()),
];
let on_applied_case = Case {
            // This case's command returns ResponsePolicy::OnApplied,
            // as tested by `test_response_stage` in command::prewrite.
expected_writes: vec![
ExpectedWrite::new()
.expect_no_committed_cb()
.expect_no_proposed_cb(),
ExpectedWrite::new()
.expect_no_committed_cb()
.expect_no_proposed_cb(),
],
command: Prewrite::new(
mutations.clone(),
keys[0].to_vec(),
TimeStamp::new(10),
0,
false,
1,
TimeStamp::default(),
TimeStamp::default(),
None,
false,
AssertionLevel::Off,
Context::default(),
),
pipelined_pessimistic_lock: false,
};
        let on_committed_case = Case {
            // This case's command returns ResponsePolicy::OnCommitted,
            // as tested by `test_response_stage` in command::prewrite.
expected_writes: vec![
ExpectedWrite::new().expect_committed_cb(),
ExpectedWrite::new().expect_committed_cb(),
],
command: Prewrite::new(
mutations,
keys[0].to_vec(),
TimeStamp::new(10),
0,
false,
1,
TimeStamp::default(),
TimeStamp::default(),
Some(vec![]),
false,
AssertionLevel::Off,
Context::default(),
),
pipelined_pessimistic_lock: false,
};
let on_proposed_case = Case {
            // This case's command returns ResponsePolicy::OnProposed.
            // It is untested here, but all AcquirePessimisticLock commands should return
            // ResponsePolicy::OnProposed now, and the scheduler is expected to honor
            // OnProposed when pipelined pessimistic lock is enabled.
expected_writes: vec![
ExpectedWrite::new().expect_proposed_cb(),
ExpectedWrite::new().expect_proposed_cb(),
],
command: AcquirePessimisticLock::new(
keys.iter().map(|&it| (Key::from_raw(it), true)).collect(),
keys[0].to_vec(),
TimeStamp::new(10),
0,
false,
TimeStamp::new(11),
None,
false,
TimeStamp::new(12),
OldValues::default(),
false,
Context::default(),
),
pipelined_pessimistic_lock: true,
};
let on_proposed_fallback_case = Case {
            // This case's command returns ResponsePolicy::OnProposed,
            // but when pipelined pessimistic lock is off,
            // the scheduler should fall back to OnApplied.
expected_writes: vec![
ExpectedWrite::new().expect_no_proposed_cb(),
ExpectedWrite::new().expect_no_proposed_cb(),
],
command: AcquirePessimisticLock::new(
keys.iter().map(|&it| (Key::from_raw(it), true)).collect(),
keys[0].to_vec(),
TimeStamp::new(10),
0,
false,
TimeStamp::new(11),
None,
false,
TimeStamp::new(12),
OldValues::default(),
false,
Context::default(),
),
pipelined_pessimistic_lock: false,
};
on_applied_case.run();
        on_committed_case.run();
on_proposed_case.run();
on_proposed_fallback_case.run();
}
#[test]
fn test_resolve_commit_pessimistic_locks() {
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.build()
.unwrap();
let (tx, rx) = channel();
        // Pessimistically lock k1 through k6. After the pessimistic retry, k2 is no longer
        // needed, and the pessimistic lock on k2 is left behind.
storage
.sched_txn_command(
new_acquire_pessimistic_lock_command(
vec![
(Key::from_raw(b"k1"), false),
(Key::from_raw(b"k2"), false),
(Key::from_raw(b"k3"), false),
(Key::from_raw(b"k4"), false),
(Key::from_raw(b"k5"), false),
(Key::from_raw(b"k6"), false),
],
10,
10,
false,
false,
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
        // Prewrite all keys except k2.
storage
.sched_txn_command(
commands::PrewritePessimistic::with_defaults(
vec![
(
Mutation::make_put(Key::from_raw(b"k1"), b"v1".to_vec()),
true,
),
(
Mutation::make_put(Key::from_raw(b"k3"), b"v2".to_vec()),
true,
),
(
Mutation::make_put(Key::from_raw(b"k4"), b"v4".to_vec()),
true,
),
(
Mutation::make_put(Key::from_raw(b"k5"), b"v5".to_vec()),
true,
),
(
Mutation::make_put(Key::from_raw(b"k6"), b"v6".to_vec()),
true,
),
],
b"k1".to_vec(),
10.into(),
10.into(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
// Commit the primary key.
storage
.sched_txn_command(
commands::Commit::new(
vec![Key::from_raw(b"k1")],
10.into(),
20.into(),
Context::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
        // Pessimistically roll back the k2 lock.
        // Non-lite lock resolving on k1 and k2 should report no errors, as the lock on k2 is
        // of the pessimistic type.
must_rollback(&storage.engine, b"k2", 10, false);
let mut temp_map = HashMap::default();
temp_map.insert(10.into(), 20.into());
storage
.sched_txn_command(
commands::ResolveLock::new(
temp_map.clone(),
None,
vec![
(
Key::from_raw(b"k1"),
mvcc::Lock::new(
mvcc::LockType::Put,
b"k1".to_vec(),
10.into(),
20,
Some(b"v1".to_vec()),
10.into(),
0,
11.into(),
),
),
(
Key::from_raw(b"k2"),
mvcc::Lock::new(
mvcc::LockType::Pessimistic,
b"k1".to_vec(),
10.into(),
20,
None,
10.into(),
0,
11.into(),
),
),
],
Context::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
        // Non-lite lock resolving on k3 and k4 should report no errors.
storage
.sched_txn_command(
commands::ResolveLock::new(
temp_map.clone(),
None,
vec![
(
Key::from_raw(b"k3"),
mvcc::Lock::new(
mvcc::LockType::Put,
b"k1".to_vec(),
10.into(),
20,
Some(b"v3".to_vec()),
10.into(),
0,
11.into(),
),
),
(
Key::from_raw(b"k4"),
mvcc::Lock::new(
mvcc::LockType::Put,
b"k1".to_vec(),
10.into(),
20,
Some(b"v4".to_vec()),
10.into(),
0,
11.into(),
),
),
],
Context::default(),
),
expect_ok_callback(tx.clone(), 0),
)
.unwrap();
rx.recv().unwrap();
        // Unlock k6 first.
        // Non-lite lock resolving on k5 and k6 should report an error.
must_rollback(&storage.engine, b"k6", 10, true);
storage
.sched_txn_command(
commands::ResolveLock::new(
temp_map,
None,
vec![
(
Key::from_raw(b"k5"),
mvcc::Lock::new(
mvcc::LockType::Put,
b"k1".to_vec(),
10.into(),
20,
Some(b"v5".to_vec()),
10.into(),
0,
11.into(),
),
),
(
Key::from_raw(b"k6"),
mvcc::Lock::new(
mvcc::LockType::Put,
b"k1".to_vec(),
10.into(),
20,
Some(b"v6".to_vec()),
10.into(),
0,
11.into(),
),
),
],
Context::default(),
),
expect_fail_callback(tx, 6, |e| match e {
Error(box ErrorInner::Txn(TxnError(box TxnErrorInner::Mvcc(mvcc::Error(
box mvcc::ErrorInner::TxnLockNotFound { .. },
))))) => (),
e => panic!("unexpected error chain: {:?}", e),
}),
)
.unwrap();
rx.recv().unwrap();
}
// Test check_api_version.
    // See the following for details:
// * rfc: https://github.com/tikv/rfcs/blob/master/text/0069-api-v2.md.
// * proto: https://github.com/pingcap/kvproto/blob/master/proto/kvrpcpb.proto, enum APIVersion.
#[test]
fn test_check_api_version() {
use error_code::storage::*;
const TIDB_KEY_CASE: &[u8] = b"t_a";
const TXN_KEY_CASE: &[u8] = &[key_prefix::TXN_KEY_PREFIX, 0, b'a'];
const RAW_KEY_CASE: &[u8] = &[key_prefix::RAW_KEY_PREFIX, 0, b'a'];
let test_data = vec![
// storage api_version = V1, for backward compatible.
(
ApiVersion::V1, // storage api_version
ApiVersion::V1, // request api_version
CommandKind::get, // command kind
vec![TIDB_KEY_CASE, RAW_KEY_CASE], // keys
None, // expected error code
),
(
ApiVersion::V1,
ApiVersion::V1,
CommandKind::raw_get,
vec![RAW_KEY_CASE, TXN_KEY_CASE],
None,
),
// storage api_version = V1ttl, allow RawKV request only.
(
ApiVersion::V1ttl,
ApiVersion::V1,
CommandKind::raw_get,
vec![RAW_KEY_CASE],
None,
),
(
ApiVersion::V1ttl,
ApiVersion::V1,
CommandKind::get,
vec![TIDB_KEY_CASE],
Some(API_VERSION_NOT_MATCHED),
),
// storage api_version = V1, reject V2 request.
(
ApiVersion::V1,
ApiVersion::V2,
CommandKind::get,
vec![TIDB_KEY_CASE],
Some(API_VERSION_NOT_MATCHED),
),
// storage api_version = V2.
// backward compatible for TiDB request, and TiDB request only.
(
ApiVersion::V2,
ApiVersion::V1,
CommandKind::get,
vec![TIDB_KEY_CASE, TIDB_KEY_CASE],
None,
),
(
ApiVersion::V2,
ApiVersion::V1,
CommandKind::raw_get,
vec![TIDB_KEY_CASE, TIDB_KEY_CASE],
Some(API_VERSION_NOT_MATCHED),
),
(
ApiVersion::V2,
ApiVersion::V1,
CommandKind::get,
vec![TIDB_KEY_CASE, TXN_KEY_CASE],
Some(INVALID_KEY_PREFIX),
),
(
ApiVersion::V2,
ApiVersion::V1,
CommandKind::get,
vec![RAW_KEY_CASE],
Some(INVALID_KEY_PREFIX),
),
// V2 api validation.
(
ApiVersion::V2,
ApiVersion::V2,
CommandKind::get,
vec![TXN_KEY_CASE],
None,
),
(
ApiVersion::V2,
ApiVersion::V2,
CommandKind::raw_get,
vec![RAW_KEY_CASE, RAW_KEY_CASE],
None,
),
(
ApiVersion::V2,
ApiVersion::V2,
CommandKind::get,
vec![RAW_KEY_CASE, TXN_KEY_CASE],
Some(INVALID_KEY_PREFIX),
),
(
ApiVersion::V2,
ApiVersion::V2,
CommandKind::raw_get,
vec![RAW_KEY_CASE, TXN_KEY_CASE],
Some(INVALID_KEY_PREFIX),
),
(
ApiVersion::V2,
ApiVersion::V2,
CommandKind::get,
vec![TIDB_KEY_CASE],
Some(INVALID_KEY_PREFIX),
),
];
for (i, (storage_api_version, req_api_version, cmd, keys, err)) in
test_data.into_iter().enumerate()
{
let res = Storage::<RocksEngine, DummyLockManager>::check_api_version(
storage_api_version,
req_api_version,
cmd,
keys,
);
if let Some(err) = err {
assert!(res.is_err(), "case {}", i);
assert_eq!(res.unwrap_err().error_code(), err, "case {}", i);
} else {
assert!(res.is_ok(), "case {}", i);
}
}
}
#[test]
fn test_write_in_memory_pessimistic_locks() {
let txn_ext = Arc::new(TxnExt::default());
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.pipelined_pessimistic_lock(true)
.in_memory_pessimistic_lock(true)
.build_for_txn(txn_ext.clone())
.unwrap();
let (tx, rx) = channel();
let k1 = Key::from_raw(b"k1");
storage
.sched_txn_command(
new_acquire_pessimistic_lock_command(
vec![(k1.clone(), false)],
10,
10,
false,
false,
),
expect_ok_callback(tx, 0),
)
.unwrap();
rx.recv().unwrap();
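        // With in-memory pessimistic locks enabled, the acquired lock should live in the
        // `txn_ext` lock table rather than being written to the engine.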
{
let pessimistic_locks = txn_ext.pessimistic_locks.read();
let lock = pessimistic_locks.map.get(&k1).unwrap();
assert_eq!(
lock,
&PessimisticLock {
primary: Box::new(*b"k1"),
start_ts: 10.into(),
ttl: 3000,
for_update_ts: 10.into(),
min_commit_ts: 11.into(),
}
);
}
let (tx, rx) = channel();
// The written in-memory pessimistic lock should be visible, so the new lock request should fail.
storage
.sched_txn_command(
new_acquire_pessimistic_lock_command(
vec![(k1.clone(), false)],
20,
20,
false,
false,
),
Box::new(move |res| {
tx.send(res).unwrap();
}),
)
.unwrap();
// DummyLockManager just drops the callback, so it will fail to receive anything.
assert!(rx.recv().is_err());
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::PrewritePessimistic::new(
vec![(Mutation::make_put(k1.clone(), b"v".to_vec()), true)],
b"k1".to_vec(),
10.into(),
3000,
10.into(),
1,
20.into(),
TimeStamp::default(),
None,
false,
AssertionLevel::Off,
Context::default(),
),
Box::new(move |res| {
tx.send(res).unwrap();
}),
)
.unwrap();
assert!(rx.recv().unwrap().is_ok());
// After prewrite, the memory lock should be removed.
{
let pessimistic_locks = txn_ext.pessimistic_locks.read();
assert!(!pessimistic_locks.map.contains_key(&k1));
}
}
#[test]
fn test_disable_in_memory_pessimistic_locks() {
let txn_ext = Arc::new(TxnExt::default());
let storage = TestStorageBuilder::new(DummyLockManager {}, ApiVersion::V1)
.pipelined_pessimistic_lock(true)
.in_memory_pessimistic_lock(false)
.build_for_txn(txn_ext.clone())
.unwrap();
let (tx, rx) = channel();
let k1 = Key::from_raw(b"k1");
storage
.sched_txn_command(
new_acquire_pessimistic_lock_command(
vec![(k1.clone(), false)],
10,
10,
false,
false,
),
expect_ok_callback(tx, 0),
)
.unwrap();
rx.recv().unwrap();
// When disabling in-memory pessimistic lock, the lock map should remain unchanged.
assert!(txn_ext.pessimistic_locks.read().map.is_empty());
let (tx, rx) = channel();
storage
.sched_txn_command(
commands::PrewritePessimistic::new(
vec![(Mutation::make_put(k1, b"v".to_vec()), true)],
b"k1".to_vec(),
10.into(),
3000,
10.into(),
1,
20.into(),
TimeStamp::default(),
None,
false,
AssertionLevel::Off,
Context::default(),
),
Box::new(move |res| {
tx.send(res).unwrap();
}),
)
.unwrap();
// Prewrite still succeeds
assert!(rx.recv().unwrap().is_ok());
}
}
| 34.539411 | 151 | 0.445704 |
9caf79a42cd5eae6e2428281829f072519035c42 | 3,340 | //! Low-Power Timer wakeup.
#![no_main]
#![no_std]
extern crate panic_semihosting;
use nb::block;
use cortex_m::{asm, peripheral::NVIC};
use cortex_m_rt::entry;
use stm32l0xx_hal::{
prelude::*,
exti::{Exti, DirectLine},
gpio::{
Output,
PushPull,
gpiob::PB,
},
lptim::{
self,
LpTimer,
ClockSrc,
},
pac,
pwr::{
self,
PWR,
},
rcc,
};
#[entry]
fn main() -> ! {
let cp = pac::CorePeripherals::take().unwrap();
let dp = pac::Peripherals::take().unwrap();
let mut scb = cp.SCB;
let mut rcc = dp.RCC.freeze(rcc::Config::msi(rcc::MSIRange::Range0));
let mut exti = Exti::new(dp.EXTI);
let mut pwr = PWR::new(dp.PWR, &mut rcc);
let gpiob = dp.GPIOB.split(&mut rcc);
let mut led = gpiob.pb2.into_push_pull_output().downgrade();
let mut lptim = LpTimer::init_periodic(dp.LPTIM, &mut pwr, &mut rcc, ClockSrc::Lse);
let exti_line = DirectLine::Lptim1;
lptim.enable_interrupts(lptim::Interrupts {
autoreload_match: true,
..lptim::Interrupts::default()
});
exti.listen_direct(exti_line);
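    // Note: with the LSE as clock source, the LPTIM keeps counting in the low-power
    // modes exercised below, so its interrupt can wake the core again.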
// Blink twice to signal the start of the program
blink(&mut led);
blink(&mut led);
    // 1 second of regular run mode
lptim.start(1.hz());
block!(lptim.wait()).unwrap();
Exti::unpend(exti_line);
NVIC::unpend(pac::Interrupt::LPTIM1);
blink(&mut led);
    // 1 second of low-power run mode
pwr.enter_low_power_run_mode(rcc.clocks);
block!(lptim.wait()).unwrap();
pwr.exit_low_power_run_mode();
Exti::unpend(exti_line);
NVIC::unpend(pac::Interrupt::LPTIM1);
blink(&mut led);
    // 1 second of sleep mode
exti.wait_for_irq(
exti_line,
pwr.sleep_mode(&mut scb),
);
lptim.wait().unwrap(); // returns immediately; we just got the interrupt
Exti::unpend(exti_line);
NVIC::unpend(pac::Interrupt::LPTIM1);
blink(&mut led);
    // 1 second of low-power sleep mode
exti.wait_for_irq(
exti_line,
pwr.low_power_sleep_mode(&mut scb, &mut rcc),
);
lptim.wait().unwrap(); // returns immediately; we just got the interrupt
Exti::unpend(exti_line);
NVIC::unpend(pac::Interrupt::LPTIM1);
blink(&mut led);
    // 1 second of stop mode
exti.wait_for_irq(
exti_line,
pwr.stop_mode(
&mut scb,
&mut rcc,
pwr::StopModeConfig {
ultra_low_power: true,
},
),
);
lptim.wait().unwrap(); // returns immediately; we just got the interrupt
blink(&mut led);
// 1 second of standby mode
NVIC::unpend(pac::Interrupt::LPTIM1);
exti.wait_for_irq(
exti_line,
pwr.standby_mode(&mut scb),
);
// The microcontroller resets after leaving standby mode. We should never
// reach this point.
loop {
blink(&mut led);
}
}
fn blink(led: &mut PB<Output<PushPull>>) {
led.set_high().unwrap();
delay();
led.set_low().unwrap();
delay();
}
fn delay() {
// We can't use `Delay`, as that requires a frequency of at least one MHz.
// Given our clock selection, the following loop should give us a nice delay
// when compiled in release mode.
for _ in 0 .. 1_000 { asm::nop() }
}
| 23.194444 | 88 | 0.591317 |
dd345f9ac7a410b88e482bc9ac4ace7d598c9726 | 19,649 | use crate::domain::RegisteredAccount;
use crate::errors::account_management::INSUFFICIENT_STORAGE_FEE;
use crate::errors::asserts::ATTACHED_DEPOSIT_IS_REQUIRED;
use crate::interface::{AccountManagement, AccountStorage, AccountStorageBalance, YoctoNear};
use crate::near::assert_yocto_near_attached;
use crate::*;
use near_sdk::{json_types::ValidAccountId, near_bindgen, Promise};
#[near_bindgen]
impl AccountStorage for Contract {
/// To be compliant with the expected behavior for the Account Storage Standard API (NEP-145):
/// - if overpayment is attached, then it is simply stored in the account storage escrow balance
///
/// NOTE: We never want the function to panic.
#[payable]
fn storage_deposit(&mut self, account_id: Option<ValidAccountId>) -> AccountStorageBalance {
assert!(env::attached_deposit() > 0, ATTACHED_DEPOSIT_IS_REQUIRED);
let account_id = account_id.map_or_else(
|| env::predecessor_account_id(),
|account_id| account_id.as_ref().to_string(),
);
match self.lookup_registered_account(&account_id) {
None => self._register_account(&account_id),
// deposit funds into account storage escrow
Some(mut account) => {
account
.storage_escrow
.credit(env::attached_deposit().into());
self.save_registered_account(&account);
}
}
// track total account storage escrow balance at contract level
self.total_account_storage_escrow += env::attached_deposit().into();
self._storage_balance_of(&account_id)
}
#[payable]
fn storage_withdraw(&mut self, amount: Option<YoctoNear>) -> AccountStorageBalance {
assert_yocto_near_attached();
if let Some(amount) = amount.as_ref() {
assert!(
amount.value() > 0,
"withdraw amount must be greater than zero"
);
}
let mut account = self.predecessor_registered_account();
let account_storage_balance = self.account_storage_balance(&account);
let withdraw_amount = amount.unwrap_or(account_storage_balance.available.clone());
assert!(
withdraw_amount.value() <= account_storage_balance.available.value(),
"ERR: account storage available balance is insufficient"
);
// update balances
let withdraw_amount = withdraw_amount.into();
account.storage_escrow.debit(withdraw_amount);
self.save_registered_account(&account);
self.total_account_storage_escrow -= withdraw_amount;
// get updated account storage balance
let account_storage_balance = self.account_storage_balance(&account);
// transfer the withdrawal amount + the attached yoctoNEAR
Promise::new(env::predecessor_account_id()).transfer(withdraw_amount.value() + 1);
account_storage_balance
}
fn storage_minimum_balance(&self) -> YoctoNear {
self.account_storage_fee()
}
fn storage_balance_of(&self, account_id: ValidAccountId) -> AccountStorageBalance {
self._storage_balance_of(account_id.as_ref())
}
}
impl Contract {
fn _register_account(&mut self, account_id: &str) {
assert!(
env::attached_deposit() >= self.account_storage_fee().value(),
INSUFFICIENT_STORAGE_FEE,
);
let account = Account::new(env::attached_deposit().into());
self.save_registered_account(&RegisteredAccount {
account,
id: Hash::from(account_id),
});
}
    /// Accounts for changes in storage fees, i.e., if storage prices are lowered, then this
/// will be reflected in the available balance.
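    ///
    /// For example (hypothetical numbers): an account that escrowed 10 units while the
    /// storage fee was 10 has an available balance of 0; if the fee later drops to 8,
    /// the available balance becomes 2.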
fn _storage_balance_of(&self, account_id: &str) -> AccountStorageBalance {
match self.lookup_registered_account(account_id) {
None => AccountStorageBalance::default(),
Some(account) => self.account_storage_balance(&account),
}
}
fn account_storage_balance(&self, account: &RegisteredAccount) -> AccountStorageBalance {
AccountStorageBalance {
total: account.storage_escrow.amount().into(),
available: {
let account_storage_fee = self.account_storage_fee().value();
let storage_escrow_amount = account.storage_escrow.amount().value();
if account_storage_fee > storage_escrow_amount {
0.into()
} else {
(storage_escrow_amount - account_storage_fee).into()
}
},
}
}
}
#[cfg(test)]
mod test_storage_deposit {
use super::*;
use crate::test_utils::*;
use near_sdk::{testing_env, MockedBlockchain};
#[test]
fn account_id_not_registered_with_exact_deposit() {
// Arrange
let mut test_context = TestContext::new();
let account_id = test_context.account_id.to_string();
let mut context = test_context.context.clone();
context.attached_deposit = test_context.storage_minimum_balance().value();
testing_env!(context);
// Act
let balance = test_context.storage_deposit(Some(to_valid_account_id(&account_id)));
// Assert
assert_eq!(
balance.total.value(),
test_context.storage_minimum_balance().value()
);
assert_eq!(
balance.total.value(),
test_context.total_account_storage_escrow.value()
);
assert_eq!(balance.available.value(), 0);
assert!(
test_context.account_registered(to_valid_account_id(&account_id)),
"the initial deposit should have registered the account"
);
assert_eq!(
balance,
test_context.storage_balance_of(to_valid_account_id(&account_id))
);
}
#[test]
fn account_id_not_registered_with_extra_deposit() {
// Arrange
let mut test_context = TestContext::new();
let account_id = test_context.account_id.to_string();
let mut context = test_context.context.clone();
context.attached_deposit = test_context.storage_minimum_balance().value() * 3;
testing_env!(context.clone());
// Act
let balance = test_context.storage_deposit(Some(to_valid_account_id(&account_id)));
// Assert
assert_eq!(balance.total.value(), context.attached_deposit);
assert_eq!(
balance.total.value(),
test_context.total_account_storage_escrow.value()
);
assert_eq!(
balance.available.value(),
test_context.storage_minimum_balance().value() * 2
);
assert!(
test_context.account_registered(to_valid_account_id(&account_id)),
"the initial deposit should have registered the account"
);
assert_eq!(
balance,
test_context.storage_balance_of(to_valid_account_id(&account_id))
);
}
#[test]
#[should_panic(expected = "attached deposit is required")]
fn account_id_not_registered_with_no_deposit() {
// Arrange
let mut test_context = TestContext::new();
let account_id = test_context.account_id.to_string();
// Act
test_context.storage_deposit(Some(to_valid_account_id(&account_id)));
}
#[test]
#[should_panic(expected = "sufficient deposit is required to pay for account storage fees")]
fn account_id_not_registered_with_insufficient_deposit() {
// Arrange
let mut test_context = TestContext::new();
let account_id = test_context.account_id.to_string();
let mut context = test_context.context.clone();
context.attached_deposit = test_context.storage_minimum_balance().value() - 1;
testing_env!(context.clone());
// Act
test_context.storage_deposit(Some(to_valid_account_id(&account_id)));
}
//
#[test]
fn predecessor_account_id_not_registered_with_exact_deposit() {
// Arrange
let mut test_context = TestContext::new();
let account_id = test_context.account_id.to_string();
let mut context = test_context.context.clone();
context.attached_deposit = test_context.storage_minimum_balance().value();
testing_env!(context);
// Act
let balance = test_context.storage_deposit(None);
// Assert
assert_eq!(
balance.total.value(),
test_context.storage_minimum_balance().value()
);
assert_eq!(
balance.total.value(),
test_context.total_account_storage_escrow.value()
);
assert_eq!(balance.available.value(), 0);
assert!(
test_context.account_registered(to_valid_account_id(&account_id)),
"the initial deposit should have registered the account"
);
assert_eq!(
balance,
test_context.storage_balance_of(to_valid_account_id(&account_id))
);
}
#[test]
fn predecessor_account_id_not_registered_with_extra_deposit() {
// Arrange
let mut test_context = TestContext::new();
let account_id = test_context.account_id.to_string();
let mut context = test_context.context.clone();
context.attached_deposit = test_context.storage_minimum_balance().value() * 3;
testing_env!(context.clone());
// Act
let balance = test_context.storage_deposit(None);
// Assert
assert_eq!(balance.total.value(), context.attached_deposit);
assert_eq!(
balance.total.value(),
test_context.total_account_storage_escrow.value()
);
assert_eq!(
balance.available.value(),
test_context.storage_minimum_balance().value() * 2
);
assert!(
test_context.account_registered(to_valid_account_id(&account_id)),
"the initial deposit should have registered the account"
);
assert_eq!(
balance,
test_context.storage_balance_of(to_valid_account_id(&account_id))
);
}
#[test]
#[should_panic(expected = "attached deposit is required")]
fn predecessor_account_id_not_registered_with_no_deposit() {
// Arrange
let mut test_context = TestContext::new();
// Act
test_context.storage_deposit(None);
}
#[test]
#[should_panic(expected = "sufficient deposit is required to pay for account storage fees")]
fn predecessor_account_id_not_registered_with_insufficient_deposit() {
// Arrange
let mut test_context = TestContext::new();
let mut context = test_context.context.clone();
context.attached_deposit = test_context.storage_minimum_balance().value() - 1;
testing_env!(context.clone());
// Act
test_context.storage_deposit(None);
}
//
#[test]
#[should_panic(expected = "attached deposit is required")]
fn account_id_registered_with_no_deposit() {
// Arrange
let mut test_context = TestContext::with_registered_account();
let account_id = test_context.account_id.to_string();
test_context.storage_deposit(Some(to_valid_account_id(&account_id)));
}
#[test]
fn account_id_registered_with_deposit() {
// Arrange
let mut test_context = TestContext::with_registered_account();
let account_id = test_context.account_id.to_string();
let mut context = test_context.context.clone();
context.attached_deposit = 1;
testing_env!(context.clone());
// Act
let balance = test_context.storage_deposit(Some(to_valid_account_id(&account_id)));
// Assert
assert_eq!(balance.available.value(), context.attached_deposit);
assert_eq!(
test_context.total_account_storage_escrow.value(),
test_context.account_storage_fee().value() + context.attached_deposit
);
assert_eq!(
balance,
test_context.storage_balance_of(to_valid_account_id(&account_id))
);
}
//
#[test]
#[should_panic(expected = "attached deposit is required")]
fn predecessor_account_id_registered_with_no_deposit() {
// Arrange
let mut test_context = TestContext::with_registered_account();
test_context.storage_deposit(None);
}
#[test]
fn predecessor_account_id_registered_with_deposit() {
// Arrange
let mut test_context = TestContext::with_registered_account();
let account_id = test_context.account_id.to_string();
let mut context = test_context.context.clone();
context.attached_deposit = 1;
testing_env!(context.clone());
// Act
let balance = test_context.storage_deposit(None);
// Assert
assert_eq!(balance.available.value(), context.attached_deposit);
assert_eq!(
test_context.total_account_storage_escrow.value(),
test_context.account_storage_fee().value() + context.attached_deposit
);
assert_eq!(
balance,
test_context.storage_balance_of(to_valid_account_id(&account_id))
);
}
}
#[cfg(test)]
mod test_storage_withdraw {
use super::*;
use crate::near::YOCTO;
use crate::test_utils::*;
use near_sdk::{testing_env, MockedBlockchain};
#[test]
#[should_panic(expected = "exactly 1 yoctoNEAR must be attached")]
fn no_amount_no_attached_deposit() {
// Arrange
let mut test_context = TestContext::with_registered_account();
// Act
test_context.storage_withdraw(None);
}
#[test]
fn no_amount_specified_has_available_balance() {
// Arrange
let mut test_context = TestContext::with_registered_account();
let mut context = test_context.context.clone();
context.attached_deposit = YOCTO;
testing_env!(context);
test_context.storage_deposit(None);
let mut context = test_context.context.clone();
context.attached_deposit = 1;
testing_env!(context.clone());
// Act
let balance = test_context.storage_withdraw(None);
// Assert
assert_eq!(balance.total, test_context.storage_minimum_balance());
assert_eq!(balance.available.value(), 0);
let receipts = deserialize_receipts();
assert_eq!(receipts.len(), 1);
let receipt = &receipts[0];
assert_eq!(receipt.receiver_id, context.predecessor_account_id);
match &receipt.actions[0] {
Action::Transfer { deposit } => assert_eq!(*deposit, YOCTO + 1),
_ => panic!("expected transfer"),
}
}
#[test]
fn no_amount_specified_has_zero_available_balance() {
// Arrange
let mut test_context = TestContext::with_registered_account();
let mut context = test_context.context.clone();
context.attached_deposit = 1;
testing_env!(context.clone());
// Act
let balance = test_context.storage_withdraw(None);
// Assert
assert_eq!(balance.total, test_context.storage_minimum_balance());
assert_eq!(balance.available.value(), 0);
let receipts = deserialize_receipts();
assert_eq!(receipts.len(), 1);
let receipt = &receipts[0];
assert_eq!(receipt.receiver_id, context.predecessor_account_id);
match &receipt.actions[0] {
Action::Transfer { deposit } => assert_eq!(*deposit, 1),
_ => panic!("expected transfer"),
}
}
#[test]
#[should_panic(expected = "account is not registered")]
fn no_amount_account_not_registered() {
// Arrange
let mut test_context = TestContext::new();
let mut context = test_context.context.clone();
context.attached_deposit = 1;
testing_env!(context.clone());
// Act
test_context.storage_withdraw(None);
}
//
#[test]
#[should_panic(expected = "exactly 1 yoctoNEAR must be attached")]
fn amount_specified_with_no_attached_deposit() {
// Arrange
let mut test_context = TestContext::with_registered_account();
// Act
test_context.storage_withdraw(Some(100.into()));
}
#[test]
fn amount_less_than_available_balance() {
// Arrange
let mut test_context = TestContext::with_registered_account();
let mut context = test_context.context.clone();
context.attached_deposit = 300;
testing_env!(context.clone());
test_context.storage_deposit(None);
let mut context = test_context.context.clone();
context.attached_deposit = 1;
testing_env!(context.clone());
// Act
let balance = test_context.storage_withdraw(Some(100.into()));
assert_eq!(balance.available.value(), 200);
let receipts = deserialize_receipts();
assert_eq!(receipts.len(), 1);
let receipt = &receipts[0];
assert_eq!(receipt.receiver_id, context.predecessor_account_id);
match &receipt.actions[0] {
Action::Transfer { deposit } => assert_eq!(*deposit, 101),
_ => panic!("expected transfer"),
}
}
#[test]
#[should_panic(expected = "ERR: account storage available balance is insufficient")]
fn amount_more_than_available_balance() {
// Arrange
let mut test_context = TestContext::with_registered_account();
let mut context = test_context.context.clone();
context.attached_deposit = 1;
testing_env!(context.clone());
// Act
test_context.storage_withdraw(Some(100.into()));
}
#[test]
fn amount_matches_available_balance() {
// Arrange
let mut test_context = TestContext::with_registered_account();
let mut context = test_context.context.clone();
context.attached_deposit = 100;
testing_env!(context.clone());
test_context.storage_deposit(None);
let mut context = test_context.context.clone();
context.attached_deposit = 1;
testing_env!(context.clone());
// Act
let balance = test_context.storage_withdraw(Some(100.into()));
assert_eq!(balance.available.value(), 0);
let receipts = deserialize_receipts();
assert_eq!(receipts.len(), 1);
let receipt = &receipts[0];
assert_eq!(receipt.receiver_id, context.predecessor_account_id);
match &receipt.actions[0] {
Action::Transfer { deposit } => assert_eq!(*deposit, 101),
_ => panic!("expected transfer"),
}
}
#[test]
#[should_panic(expected = "withdraw amount must be greater than zero")]
fn amount_is_zero() {
// Arrange
let mut test_context = TestContext::with_registered_account();
let mut context = test_context.context.clone();
context.attached_deposit = 1;
testing_env!(context.clone());
// Act
test_context.storage_withdraw(Some(0.into()));
}
}
#[cfg(test)]
mod test_storage_minimum_balance {
use super::*;
use crate::test_utils::*;
#[test]
fn storage_min_balance_should_match_account_storage_fee() {
let test_context = TestContext::new();
assert_eq!(
test_context.account_storage_fee(),
test_context.storage_minimum_balance()
);
}
}
| 33.645548 | 100 | 0.633009 |
226747289e117661ef62d1608fb5dbc1b4178813 | 588 | extern crate protobuf_codegen_pure;
use std::io::Write;
static MOD_RS: &[u8] = b"
/// Generated from protobuf.
pub mod fileformat;
/// Generated from protobuf.
pub mod osmformat;
";
fn main() -> Result<(), Box<dyn std::error::Error>> {
let out_dir = std::env::var("OUT_DIR")?;
protobuf_codegen_pure::Codegen::new()
.out_dir(&out_dir)
.inputs(&["protos/fileformat.proto", "protos/osmformat.proto"])
.include("protos")
.run()
.expect("Codegen failed.");
std::fs::File::create(out_dir + "/mod.rs")?.write_all(MOD_RS)?;
Ok(())
}
| 21.777778 | 71 | 0.613946 |
e4f675c025f23d66ca6066a9f6b11d79de2b5fe6 | 17,211 | // Copyright 2016 Martin Ankerl.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Differential Evolution optimizer for rust.
//!
//! Simple and powerful global optimization using a
//! [Self-Adapting Differential Evolution](http://bit.ly/2cMPiMj)
//! for Rust. See Wikipedia's article on
//! [Differential Evolution](https://en.wikipedia.org/wiki/Differential_evolution)
//! for more information.
//!
//! ## Usage
//!
//! Add this to your `Cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! differential-evolution = "*"
//! ```
//!
//! and this to your crate root:
//!
//! ```rust
//! extern crate differential_evolution;
//! ```
//!
//! ## Examples
//!
//! Differential Evolution is a global optimization algorithm that
//! tries to iteratively improve candidate solutions with regards to
//! a user-defined cost function.
//!
//! ### Quick Start: Sum of Squares
//! This example finds the minimum of a simple 5-dimensional function.
//!
//! ```
//! extern crate differential_evolution;
//!
//! use differential_evolution::self_adaptive_de;
//!
//! fn main() {
//!     // create a self adaptive DE with an initial search area
//! // from -10 to 10 in 5 dimensions.
//! let mut de = self_adaptive_de(vec![(-10.0, 10.0); 5], |pos| {
//! // cost function to minimize: sum of squares
//! pos.iter().fold(0.0, |sum, x| sum + x*x)
//! });
//!
//! // perform 10000 cost evaluations
//! de.iter().nth(10000);
//!
//! // show the result
//! let (cost, pos) = de.best().unwrap();
//! println!("cost: {}", cost);
//! println!("pos: {:?}", pos);
//! }
//! ```
//!
//! ### Tutorial: Rastrigin
//!
//! The population supports an `Iterator` for evaluating. Each call
//! of `next()` evaluates the cost function and returns the
//! fitness value of the current global best. This way it is possible
//! to use all the iterator's features for optimizing. Here are a few
//! examples.
//!
//! Let's say we have the [Rastrigin](https://en.wikipedia.org/wiki/Rastrigin_function)
//! cost function:
//!
//! ```
//! use std::f32::consts::PI;
//!
//! fn rastrigin(pos: &[f32]) -> f32 {
//! pos.iter().fold(0.0, |sum, x|
//! sum + x * x - 10.0 * (2.0 * PI * x).cos() + 10.0)
//! }
//! ```
//!
//! We'd like to search for the minimum in the range -5.12 to 5.12, for
//! 30 dimensions:
//!
//! ```
//! let initial_min_max = vec![(-5.12, 5.12); 30];
//! ```
//!
//! We can create a self adaptive DE, and search until the cost
//! reaches a given minimum:
//!
//! ```
//! # use differential_evolution::self_adaptive_de;
//! # fn rastrigin(pos: &[f32]) -> f32 { 0.0 }
//! # let initial_min_max = vec![(-5.12, 5.12); 2];
//! let mut de = self_adaptive_de(initial_min_max, rastrigin);
//! de.iter().find(|&cost| cost < 0.1);
//! ```
//!
//! This is a bit dangerous though, because the optimizer might never reach that minimum.
//! It is safer to just let it run for a given number of evaluations:
//!
//! ```
//! # use differential_evolution::self_adaptive_de;
//! # fn rastrigin(pos: &[f32]) -> f32 { 0.0 }
//! # let initial_min_max = vec![(-5.0, 5.0); 2];
//! let mut de = self_adaptive_de(initial_min_max, rastrigin);
//! de.iter().nth(10000);
//! ```
//!
//! It is possible to do some smart combinations: run until the cost is below a threshold, or until
//! the maximum number of iterations have been reached:
//!
//! ```
//! # use differential_evolution::self_adaptive_de;
//! # fn sum_of_squares(pos: &[f32]) -> f32 { 0.0 }
//! # let initial_min_max = vec![(-5.12, 5.12); 2];
//! let mut de = self_adaptive_de(initial_min_max, sum_of_squares);
//! de.iter().take(100000).find(|&cost| cost < 0.1);
//! ```
//!
//! When you are finished with iterating, you can extract the best solution found so far with
//! `de.best()`. This retrieves the minimum cost and the position vector that has lead to this
//! cost:
//!
//! ```
//! # use differential_evolution::self_adaptive_de;
//! # fn sum_of_squares(pos: &[f32]) -> f32 { 0.0 }
//! # let initial_min_max = vec![(-5.12, 5.12); 2];
//! # let mut de = self_adaptive_de(initial_min_max, sum_of_squares);
//! # de.iter().nth(1000);
//! let (cost, pos) = de.best().unwrap();
//! println!("{} best cost", cost);
//! println!("{:?} best position", pos);
//! ```
//!
//! # Similar Crates
//!
//! - [darwin-rs](https://github.com/willi-kappler/darwin-rs)
//! - [Rs Genetic](https://github.com/m-decoster/RsGenetic)
//!
extern crate rand;
use rand::{distributions::Uniform, prelude::*};
use rand_xorshift::XorShiftRng;
/// Holds all settings for the self adaptive differential evolution
/// algorithm.
pub struct Settings<F, R, C>
where
F: Fn(&[f32]) -> C,
R: rand::Rng,
C: PartialOrd + Clone,
{
    /// The population is initialized with uniform random values
    /// for each dimension, drawn within the bounds of the corresponding tuple.
    /// Beware that this is only the initial state; the DE
    /// will search outside of this initial search space.
pub min_max_pos: Vec<(f32, f32)>,
    /// Minimum and maximum value for `cr`, the crossover control parameter.
    /// A good value is (0, 1), so `cr` is randomly chosen from the full
    /// range of usable CRs, `[0, 1)`.
pub cr_min_max: (f32, f32),
    /// Probability to change the `cr` value of an individual. Tests with
    /// 0.05, 0.1, 0.2 and 0.3 did not show any significantly different
    /// results, so 0.1 seems to be a reasonable choice.
pub cr_change_probability: f32,
/// Minimum and maximum value for `f`, the amplification factor of the
/// difference vector. DE is more sensitive to `F` than it is to `CR`.
    /// In the literature, `F` is rarely greater than 1. If `F = 0`, the evolution
    /// degenerates to a crossover with no mutation, so a reasonable choice
    /// for `f_min_max` seems to be (0.1, 1.0).
pub f_min_max: (f32, f32),
/// Probability to change the `f` value of an individual. See
/// `cr_change_probability`, 0.1 is a reasonable choice.
pub f_change_probability: f32,
/// Number of individuals for the DE. In many benchmarks, a size of
/// 100 is used. The choice somewhat depends on the difficulty and the
/// dimensionality of the problem to solve. Reasonable choices seem
/// between 20 and 200.
pub pop_size: usize,
/// Random number generator used to generate mutations. If the fitness
/// function is fairly fast, the random number generator should be
/// very fast as well. Since it is not necessary to use a cryptographic
/// secure RNG, the best (fastest) choice is to use `rand::weak_rng()`.
pub rng: R,
/// The cost function to minimize. This takes an `&[f32]` and returns
/// the calculated cost for this position as `C`. This should be
/// fast to evaluate, and always produce the same result for the same
/// input.
pub cost_function: F,
}
impl<F, C> Settings<F, XorShiftRng, C>
where
F: Fn(&[f32]) -> C,
C: PartialOrd + Clone,
{
/// Creates default settings for the differential evolution. It uses the default
/// parameters as defined in the paper "Self-Adapting Control Parameters in Differential
/// Evolution: A Comparative Study on Numerical Benchmark Problems", with a population
    /// size of 100. It also uses `XorShiftRng`, a very fast random number
    /// generator.
///
/// For most problems this should be a fairly good parameter set.
pub fn default(min_max_pos: Vec<(f32, f32)>, cost_function: F) -> Settings<F, XorShiftRng, C> {
Settings {
min_max_pos: min_max_pos,
cr_min_max: (0.0, 1.0),
cr_change_probability: 0.1,
f_min_max: (0.1, 1.0),
f_change_probability: 0.1,
pop_size: 100,
rng: XorShiftRng::seed_from_u64(2),
cost_function: cost_function,
}
}
}
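// A minimal sketch of hand-tuning the settings (the concrete values below
// are illustrative assumptions, not recommendations beyond the field docs
// above):
//
//     let mut settings = Settings::default(vec![(-10.0, 10.0); 5], |pos: &[f32]| {
//         pos.iter().map(|x| x * x).sum::<f32>()
//     });
//     settings.pop_size = 50;
//     settings.f_min_max = (0.2, 0.9);
//     let mut de = Population::new(settings);
//     de.iter().nth(1_000);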
/// Internally used struct for an individual.
#[derive(Clone)]
struct Individual<C>
where
C: PartialOrd + Clone,
{
pos: Vec<f32>,
// the lower, the better.
cost: Option<C>,
// control parameters
cr: f32,
f: f32,
}
/// Holds the population for the differential evolution based on the given settings.
pub struct Population<F, R, C>
where
F: Fn(&[f32]) -> C,
R: rand::Rng,
C: PartialOrd + Clone,
{
curr: Vec<Individual<C>>,
best: Vec<Individual<C>>,
settings: Settings<F, R, C>,
// index of global best individual. Might be in best or in curr.
best_idx: Option<usize>,
// cost value of the global best individual, for quick access
best_cost_cache: Option<C>,
num_cost_evaluations: usize,
dim: usize,
between_popsize: Uniform<usize>,
between_dim: Uniform<usize>,
between_cr: Uniform<f32>,
between_f: Uniform<f32>,
pop_countdown: usize,
}
/// Convenience function to create a fully configured self adaptive
/// differential evolution population.
pub fn self_adaptive_de<F, C>(
min_max_pos: Vec<(f32, f32)>,
cost_function: F,
) -> Population<F, XorShiftRng, C>
where
F: Fn(&[f32]) -> C,
C: PartialOrd + Clone,
{
Population::new(Settings::default(min_max_pos, cost_function))
}
impl<F, R, C> Population<F, R, C>
where
F: Fn(&[f32]) -> C,
R: rand::Rng,
C: PartialOrd + Clone,
{
/// Creates a new population based on the given settings.
pub fn new(s: Settings<F, R, C>) -> Population<F, R, C> {
assert!(
s.min_max_pos.len() >= 1,
"need at least one element to optimize"
);
// create a vector of randomly initialized individuals for current.
let dim = s.min_max_pos.len();
// Empty individual, with no cost value (yet)
let dummy_individual = Individual {
pos: vec![0.0; dim],
cost: None,
cr: 0.0,
f: 0.0,
};
// creates all the empty individuals
let mut pop = Population {
curr: vec![dummy_individual.clone(); s.pop_size],
best: vec![dummy_individual; s.pop_size],
best_idx: None,
best_cost_cache: None,
num_cost_evaluations: 0,
dim: dim,
pop_countdown: s.pop_size,
between_popsize: Uniform::new(0, s.pop_size),
between_dim: Uniform::new(0, dim),
between_cr: Uniform::new(s.cr_min_max.0, s.cr_min_max.1),
between_f: Uniform::new(s.f_min_max.0, s.f_min_max.1),
settings: s,
};
for ind in &mut pop.curr {
// init control parameters
ind.cr = pop.between_cr.sample(&mut pop.settings.rng);
ind.f = pop.between_f.sample(&mut pop.settings.rng);
// random range for each dimension
for d in 0..dim {
let between_min_max =
Uniform::new(pop.settings.min_max_pos[d].0, pop.settings.min_max_pos[d].1);
ind.pos[d] = between_min_max.sample(&mut pop.settings.rng);
}
}
pop
}
/// Loops through each individual and updates its personal best.
fn update_best(&mut self) {
for i in 0..self.curr.len() {
let curr = &mut self.curr[i];
let best = &mut self.best[i];
// we use <= here, so that the individual moves even if the cost
// stays the same.
let mut is_swapping = best.cost.is_none();
if !is_swapping {
if let Some(ref c) = curr.cost {
if let Some(ref b) = best.cost {
is_swapping = c <= b;
}
}
}
if is_swapping {
// replace individual's best. swap is *much* faster than clone.
std::mem::swap(curr, best);
}
}
}
// Modifies all the curr positions. This needs a lot of random numbers, so
// for a fast cost function it is important to use a fast random number
// generator.
fn update_positions(&mut self) {
let rng = &mut self.settings.rng;
for i in 0..self.curr.len() {
// sample 3 different individuals
let id1 = self.between_popsize.sample(rng);
let mut id2 = self.between_popsize.sample(rng);
while id2 == id1 {
id2 = self.between_popsize.sample(rng);
}
let mut id3 = self.between_popsize.sample(rng);
while id3 == id1 || id3 == id2 {
id3 = self.between_popsize.sample(rng);
}
let curr = &mut self.curr[i];
let best = &self.best[i];
// see "Self-Adapting Control Parameters in Differential Evolution:
// A Comparative Study on Numerical Benchmark Problems"
if rng.gen::<f32>() < self.settings.cr_change_probability {
curr.cr = self.between_cr.sample(rng);
} else {
curr.cr = best.cr;
}
if rng.gen::<f32>() < self.settings.f_change_probability {
curr.f = self.between_f.sample(rng);
} else {
curr.f = best.f;
}
let curr_pos = &mut curr.pos;
let best_pos = &best.pos;
let best1_pos = &self.best[id1].pos;
let best2_pos = &self.best[id2].pos;
let best3_pos = &self.best[id3].pos;
let forced_mutation_dim = self.between_dim.sample(rng);
// This implements the DE/rand/1/bin, the most widely used algorithm.
// See "A Comparative Study of Differential Evolution Variants for
// Global Optimization (2006)".
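            // In DE notation, the mutant coordinate is
            //   v[d] = x_r3[d] + F * (x_r1[d] - x_r2[d]),
            // taken with probability CR, and forced for one random
            // dimension so the trial vector always differs from the base.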
for d in 0..self.dim {
if d == forced_mutation_dim || rng.gen::<f32>() < curr.cr {
curr_pos[d] = best3_pos[d] + curr.f * (best1_pos[d] - best2_pos[d]);
} else {
curr_pos[d] = best_pos[d];
}
}
// reset cost, has to be updated by the user.
curr.cost = None;
}
}
/// Gets a tuple of the best cost and best position found so far.
pub fn best(&self) -> Option<(&C, &[f32])> {
if let Some(bi) = self.best_idx {
let curr = &self.curr[bi];
let best = &self.best[bi];
if curr.cost.is_none() {
return Some((best.cost.as_ref().unwrap(), &best.pos));
}
if best.cost.is_none() {
return Some((curr.cost.as_ref().unwrap(), &curr.pos));
}
if curr.cost.as_ref().unwrap() < best.cost.as_ref().unwrap() {
return Some((curr.cost.as_ref().unwrap(), &curr.pos));
}
return Some((best.cost.as_ref().unwrap(), &best.pos));
} else {
None
}
}
/// Gets the total number of times the cost function has been evaluated.
pub fn num_cost_evaluations(&self) -> usize {
self.num_cost_evaluations
}
/// Performs a single cost evaluation, and updates best positions and
/// evolves the population if the whole population has been evaluated.
/// Returns the cost value of the current best solution found.
pub fn eval(&mut self) {
if 0 == self.pop_countdown {
// if the whole pop has been evaluated, evolve it to update positions.
// this also copies curr to best, if better.
self.update_best();
self.update_positions();
self.pop_countdown = self.curr.len();
}
// perform a single fitness evaluation
self.pop_countdown -= 1;
let curr = &mut self.curr[self.pop_countdown];
let cost = (self.settings.cost_function)(&curr.pos);
curr.cost = Some(cost);
self.num_cost_evaluations += 1;
// see if we have improved the global best
if self.best_cost_cache.is_none()
|| curr.cost.as_ref().unwrap() < self.best_cost_cache.as_ref().unwrap()
{
self.best_cost_cache = curr.cost.clone();
self.best_idx = Some(self.pop_countdown);
}
}
/// Gets an iterator for this population. Each call to `next()`
/// performs one cost evaluation.
pub fn iter(&mut self) -> PopIter<F, R, C> {
PopIter { pop: self }
}
}
/// Iterator for the differential evolution; each call to `next()`
/// performs a single cost evaluation.
pub struct PopIter<'a, F, R, C>
where
F: 'a + Fn(&[f32]) -> C,
R: 'a + rand::Rng,
C: 'a + PartialOrd + Clone,
{
pop: &'a mut Population<F, R, C>,
}
impl<'a, F, R, C> Iterator for PopIter<'a, F, R, C>
where
F: 'a + Fn(&[f32]) -> C,
R: 'a + rand::Rng,
C: PartialOrd + Clone,
{
type Item = C;
/// Simply forwards to the population's `eval()`.
fn next(&mut self) -> Option<Self::Item> {
self.pop.eval();
self.pop.best_cost_cache.clone()
}
}
#[cfg(test)]
mod tests {
// TODO
}
| 33.034549 | 99 | 0.589739 |
1d23760bd1480e4672633da850473c4910db243d | 2,956 | // run-rustfix
#![deny(clippy::useless_asref)]
#![allow(clippy::trivially_copy_pass_by_ref)]
use std::fmt::Debug;
struct FakeAsRef;
#[allow(clippy::should_implement_trait)]
impl FakeAsRef {
fn as_ref(&self) -> &Self {
self
}
}
struct MoreRef;
impl<'a, 'b, 'c> AsRef<&'a &'b &'c MoreRef> for MoreRef {
fn as_ref(&self) -> &&'a &'b &'c MoreRef {
&&&&MoreRef
}
}
fn foo_rstr(x: &str) {
println!("{:?}", x);
}
fn foo_rslice(x: &[i32]) {
println!("{:?}", x);
}
fn foo_mrslice(x: &mut [i32]) {
println!("{:?}", x);
}
fn foo_rrrrmr(_: &&&&MoreRef) {
println!("so many refs");
}
fn not_ok() {
let rstr: &str = "hello";
let mut mrslice: &mut [i32] = &mut [1, 2, 3];
{
let rslice: &[i32] = &*mrslice;
foo_rstr(rstr.as_ref());
foo_rstr(rstr);
foo_rslice(rslice.as_ref());
foo_rslice(rslice);
}
{
foo_mrslice(mrslice.as_mut());
foo_mrslice(mrslice);
foo_rslice(mrslice.as_ref());
foo_rslice(mrslice);
}
{
let rrrrrstr = &&&&rstr;
let rrrrrslice = &&&&&*mrslice;
foo_rslice(rrrrrslice.as_ref());
foo_rslice(rrrrrslice);
foo_rstr(rrrrrstr.as_ref());
foo_rstr(rrrrrstr);
}
{
let mrrrrrslice = &mut &mut &mut &mut mrslice;
foo_mrslice(mrrrrrslice.as_mut());
foo_mrslice(mrrrrrslice);
foo_rslice(mrrrrrslice.as_ref());
foo_rslice(mrrrrrslice);
}
#[allow(unused_parens, clippy::double_parens)]
foo_rrrrmr((&&&&MoreRef).as_ref());
generic_not_ok(mrslice);
generic_ok(mrslice);
}
fn ok() {
let string = "hello".to_owned();
let mut arr = [1, 2, 3];
let mut vec = vec![1, 2, 3];
{
foo_rstr(string.as_ref());
foo_rslice(arr.as_ref());
foo_rslice(vec.as_ref());
}
{
foo_mrslice(arr.as_mut());
foo_mrslice(vec.as_mut());
}
{
let rrrrstring = &&&&string;
let rrrrarr = &&&&arr;
let rrrrvec = &&&&vec;
foo_rstr(rrrrstring.as_ref());
foo_rslice(rrrrarr.as_ref());
foo_rslice(rrrrvec.as_ref());
}
{
let mrrrrarr = &mut &mut &mut &mut arr;
let mrrrrvec = &mut &mut &mut &mut vec;
foo_mrslice(mrrrrarr.as_mut());
foo_mrslice(mrrrrvec.as_mut());
}
FakeAsRef.as_ref();
foo_rrrrmr(MoreRef.as_ref());
generic_not_ok(arr.as_mut());
generic_ok(&mut arr);
}
fn foo_mrt<T: Debug + ?Sized>(t: &mut T) {
println!("{:?}", t);
}
fn foo_rt<T: Debug + ?Sized>(t: &T) {
println!("{:?}", t);
}
fn generic_not_ok<T: AsMut<T> + AsRef<T> + Debug + ?Sized>(mrt: &mut T) {
foo_mrt(mrt.as_mut());
foo_mrt(mrt);
foo_rt(mrt.as_ref());
foo_rt(mrt);
}
fn generic_ok<U: AsMut<T> + AsRef<T> + ?Sized, T: Debug + ?Sized>(mru: &mut U) {
foo_mrt(mru.as_mut());
foo_rt(mru.as_ref());
}
fn main() {
not_ok();
ok();
}
| 21.576642 | 80 | 0.544993 |
4b4eef517cd75407ac46aa98524aa4ad40c70952 | 3,435 | // Copyright 2021 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Verify sirenia-rpc_macros works for the intended use case.
extern crate sirenia_rpc_macros;
use std::fmt::{self, Display, Formatter};
use std::thread::spawn;
use anyhow::anyhow;
use assert_matches::assert_matches;
use libsirenia::rpc::RpcDispatcher;
use libsirenia::transport::create_transport_from_pipes;
use serde::{Deserialize, Serialize};
use sirenia_rpc_macros::sirenia_rpc;
const MAGIC_NUMBER: i32 = 42;
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum Error {
MagicNumber,
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
use Error::*;
match self {
MagicNumber => write!(f, "You entered the magic number."),
}
}
}
impl std::error::Error for Error {}
#[sirenia_rpc(error = "Error")]
pub trait TestRpc<E> {
fn checked_neg(&mut self, input: i32) -> Result<Option<i32>, E>;
fn checked_add(&mut self, addend_a: i32, addend_b: i32) -> Result<Option<i32>, E>;
#[error()]
fn terminate(&mut self) -> Result<(), E>;
}
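// The `sirenia_rpc` attribute macro is expected to generate the
// `TestRpcClient` and `TestRpcServer` items exercised below; this is
// inferred from their use in this test rather than stated here.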
#[derive(Clone)]
struct TestRpcServerImpl {}
impl TestRpc<anyhow::Error> for TestRpcServerImpl {
fn checked_neg(&mut self, input: i32) -> Result<Option<i32>, anyhow::Error> {
if input == MAGIC_NUMBER {
Err(Error::MagicNumber.into())
} else {
Ok(input.checked_neg())
}
}
fn checked_add(&mut self, addend_a: i32, addend_b: i32) -> Result<Option<i32>, anyhow::Error> {
if addend_a == MAGIC_NUMBER || addend_b == MAGIC_NUMBER {
Err(Error::MagicNumber.into())
} else {
Ok(addend_a.checked_add(addend_b))
}
}
fn terminate(&mut self) -> Result<(), anyhow::Error> {
Err(anyhow!("Done"))
}
}
#[test]
fn smoke_test() {
let (server_transport, client_transport) = create_transport_from_pipes().unwrap();
let handler: Box<dyn TestRpcServer> = Box::new(TestRpcServerImpl {});
let mut dispatcher = RpcDispatcher::new_nonblocking(handler, server_transport).unwrap();
// Queue the client RPC:
let client_thread = spawn(move || {
let mut rpc_client = TestRpcClient::new(client_transport);
let neg_resp = rpc_client.checked_neg(125).unwrap();
assert_matches!(neg_resp, Some(-125));
let neg_err_resp = rpc_client.checked_neg(42);
if let Err(err) = neg_err_resp {
assert_matches!(err.downcast_ref::<Error>(), Some(Error::MagicNumber));
} else {
panic!("Got {:?}; expected Err(Error::MagicNumber)", neg_err_resp)
};
let add_resp = rpc_client.checked_add(5, 4).unwrap();
assert_matches!(add_resp, Some(9));
assert!(rpc_client.terminate().is_err());
});
let sleep_for = None;
assert_matches!(dispatcher.read_complete_message(sleep_for), Ok(None));
assert_matches!(dispatcher.read_complete_message(sleep_for), Ok(None));
assert_matches!(dispatcher.read_complete_message(sleep_for), Ok(None));
assert_matches!(dispatcher.read_complete_message(sleep_for), Ok(Some(_)));
    // Explicitly call drop to close the pipe so the client thread gets the
    // hang-up, since the return value should be a RemoveFd mutator.
drop(dispatcher);
client_thread.join().unwrap();
}
| 31.805556 | 100 | 0.657642 |
fba0c2246466e684e930c2bd5623cc88a71f38b8 | 2,311 | use num::Zero;
use na::{self, RealField};
use crate::math::{AngularInertia, Point, DIM};
/// The volume of a cylinder.
#[inline]
pub fn cylinder_volume<N: RealField>(half_height: N, radius: N) -> N {
if DIM == 2 {
half_height * radius * na::convert(4.0f64)
} else {
half_height * radius * radius * N::pi() * na::convert(2.0f64)
}
}
/// The area of a cylinder.
#[inline]
pub fn cylinder_area<N: RealField>(half_height: N, radius: N) -> N {
if DIM == 2 {
(half_height + radius) * na::convert(2.0f64)
} else {
let _pi = N::pi();
let basis = radius * radius * _pi;
let side = _pi * radius * (half_height + half_height) * na::convert(2.0f64);
side + basis + basis
}
}
/// The center of mass of a cylinder.
#[inline]
pub fn cylinder_center_of_mass<N: RealField>() -> Point<N> {
Point::origin()
}
/// The unit angular inertia of a cylinder.
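///
/// In 3D this matches the standard solid-cylinder formulas per unit mass,
/// with `h = 2 * half_height`: `r^2 / 2` about the axis of symmetry (the
/// `y` axis here) and `(3 * r^2 + h^2) / 12` about the two perpendicular axes.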
#[inline]
pub fn cylinder_unit_angular_inertia<N: RealField>(half_height: N, radius: N) -> AngularInertia<N> {
if DIM == 2 {
        // Same as the rectangle.
let _2: N = na::convert(2.0f64);
let _i12: N = na::convert(1.0f64 / 12.0);
let w = _i12 * _2 * _2;
let ix = w * half_height * half_height;
let iy = w * radius * radius;
let mut res = AngularInertia::zero();
res[(0, 0)] = ix + iy;
res
} else {
let sq_radius = radius * radius;
let sq_height = half_height * half_height * na::convert(4.0f64);
let off_principal = (sq_radius * na::convert(3.0f64) + sq_height) / na::convert(12.0f64);
let mut res = AngularInertia::zero();
res[(0, 0)] = off_principal.clone();
res[(1, 1)] = sq_radius / na::convert(2.0f64);
res[(2, 2)] = off_principal;
res
}
}
//impl<N: RealField> Volumetric<N> for Cylinder<N> {
// fn area(&self) -> N {
// cylinder_area(self.half_height(), self.radius())
// }
//
// fn volume(&self) -> N {
// cylinder_volume(self.half_height(), self.radius())
// }
//
// fn center_of_mass(&self) -> Point<N> {
// cylinder_center_of_mass()
// }
//
// fn unit_angular_inertia(&self) -> AngularInertia<N> {
// cylinder_unit_angular_inertia(self.half_height(), self.radius())
// }
//}
| 27.511905 | 100 | 0.570749 |
1eb4b0ce65a64b189bd3d4223121ba65c74d0af0 | 14,080 | use std::fs::{self, File, OpenOptions};
use std::io::prelude::*;
use std::path::Path;
use std::sync::atomic::Ordering;
use anyhow::{ensure, Context, Result};
use bincode::{deserialize, serialize};
use log::info;
use memmap::MmapOptions;
use merkletree::store::{StoreConfig, DEFAULT_CACHED_ABOVE_BASE_LAYER};
use paired::bls12_381::{Bls12, Fr};
use storage_proofs::circuit::multi_proof::MultiProof;
use storage_proofs::circuit::stacked::StackedCompound;
use storage_proofs::compound_proof::{self, CompoundProof};
use storage_proofs::drgraph::{DefaultTreeHasher, Graph};
use storage_proofs::hasher::{Domain, Hasher};
use storage_proofs::merkle::create_merkle_tree;
use storage_proofs::porep::PoRep;
use storage_proofs::sector::SectorId;
use storage_proofs::stacked::{
self, generate_replica_id, CacheKey, ChallengeRequirements, StackedDrg, Tau, TemporaryAux,
TemporaryAuxCache,
};
use crate::api::util::{as_safe_commitment, commitment_from_fr};
use crate::caches::{get_stacked_params, get_stacked_verifying_key};
use crate::constants::{
DefaultPieceHasher, POREP_WINDOW_MINIMUM_CHALLENGES, SINGLE_PARTITION_PROOF_LEN,
};
use crate::parameters::setup_params;
pub use crate::pieces;
pub use crate::pieces::verify_pieces;
use crate::types::{
Commitment, PaddedBytesAmount, PieceInfo, PoRepConfig, PoRepProofPartitions, ProverId,
SealCommitOutput, SealPreCommitOutput, Ticket,
};
/// Seals the staged sector at `in_path` in place, saving the resulting replica to `out_path`.
///
/// # Arguments
///
/// * `porep_config` - porep configuration containing the number of bytes in this sector.
/// * `cache_path` - path to a directory in which the sector data's Merkle Tree can be written.
/// * `in_path` - the path where the unsealed sector data is read.
/// * `out_path` - the path where the sealed sector data will be written.
/// * `prover_id` - the prover-id that is sealing this sector.
/// * `sector_id` - the sector-id of this sector.
/// * `ticket` - the ticket that will be used to generate this sector's replica-id.
/// * `piece_infos` - each piece's info (number of bytes and commitment) in this sector.
#[allow(clippy::too_many_arguments)]
pub fn seal_pre_commit<R: AsRef<Path>, T: AsRef<Path>, S: AsRef<Path>>(
porep_config: PoRepConfig,
cache_path: R,
in_path: T,
out_path: S,
prover_id: ProverId,
sector_id: SectorId,
ticket: Ticket,
piece_infos: &[PieceInfo],
) -> Result<SealPreCommitOutput> {
info!("seal_pre_commit: start");
let sector_bytes = usize::from(PaddedBytesAmount::from(porep_config));
fs::metadata(&in_path)
        .with_context(|| format!("could not read in_path={:?}", in_path.as_ref()))?;
fs::metadata(&out_path)
.with_context(|| format!("could not read out_path={:?}", out_path.as_ref()))?;
// Copy unsealed data to output location, where it will be sealed in place.
fs::copy(&in_path, &out_path).with_context(|| {
format!(
"could not copy in_path={:?} to out_path={:?}",
in_path.as_ref(),
out_path.as_ref()
)
})?;
let f_data = OpenOptions::new()
.read(true)
.write(true)
.open(&out_path)
.with_context(|| format!("could not open out_path={:?}", out_path.as_ref()))?;
// Zero-pad the data to the requested size by extending the underlying file if needed.
f_data.set_len(sector_bytes as u64)?;
let mut data = unsafe {
MmapOptions::new()
.map_mut(&f_data)
            .with_context(|| format!("could not mmap out_path={:?}", out_path.as_ref()))?
};
let compound_setup_params = compound_proof::SetupParams {
vanilla_params: setup_params(
PaddedBytesAmount::from(porep_config),
usize::from(PoRepProofPartitions::from(porep_config)),
)?,
partitions: Some(usize::from(PoRepProofPartitions::from(porep_config))),
};
let compound_public_params = <StackedCompound as CompoundProof<
_,
StackedDrg<DefaultTreeHasher, DefaultPieceHasher>,
_,
>>::setup(&compound_setup_params)?;
// MT for original data is always named tree-d, and it will be
// referenced later in the process as such.
let config = StoreConfig::new(
cache_path.as_ref(),
CacheKey::CommDTree.to_string(),
DEFAULT_CACHED_ABOVE_BASE_LAYER,
);
info!("building merkle tree for the original data");
let data_tree = create_merkle_tree::<DefaultPieceHasher>(
Some(config.clone()),
compound_public_params.vanilla_params.wrapper_graph.size(),
&data,
)?;
let comm_d_root: Fr = data_tree.root().into();
let comm_d = commitment_from_fr::<Bls12>(comm_d_root);
ensure!(
verify_pieces(&comm_d, piece_infos, porep_config.into())?,
"pieces and comm_d do not match"
);
let replica_id = generate_replica_id::<DefaultTreeHasher, _>(
&prover_id,
sector_id.into(),
&ticket,
data_tree.root(),
);
let (tau, (p_aux, t_aux)) = StackedDrg::<DefaultTreeHasher, DefaultPieceHasher>::replicate(
&compound_public_params.vanilla_params,
&replica_id,
&mut data,
Some(data_tree),
Some(config),
)?;
let comm_r = commitment_from_fr::<Bls12>(tau.comm_r.into());
info!("seal_pre_commit: end");
// Persist p_aux and t_aux here
let p_aux_path = cache_path.as_ref().join(CacheKey::PAux.to_string());
let mut f_p_aux = File::create(&p_aux_path)
.with_context(|| format!("could not create file p_aux={:?}", p_aux_path))?;
let p_aux_bytes = serialize(&p_aux)?;
f_p_aux
.write_all(&p_aux_bytes)
.with_context(|| format!("could not write to file p_aux={:?}", p_aux_path))?;
let t_aux_path = cache_path.as_ref().join(CacheKey::TAux.to_string());
let mut f_t_aux = File::create(&t_aux_path)
.with_context(|| format!("could not create file t_aux={:?}", t_aux_path))?;
let t_aux_bytes = serialize(&t_aux)?;
f_t_aux
.write_all(&t_aux_bytes)
.with_context(|| format!("could not write to file t_aux={:?}", t_aux_path))?;
Ok(SealPreCommitOutput { comm_r, comm_d })
}
/// Generates a proof for the pre committed sector.
///
/// # Arguments
///
/// * `porep_config` - porep configuration containing the number of bytes in this sector.
/// * `cache_path` - path to a directory in which the sector data's Merkle Tree can be written.
/// * `prover_id` - the prover-id that is sealing the sector.
/// * `sector_id` - the sector-id of this sector.
/// * `ticket` - the ticket that will be used to generate this sector's replica-id.
/// * `seed` - the seed used to derive the porep challenges.
/// * `pre_commit` - commitments to the sector data and its replica.
/// * `piece_infos` - each piece's info (number of bytes and commitment) in this sector.
#[allow(clippy::too_many_arguments)]
pub fn seal_commit<T: AsRef<Path>>(
porep_config: PoRepConfig,
cache_path: T,
prover_id: ProverId,
sector_id: SectorId,
ticket: Ticket,
seed: Ticket,
pre_commit: SealPreCommitOutput,
piece_infos: &[PieceInfo],
) -> Result<SealCommitOutput> {
info!("seal_commit:start");
let SealPreCommitOutput { comm_d, comm_r } = pre_commit;
ensure!(comm_d != [0; 32], "Invalid all zero commitment (comm_d)");
ensure!(comm_r != [0; 32], "Invalid all zero commitment (comm_r)");
ensure!(
verify_pieces(&comm_d, piece_infos, porep_config.into())?,
"pieces and comm_d do not match"
);
let p_aux = {
let mut p_aux_bytes = vec![];
let p_aux_path = cache_path.as_ref().join(CacheKey::PAux.to_string());
let mut f_p_aux = File::open(&p_aux_path)
.with_context(|| format!("could not open file p_aux={:?}", p_aux_path))?;
f_p_aux.read_to_end(&mut p_aux_bytes)?;
deserialize(&p_aux_bytes)
}?;
let t_aux = {
let mut t_aux_bytes = vec![];
let t_aux_path = cache_path.as_ref().join(CacheKey::TAux.to_string());
let mut f_t_aux = File::open(&t_aux_path)
.with_context(|| format!("could not open file t_aux={:?}", t_aux_path))?;
f_t_aux.read_to_end(&mut t_aux_bytes)?;
let mut res: TemporaryAux<_, _> = deserialize(&t_aux_bytes)?;
// Switch t_aux to the passed in cache_path
res.set_cache_path(cache_path);
res
};
// Convert TemporaryAux to TemporaryAuxCache, which instantiates all
// elements based on the configs stored in TemporaryAux.
let t_aux_cache: TemporaryAuxCache<DefaultTreeHasher, DefaultPieceHasher> =
TemporaryAuxCache::new(&t_aux).context("failed to restore contents of t_aux")?;
let comm_r_safe = as_safe_commitment(&comm_r, "comm_r")?;
let comm_d_safe = <DefaultPieceHasher as Hasher>::Domain::try_from_bytes(&comm_d)?;
let replica_id = generate_replica_id::<DefaultTreeHasher, _>(
&prover_id,
sector_id.into(),
&ticket,
comm_d_safe,
);
let public_inputs = stacked::PublicInputs {
replica_id,
tau: Some(stacked::Tau {
comm_d: comm_d_safe,
comm_r: comm_r_safe,
}),
k: None,
seed,
};
let private_inputs = stacked::PrivateInputs::<DefaultTreeHasher, DefaultPieceHasher> {
p_aux,
t_aux: t_aux_cache,
};
let groth_params = get_stacked_params(porep_config)?;
info!(
"got groth params ({}) while sealing",
u64::from(PaddedBytesAmount::from(porep_config))
);
let compound_setup_params = compound_proof::SetupParams {
vanilla_params: setup_params(
PaddedBytesAmount::from(porep_config),
usize::from(PoRepProofPartitions::from(porep_config)),
)?,
partitions: Some(usize::from(PoRepProofPartitions::from(porep_config))),
};
let compound_public_params = StackedCompound::setup(&compound_setup_params)?;
let proof = StackedCompound::prove(
&compound_public_params,
&public_inputs,
&private_inputs,
&groth_params,
)?;
// Delete cached MTs that are no longer needed.
TemporaryAux::<DefaultTreeHasher, DefaultPieceHasher>::delete(t_aux)?;
let mut buf = Vec::with_capacity(
SINGLE_PARTITION_PROOF_LEN * usize::from(PoRepProofPartitions::from(porep_config)),
);
proof.write(&mut buf)?;
// Verification is cheap when parameters are cached,
// and it is never correct to return a proof which does not verify.
verify_seal(
porep_config,
comm_r,
comm_d,
prover_id,
sector_id,
ticket,
seed,
&buf,
)
.context("post-seal verification sanity check failed")?;
info!("seal_commit:end");
Ok(SealCommitOutput { proof: buf })
}
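// A minimal sketch of the full sealing flow (bindings such as `cfg`, `cache`,
// `staged`, and `sealed` are assumed to be set up by the caller and are
// illustrative only):
//
//     let pre = seal_pre_commit(cfg, &cache, &staged, &sealed, prover_id,
//                               sector_id, ticket, &pieces)?;
//     let (comm_r, comm_d) = (pre.comm_r, pre.comm_d);
//     let commit = seal_commit(cfg, &cache, prover_id, sector_id, ticket,
//                              seed, pre, &pieces)?;
//     assert!(verify_seal(cfg, comm_r, comm_d, prover_id, sector_id, ticket,
//                         seed, &commit.proof)?);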
/// Computes a sector's `comm_d` given its pieces.
///
/// # Arguments
///
/// * `porep_config` - this sector's porep config that contains the number of bytes in the sector.
/// * `piece_infos` - the piece info (commitment and byte length) for each piece in this sector.
pub fn compute_comm_d(porep_config: PoRepConfig, piece_infos: &[PieceInfo]) -> Result<Commitment> {
pieces::compute_comm_d(porep_config.sector_size, piece_infos)
}
/// Verifies the output of some previously-run seal operation.
///
/// # Arguments
///
/// * `porep_config` - this sector's porep config that contains the number of bytes in this sector.
/// * `comm_r_in` - commitment to the sector's replica (`comm_r`).
/// * `comm_d_in` - commitment to the sector's data (`comm_d`).
/// * `prover_id` - the prover-id that sealed this sector.
/// * `sector_id` - this sector's sector-id.
/// * `ticket` - the ticket that was used to generate this sector's replica-id.
/// * `seed` - the seed used to derive the porep challenges.
/// * `proof_vec` - the porep circuit proof serialized into a vector of bytes.
#[allow(clippy::too_many_arguments)]
pub fn verify_seal(
porep_config: PoRepConfig,
comm_r_in: Commitment,
comm_d_in: Commitment,
prover_id: ProverId,
sector_id: SectorId,
ticket: Ticket,
seed: Ticket,
proof_vec: &[u8],
) -> Result<bool> {
ensure!(comm_d_in != [0; 32], "Invalid all zero commitment (comm_d)");
ensure!(comm_r_in != [0; 32], "Invalid all zero commitment (comm_r)");
let sector_bytes = PaddedBytesAmount::from(porep_config);
let comm_r = as_safe_commitment(&comm_r_in, "comm_r")?;
let comm_d = as_safe_commitment(&comm_d_in, "comm_d")?;
let replica_id =
generate_replica_id::<DefaultTreeHasher, _>(&prover_id, sector_id.into(), &ticket, comm_d);
let compound_setup_params = compound_proof::SetupParams {
vanilla_params: setup_params(
PaddedBytesAmount::from(porep_config),
usize::from(PoRepProofPartitions::from(porep_config)),
)?,
partitions: Some(usize::from(PoRepProofPartitions::from(porep_config))),
};
let compound_public_params: compound_proof::PublicParams<
'_,
StackedDrg<'_, DefaultTreeHasher, DefaultPieceHasher>,
> = StackedCompound::setup(&compound_setup_params)?;
let public_inputs = stacked::PublicInputs::<
<DefaultTreeHasher as Hasher>::Domain,
<DefaultPieceHasher as Hasher>::Domain,
> {
replica_id,
tau: Some(Tau { comm_r, comm_d }),
seed,
k: None,
};
let verifying_key = get_stacked_verifying_key(porep_config)?;
info!(
"got verifying key ({}) while verifying seal",
u64::from(sector_bytes)
);
let proof = MultiProof::new_from_reader(
Some(usize::from(PoRepProofPartitions::from(porep_config))),
proof_vec,
&verifying_key,
)?;
StackedCompound::verify(
&compound_public_params,
&public_inputs,
&proof,
&ChallengeRequirements {
minimum_challenges: POREP_WINDOW_MINIMUM_CHALLENGES.load(Ordering::Relaxed) as usize, // TODO: what do we want here?
},
)
.map_err(Into::into)
}
| 35.376884 | 128 | 0.662145 |
71c4da32e3b236cff7e47c3d2995ad69bd1d688a | 10,490 | extern crate ctrlc;
extern crate deque;
extern crate docopt;
extern crate env_logger;
extern crate grep;
extern crate ignore;
#[cfg(windows)]
extern crate kernel32;
#[macro_use]
extern crate lazy_static;
extern crate libc;
#[macro_use]
extern crate log;
extern crate memchr;
extern crate memmap;
extern crate num_cpus;
extern crate regex;
extern crate rustc_serialize;
extern crate term;
#[cfg(windows)]
extern crate winapi;
use std::error::Error;
use std::fs::File;
use std::io;
use std::io::Write;
use std::path::Path;
use std::process;
use std::result;
use std::sync::{Arc, Mutex};
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;
use std::cmp;
use deque::{Stealer, Stolen};
use grep::Grep;
use memmap::{Mmap, Protection};
use term::Terminal;
use ignore::DirEntry;
use args::Args;
use out::{ColoredTerminal, Out};
use pathutil::strip_prefix;
use printer::Printer;
use search_stream::InputBuffer;
#[cfg(windows)]
use terminal_win::WindowsBuffer;
macro_rules! errored {
($($tt:tt)*) => {
return Err(From::from(format!($($tt)*)));
}
}
macro_rules! eprintln {
($($tt:tt)*) => {{
use std::io::Write;
let _ = writeln!(&mut ::std::io::stderr(), $($tt)*);
}}
}
mod args;
mod atty;
mod out;
mod pathutil;
mod printer;
mod search_buffer;
mod search_stream;
#[cfg(windows)]
mod terminal_win;
pub type Result<T> = result::Result<T, Box<Error + Send + Sync>>;
fn main() {
match Args::parse().and_then(run) {
Ok(count) if count == 0 => process::exit(1),
Ok(_) => process::exit(0),
Err(err) => {
eprintln!("{}", err);
process::exit(1);
}
}
}
fn run(args: Args) -> Result<u64> {
let args = Arc::new(args);
let handler_args = args.clone();
ctrlc::set_handler(move || {
let stdout = io::stdout();
let mut stdout = stdout.lock();
let _ = handler_args.stdout().reset();
let _ = stdout.flush();
process::exit(1);
});
let paths = args.paths();
let threads = cmp::max(1, args.threads() - 1);
let isone =
paths.len() == 1 && (paths[0] == Path::new("-") || paths[0].is_file());
if args.files() {
return run_files(args.clone());
}
if args.type_list() {
return run_types(args.clone());
}
if threads == 1 || isone {
return run_one_thread(args.clone());
}
let out = Arc::new(Mutex::new(args.out()));
let quiet_matched = QuietMatched::new(args.quiet());
let mut workers = vec![];
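    // Fan the work out through a work-stealing deque: the main thread pushes
    // `Work` items below, while each spawned worker pulls from the shared
    // `Stealer` end.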
let workq = {
let (workq, stealer) = deque::new();
for _ in 0..threads {
let worker = MultiWorker {
chan_work: stealer.clone(),
quiet_matched: quiet_matched.clone(),
out: out.clone(),
outbuf: Some(args.outbuf()),
worker: Worker {
args: args.clone(),
inpbuf: args.input_buffer(),
grep: args.grep(),
match_count: 0,
},
};
workers.push(thread::spawn(move || worker.run()));
}
workq
};
let mut paths_searched: u64 = 0;
for dent in args.walker() {
if quiet_matched.has_match() {
break;
}
paths_searched += 1;
if dent.is_stdin() {
workq.push(Work::Stdin);
} else {
workq.push(Work::File(dent));
}
}
if !paths.is_empty() && paths_searched == 0 {
eprintln!("No files were searched, which means ripgrep probably \
applied a filter you didn't expect. \
Try running again with --debug.");
}
for _ in 0..workers.len() {
workq.push(Work::Quit);
}
let mut match_count = 0;
for worker in workers {
match_count += worker.join().unwrap();
}
Ok(match_count)
}
fn run_one_thread(args: Arc<Args>) -> Result<u64> {
let mut worker = Worker {
args: args.clone(),
inpbuf: args.input_buffer(),
grep: args.grep(),
match_count: 0,
};
let mut term = args.stdout();
let mut paths_searched: u64 = 0;
for dent in args.walker() {
let mut printer = args.printer(&mut term);
if worker.match_count > 0 {
if args.quiet() {
break;
}
if let Some(sep) = args.file_separator() {
printer = printer.file_separator(sep);
}
}
paths_searched += 1;
if dent.is_stdin() {
worker.do_work(&mut printer, WorkReady::Stdin);
} else {
let file = match File::open(dent.path()) {
Ok(file) => file,
Err(err) => {
eprintln!("{}: {}", dent.path().display(), err);
continue;
}
};
worker.do_work(&mut printer, WorkReady::DirFile(dent, file));
}
}
if !args.paths().is_empty() && paths_searched == 0 {
eprintln!("No files were searched, which means ripgrep probably \
applied a filter you didn't expect. \
Try running again with --debug.");
}
Ok(worker.match_count)
}
fn run_files(args: Arc<Args>) -> Result<u64> {
let term = args.stdout();
let mut printer = args.printer(term);
let mut file_count = 0;
for dent in args.walker() {
printer.path(dent.path());
file_count += 1;
}
Ok(file_count)
}
fn run_types(args: Arc<Args>) -> Result<u64> {
let term = args.stdout();
let mut printer = args.printer(term);
let mut ty_count = 0;
for def in args.type_defs() {
printer.type_def(def);
ty_count += 1;
}
Ok(ty_count)
}
enum Work {
Stdin,
File(DirEntry),
Quit,
}
enum WorkReady {
Stdin,
DirFile(DirEntry, File),
}
struct MultiWorker {
chan_work: Stealer<Work>,
quiet_matched: QuietMatched,
out: Arc<Mutex<Out>>,
#[cfg(not(windows))]
outbuf: Option<ColoredTerminal<term::TerminfoTerminal<Vec<u8>>>>,
#[cfg(windows)]
outbuf: Option<ColoredTerminal<WindowsBuffer>>,
worker: Worker,
}
struct Worker {
args: Arc<Args>,
inpbuf: InputBuffer,
grep: Grep,
match_count: u64,
}
impl MultiWorker {
fn run(mut self) -> u64 {
loop {
if self.quiet_matched.has_match() {
break;
}
let work = match self.chan_work.steal() {
Stolen::Empty | Stolen::Abort => continue,
Stolen::Data(Work::Quit) => break,
Stolen::Data(Work::Stdin) => WorkReady::Stdin,
Stolen::Data(Work::File(ent)) => {
match File::open(ent.path()) {
Ok(file) => WorkReady::DirFile(ent, file),
Err(err) => {
eprintln!("{}: {}", ent.path().display(), err);
continue;
}
}
}
};
let mut outbuf = self.outbuf.take().unwrap();
outbuf.clear();
let mut printer = self.worker.args.printer(outbuf);
self.worker.do_work(&mut printer, work);
if self.quiet_matched.set_match(self.worker.match_count > 0) {
break;
}
let outbuf = printer.into_inner();
if !outbuf.get_ref().is_empty() {
let mut out = self.out.lock().unwrap();
out.write(&outbuf);
}
self.outbuf = Some(outbuf);
}
self.worker.match_count
}
}
impl Worker {
fn do_work<W: Terminal + Send>(
&mut self,
printer: &mut Printer<W>,
work: WorkReady,
) {
let result = match work {
WorkReady::Stdin => {
let stdin = io::stdin();
let stdin = stdin.lock();
self.search(printer, &Path::new("<stdin>"), stdin)
}
WorkReady::DirFile(ent, file) => {
let mut path = ent.path();
if let Some(p) = strip_prefix("./", path) {
path = p;
}
if self.args.mmap() {
self.search_mmap(printer, path, &file)
} else {
self.search(printer, path, file)
}
}
};
match result {
Ok(count) => {
self.match_count += count;
}
Err(err) => {
eprintln!("{}", err);
}
}
}
fn search<R: io::Read, W: Terminal + Send>(
&mut self,
printer: &mut Printer<W>,
path: &Path,
rdr: R,
) -> Result<u64> {
self.args.searcher(
&mut self.inpbuf,
printer,
&self.grep,
path,
rdr,
).run().map_err(From::from)
}
fn search_mmap<W: Terminal + Send>(
&mut self,
printer: &mut Printer<W>,
path: &Path,
file: &File,
) -> Result<u64> {
if try!(file.metadata()).len() == 0 {
// Opening a memory map with an empty file results in an error.
// However, this may not actually be an empty file! For example,
// /proc/cpuinfo reports itself as an empty file, but it can
// produce data when it's read from. Therefore, we fall back to
// regular read calls.
return self.search(printer, path, file);
}
let mmap = try!(Mmap::open(file, Protection::Read));
Ok(self.args.searcher_buffer(
printer,
&self.grep,
path,
unsafe { mmap.as_slice() },
).run())
}
}
#[derive(Clone, Debug)]
struct QuietMatched(Arc<Option<AtomicBool>>);
impl QuietMatched {
fn new(quiet: bool) -> QuietMatched {
let atomic = if quiet { Some(AtomicBool::new(false)) } else { None };
QuietMatched(Arc::new(atomic))
}
fn has_match(&self) -> bool {
match *self.0 {
None => false,
Some(ref matched) => matched.load(Ordering::SeqCst),
}
}
fn set_match(&self, yes: bool) -> bool {
match *self.0 {
None => false,
Some(_) if !yes => false,
Some(ref m) => { m.store(true, Ordering::SeqCst); true }
}
}
}
| 26.966581 | 79 | 0.510296 |
1e7271ef192ee1a30a57f7fa603e7ff2ffe0efb8 | 1,094 | mod starwars;
use async_graphql::{
http::{playground_source, GraphQLPlaygroundConfig},
EmptyMutation, EmptySubscription, Request, Response, Schema,
};
use axum::{
extract::Extension,
response::{Html, IntoResponse},
routing::get,
AddExtensionLayer, Json, Router,
};
use starwars::{QueryRoot, StarWars, StarWarsSchema};
async fn graphql_handler(schema: Extension<StarWarsSchema>, req: Json<Request>) -> Json<Response> {
schema.execute(req.0).await.into()
}
async fn graphql_playground() -> impl IntoResponse {
Html(playground_source(GraphQLPlaygroundConfig::new("/")))
}
#[tokio::main]
async fn main() {
let schema = Schema::build(QueryRoot, EmptyMutation, EmptySubscription)
.data(StarWars::new())
.finish();
let app = Router::new()
.route("/", get(graphql_playground).post(graphql_handler))
.layer(AddExtensionLayer::new(schema));
println!("Playground: http://localhost:3000");
axum::Server::bind(&"0.0.0.0:3000".parse().unwrap())
.serve(app.into_make_service())
.await
.unwrap();
}
| 27.35 | 99 | 0.670018 |
0ea15204c6fe7e15de0f5b4d9bac018d911b1305 | 20,531 | //! The `Fuzzer` is the main struct for a fuzz campaign.
use crate::{
bolts::current_time,
corpus::{Corpus, CorpusScheduler, Testcase},
events::{Event, EventConfig, EventFirer, EventManager, ProgressReporter},
executors::{Executor, ExitKind, HasObservers},
feedbacks::Feedback,
inputs::Input,
mark_feature_time,
observers::ObserversTuple,
stages::StagesTuple,
start_timer,
state::{HasClientPerfMonitor, HasCorpus, HasExecutions, HasSolutions},
Error,
};
#[cfg(feature = "introspection")]
use crate::monitors::PerfFeature;
use alloc::string::ToString;
use core::{marker::PhantomData, time::Duration};
/// Send a monitor update every 15 (or more) seconds
const STATS_TIMEOUT_DEFAULT: Duration = Duration::from_secs(15);
/// Holds a scheduler
pub trait HasCorpusScheduler<CS, I, S>
where
CS: CorpusScheduler<I, S>,
I: Input,
{
/// The scheduler
fn scheduler(&self) -> &CS;
/// The scheduler (mut)
fn scheduler_mut(&mut self) -> &mut CS;
}
/// Holds a feedback
pub trait HasFeedback<F, I, S>
where
F: Feedback<I, S>,
I: Input,
S: HasClientPerfMonitor,
{
/// The feedback
fn feedback(&self) -> &F;
/// The feedback (mut)
fn feedback_mut(&mut self) -> &mut F;
}
/// Holds an objective feedback
pub trait HasObjective<I, OF, S>
where
OF: Feedback<I, S>,
I: Input,
S: HasClientPerfMonitor,
{
/// The objective feedback
fn objective(&self) -> &OF;
/// The objective feedback (mut)
fn objective_mut(&mut self) -> &mut OF;
}
/// Evaluate if an input is interesting using the feedback
pub trait ExecutionProcessor<I, OT, S>
where
OT: ObserversTuple<I, S>,
I: Input,
{
/// Evaluate if a set of observation channels has an interesting state
fn process_execution<EM>(
&mut self,
state: &mut S,
manager: &mut EM,
input: I,
observers: &OT,
exit_kind: &ExitKind,
send_events: bool,
) -> Result<(ExecuteInputResult, Option<usize>), Error>
where
EM: EventFirer<I>;
}
/// Evaluate an input, modifying the state of the fuzzer
pub trait EvaluatorObservers<I, OT, S>: Sized
where
I: Input,
OT: ObserversTuple<I, S>,
{
    /// Runs the input and triggers observers and feedback;
    /// returns whether it is interesting and (optionally) the index of the new testcase in the corpus
fn evaluate_input_with_observers<E, EM>(
&mut self,
state: &mut S,
executor: &mut E,
manager: &mut EM,
input: I,
send_events: bool,
) -> Result<(ExecuteInputResult, Option<usize>), Error>
where
E: Executor<EM, I, S, Self> + HasObservers<I, OT, S>,
EM: EventManager<E, I, S, Self>;
}
/// Evaluate an input, modifying the state of the fuzzer
pub trait Evaluator<E, EM, I, S> {
    /// Runs the input and triggers observers and feedback;
    /// returns whether it is interesting and (optionally) the index of the new testcase in the corpus
fn evaluate_input(
&mut self,
state: &mut S,
executor: &mut E,
manager: &mut EM,
input: I,
) -> Result<(ExecuteInputResult, Option<usize>), Error> {
self.evaluate_input_events(state, executor, manager, input, true)
}
    /// Runs the input and triggers observers and feedback;
    /// returns whether it is interesting and (optionally) the index of the new testcase in the corpus.
    /// This version has a boolean to decide whether to send events to the manager.
fn evaluate_input_events(
&mut self,
state: &mut S,
executor: &mut E,
manager: &mut EM,
input: I,
send_events: bool,
) -> Result<(ExecuteInputResult, Option<usize>), Error>;
/// Runs the input and triggers observers and feedback.
    /// Adds an input to the corpus, even if it's not considered `interesting` by the `feedback`.
/// Returns the `index` of the new testcase in the corpus.
/// Usually, you want to use [`Evaluator::evaluate_input`], unless you know what you are doing.
fn add_input(
&mut self,
state: &mut S,
executor: &mut E,
manager: &mut EM,
input: I,
) -> Result<usize, Error>;
}
/// The main fuzzer trait.
pub trait Fuzzer<E, EM, I, S, ST>
where
I: Input,
EM: ProgressReporter<I>,
S: HasExecutions + HasClientPerfMonitor,
{
/// Fuzz for a single iteration
/// Returns the index of the last fuzzed corpus item
///
/// If you use this fn in a restarting scenario to only run for `n` iterations,
/// before exiting, make sure you call `event_mgr.on_restart(&mut state)?;`.
/// This way, the state will be available in the next, respawned, iteration.
fn fuzz_one(
&mut self,
stages: &mut ST,
executor: &mut E,
state: &mut S,
manager: &mut EM,
) -> Result<usize, Error>;
/// Fuzz forever (or until stopped)
fn fuzz_loop(
&mut self,
stages: &mut ST,
executor: &mut E,
state: &mut S,
manager: &mut EM,
) -> Result<usize, Error> {
let mut last = current_time();
let monitor_timeout = STATS_TIMEOUT_DEFAULT;
loop {
self.fuzz_one(stages, executor, state, manager)?;
last = manager.maybe_report_progress(state, last, monitor_timeout)?;
}
}
/// Fuzz for n iterations
/// Returns the index of the last fuzzed corpus item
///
/// If you use this fn in a restarting scenario to only run for `n` iterations,
/// before exiting, make sure you call `event_mgr.on_restart(&mut state)?;`.
/// This way, the state will be available in the next, respawned, iteration.
fn fuzz_loop_for(
&mut self,
stages: &mut ST,
executor: &mut E,
state: &mut S,
manager: &mut EM,
iters: u64,
) -> Result<usize, Error> {
if iters == 0 {
return Err(Error::IllegalArgument(
"Cannot fuzz for 0 iterations!".to_string(),
));
}
let mut ret = 0;
let mut last = current_time();
let monitor_timeout = STATS_TIMEOUT_DEFAULT;
for _ in 0..iters {
ret = self.fuzz_one(stages, executor, state, manager)?;
last = manager.maybe_report_progress(state, last, monitor_timeout)?;
}
        // If we assumed the fuzzer loop would always exit after this, we could do this here:
        // manager.on_restart(state)?;
        // But as the state may grow to a few megabytes,
        // for now we won't, and the user has to do it (unless we find a way to do this on `Drop`).
Ok(ret)
}
}
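// A minimal usage sketch of a bounded, restart-friendly loop (the bindings
// `fuzzer`, `stages`, `executor`, `state`, and `mgr` are assumed to be built
// elsewhere and are illustrative only):
//
//     fuzzer.fuzz_loop_for(&mut stages, &mut executor, &mut state, &mut mgr, 1_000)?;
//     mgr.on_restart(&mut state)?; // persist state before respawning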
/// The corpus this input should be added to
#[derive(Debug, PartialEq)]
pub enum ExecuteInputResult {
/// No special input
None,
    /// This input should be stored in the corpus
Corpus,
/// This input leads to a solution
Solution,
}
/// Your default fuzzer instance, for everyday use.
#[derive(Debug)]
pub struct StdFuzzer<CS, F, I, OF, OT, S>
where
CS: CorpusScheduler<I, S>,
F: Feedback<I, S>,
I: Input,
OF: Feedback<I, S>,
S: HasClientPerfMonitor,
{
scheduler: CS,
feedback: F,
objective: OF,
phantom: PhantomData<(I, OT, S)>,
}
impl<CS, F, I, OF, OT, S> HasCorpusScheduler<CS, I, S> for StdFuzzer<CS, F, I, OF, OT, S>
where
CS: CorpusScheduler<I, S>,
F: Feedback<I, S>,
I: Input,
OF: Feedback<I, S>,
S: HasClientPerfMonitor,
{
fn scheduler(&self) -> &CS {
&self.scheduler
}
fn scheduler_mut(&mut self) -> &mut CS {
&mut self.scheduler
}
}
impl<CS, F, I, OF, OT, S> HasFeedback<F, I, S> for StdFuzzer<CS, F, I, OF, OT, S>
where
CS: CorpusScheduler<I, S>,
F: Feedback<I, S>,
I: Input,
OF: Feedback<I, S>,
S: HasClientPerfMonitor,
{
fn feedback(&self) -> &F {
&self.feedback
}
fn feedback_mut(&mut self) -> &mut F {
&mut self.feedback
}
}
impl<CS, F, I, OF, OT, S> HasObjective<I, OF, S> for StdFuzzer<CS, F, I, OF, OT, S>
where
CS: CorpusScheduler<I, S>,
F: Feedback<I, S>,
I: Input,
OF: Feedback<I, S>,
S: HasClientPerfMonitor,
{
fn objective(&self) -> &OF {
&self.objective
}
fn objective_mut(&mut self) -> &mut OF {
&mut self.objective
}
}
impl<CS, F, I, OF, OT, S> ExecutionProcessor<I, OT, S> for StdFuzzer<CS, F, I, OF, OT, S>
where
CS: CorpusScheduler<I, S>,
F: Feedback<I, S>,
I: Input,
OF: Feedback<I, S>,
OT: ObserversTuple<I, S> + serde::Serialize + serde::de::DeserializeOwned,
S: HasCorpus<I> + HasSolutions<I> + HasClientPerfMonitor + HasExecutions,
{
/// Evaluate if a set of observation channels has an interesting state
fn process_execution<EM>(
&mut self,
state: &mut S,
manager: &mut EM,
input: I,
observers: &OT,
exit_kind: &ExitKind,
send_events: bool,
) -> Result<(ExecuteInputResult, Option<usize>), Error>
where
EM: EventFirer<I>,
{
let mut res = ExecuteInputResult::None;
#[cfg(not(feature = "introspection"))]
let is_solution = self
.objective_mut()
.is_interesting(state, manager, &input, observers, exit_kind)?;
#[cfg(feature = "introspection")]
let is_solution = self
.objective_mut()
.is_interesting_introspection(state, manager, &input, observers, exit_kind)?;
if is_solution {
res = ExecuteInputResult::Solution;
} else {
#[cfg(not(feature = "introspection"))]
let is_corpus = self
.feedback_mut()
.is_interesting(state, manager, &input, observers, exit_kind)?;
#[cfg(feature = "introspection")]
let is_corpus = self
.feedback_mut()
.is_interesting_introspection(state, manager, &input, observers, exit_kind)?;
if is_corpus {
res = ExecuteInputResult::Corpus;
}
}
match res {
ExecuteInputResult::None => {
self.feedback_mut().discard_metadata(state, &input)?;
self.objective_mut().discard_metadata(state, &input)?;
Ok((res, None))
}
ExecuteInputResult::Corpus => {
// Not a solution
self.objective_mut().discard_metadata(state, &input)?;
// Add the input to the main corpus
let mut testcase = Testcase::with_executions(input.clone(), *state.executions());
self.feedback_mut().append_metadata(state, &mut testcase)?;
let idx = state.corpus_mut().add(testcase)?;
self.scheduler_mut().on_add(state, idx)?;
if send_events {
// TODO set None for fast targets
let observers_buf = if manager.configuration() == EventConfig::AlwaysUnique {
None
} else {
Some(manager.serialize_observers(observers)?)
};
manager.fire(
state,
Event::NewTestcase {
input,
observers_buf,
exit_kind: *exit_kind,
corpus_size: state.corpus().count(),
client_config: manager.configuration(),
time: current_time(),
executions: *state.executions(),
},
)?;
}
Ok((res, Some(idx)))
}
ExecuteInputResult::Solution => {
// Not interesting
self.feedback_mut().discard_metadata(state, &input)?;
// The input is a solution, add it to the respective corpus
let mut testcase = Testcase::with_executions(input, *state.executions());
self.objective_mut().append_metadata(state, &mut testcase)?;
state.solutions_mut().add(testcase)?;
if send_events {
manager.fire(
state,
Event::Objective {
objective_size: state.solutions().count(),
},
)?;
}
Ok((res, None))
}
}
}
}
impl<CS, F, I, OF, OT, S> EvaluatorObservers<I, OT, S> for StdFuzzer<CS, F, I, OF, OT, S>
where
CS: CorpusScheduler<I, S>,
OT: ObserversTuple<I, S> + serde::Serialize + serde::de::DeserializeOwned,
F: Feedback<I, S>,
I: Input,
OF: Feedback<I, S>,
S: HasCorpus<I> + HasSolutions<I> + HasClientPerfMonitor + HasExecutions,
{
/// Process one input, adding to the respective corpuses if needed and firing the right events
#[inline]
fn evaluate_input_with_observers<E, EM>(
&mut self,
state: &mut S,
executor: &mut E,
manager: &mut EM,
input: I,
send_events: bool,
) -> Result<(ExecuteInputResult, Option<usize>), Error>
where
E: Executor<EM, I, S, Self> + HasObservers<I, OT, S>,
EM: EventManager<E, I, S, Self>,
{
let exit_kind = self.execute_input(state, executor, manager, &input)?;
let observers = executor.observers();
self.process_execution(state, manager, input, observers, &exit_kind, send_events)
}
}
impl<CS, E, EM, F, I, OF, OT, S> Evaluator<E, EM, I, S> for StdFuzzer<CS, F, I, OF, OT, S>
where
CS: CorpusScheduler<I, S>,
E: Executor<EM, I, S, Self> + HasObservers<I, OT, S>,
OT: ObserversTuple<I, S> + serde::Serialize + serde::de::DeserializeOwned,
EM: EventManager<E, I, S, Self>,
F: Feedback<I, S>,
I: Input,
OF: Feedback<I, S>,
S: HasCorpus<I> + HasSolutions<I> + HasClientPerfMonitor + HasExecutions,
{
/// Process one input, adding to the respective corpuses if needed and firing the right events
#[inline]
fn evaluate_input_events(
&mut self,
state: &mut S,
executor: &mut E,
manager: &mut EM,
input: I,
send_events: bool,
) -> Result<(ExecuteInputResult, Option<usize>), Error> {
self.evaluate_input_with_observers(state, executor, manager, input, send_events)
}
    /// Adds an input, even if it's not considered `interesting` by any of the executors
fn add_input(
&mut self,
state: &mut S,
executor: &mut E,
manager: &mut EM,
input: I,
) -> Result<usize, Error> {
let exit_kind = self.execute_input(state, executor, manager, &input)?;
let observers = executor.observers();
// Always consider this to be "interesting"
// Not a solution
self.objective_mut().discard_metadata(state, &input)?;
// Add the input to the main corpus
let mut testcase = Testcase::with_executions(input.clone(), *state.executions());
self.feedback_mut().append_metadata(state, &mut testcase)?;
let idx = state.corpus_mut().add(testcase)?;
self.scheduler_mut().on_add(state, idx)?;
let observers_buf = if manager.configuration() == EventConfig::AlwaysUnique {
None
} else {
Some(manager.serialize_observers(observers)?)
};
manager.fire(
state,
Event::NewTestcase {
input,
observers_buf,
exit_kind,
corpus_size: state.corpus().count(),
client_config: manager.configuration(),
time: current_time(),
executions: *state.executions(),
},
)?;
Ok(idx)
}
}
impl<CS, E, EM, F, I, OF, OT, S, ST> Fuzzer<E, EM, I, S, ST> for StdFuzzer<CS, F, I, OF, OT, S>
where
CS: CorpusScheduler<I, S>,
EM: EventManager<E, I, S, Self>,
F: Feedback<I, S>,
I: Input,
S: HasClientPerfMonitor + HasExecutions,
OF: Feedback<I, S>,
ST: StagesTuple<E, EM, S, Self>,
{
fn fuzz_one(
&mut self,
stages: &mut ST,
executor: &mut E,
state: &mut S,
manager: &mut EM,
) -> Result<usize, Error> {
// Init timer for scheduler
#[cfg(feature = "introspection")]
state.introspection_monitor_mut().start_timer();
// Get the next index from the scheduler
let idx = self.scheduler.next(state)?;
// Mark the elapsed time for the scheduler
#[cfg(feature = "introspection")]
state.introspection_monitor_mut().mark_scheduler_time();
// Mark the elapsed time for the scheduler
#[cfg(feature = "introspection")]
state.introspection_monitor_mut().reset_stage_index();
// Execute all stages
stages.perform_all(self, executor, state, manager, idx)?;
// Init timer for manager
#[cfg(feature = "introspection")]
state.introspection_monitor_mut().start_timer();
// Execute the manager
manager.process(self, state, executor)?;
// Mark the elapsed time for the manager
#[cfg(feature = "introspection")]
state.introspection_monitor_mut().mark_manager_time();
Ok(idx)
}
}
impl<CS, F, I, OF, OT, S> StdFuzzer<CS, F, I, OF, OT, S>
where
CS: CorpusScheduler<I, S>,
F: Feedback<I, S>,
I: Input,
OF: Feedback<I, S>,
S: HasExecutions + HasClientPerfMonitor,
{
/// Create a new `StdFuzzer` with standard behavior.
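    ///
    /// A construction sketch (the scheduler/feedback/objective values stand in
    /// for whatever implementations the harness already has in scope):
    ///
    /// ```rust,ignore
    /// let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective);
    /// ```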
pub fn new(scheduler: CS, feedback: F, objective: OF) -> Self {
Self {
scheduler,
feedback,
objective,
phantom: PhantomData,
}
}
/// Runs the input and triggers observers and feedback
pub fn execute_input<E, EM>(
&mut self,
state: &mut S,
executor: &mut E,
event_mgr: &mut EM,
input: &I,
) -> Result<ExitKind, Error>
where
E: Executor<EM, I, S, Self> + HasObservers<I, OT, S>,
OT: ObserversTuple<I, S>,
{
start_timer!(state);
executor.observers_mut().pre_exec_all(state, input)?;
mark_feature_time!(state, PerfFeature::PreExecObservers);
start_timer!(state);
let exit_kind = executor.run_target(self, state, event_mgr, input)?;
mark_feature_time!(state, PerfFeature::TargetExecution);
*state.executions_mut() += 1;
start_timer!(state);
executor
.observers_mut()
.post_exec_all(state, input, &exit_kind)?;
mark_feature_time!(state, PerfFeature::PostExecObservers);
Ok(exit_kind)
}
}
/// Structs with this trait will execute an [`Input`]
pub trait ExecutesInput<I, OT, S, Z>
where
I: Input,
OT: ObserversTuple<I, S>,
{
/// Runs the input and triggers observers and feedback
fn execute_input<E, EM>(
&mut self,
state: &mut S,
executor: &mut E,
event_mgr: &mut EM,
input: &I,
) -> Result<ExitKind, Error>
where
E: Executor<EM, I, S, Z> + HasObservers<I, OT, S>,
OT: ObserversTuple<I, S>;
}
impl<CS, F, I, OF, OT, S> ExecutesInput<I, OT, S, Self> for StdFuzzer<CS, F, I, OF, OT, S>
where
CS: CorpusScheduler<I, S>,
F: Feedback<I, S>,
I: Input,
OT: ObserversTuple<I, S>,
OF: Feedback<I, S>,
S: HasExecutions + HasClientPerfMonitor,
{
/// Runs the input and triggers observers and feedback
fn execute_input<E, EM>(
&mut self,
state: &mut S,
executor: &mut E,
event_mgr: &mut EM,
input: &I,
) -> Result<ExitKind, Error>
where
E: Executor<EM, I, S, Self> + HasObservers<I, OT, S>,
OT: ObserversTuple<I, S>,
{
start_timer!(state);
executor.observers_mut().pre_exec_all(state, input)?;
mark_feature_time!(state, PerfFeature::PreExecObservers);
start_timer!(state);
let exit_kind = executor.run_target(self, state, event_mgr, input)?;
mark_feature_time!(state, PerfFeature::TargetExecution);
*state.executions_mut() += 1;
start_timer!(state);
executor
.observers_mut()
.post_exec_all(state, input, &exit_kind)?;
mark_feature_time!(state, PerfFeature::PostExecObservers);
Ok(exit_kind)
}
}
| 30.689088 | 99 | 0.574448 |
fe67d5e0d76a91681bea195d9794353f5ddff663 | 6,853 | use std::io::{stdin, stdout, Write};
use termion::event::Key;
use termion::input::TermRead;
use termion::raw::IntoRawMode;
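/// Draws `prompt` plus a cursor-navigable menu over `list` in raw mode and
/// returns the highlighted item on Enter/`l`, or `None` on `q`/Ctrl-C.
/// Up/`k` and Down/`j` move the highlight, wrapping at either end.
///
/// A minimal usage sketch (the option names are illustrative):
///
/// ```rust,ignore
/// let options = ["build", "test", "deploy"];
/// if let Some(choice) = select("What should we run?", &options) {
///     println!("running {}", choice);
/// }
/// ```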
pub fn select<'a, T: std::fmt::Display>(prompt: &str, list: &'a [T]) -> Option<&'a T> {
let stdin = stdin();
let mut stdout = stdout().into_raw_mode().unwrap();
let last_item_id = list.len() - 1;
write!(
stdout,
"{} ? {}{}{}{}\n\r",
termion::color::Fg(termion::color::Green),
termion::style::Reset,
termion::style::Bold,
prompt,
termion::style::Reset
)
.unwrap();
let mut curr = 0;
let mut input = stdin.keys();
loop {
for (i, s) in list.iter().enumerate() {
write!(stdout, "{}", termion::clear::CurrentLine).unwrap();
if curr == i {
write!(
stdout,
"{}{} {} {}{}",
termion::style::Bold,
termion::color::Fg(termion::color::Cyan),
figures_rs::POINTER,
s,
termion::style::Reset
)
.unwrap();
} else {
write!(stdout, " {}", s).unwrap();
}
write!(stdout, "\n\r").unwrap();
}
stdout.flush().unwrap();
let next = input.next().unwrap();
match next.unwrap() {
Key::Char('\n') | Key::Char('l') => {
write!(stdout, "\n\r{}", termion::cursor::Show).unwrap();
return list.get(curr);
}
Key::Up | Key::Char('k') => {
if curr == 0 {
curr = last_item_id;
} else {
curr -= 1;
}
}
Key::Down | Key::Char('j') => {
if curr == last_item_id {
curr = 0;
} else {
curr += 1;
}
}
Key::Ctrl('c') | Key::Char('q') => {
write!(stdout, "\n\r{}", termion::cursor::Show).unwrap();
return None;
}
_ => {}
};
print!("{}", termion::cursor::Up(list.len() as u16));
}
}
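/// Multi-select variant of [`select`]: Space toggles the highlighted entry,
/// Enter/`l` returns every item paired with its checked state, and `q`/Ctrl-C
/// aborts with `None`.
///
/// A usage sketch (the feature names are illustrative):
///
/// ```rust,ignore
/// let picked = checkbox("Enable features:", &["logging", "metrics"]);
/// let enabled: Vec<_> = picked
///     .unwrap_or_default()
///     .into_iter()
///     .filter(|&(_, on)| on)
///     .collect();
/// ```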
pub fn checkbox<'a, T: std::fmt::Display>(
prompt: &'a str,
list: &'a [T],
) -> Option<Vec<(&'a T, bool)>> {
let mut list: Vec<(&T, bool)> = list.iter().map(|item| (item, false)).collect();
let stdin = stdin();
let mut stdout = stdout().into_raw_mode().unwrap();
let last_item_id = list.len() - 1;
write!(
stdout,
"{} ? {}{}{}{}\n\r",
termion::color::Fg(termion::color::Green),
termion::style::Reset,
termion::style::Bold,
prompt,
termion::style::Reset
)
.unwrap();
let mut curr = 0;
let mut input = stdin.keys();
fn print_check<'a, T>(s: &(&T, bool)) -> &'a str {
if s.1 {
figures_rs::CIRCLE_FILLED
} else {
figures_rs::CIRCLE
}
}
loop {
for (i, s) in list.iter_mut().enumerate() {
write!(stdout, "{}", termion::clear::CurrentLine).unwrap();
if curr == i {
write!(
stdout,
"{}{} {} {} {}{}",
termion::style::Bold,
termion::color::Fg(termion::color::Cyan),
figures_rs::POINTER,
print_check(s),
s.0,
termion::style::Reset
)
.unwrap();
} else {
write!(stdout, " {} {}", print_check(s), s.0).unwrap();
}
write!(stdout, "\n\r").unwrap();
}
stdout.flush().unwrap();
let next = input.next().unwrap();
match next.unwrap() {
Key::Char('\n') | Key::Char('l') => {
write!(stdout, "\n\r{}", termion::cursor::Show).unwrap();
return Some(list);
}
Key::Char(' ') => {
list[curr].1 = !list[curr].1;
}
Key::Up | Key::Char('k') => {
if curr == 0 {
curr = last_item_id;
} else {
curr -= 1;
}
}
Key::Down | Key::Char('j') => {
if curr == last_item_id {
curr = 0;
} else {
curr += 1;
}
}
Key::Ctrl('c') | Key::Char('q') => {
write!(stdout, "\n\r{}", termion::cursor::Show).unwrap();
return None;
}
_ => {}
};
print!("{}", termion::cursor::Up(list.len() as u16));
}
}
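/// Prompts for a yes/no answer: `y`/`n` set it directly, arrows or `j`/`k`
/// toggle, Backspace clears. Only an explicit `Y` confirmed with Enter
/// returns `true`; aborting with `q`/Ctrl-C returns `false`.
///
/// ```rust,ignore
/// if confirm("Overwrite the existing file?") {
///     // proceed with the overwrite
/// }
/// ```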
pub fn confirm(prompt: &str) -> bool {
let stdin = stdin();
let mut stdout = stdout().into_raw_mode().unwrap();
write!(
stdout,
"{} ? {}{}{}{} (y/n) ",
termion::color::Fg(termion::color::Green),
termion::style::Reset,
termion::style::Bold,
prompt,
termion::style::Reset
)
.unwrap();
let mut curr = None;
let mut input = stdin.keys();
fn curr_to_letter(curr: &Option<bool>) -> &str {
if let Some(curr) = curr {
if *curr {
"Y"
} else {
"N"
}
} else {
" "
}
}
write!(stdout, "{}", termion::cursor::Hide).unwrap();
loop {
write!(
stdout,
"{}{}",
termion::clear::AfterCursor,
curr_to_letter(&curr)
)
.unwrap();
print!("{}", termion::cursor::Left(1));
stdout.flush().unwrap();
let next = input.next().unwrap();
match next.unwrap() {
Key::Char('\n') | Key::Char('l') => {
write!(stdout, "\n\r{}", termion::cursor::Show).unwrap();
                return curr == Some(true);
}
Key::Up | Key::Char('k') => {
if let Some(c) = curr {
curr = Some(!c);
} else {
curr = Some(false);
}
}
Key::Down | Key::Char('j') => {
if let Some(c) = curr {
curr = Some(!c);
} else {
curr = Some(true);
}
}
Key::Char('y') | Key::Char('Y') => curr = Some(true),
Key::Char('n') | Key::Char('N') => curr = Some(false),
Key::Backspace => curr = None,
Key::Ctrl('c') | Key::Char('q') => {
write!(stdout, "\n\r{}", termion::cursor::Show).unwrap();
return false;
}
_ => {}
};
}
}
| 27.744939 | 87 | 0.383482 |
1d710d44d6fe1110eaf98e5ed29751d69a72efa9 | 9,600 | /*
* ignore.c rust version
*/
use crate::bindings::{
dirent, scandir_baton_t, ignores,
log_debug, pcre_exec,
opts,
};
use crate::file_types::*;
use crate::helpers::{
get_extension, char_ptr_to_string, str_to_c_char_ptr, fl_c_array_to_str,
double_i8_ptr_to_vec, strncmp, strncmp_fl, match_position,
get_position_in_string
};
use std::ffi::{ CStr, CString };
use std::ptr::slice_from_raw_parts;
use std::mem;
use std::str;
use regex::Regex;
// placeholder until log_debug is translated
fn log_debug_rs(message: &str) {
unsafe { log_debug(str_to_c_char_ptr(message)) };
}
unsafe fn ackmate_dir_match(dir_name: *const cty::c_char) -> cty::c_int {
if opts.ackmate_dir_filter.is_null() {
return 0;
}
/* we just care about the match, not where the matches are */
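    // NOTE: the extra-data and ovector arguments are dummied out with
    // `MaybeUninit::uninit().assume_init()`. With an ovecsize of 0 pcre never
    // reads the ovector, but materializing uninitialized values is still UB in
    // Rust; null pointers would be the sound stand-in here.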
pcre_exec(
opts.ackmate_dir_filter, mem::MaybeUninit::uninit().assume_init(),
dir_name, char_ptr_to_string(dir_name).len() as i32, 0,
0, mem::MaybeUninit::uninit().assume_init(), 0
)
}
fn match_regexes(pattern: Vec<String>, match_str: &str) -> bool {
for mut name in pattern {
if name.starts_with('*') {
name = format!("{}{}", "[[:alpha:]]", &name);
}
let re = Regex::new(&name).unwrap();
if re.is_match(match_str) {
let message = format!("{} {} {} {}", "File", match_str, "ignored because name matches slash regex pattern", &name);
log_debug_rs(&message);
return false
}
let message = format!("{} {} {} {}", "Pattern", &name, "doesn't match slash file", match_str);
log_debug_rs(&message);
}
true
}
fn match_static_pattern(vec: &Vec<String>, s: &str) -> bool {
let match_pos = match_position(s, vec);
if match_pos != -1 {
let message = format!("{} {} {} {}", "File", s, "ignored because name matches static pattern", vec[match_pos as usize]);
log_debug_rs(&message);
return true
}
false
}
fn match_slash_filename(vec: &Vec<String>, s: &str) -> bool {
for v in vec {
let pos = get_position_in_string(v, s);
if pos != -1 {
let longstring = String::from(s);
            let longstring_vec: Vec<char> = longstring.chars().collect();
let long_len = longstring_vec.len() as usize;
let substring: Vec<char> = v.chars().collect();
let sub_len = substring.len();
let mut one_before_pos = '?';
if pos > 1 {
one_before_pos = longstring_vec[pos as usize - 1];
}
let mut string_vec_at_pos: Vec<char> = Vec::new();
for i in pos as usize..long_len {
string_vec_at_pos.push(longstring_vec[i]);
}
let string_at_pos: String = string_vec_at_pos.iter().collect();
if string_at_pos == s || one_before_pos == '/' {
                for _ in 0..sub_len {
                    string_vec_at_pos.remove(0);
                }
if string_vec_at_pos[0] == '\0' || string_vec_at_pos[0] == '/' {
let message = format!("{} {} {} {}", "File", s, "ignored because name matches static pattern", v);
log_debug_rs(&message);
return true
}
}
}
let message = format!("{} {} {} {}", "Pattern", v, "doesn't match path", s);
log_debug_rs(&message);
}
false
}
/* This is the hottest code in Ag. 10-15% of all execution time is spent here */
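// Check order, cheapest first: literal name patterns, then patterns anchored
// relative to `ig.abs_path` (the "slash" names and regexes), then the invert
// and plain regex classes, and finally the optional ackmate directory filter.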
unsafe fn path_ignore_search(ig: *const ignores, path: &str, filename: &str) -> bool {
    // Some convenience bindings
let names = (*ig).names;
let slash_names = (*ig).slash_names;
let regexes = (*ig).regexes;
let slash_regexes = (*ig).slash_regexes;
let invert_regexes = (*ig).invert_regexes;
let abs_path = (*ig).abs_path;
let names_len = (*ig).names_len as usize;
let slash_names_len = (*ig).slash_names_len as usize;
let regexes_len = (*ig).regexes_len as usize;
let slash_regexes_len = (*ig).slash_regexes_len as usize;
let invert_regexes_len = (*ig).invert_regexes_len as usize;
let abs_path_len: usize = (*ig).abs_path_len as usize;
let names_vec = double_i8_ptr_to_vec(names, names_len);
let slash_names_vec = double_i8_ptr_to_vec(slash_names, slash_names_len);
let regexes_vec = double_i8_ptr_to_vec(regexes, regexes_len);
let slash_regexes_vec = double_i8_ptr_to_vec(slash_regexes, slash_regexes_len);
let invert_regexes_vec = double_i8_ptr_to_vec(invert_regexes, invert_regexes_len);
if match_static_pattern(&names_vec, &filename) { return true };
let mut path_str = String::from(path);
if path_str.starts_with('.') {
path_str.remove(0);
}
let mut temp = format!("{}/{}", &path_str, &filename);
// strip leading slash like abs_path
if temp.starts_with('/') {
temp.remove(0);
}
let mut slash_filename = String::from(&temp);
if strncmp_fl(&slash_filename, &char_ptr_to_string(abs_path), abs_path_len) == 0 {
        for _ in 0..abs_path_len {
            if !slash_filename.is_empty() {
                slash_filename.remove(0);
            }
        }
        if !slash_filename.is_empty() && slash_filename.starts_with('/') {
slash_filename.remove(0);
}
if match_static_pattern(&names_vec, &slash_filename) ||
match_static_pattern(&slash_names_vec, &slash_filename) ||
match_slash_filename(&names_vec, &slash_filename) {
return true
}
if !match_regexes(slash_regexes_vec, &slash_filename) { return false }
}
if !match_regexes(invert_regexes_vec, &filename) { return false }
if !match_regexes(regexes_vec, &filename) { return true }
ackmate_dir_match(str_to_c_char_ptr(&temp)) == 1
}
fn is_evil_hardcoded(filename: &str) -> bool {
// some paths to always ignore
    let evil_hardcoded_ignore_files_rs: Vec<&str> = vec![".", ".."];
for file in evil_hardcoded_ignore_files_rs {
if filename == file {
return true
}
}
false
}
fn is_unwanted_symlink(filename: &str, d_type: cty::c_uchar) -> bool {
if unsafe { opts.follow_symlinks } == 0 && d_type == DT_LNK {
        let message = format!("{} {} {}", "File", filename, "ignored because it's a symlink");
log_debug_rs(&message);
return true
}
false
}
fn is_fifo(filename: &str, d_type: cty::c_uchar) -> bool {
if d_type == DT_FIFO || d_type == DT_SOCK {
        let message = format!("{} {} {}", "File", filename, "ignored because it's a named pipe or socket");
log_debug_rs(&message);
return true
}
false
}
fn check_extension(filename: &str, extensions: &Vec<String>) -> bool {
    if let Some(extension) = get_extension(&filename) {
if extensions.contains(&extension) {
let message = format!("{} {} {} {}", "File", filename, "ignored because name matches extension", &extension);
log_debug_rs(&message);
return false
}
}
true
}
fn check_dir(filename_vec: &Vec<char>, d_type: cty::c_uchar, path_start: &str, ig: *const ignores) -> bool {
if d_type == DT_DIR {
        if filename_vec[filename_vec.len() - 1] != '/' {
let s: String = filename_vec.iter().collect();
let temp = format!("{}/", &s);
if unsafe { path_ignore_search(ig, path_start, &temp) } {
return false
}
}
}
true
}
fn is_return_condition_a(filename_vec: &Vec<char>, d_type: cty::c_uchar) -> bool {
let s: String = filename_vec.iter().collect();
let cond_a = unsafe { opts.search_hidden_files == 0 } && filename_vec[0] == '.';
let cond_b = is_evil_hardcoded(&s);
let cond_c = is_unwanted_symlink(&s, d_type);
let cond_d = is_fifo(&s, d_type);
cond_a || cond_b || cond_c || cond_d
}
unsafe fn is_return_condition_b(filename_vec: &Vec<char>, d_type: cty::c_uchar,
path_start: &str, ig: *const ignores) -> bool {
let s: String = filename_vec.iter().collect();
let extensions = double_i8_ptr_to_vec((*ig).extensions, (*ig).extensions_len as usize);
let cond_a = !check_extension(&s, &extensions);
let cond_b = path_ignore_search(ig, path_start, &s);
let cond_c = !check_dir(&filename_vec, d_type, path_start, ig);
cond_a || cond_b || cond_c
}
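/// `scandir`-style filter callback exported to the C side: returns 1 to keep
/// the directory entry and 0 to drop it, consulting every `ignores` scope by
/// walking the chain of parents.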
#[no_mangle]
pub unsafe extern "C" fn filename_filter(path: *const cty::c_char, dir: *const dirent, baton: *mut cty::c_void) -> cty::c_int {
let filename: String = fl_c_array_to_str(&(*dir).d_name);
let mut filename_vec: Vec<char> = filename.chars().collect();
if is_return_condition_a(&filename_vec, (*dir).d_type) { return 0 };
if opts.search_all_files == 1 && opts.path_to_ignore == 0 { return 1 }
if filename_vec[0] == '.' && filename_vec[1] == '/' {
filename_vec.remove(0);
}
let scandir_baton = baton as *const scandir_baton_t;
let path_start = (*scandir_baton).path_start;
let mut ig = (*scandir_baton).ig;
while !ig.is_null() {
if is_return_condition_b(&filename_vec, (*dir).d_type, &char_ptr_to_string(path_start), ig) { return 0 }
ig = (*ig).parent;
}
let message = format!("{} {}", &filename, "not ignored");
log_debug(str_to_c_char_ptr(&message));
1
}
| 32.876712 | 128 | 0.607604 |
69853e95adf66b32319c1fc2eea5979aa2e9507e | 693 |
use bytevec::ByteEncodable;
use bytevec::ByteDecodable;
use std::collections::HashMap;
#[derive(PartialEq, Debug, Default, Clone)]
pub struct ClParams{
pub version: (u32, u32),
pub gamefiles: HashMap<String, String>,
}
bytevec_impls! {
impl ClParams {
version: (u32, u32),
gamefiles: HashMap<String, String>
}
}
impl ClParams {
pub fn new() -> ClParams{
ClParams{
version: (0, 0),
gamefiles: HashMap::new()
}
}
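    /// Wire (de)serialization via `bytevec` with `u16` length prefixes; the
    /// two functions are inverses, so `ClParams::from_network(p.to_network())`
    /// round-trips `p`.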
pub fn to_network(&self) -> Vec<u8>{
self.encode::<u16>().unwrap()
}
pub fn from_network(message: Vec<u8>) -> ClParams{
ClParams::decode::<u16>(&message).unwrap()
}
}
| 21.65625 | 54 | 0.585859 |
5b65879ce7b05cad7951e17676c8f2b9852e790d | 4,769 | extern crate selfe_config;
use selfe_config::build_helpers::*;
use selfe_config::model::contextualized::Contextualized;
use selfe_config::model::*;
use std::cmp::max;
use std::env;
use std::fs::File;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
fn main() {
BuildEnv::request_reruns();
let config = load_config_from_env_or_default();
config.print_boolean_feature_flags();
println!("ferros build.rs config: {:#?}", config);
let out_dir = PathBuf::from(env::var("OUT_DIR").expect("Required env var OUT_DIR not set"));
if !out_dir.exists() || !out_dir.is_dir() {
panic!("OUT_DIR is not an extant directory");
}
generate_root_task_stack_types(&out_dir, &config);
generate_kernel_retype_fan_out_limit_types(&out_dir, &config)
}
fn generate_root_task_stack_types(out_dir: &Path, config: &Contextualized) {
// TODO - check against target-pointer-width or similar for 32/64 bit differences and panic if unsupported
// Gleaned from: sel4/kernel/include/arch/arm/arch/32/mode/api/constants.h
// TODO - instead of calculating these now, we would much rather prefer to have typenum constants
// generated from the selected headers (e.g. in bindgen, or based on the bindgen output)
let page_table_bits = 8;
let pages_per_table = 2u32.pow(page_table_bits);
let page_bits = 12;
let bytes_per_page = 2u32.pow(page_bits);
let bytes_per_page_table = bytes_per_page * pages_per_table;
let raw_stack_bytes = if let Some(SingleValue::Integer(root_task_stack_bytes)) =
config.metadata.get("root_task_stack_bytes")
{
let bytes = *root_task_stack_bytes;
if bytes as i128 > ::std::u32::MAX as i128 || bytes <= 0 {
panic!("root_task_stack_bytes must be greater than 0 and less than u32::MAX");
} else {
f64::from(bytes as u32)
}
} else {
const DEFAULT_STACK_BYTES: u32 = 2097152;
println!(
"cargo:warning=Using a default root_task_stack_bytes of {}",
DEFAULT_STACK_BYTES
);
f64::from(DEFAULT_STACK_BYTES)
};
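    // One page table maps `bytes_per_page * pages_per_table` bytes; with the
    // 4 KiB pages and 256-entry tables above that is 1 MiB per table, so the
    // 2 MiB default stack, for example, reserves ceil(2 MiB / 1 MiB) = 2 tables.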
let stack_reserved_page_tables: usize = max(
1,
(raw_stack_bytes / f64::from(bytes_per_page_table)).ceil() as usize,
);
let typenum_for_reserved_page_tables_count = format!(
"pub type RootTaskStackPageTableCount = typenum::U{};",
stack_reserved_page_tables
);
const FILE_NAME: &'static str = "ROOT_TASK_STACK_PAGE_TABLE_COUNT";
let mut file = File::create(out_dir.join(FILE_NAME))
.expect(&format!("Could not create {} file", FILE_NAME));
file.write_all(typenum_for_reserved_page_tables_count.as_bytes())
.expect(&format!("Could not write to {}", FILE_NAME))
}
fn generate_kernel_retype_fan_out_limit_types(out_dir: &Path, config: &Contextualized) {
const FANOUT_PROP: &'static str = "KernelRetypeFanOutLimit";
let kernel_retype_fan_out_limit = match config
.sel4_config
.get(FANOUT_PROP)
.unwrap_or_else(|| panic!("Missing required sel4.toml property, {}", FANOUT_PROP))
{
SingleValue::Integer(i) => {
if *i > 0 {
*i as u32
} else {
panic!(
"{} sel4.toml property is required to be greater than 0",
FANOUT_PROP
)
}
}
_ => panic!(
"{} sel4.toml property is required to be a positive integer",
FANOUT_PROP
),
};
if !is_typenum_const(kernel_retype_fan_out_limit as u64) {
panic!("{} sel4.toml property must be an unsigned value supported by `typenum::consts` : (0, 1024], the powers of 2, and the powers of 10.", FANOUT_PROP)
} else if kernel_retype_fan_out_limit < 16384 {
// TODO - This is the fan out size of the largest `retype_multi` call in ferros,
// presently Count == `paging::CodePageCount` in `vspace.rs`
// If we want to lower the minimum fanout for downstream users,
// we'll have to split up that `retype_multi` call manually
panic!(
"{} sel4.toml property is required to be >= 16384 (2^14)",
FANOUT_PROP
)
}
let limit_type = format!(
"pub type {} = typenum::U{};",
FANOUT_PROP, kernel_retype_fan_out_limit
);
const FILE_NAME: &'static str = "KERNEL_RETYPE_FAN_OUT_LIMIT";
let mut file = File::create(out_dir.join(FILE_NAME))
.expect(&format!("Could not create {} file", FILE_NAME));
file.write_all(limit_type.as_bytes())
.expect(&format!("Could not write to {}", FILE_NAME))
}
fn is_typenum_const(check: u64) -> bool {
    // Powers of 10 specifically; the old multiple-of-10 test was too permissive.
    let mut n = check;
    while n > 9 && n % 10 == 0 { n /= 10; }
    check <= 1024 || check.is_power_of_two() || n == 1
}
| 40.760684 | 161 | 0.643951 |
b96d9f40a28e31fe8b9efb0d7d74557c4329efa2 | 889 | /*
*
*
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* The version of the OpenAPI document: 1.0.0
*
* Generated by: https://openapi-generator.tech
*/
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LolNpeTutorialPathAccountSettingsCategoryResource {
#[serde(rename = "data", skip_serializing_if = "Option::is_none")]
pub data: Option<crate::models::LolNpeTutorialPathAccountSettingsTutorial>,
#[serde(rename = "schemaVersion", skip_serializing_if = "Option::is_none")]
pub schema_version: Option<i32>,
}
impl LolNpeTutorialPathAccountSettingsCategoryResource {
pub fn new() -> LolNpeTutorialPathAccountSettingsCategoryResource {
LolNpeTutorialPathAccountSettingsCategoryResource {
data: None,
schema_version: None,
}
}
}
| 27.78125 | 109 | 0.721035 |
0138e026a3fb01d8c9a3ba95f349668db0ba646b | 7,251 | ///
/// Copied from diesel
///
use byteorder::{
NetworkEndian,
ReadBytesExt,
};
use bytes::{
BufMut,
BytesMut,
};
use std::error::Error;
use postgres::types::{
to_sql_checked,
FromSql,
IsNull,
ToSql,
Type,
};
use bigdecimal::{
num_bigint::{
BigInt,
BigUint,
Sign,
},
num_traits::{
Signed,
ToPrimitive,
Zero,
},
BigDecimal,
};
use num_integer::Integer;
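/// Postgres' on-the-wire `NUMERIC` representation: a sign, a vector of
/// base-10000 digits (most significant first), a `weight` giving the
/// 10000-exponent of the first digit, and a display `scale`. As a worked
/// sketch, `123.45` maps to
/// `Positive { weight: 0, scale: 2, digits: vec![123, 4500] }`.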
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PgNumeric {
Positive {
weight: i16,
scale: u16,
digits: Vec<i16>,
},
Negative {
weight: i16,
scale: u16,
digits: Vec<i16>,
},
NaN,
}
#[derive(Debug, Clone, Copy)]
struct InvalidNumericSign(u16);
impl ::std::fmt::Display for InvalidNumericSign {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "InvalidNumericSign({0:x})", self.0)
}
}
impl Error for InvalidNumericSign {
fn description(&self) -> &str { "sign for numeric field was not one of 0, 0x4000, 0xC000" }
}
impl<'b> FromSql<'b> for PgNumeric {
fn from_sql(_ty: &Type, bytes: &'b [u8]) -> Result<Self, Box<dyn Error + Send + Sync>> {
let mut bytes = <&[u8]>::clone(&bytes);
let ndigits = bytes.read_u16::<NetworkEndian>()?;
let mut digits = Vec::with_capacity(ndigits as usize);
let weight = bytes.read_i16::<NetworkEndian>()?;
let sign = bytes.read_u16::<NetworkEndian>()?;
let scale = bytes.read_u16::<NetworkEndian>()?;
for _ in 0..ndigits {
digits.push(bytes.read_i16::<NetworkEndian>()?);
}
match sign {
0 => {
Ok(PgNumeric::Positive {
weight,
scale,
digits,
})
}
0x4000 => {
Ok(PgNumeric::Negative {
weight,
scale,
digits,
})
}
0xC000 => Ok(PgNumeric::NaN),
invalid => Err(Box::new(InvalidNumericSign(invalid))),
}
}
fn accepts(ty: &Type) -> bool {
match *ty {
Type::NUMERIC => true,
_ => panic!("can not accept type {:?}", ty),
}
}
}
impl ToSql for PgNumeric {
to_sql_checked!();
fn to_sql(
&self,
_ty: &Type,
out: &mut BytesMut,
) -> Result<IsNull, Box<dyn Error + Sync + Send>> {
let sign = match *self {
PgNumeric::Positive { .. } => 0,
PgNumeric::Negative { .. } => 0x4000,
PgNumeric::NaN => 0xC000,
};
let empty_vec = Vec::new();
let digits = match *self {
PgNumeric::Positive { ref digits, .. } | PgNumeric::Negative { ref digits, .. } => {
digits
}
PgNumeric::NaN => &empty_vec,
};
let weight = match *self {
PgNumeric::Positive { weight, .. } | PgNumeric::Negative { weight, .. } => weight,
PgNumeric::NaN => 0,
};
let scale = match *self {
PgNumeric::Positive { scale, .. } | PgNumeric::Negative { scale, .. } => scale,
PgNumeric::NaN => 0,
};
out.put_u16(digits.len() as u16);
out.put_i16(weight);
out.put_u16(sign);
out.put_u16(scale);
for digit in digits.iter() {
out.put_i16(*digit);
}
Ok(IsNull::No)
}
fn accepts(ty: &Type) -> bool { matches!(*ty, Type::NUMERIC) }
}
/// Iterator over the digits of a big uint in base 10k.
/// The digits will be returned in little endian order.
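/// E.g. `ToBase10000(Some(1_234_567u32.into()))` yields `4567` then `123`.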
struct ToBase10000(Option<BigUint>);
impl Iterator for ToBase10000 {
type Item = i16;
fn next(&mut self) -> Option<Self::Item> {
self.0.take().map(|v| {
let (div, rem) = v.div_rem(&BigUint::from(10_000u16));
if !div.is_zero() {
self.0 = Some(div);
}
rem.to_i16().expect("10000 always fits in an i16")
})
}
}
impl<'a> From<&'a BigDecimal> for PgNumeric {
#[allow(clippy::redundant_closure)]
fn from(decimal: &'a BigDecimal) -> Self {
let (mut integer, scale) = decimal.as_bigint_and_exponent();
let scale = scale as u16;
integer = integer.abs();
// Ensure that the decimal will always lie on a digit boundary
for _ in 0..(4 - scale % 4) {
integer *= 10;
}
let integer = integer.to_biguint().expect("integer is always positive");
let mut digits = ToBase10000(Some(integer)).collect::<Vec<_>>();
digits.reverse();
        let digits_after_decimal = scale / 4 + 1;
let weight = digits.len() as i16 - digits_after_decimal as i16 - 1;
let unnecessary_zeroes = if weight >= 0 {
let index_of_decimal = (weight + 1) as usize;
digits
.get(index_of_decimal..)
.expect("enough digits exist")
.iter()
.rev()
.take_while(|i| i.is_zero())
.count()
} else {
0
};
let relevant_digits = digits.len() - unnecessary_zeroes;
digits.truncate(relevant_digits);
match decimal.sign() {
Sign::Plus => {
PgNumeric::Positive {
digits,
scale,
weight,
}
}
Sign::Minus => {
PgNumeric::Negative {
digits,
scale,
weight,
}
}
Sign::NoSign => {
PgNumeric::Positive {
digits: vec![0],
scale: 0,
weight: 0,
}
}
}
}
}
impl From<BigDecimal> for PgNumeric {
fn from(bigdecimal: BigDecimal) -> Self { (&bigdecimal).into() }
}
impl From<PgNumeric> for BigDecimal {
fn from(numeric: PgNumeric) -> Self {
let (sign, weight, _, digits) = match numeric {
PgNumeric::Positive {
weight,
scale,
digits,
} => (Sign::Plus, weight, scale, digits),
PgNumeric::Negative {
weight,
scale,
digits,
} => (Sign::Minus, weight, scale, digits),
PgNumeric::NaN => panic!("NaN is not (yet) supported in BigDecimal"),
};
let mut result = BigUint::default();
let count = digits.len() as i64;
for digit in digits {
result *= BigUint::from(10_000u64);
result += BigUint::from(digit as u64);
}
// First digit got factor 10_000^(digits.len() - 1), but should get 10_000^weight
let correction_exp = 4 * (i64::from(weight) - count + 1);
        // FIXME: `scale` allows dropping some insignificant figures, which is currently unimplemented.
// This means that e.g. PostgreSQL 0.01 will be interpreted as 0.0100
BigDecimal::new(BigInt::from_biguint(sign, result), -correction_exp)
}
}
| 28.104651 | 102 | 0.494828 |
ef79417c6f084b96348ddfcd671d032991fc2225 | 172,791 | // ignore-tidy-filelength
//! Candidate selection. See the [rustc guide] for more information on how this works.
//!
//! [rustc guide]: https://rust-lang.github.io/rustc-guide/traits/resolution.html#selection
use self::EvaluationResult::*;
use self::SelectionCandidate::*;
use super::coherence::{self, Conflict};
use super::project;
use super::project::{normalize_with_depth, Normalized, ProjectionCacheKey};
use super::util;
use super::DerivedObligationCause;
use super::Selection;
use super::SelectionResult;
use super::TraitNotObjectSafe;
use super::{BuiltinDerivedObligation, ImplDerivedObligation, ObligationCauseCode};
use super::{IntercrateMode, TraitQueryMode};
use super::{ObjectCastObligation, Obligation};
use super::{ObligationCause, PredicateObligation, TraitObligation};
use super::{OutputTypeParameterMismatch, Overflow, SelectionError, Unimplemented};
use super::{
VtableAutoImpl, VtableBuiltin, VtableClosure, VtableFnPointer, VtableGenerator, VtableImpl,
VtableObject, VtableParam, VtableTraitAlias,
};
use super::{
VtableAutoImplData, VtableBuiltinData, VtableClosureData, VtableFnPointerData,
VtableGeneratorData, VtableImplData, VtableObjectData, VtableTraitAliasData,
};
use crate::dep_graph::{DepKind, DepNodeIndex};
use crate::hir::def_id::DefId;
use crate::infer::{CombinedSnapshot, InferCtxt, InferOk, PlaceholderMap, TypeFreshener};
use crate::middle::lang_items;
use crate::mir::interpret::GlobalId;
use crate::ty::fast_reject;
use crate::ty::relate::TypeRelation;
use crate::ty::subst::{Subst, SubstsRef};
use crate::ty::{self, ToPolyTraitRef, ToPredicate, Ty, TyCtxt, TypeFoldable};
use crate::hir;
use rustc_index::bit_set::GrowableBitSet;
use rustc_data_structures::sync::Lock;
use rustc_target::spec::abi::Abi;
use syntax::attr;
use syntax::symbol::sym;
use std::cell::{Cell, RefCell};
use std::cmp;
use std::fmt::{self, Display};
use std::iter;
use std::rc::Rc;
use crate::util::nodemap::{FxHashMap, FxHashSet};
pub struct SelectionContext<'cx, 'tcx> {
infcx: &'cx InferCtxt<'cx, 'tcx>,
/// Freshener used specifically for entries on the obligation
/// stack. This ensures that all entries on the stack at one time
/// will have the same set of placeholder entries, which is
/// important for checking for trait bounds that recursively
/// require themselves.
freshener: TypeFreshener<'cx, 'tcx>,
/// If `true`, indicates that the evaluation should be conservative
/// and consider the possibility of types outside this crate.
/// This comes up primarily when resolving ambiguity. Imagine
/// there is some trait reference `$0: Bar` where `$0` is an
/// inference variable. If `intercrate` is true, then we can never
/// say for sure that this reference is not implemented, even if
/// there are *no impls at all for `Bar`*, because `$0` could be
/// bound to some type that in a downstream crate that implements
/// `Bar`. This is the suitable mode for coherence. Elsewhere,
/// though, we set this to false, because we are only interested
/// in types that the user could actually have written --- in
/// other words, we consider `$0: Bar` to be unimplemented if
/// there is no type that the user could *actually name* that
/// would satisfy it. This avoids crippling inference, basically.
intercrate: Option<IntercrateMode>,
intercrate_ambiguity_causes: Option<Vec<IntercrateAmbiguityCause>>,
/// Controls whether or not to filter out negative impls when selecting.
/// This is used in librustdoc to distinguish between the lack of an impl
/// and a negative impl
allow_negative_impls: bool,
/// The mode that trait queries run in, which informs our error handling
/// policy. In essence, canonicalized queries need their errors propagated
/// rather than immediately reported because we do not have accurate spans.
query_mode: TraitQueryMode,
}
#[derive(Clone, Debug)]
pub enum IntercrateAmbiguityCause {
DownstreamCrate {
trait_desc: String,
self_desc: Option<String>,
},
UpstreamCrateUpdate {
trait_desc: String,
self_desc: Option<String>,
},
ReservationImpl {
message: String
},
}
impl IntercrateAmbiguityCause {
/// Emits notes when the overlap is caused by complex intercrate ambiguities.
/// See #23980 for details.
pub fn add_intercrate_ambiguity_hint(&self, err: &mut errors::DiagnosticBuilder<'_>) {
err.note(&self.intercrate_ambiguity_hint());
}
pub fn intercrate_ambiguity_hint(&self) -> String {
match self {
&IntercrateAmbiguityCause::DownstreamCrate {
ref trait_desc,
ref self_desc,
} => {
let self_desc = if let &Some(ref ty) = self_desc {
format!(" for type `{}`", ty)
} else {
String::new()
};
format!(
"downstream crates may implement trait `{}`{}",
trait_desc, self_desc
)
}
&IntercrateAmbiguityCause::UpstreamCrateUpdate {
ref trait_desc,
ref self_desc,
} => {
let self_desc = if let &Some(ref ty) = self_desc {
format!(" for type `{}`", ty)
} else {
String::new()
};
format!(
"upstream crates may add a new impl of trait `{}`{} \
in future versions",
trait_desc, self_desc
)
}
&IntercrateAmbiguityCause::ReservationImpl {
ref message
} => {
message.clone()
}
}
}
}
// A stack that walks back up the stack frame.
struct TraitObligationStack<'prev, 'tcx> {
obligation: &'prev TraitObligation<'tcx>,
/// Trait ref from `obligation` but "freshened" with the
/// selection-context's freshener. Used to check for recursion.
fresh_trait_ref: ty::PolyTraitRef<'tcx>,
/// Starts out equal to `depth` -- if, during evaluation, we
/// encounter a cycle, then we will set this flag to the minimum
/// depth of that cycle for all participants in the cycle. These
/// participants will then forego caching their results. This is
/// not the most efficient solution, but it addresses #60010. The
/// problem we are trying to prevent:
///
/// - If you have `A: AutoTrait` requires `B: AutoTrait` and `C: NonAutoTrait`
/// - `B: AutoTrait` requires `A: AutoTrait` (coinductive cycle, ok)
/// - `C: NonAutoTrait` requires `A: AutoTrait` (non-coinductive cycle, not ok)
///
/// you don't want to cache that `B: AutoTrait` or `A: AutoTrait`
/// is `EvaluatedToOk`; this is because they were only considered
/// ok on the premise that if `A: AutoTrait` held, but we indeed
/// encountered a problem (later on) with `A: AutoTrait. So we
/// currently set a flag on the stack node for `B: AutoTrait` (as
/// well as the second instance of `A: AutoTrait`) to suppress
/// caching.
///
/// This is a simple, targeted fix. A more-performant fix requires
/// deeper changes, but would permit more caching: we could
/// basically defer caching until we have fully evaluated the
/// tree, and then cache the entire tree at once. In any case, the
/// performance impact here shouldn't be so horrible: every time
/// this is hit, we do cache at least one trait, so we only
/// evaluate each member of a cycle up to N times, where N is the
/// length of the cycle. This means the performance impact is
/// bounded and we shouldn't have any terrible worst-cases.
reached_depth: Cell<usize>,
previous: TraitObligationStackList<'prev, 'tcx>,
/// Number of parent frames plus one -- so the topmost frame has depth 1.
depth: usize,
/// Depth-first number of this node in the search graph -- a
/// pre-order index. Basically a freshly incremented counter.
dfn: usize,
}
#[derive(Clone, Default)]
pub struct SelectionCache<'tcx> {
hashmap: Lock<
FxHashMap<ty::TraitRef<'tcx>, WithDepNode<SelectionResult<'tcx, SelectionCandidate<'tcx>>>>,
>,
}
/// The selection process begins by considering all impls, where
/// clauses, and so forth that might resolve an obligation. Sometimes
/// we'll be able to say definitively that (e.g.) an impl does not
/// apply to the obligation: perhaps it is defined for `usize` but the
/// obligation is for `int`. In that case, we drop the impl out of the
/// list. But the other cases are considered *candidates*.
///
/// For selection to succeed, there must be exactly one matching
/// candidate. If the obligation is fully known, this is guaranteed
/// by coherence. However, if the obligation contains type parameters
/// or variables, there may be multiple such impls.
///
/// It is not a real problem if multiple matching impls exist because
/// of type variables - it just means the obligation isn't sufficiently
/// elaborated. In that case we report an ambiguity, and the caller can
/// try again after more type information has been gathered or report a
/// "type annotations needed" error.
///
/// However, with type parameters, this can be a real problem - type
/// parameters don't unify with regular types, but they *can* unify
/// with variables from blanket impls, and (unless we know its bounds
/// will always be satisfied) picking the blanket impl will be wrong
/// for at least *some* substitutions. To make this concrete, if we have
///
/// trait AsDebug { type Out : fmt::Debug; fn debug(self) -> Self::Out; }
/// impl<T: fmt::Debug> AsDebug for T {
/// type Out = T;
/// fn debug(self) -> fmt::Debug { self }
/// }
/// fn foo<T: AsDebug>(t: T) { println!("{:?}", <T as AsDebug>::debug(t)); }
///
/// we can't just use the impl to resolve the <T as AsDebug> obligation
/// - a type from another crate (that doesn't implement fmt::Debug) could
/// implement AsDebug.
///
/// Because where-clauses match the type exactly, multiple clauses can
/// only match if there are unresolved variables, and we can mostly just
/// report this ambiguity in that case. This is still a problem - we can't
/// *do anything* with ambiguities that involve only regions. This is issue
/// #21974.
///
/// If a single where-clause matches and there are no inference
/// variables left, then it definitely matches and we can just select
/// it.
///
/// In fact, we even select the where-clause when the obligation contains
/// inference variables. This can lead to inference making "leaps of logic",
/// for example in this situation:
///
/// pub trait Foo<T> { fn foo(&self) -> T; }
/// impl<T> Foo<()> for T { fn foo(&self) { } }
/// impl Foo<bool> for bool { fn foo(&self) -> bool { *self } }
///
/// pub fn foo<T>(t: T) where T: Foo<bool> {
/// println!("{:?}", <T as Foo<_>>::foo(&t));
/// }
/// fn main() { foo(false); }
///
/// Here the obligation <T as Foo<$0>> can be matched by both the blanket
/// impl and the where-clause. We select the where-clause and unify $0=bool,
/// so the program prints "false". However, if the where-clause is omitted,
/// the blanket impl is selected, we unify $0=(), and the program prints
/// "()".
///
/// Exactly the same issues apply to projection and object candidates, except
/// that we can have both a projection candidate and a where-clause candidate
/// for the same obligation. In that case either would do (except that
/// different "leaps of logic" would occur if inference variables are
/// present), and we just pick the where-clause. This is, for example,
/// required for associated types to work in default impls, as the bounds
/// are visible both as projection bounds and as where-clauses from the
/// parameter environment.
#[derive(PartialEq, Eq, Debug, Clone)]
enum SelectionCandidate<'tcx> {
    /// If `has_nested` is false, there are no *further* obligations
BuiltinCandidate {
has_nested: bool,
},
ParamCandidate(ty::PolyTraitRef<'tcx>),
ImplCandidate(DefId),
AutoImplCandidate(DefId),
/// This is a trait matching with a projected type as `Self`, and
/// we found an applicable bound in the trait definition.
ProjectionCandidate,
/// Implementation of a `Fn`-family trait by one of the anonymous types
/// generated for a `||` expression.
ClosureCandidate,
/// Implementation of a `Generator` trait by one of the anonymous types
/// generated for a generator.
GeneratorCandidate,
/// Implementation of a `Fn`-family trait by one of the anonymous
/// types generated for a fn pointer type (e.g., `fn(int)->int`)
FnPointerCandidate,
TraitAliasCandidate(DefId),
ObjectCandidate,
BuiltinObjectCandidate,
BuiltinUnsizeCandidate,
}
impl<'a, 'tcx> ty::Lift<'tcx> for SelectionCandidate<'a> {
type Lifted = SelectionCandidate<'tcx>;
fn lift_to_tcx(&self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
Some(match *self {
BuiltinCandidate { has_nested } => BuiltinCandidate { has_nested },
ImplCandidate(def_id) => ImplCandidate(def_id),
AutoImplCandidate(def_id) => AutoImplCandidate(def_id),
ProjectionCandidate => ProjectionCandidate,
ClosureCandidate => ClosureCandidate,
GeneratorCandidate => GeneratorCandidate,
FnPointerCandidate => FnPointerCandidate,
TraitAliasCandidate(def_id) => TraitAliasCandidate(def_id),
ObjectCandidate => ObjectCandidate,
BuiltinObjectCandidate => BuiltinObjectCandidate,
BuiltinUnsizeCandidate => BuiltinUnsizeCandidate,
ParamCandidate(ref trait_ref) => {
return tcx.lift(trait_ref).map(ParamCandidate);
}
})
}
}
EnumTypeFoldableImpl! {
impl<'tcx> TypeFoldable<'tcx> for SelectionCandidate<'tcx> {
(SelectionCandidate::BuiltinCandidate) { has_nested },
(SelectionCandidate::ParamCandidate)(poly_trait_ref),
(SelectionCandidate::ImplCandidate)(def_id),
(SelectionCandidate::AutoImplCandidate)(def_id),
(SelectionCandidate::ProjectionCandidate),
(SelectionCandidate::ClosureCandidate),
(SelectionCandidate::GeneratorCandidate),
(SelectionCandidate::FnPointerCandidate),
(SelectionCandidate::TraitAliasCandidate)(def_id),
(SelectionCandidate::ObjectCandidate),
(SelectionCandidate::BuiltinObjectCandidate),
(SelectionCandidate::BuiltinUnsizeCandidate),
}
}
struct SelectionCandidateSet<'tcx> {
// a list of candidates that definitely apply to the current
// obligation (meaning: types unify).
vec: Vec<SelectionCandidate<'tcx>>,
// if this is true, then there were candidates that might or might
// not have applied, but we couldn't tell. This occurs when some
// of the input types are type variables, in which case there are
// various "builtin" rules that might or might not trigger.
ambiguous: bool,
}
#[derive(PartialEq, Eq, Debug, Clone)]
struct EvaluatedCandidate<'tcx> {
candidate: SelectionCandidate<'tcx>,
evaluation: EvaluationResult,
}
/// When does the builtin impl for `T: Trait` apply?
enum BuiltinImplConditions<'tcx> {
/// The impl is conditional on T1,T2,.. : Trait
Where(ty::Binder<Vec<Ty<'tcx>>>),
/// There is no built-in impl. There may be some other
/// candidate (a where-clause or user-defined impl).
None,
/// It is unknown whether there is an impl.
Ambiguous,
}
#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq)]
/// The result of trait evaluation. The order is important
/// here as the evaluation of a list is the maximum of the
/// evaluations.
///
/// The evaluation results are ordered:
/// - `EvaluatedToOk` implies `EvaluatedToOkModuloRegions`
/// implies `EvaluatedToAmbig` implies `EvaluatedToUnknown`
/// - `EvaluatedToErr` implies `EvaluatedToRecur`
/// - the "union" of evaluation results is equal to their maximum -
/// all the "potential success" candidates can potentially succeed,
/// so they are noops when unioned with a definite error, and within
/// the categories it's easy to see that the unions are correct.
pub enum EvaluationResult {
/// Evaluation successful
EvaluatedToOk,
/// Evaluation successful, but there were unevaluated region obligations
EvaluatedToOkModuloRegions,
/// Evaluation is known to be ambiguous - it *might* hold for some
/// assignment of inference variables, but it might not.
///
/// While this has the same meaning as `EvaluatedToUnknown` - we can't
/// know whether this obligation holds or not - it is the result we
/// would get with an empty stack, and therefore is cacheable.
EvaluatedToAmbig,
/// Evaluation failed because of recursion involving inference
/// variables. We are somewhat imprecise there, so we don't actually
/// know the real result.
///
/// This can't be trivially cached for the same reason as `EvaluatedToRecur`.
EvaluatedToUnknown,
/// Evaluation failed because we encountered an obligation we are already
/// trying to prove on this branch.
///
/// We know this branch can't be a part of a minimal proof-tree for
/// the "root" of our cycle, because then we could cut out the recursion
/// and maintain a valid proof tree. However, this does not mean
/// that all the obligations on this branch do not hold - it's possible
/// that we entered this branch "speculatively", and that there
/// might be some other way to prove this obligation that does not
/// go through this cycle - so we can't cache this as a failure.
///
/// For example, suppose we have this:
///
/// ```rust,ignore (pseudo-Rust)
/// pub trait Trait { fn xyz(); }
/// // This impl is "useless", but we can still have
/// // an `impl Trait for SomeUnsizedType` somewhere.
/// impl<T: Trait + Sized> Trait for T { fn xyz() {} }
///
/// pub fn foo<T: Trait + ?Sized>() {
/// <T as Trait>::xyz();
/// }
/// ```
///
/// When checking `foo`, we have to prove `T: Trait`. This basically
/// translates into this:
///
/// ```plain,ignore
/// (T: Trait + Sized →_\impl T: Trait), T: Trait ⊢ T: Trait
/// ```
///
/// When we try to prove it, we first go the first option, which
/// recurses. This shows us that the impl is "useless" -- it won't
/// tell us that `T: Trait` unless it already implemented `Trait`
    /// by some other means. However, that does not prove that `T: Trait`
    /// does not hold, because the bound can indeed be satisfied
    /// by `SomeUnsizedType` from another crate.
//
// FIXME: when an `EvaluatedToRecur` goes past its parent root, we
// ought to convert it to an `EvaluatedToErr`, because we know
// there definitely isn't a proof tree for that obligation. Not
// doing so is still sound -- there isn't any proof tree, so the
// branch still can't be a part of a minimal one -- but does not re-enable caching.
EvaluatedToRecur,
/// Evaluation failed.
EvaluatedToErr,
}
impl EvaluationResult {
/// Returns `true` if this evaluation result is known to apply, even
/// considering outlives constraints.
pub fn must_apply_considering_regions(self) -> bool {
self == EvaluatedToOk
}
/// Returns `true` if this evaluation result is known to apply, ignoring
/// outlives constraints.
pub fn must_apply_modulo_regions(self) -> bool {
self <= EvaluatedToOkModuloRegions
}
pub fn may_apply(self) -> bool {
match self {
EvaluatedToOk | EvaluatedToOkModuloRegions | EvaluatedToAmbig | EvaluatedToUnknown => {
true
}
EvaluatedToErr | EvaluatedToRecur => false,
}
}
fn is_stack_dependent(self) -> bool {
match self {
EvaluatedToUnknown | EvaluatedToRecur => true,
EvaluatedToOk | EvaluatedToOkModuloRegions | EvaluatedToAmbig | EvaluatedToErr => false,
}
}
}
impl_stable_hash_for!(enum self::EvaluationResult {
EvaluatedToOk,
EvaluatedToOkModuloRegions,
EvaluatedToAmbig,
EvaluatedToUnknown,
EvaluatedToRecur,
EvaluatedToErr
});
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
/// Indicates that trait evaluation caused overflow.
pub struct OverflowError;
impl_stable_hash_for!(struct OverflowError {});
impl<'tcx> From<OverflowError> for SelectionError<'tcx> {
fn from(OverflowError: OverflowError) -> SelectionError<'tcx> {
SelectionError::Overflow
}
}
#[derive(Clone, Default)]
pub struct EvaluationCache<'tcx> {
hashmap: Lock<FxHashMap<ty::PolyTraitRef<'tcx>, WithDepNode<EvaluationResult>>>,
}
impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
pub fn new(infcx: &'cx InferCtxt<'cx, 'tcx>) -> SelectionContext<'cx, 'tcx> {
SelectionContext {
infcx,
freshener: infcx.freshener(),
intercrate: None,
intercrate_ambiguity_causes: None,
allow_negative_impls: false,
query_mode: TraitQueryMode::Standard,
}
}
pub fn intercrate(
infcx: &'cx InferCtxt<'cx, 'tcx>,
mode: IntercrateMode,
) -> SelectionContext<'cx, 'tcx> {
debug!("intercrate({:?})", mode);
SelectionContext {
infcx,
freshener: infcx.freshener(),
intercrate: Some(mode),
intercrate_ambiguity_causes: None,
allow_negative_impls: false,
query_mode: TraitQueryMode::Standard,
}
}
pub fn with_negative(
infcx: &'cx InferCtxt<'cx, 'tcx>,
allow_negative_impls: bool,
) -> SelectionContext<'cx, 'tcx> {
debug!("with_negative({:?})", allow_negative_impls);
SelectionContext {
infcx,
freshener: infcx.freshener(),
intercrate: None,
intercrate_ambiguity_causes: None,
allow_negative_impls,
query_mode: TraitQueryMode::Standard,
}
}
pub fn with_query_mode(
infcx: &'cx InferCtxt<'cx, 'tcx>,
query_mode: TraitQueryMode,
) -> SelectionContext<'cx, 'tcx> {
debug!("with_query_mode({:?})", query_mode);
SelectionContext {
infcx,
freshener: infcx.freshener(),
intercrate: None,
intercrate_ambiguity_causes: None,
allow_negative_impls: false,
query_mode,
}
}
/// Enables tracking of intercrate ambiguity causes. These are
/// used in coherence to give improved diagnostics. We don't do
/// this until we detect a coherence error because it can lead to
/// false overflow results (#47139) and because it costs
/// computation time.
pub fn enable_tracking_intercrate_ambiguity_causes(&mut self) {
assert!(self.intercrate.is_some());
assert!(self.intercrate_ambiguity_causes.is_none());
self.intercrate_ambiguity_causes = Some(vec![]);
debug!("selcx: enable_tracking_intercrate_ambiguity_causes");
}
/// Gets the intercrate ambiguity causes collected since tracking
/// was enabled and disables tracking at the same time. If
/// tracking is not enabled, just returns an empty vector.
pub fn take_intercrate_ambiguity_causes(&mut self) -> Vec<IntercrateAmbiguityCause> {
assert!(self.intercrate.is_some());
        self.intercrate_ambiguity_causes.take().unwrap_or_default()
}
pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'tcx> {
self.infcx
}
pub fn tcx(&self) -> TyCtxt<'tcx> {
self.infcx.tcx
}
pub fn closure_typer(&self) -> &'cx InferCtxt<'cx, 'tcx> {
self.infcx
}
///////////////////////////////////////////////////////////////////////////
// Selection
//
// The selection phase tries to identify *how* an obligation will
// be resolved. For example, it will identify which impl or
// parameter bound is to be used. The process can be inconclusive
// if the self type in the obligation is not fully inferred. Selection
// can result in an error in one of two ways:
//
// 1. If no applicable impl or parameter bound can be found.
// 2. If the output type parameters in the obligation do not match
// those specified by the impl/bound. For example, if the obligation
// is `Vec<Foo>:Iterable<Bar>`, but the impl specifies
// `impl<T> Iterable<T> for Vec<T>`, than an error would result.
/// Attempts to satisfy the obligation. If successful, this will affect the surrounding
/// type environment by performing unification.
pub fn select(
&mut self,
obligation: &TraitObligation<'tcx>,
) -> SelectionResult<'tcx, Selection<'tcx>> {
debug!("select({:?})", obligation);
debug_assert!(!obligation.predicate.has_escaping_bound_vars());
let pec = &ProvisionalEvaluationCache::default();
let stack = self.push_stack(TraitObligationStackList::empty(pec), obligation);
let candidate = match self.candidate_from_obligation(&stack) {
Err(SelectionError::Overflow) => {
// In standard mode, overflow must have been caught and reported
// earlier.
assert!(self.query_mode == TraitQueryMode::Canonical);
return Err(SelectionError::Overflow);
}
Err(e) => {
return Err(e);
}
Ok(None) => {
return Ok(None);
}
Ok(Some(candidate)) => candidate,
};
match self.confirm_candidate(obligation, candidate) {
Err(SelectionError::Overflow) => {
assert!(self.query_mode == TraitQueryMode::Canonical);
Err(SelectionError::Overflow)
}
Err(e) => Err(e),
Ok(candidate) => Ok(Some(candidate)),
}
}
///////////////////////////////////////////////////////////////////////////
// EVALUATION
//
// Tests whether an obligation can be selected or whether an impl
// can be applied to particular types. It skips the "confirmation"
// step and hence completely ignores output type parameters.
//
// The result is "true" if the obligation *may* hold and "false" if
// we can be sure it does not.
/// Evaluates whether the obligation `obligation` can be satisfied (by any means).
pub fn predicate_may_hold_fatal(&mut self, obligation: &PredicateObligation<'tcx>) -> bool {
debug!("predicate_may_hold_fatal({:?})", obligation);
// This fatal query is a stopgap that should only be used in standard mode,
// where we do not expect overflow to be propagated.
assert!(self.query_mode == TraitQueryMode::Standard);
self.evaluate_root_obligation(obligation)
.expect("Overflow should be caught earlier in standard query mode")
.may_apply()
}
/// Evaluates whether the obligation `obligation` can be satisfied
/// and returns an `EvaluationResult`. This is meant for the
/// *initial* call.
pub fn evaluate_root_obligation(
&mut self,
obligation: &PredicateObligation<'tcx>,
) -> Result<EvaluationResult, OverflowError> {
self.evaluation_probe(|this| {
this.evaluate_predicate_recursively(
TraitObligationStackList::empty(&ProvisionalEvaluationCache::default()),
obligation.clone(),
)
})
}
fn evaluation_probe(
&mut self,
op: impl FnOnce(&mut Self) -> Result<EvaluationResult, OverflowError>,
) -> Result<EvaluationResult, OverflowError> {
self.infcx.probe(|snapshot| -> Result<EvaluationResult, OverflowError> {
let result = op(self)?;
match self.infcx.region_constraints_added_in_snapshot(snapshot) {
None => Ok(result),
Some(_) => Ok(result.max(EvaluatedToOkModuloRegions)),
}
})
}
/// Evaluates the predicates in `predicates` recursively. Note that
/// this applies projections in the predicates, and therefore
/// is run within an inference probe.
fn evaluate_predicates_recursively<'o, I>(
&mut self,
stack: TraitObligationStackList<'o, 'tcx>,
predicates: I,
) -> Result<EvaluationResult, OverflowError>
where
I: IntoIterator<Item = PredicateObligation<'tcx>>,
{
let mut result = EvaluatedToOk;
for obligation in predicates {
let eval = self.evaluate_predicate_recursively(stack, obligation.clone())?;
debug!(
"evaluate_predicate_recursively({:?}) = {:?}",
obligation, eval
);
if let EvaluatedToErr = eval {
// fast-path - EvaluatedToErr is the top of the lattice,
// so we don't need to look on the other predicates.
return Ok(EvaluatedToErr);
} else {
result = cmp::max(result, eval);
}
}
Ok(result)
}
fn evaluate_predicate_recursively<'o>(
&mut self,
previous_stack: TraitObligationStackList<'o, 'tcx>,
obligation: PredicateObligation<'tcx>,
) -> Result<EvaluationResult, OverflowError> {
debug!("evaluate_predicate_recursively(previous_stack={:?}, obligation={:?})",
previous_stack.head(), obligation);
        // `previous_stack` stores a `TraitObligation`, while `obligation` is
        // a `PredicateObligation`. These are distinct types, so we can't
        // use any `Option` combinator method that would force them to be
        // the same.
match previous_stack.head() {
Some(h) => self.check_recursion_limit(&obligation, h.obligation)?,
None => self.check_recursion_limit(&obligation, &obligation)?
}
match obligation.predicate {
ty::Predicate::Trait(ref t) => {
debug_assert!(!t.has_escaping_bound_vars());
let obligation = obligation.with(t.clone());
self.evaluate_trait_predicate_recursively(previous_stack, obligation)
}
ty::Predicate::Subtype(ref p) => {
// does this code ever run?
match self.infcx
.subtype_predicate(&obligation.cause, obligation.param_env, p)
{
Some(Ok(InferOk { mut obligations, .. })) => {
self.add_depth(obligations.iter_mut(), obligation.recursion_depth);
                        self.evaluate_predicates_recursively(previous_stack, obligations.into_iter())
}
Some(Err(_)) => Ok(EvaluatedToErr),
None => Ok(EvaluatedToAmbig),
}
}
ty::Predicate::WellFormed(ty) => match ty::wf::obligations(
self.infcx,
obligation.param_env,
obligation.cause.body_id,
ty,
obligation.cause.span,
) {
Some(mut obligations) => {
self.add_depth(obligations.iter_mut(), obligation.recursion_depth);
self.evaluate_predicates_recursively(previous_stack, obligations.into_iter())
}
None => Ok(EvaluatedToAmbig),
},
ty::Predicate::TypeOutlives(..) | ty::Predicate::RegionOutlives(..) => {
// we do not consider region relationships when
// evaluating trait matches
Ok(EvaluatedToOkModuloRegions)
}
ty::Predicate::ObjectSafe(trait_def_id) => {
if self.tcx().is_object_safe(trait_def_id) {
Ok(EvaluatedToOk)
} else {
Ok(EvaluatedToErr)
}
}
ty::Predicate::Projection(ref data) => {
let project_obligation = obligation.with(data.clone());
match project::poly_project_and_unify_type(self, &project_obligation) {
Ok(Some(mut subobligations)) => {
self.add_depth(subobligations.iter_mut(), obligation.recursion_depth);
let result = self.evaluate_predicates_recursively(
previous_stack,
subobligations.into_iter(),
);
if let Some(key) =
ProjectionCacheKey::from_poly_projection_predicate(self, data)
{
self.infcx.projection_cache.borrow_mut().complete(key);
}
result
}
Ok(None) => Ok(EvaluatedToAmbig),
Err(_) => Ok(EvaluatedToErr),
}
}
ty::Predicate::ClosureKind(closure_def_id, closure_substs, kind) => {
match self.infcx.closure_kind(closure_def_id, closure_substs) {
Some(closure_kind) => {
if closure_kind.extends(kind) {
Ok(EvaluatedToOk)
} else {
Ok(EvaluatedToErr)
}
}
None => Ok(EvaluatedToAmbig),
}
}
ty::Predicate::ConstEvaluatable(def_id, substs) => {
let tcx = self.tcx();
if !(obligation.param_env, substs).has_local_value() {
let param_env = obligation.param_env;
let instance =
ty::Instance::resolve(tcx, param_env, def_id, substs);
if let Some(instance) = instance {
let cid = GlobalId {
instance,
promoted: None,
};
match self.tcx().const_eval(param_env.and(cid)) {
Ok(_) => Ok(EvaluatedToOk),
Err(_) => Ok(EvaluatedToErr),
}
} else {
Ok(EvaluatedToErr)
}
} else {
// Inference variables still left in param_env or substs.
Ok(EvaluatedToAmbig)
}
}
}
}
fn evaluate_trait_predicate_recursively<'o>(
&mut self,
previous_stack: TraitObligationStackList<'o, 'tcx>,
mut obligation: TraitObligation<'tcx>,
) -> Result<EvaluationResult, OverflowError> {
debug!("evaluate_trait_predicate_recursively({:?})", obligation);
if self.intercrate.is_none() && obligation.is_global()
&& obligation
.param_env
.caller_bounds
.iter()
.all(|bound| bound.needs_subst())
{
// If a param env has no global bounds, global obligations do not
// depend on its particular value in order to work, so we can clear
// out the param env and get better caching.
debug!(
"evaluate_trait_predicate_recursively({:?}) - in global",
obligation
);
obligation.param_env = obligation.param_env.without_caller_bounds();
}
let stack = self.push_stack(previous_stack, &obligation);
let fresh_trait_ref = stack.fresh_trait_ref;
if let Some(result) = self.check_evaluation_cache(obligation.param_env, fresh_trait_ref) {
debug!("CACHE HIT: EVAL({:?})={:?}", fresh_trait_ref, result);
return Ok(result);
}
if let Some(result) = stack.cache().get_provisional(fresh_trait_ref) {
debug!("PROVISIONAL CACHE HIT: EVAL({:?})={:?}", fresh_trait_ref, result);
stack.update_reached_depth(stack.cache().current_reached_depth());
return Ok(result);
}
// Check if this is a match for something already on the
// stack. If so, we don't want to insert the result into the
// main cache (it is cycle dependent) nor the provisional
// cache (which is meant for things that have completed but
// for a "backedge" -- this result *is* the backedge).
if let Some(cycle_result) = self.check_evaluation_cycle(&stack) {
return Ok(cycle_result);
}
let (result, dep_node) = self.in_task(|this| this.evaluate_stack(&stack));
let result = result?;
if !result.must_apply_modulo_regions() {
stack.cache().on_failure(stack.dfn);
}
let reached_depth = stack.reached_depth.get();
if reached_depth >= stack.depth {
debug!("CACHE MISS: EVAL({:?})={:?}", fresh_trait_ref, result);
self.insert_evaluation_cache(obligation.param_env, fresh_trait_ref, dep_node, result);
stack.cache().on_completion(stack.depth, |fresh_trait_ref, provisional_result| {
self.insert_evaluation_cache(
obligation.param_env,
fresh_trait_ref,
dep_node,
provisional_result.max(result),
);
});
} else {
debug!("PROVISIONAL: {:?}={:?}", fresh_trait_ref, result);
debug!(
"evaluate_trait_predicate_recursively: caching provisionally because {:?} \
is a cycle participant (at depth {}, reached depth {})",
fresh_trait_ref,
stack.depth,
reached_depth,
);
stack.cache().insert_provisional(
stack.dfn,
reached_depth,
fresh_trait_ref,
result,
);
}
Ok(result)
}
/// If there is any previous entry on the stack that precisely
/// matches this obligation, then we can assume that the
/// obligation is satisfied for now (still all other conditions
    /// must be met, of course). One obvious case where this comes up is
/// marker traits like `Send`. Think of a linked list:
///
/// struct List<T> { data: T, next: Option<Box<List<T>>> }
///
/// `Box<List<T>>` will be `Send` if `T` is `Send` and
/// `Option<Box<List<T>>>` is `Send`, and in turn
/// `Option<Box<List<T>>>` is `Send` if `Box<List<T>>` is
/// `Send`.
///
/// Note that we do this comparison using the `fresh_trait_ref`
/// fields. Because these have all been freshened using
/// `self.freshener`, we can be sure that (a) this will not
/// affect the inferencer state and (b) that if we see two
/// fresh regions with the same index, they refer to the same
/// unbound type variable.
fn check_evaluation_cycle(
&mut self,
stack: &TraitObligationStack<'_, 'tcx>,
) -> Option<EvaluationResult> {
if let Some(cycle_depth) = stack.iter()
.skip(1) // skip top-most frame
.find(|prev| stack.obligation.param_env == prev.obligation.param_env &&
stack.fresh_trait_ref == prev.fresh_trait_ref)
.map(|stack| stack.depth)
{
debug!(
"evaluate_stack({:?}) --> recursive at depth {}",
stack.fresh_trait_ref,
cycle_depth,
);
// If we have a stack like `A B C D E A`, where the top of
// the stack is the final `A`, then this will iterate over
// `A, E, D, C, B` -- i.e., all the participants apart
// from the cycle head. We mark them as participating in a
// cycle. This suppresses caching for those nodes. See
// `in_cycle` field for more details.
stack.update_reached_depth(cycle_depth);
// Subtle: when checking for a coinductive cycle, we do
// not compare using the "freshened trait refs" (which
// have erased regions) but rather the fully explicit
// trait refs. This is important because it's only a cycle
// if the regions match exactly.
let cycle = stack.iter().skip(1).take_while(|s| s.depth >= cycle_depth);
let cycle = cycle.map(|stack| ty::Predicate::Trait(stack.obligation.predicate));
if self.coinductive_match(cycle) {
debug!(
"evaluate_stack({:?}) --> recursive, coinductive",
stack.fresh_trait_ref
);
Some(EvaluatedToOk)
} else {
debug!(
"evaluate_stack({:?}) --> recursive, inductive",
stack.fresh_trait_ref
);
Some(EvaluatedToRecur)
}
} else {
None
}
}
fn evaluate_stack<'o>(
&mut self,
stack: &TraitObligationStack<'o, 'tcx>,
) -> Result<EvaluationResult, OverflowError> {
// In intercrate mode, whenever any of the types are unbound,
// there can always be an impl. Even if there are no impls in
// this crate, perhaps the type would be unified with
// something from another crate that does provide an impl.
//
// In intra mode, we must still be conservative. The reason is
// that we want to avoid cycles. Imagine an impl like:
//
// impl<T:Eq> Eq for Vec<T>
//
// and a trait reference like `$0 : Eq` where `$0` is an
// unbound variable. When we evaluate this trait-reference, we
// will unify `$0` with `Vec<$1>` (for some fresh variable
// `$1`), on the condition that `$1 : Eq`. We will then wind
        // up with many candidates (since there are other `Eq` impls
        // that apply) and try to winnow things down. This results in
        // a recursive evaluation of `$1 : Eq` -- as you can
// imagine, this is just where we started. To avoid that, we
// check for unbound variables and return an ambiguous (hence possible)
// match if we've seen this trait before.
//
// This suffices to allow chains like `FnMut` implemented in
// terms of `Fn` etc, but we could probably make this more
// precise still.
let unbound_input_types = stack
.fresh_trait_ref
.skip_binder()
.input_types()
.any(|ty| ty.is_fresh());
        // This check was an imperfect workaround for a bug in the old
        // intercrate mode; it should be removed when that mode goes away.
if unbound_input_types && self.intercrate == Some(IntercrateMode::Issue43355) {
debug!(
"evaluate_stack({:?}) --> unbound argument, intercrate --> ambiguous",
stack.fresh_trait_ref
);
// Heuristics: show the diagnostics when there are no candidates in crate.
if self.intercrate_ambiguity_causes.is_some() {
debug!("evaluate_stack: intercrate_ambiguity_causes is some");
if let Ok(candidate_set) = self.assemble_candidates(stack) {
if !candidate_set.ambiguous && candidate_set.vec.is_empty() {
let trait_ref = stack.obligation.predicate.skip_binder().trait_ref;
let self_ty = trait_ref.self_ty();
let cause = IntercrateAmbiguityCause::DownstreamCrate {
trait_desc: trait_ref.to_string(),
self_desc: if self_ty.has_concrete_skeleton() {
Some(self_ty.to_string())
} else {
None
},
};
debug!("evaluate_stack: pushing cause = {:?}", cause);
self.intercrate_ambiguity_causes
.as_mut()
.unwrap()
.push(cause);
}
}
}
return Ok(EvaluatedToAmbig);
}
if unbound_input_types && stack.iter().skip(1).any(|prev| {
stack.obligation.param_env == prev.obligation.param_env
&& self.match_fresh_trait_refs(
&stack.fresh_trait_ref, &prev.fresh_trait_ref, prev.obligation.param_env)
}) {
debug!(
"evaluate_stack({:?}) --> unbound argument, recursive --> giving up",
stack.fresh_trait_ref
);
return Ok(EvaluatedToUnknown);
}
match self.candidate_from_obligation(stack) {
Ok(Some(c)) => self.evaluate_candidate(stack, &c),
Ok(None) => Ok(EvaluatedToAmbig),
Err(Overflow) => Err(OverflowError),
Err(..) => Ok(EvaluatedToErr),
}
}
/// For defaulted traits, we use a co-inductive strategy to solve, so
/// that recursion is ok. This routine returns true if the top of the
/// stack (`cycle[0]`):
///
/// - is a defaulted trait,
/// - it also appears in the backtrace at some position `X`,
/// - all the predicates at positions `X..` between `X` and the top are
/// also defaulted traits.
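    ///
    /// For example (recall the `List` illustration above):
    ///
    ///     struct List<T> { data: T, next: Option<Box<List<T>>> }
    ///
    /// Proving `List<T>: Send` requires `Option<Box<List<T>>>: Send`, which
    /// leads back to `List<T>: Send`; this cycle is accepted only because
    /// every predicate in it is an auto (defaulted) trait.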
pub fn coinductive_match<I>(&mut self, cycle: I) -> bool
where
I: Iterator<Item = ty::Predicate<'tcx>>,
{
let mut cycle = cycle;
cycle.all(|predicate| self.coinductive_predicate(predicate))
}
fn coinductive_predicate(&self, predicate: ty::Predicate<'tcx>) -> bool {
let result = match predicate {
ty::Predicate::Trait(ref data) => self.tcx().trait_is_auto(data.def_id()),
_ => false,
};
debug!("coinductive_predicate({:?}) = {:?}", predicate, result);
result
}
/// Further evaluate `candidate` to decide whether all type parameters match and whether nested
/// obligations are met. Returns whether `candidate` remains viable after this further
/// scrutiny.
fn evaluate_candidate<'o>(
&mut self,
stack: &TraitObligationStack<'o, 'tcx>,
candidate: &SelectionCandidate<'tcx>,
) -> Result<EvaluationResult, OverflowError> {
debug!(
"evaluate_candidate: depth={} candidate={:?}",
stack.obligation.recursion_depth, candidate
);
let result = self.evaluation_probe(|this| {
let candidate = (*candidate).clone();
match this.confirm_candidate(stack.obligation, candidate) {
Ok(selection) => this.evaluate_predicates_recursively(
stack.list(),
selection.nested_obligations().into_iter()
),
Err(..) => Ok(EvaluatedToErr),
}
})?;
debug!(
"evaluate_candidate: depth={} result={:?}",
stack.obligation.recursion_depth, result
);
Ok(result)
}
fn check_evaluation_cache(
&self,
param_env: ty::ParamEnv<'tcx>,
trait_ref: ty::PolyTraitRef<'tcx>,
) -> Option<EvaluationResult> {
let tcx = self.tcx();
if self.can_use_global_caches(param_env) {
let cache = tcx.evaluation_cache.hashmap.borrow();
if let Some(cached) = cache.get(&trait_ref) {
return Some(cached.get(tcx));
}
}
self.infcx
.evaluation_cache
.hashmap
.borrow()
.get(&trait_ref)
.map(|v| v.get(tcx))
}
fn insert_evaluation_cache(
&mut self,
param_env: ty::ParamEnv<'tcx>,
trait_ref: ty::PolyTraitRef<'tcx>,
dep_node: DepNodeIndex,
result: EvaluationResult,
) {
// Avoid caching results that depend on more than just the trait-ref
// - the stack can create recursion.
if result.is_stack_dependent() {
return;
}
if self.can_use_global_caches(param_env) {
if !trait_ref.has_local_value() {
debug!(
"insert_evaluation_cache(trait_ref={:?}, candidate={:?}) global",
trait_ref, result,
);
// This may overwrite the cache with the same value
// FIXME: Due to #50507 this overwrites the different values
// This should be changed to use HashMapExt::insert_same
// when that is fixed
self.tcx()
.evaluation_cache
.hashmap
.borrow_mut()
.insert(trait_ref, WithDepNode::new(dep_node, result));
return;
}
}
debug!(
"insert_evaluation_cache(trait_ref={:?}, candidate={:?})",
trait_ref, result,
);
self.infcx
.evaluation_cache
.hashmap
.borrow_mut()
.insert(trait_ref, WithDepNode::new(dep_node, result));
}
// For various reasons, it's possible for a subobligation
// to have a *lower* recursion_depth than the obligation used to create it.
// Projection sub-obligations may be returned from the projection cache,
// which results in obligations with an 'old' recursion_depth.
// Additionally, methods like ty::wf::obligations and
// InferCtxt.subtype_predicate produce subobligations without
// taking in a 'parent' depth, causing the generated subobligations
    // to have a recursion_depth of 0.
    //
    // To ensure that the recursion_depth never decreases, we force all subobligations
// to have at least the depth of the original obligation.
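    //
    // For example, a sub-obligation returned from the projection cache at
    // depth 0, under a parent obligation at depth 5, is bumped to depth 6
    // (i.e., max(5, 0) + 1).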
fn add_depth<T: 'cx, I: Iterator<Item = &'cx mut Obligation<'tcx, T>>>(&self, it: I,
min_depth: usize) {
it.for_each(|o| o.recursion_depth = cmp::max(min_depth, o.recursion_depth) + 1);
}
// Check that the recursion limit has not been exceeded.
//
    // The weird return type of this function allows it to be used with the `?`
    // operator within certain functions.
fn check_recursion_limit<T: Display + TypeFoldable<'tcx>, V: Display + TypeFoldable<'tcx>>(
&self,
obligation: &Obligation<'tcx, T>,
error_obligation: &Obligation<'tcx, V>
) -> Result<(), OverflowError> {
let recursion_limit = *self.infcx.tcx.sess.recursion_limit.get();
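        // The limit can be raised per crate with the
        // `#![recursion_limit = "..."]` attribute. In `Standard` mode an
        // overflow is reported as a fatal error here; in `Canonical` (query)
        // mode it is propagated to the caller as `OverflowError`.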
if obligation.recursion_depth >= recursion_limit {
match self.query_mode {
TraitQueryMode::Standard => {
self.infcx().report_overflow_error(error_obligation, true);
}
TraitQueryMode::Canonical => {
return Err(OverflowError);
}
}
}
Ok(())
}
///////////////////////////////////////////////////////////////////////////
// CANDIDATE ASSEMBLY
//
// The selection process begins by examining all in-scope impls,
// caller obligations, and so forth and assembling a list of
// candidates. See the [rustc guide] for more details.
//
// [rustc guide]:
// https://rust-lang.github.io/rustc-guide/traits/resolution.html#candidate-assembly
fn candidate_from_obligation<'o>(
&mut self,
stack: &TraitObligationStack<'o, 'tcx>,
) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
// Watch out for overflow. This intentionally bypasses (and does
// not update) the cache.
self.check_recursion_limit(&stack.obligation, &stack.obligation)?;
// Check the cache. Note that we freshen the trait-ref
// separately rather than using `stack.fresh_trait_ref` --
// this is because we want the unbound variables to be
// replaced with fresh types starting from index 0.
let cache_fresh_trait_pred = self.infcx.freshen(stack.obligation.predicate.clone());
debug!(
"candidate_from_obligation(cache_fresh_trait_pred={:?}, obligation={:?})",
cache_fresh_trait_pred, stack
);
debug_assert!(!stack.obligation.predicate.has_escaping_bound_vars());
if let Some(c) =
self.check_candidate_cache(stack.obligation.param_env, &cache_fresh_trait_pred)
{
debug!("CACHE HIT: SELECT({:?})={:?}", cache_fresh_trait_pred, c);
return c;
}
// If no match, compute result and insert into cache.
//
// FIXME(nikomatsakis) -- this cache is not taking into
// account cycles that may have occurred in forming the
// candidate. I don't know of any specific problems that
// result but it seems awfully suspicious.
let (candidate, dep_node) =
self.in_task(|this| this.candidate_from_obligation_no_cache(stack));
debug!(
"CACHE MISS: SELECT({:?})={:?}",
cache_fresh_trait_pred, candidate
);
self.insert_candidate_cache(
stack.obligation.param_env,
cache_fresh_trait_pred,
dep_node,
candidate.clone(),
);
candidate
}
fn in_task<OP, R>(&mut self, op: OP) -> (R, DepNodeIndex)
where
OP: FnOnce(&mut Self) -> R,
{
let (result, dep_node) = self.tcx()
.dep_graph
.with_anon_task(DepKind::TraitSelect, || op(self));
self.tcx().dep_graph.read_index(dep_node);
(result, dep_node)
}
// Treat negative impls as unimplemented, and reservation impls as ambiguity.
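    //
    // A reservation impl is declared with the (unstable)
    // `#[rustc_reservation_impl = "message"]` attribute; its message, if
    // any, is surfaced as an intercrate-ambiguity note below.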
fn filter_negative_and_reservation_impls(
&mut self,
candidate: SelectionCandidate<'tcx>,
) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
if let ImplCandidate(def_id) = candidate {
let tcx = self.tcx();
match tcx.impl_polarity(def_id) {
ty::ImplPolarity::Negative if !self.allow_negative_impls => {
return Err(Unimplemented);
}
ty::ImplPolarity::Reservation => {
if let Some(intercrate_ambiguity_clauses)
= &mut self.intercrate_ambiguity_causes
{
let attrs = tcx.get_attrs(def_id);
let attr = attr::find_by_name(&attrs, sym::rustc_reservation_impl);
let value = attr.and_then(|a| a.value_str());
if let Some(value) = value {
debug!("filter_negative_and_reservation_impls: \
reservation impl ambiguity on {:?}", def_id);
intercrate_ambiguity_clauses.push(
IntercrateAmbiguityCause::ReservationImpl {
message: value.to_string()
}
);
}
}
return Ok(None);
}
_ => {}
};
}
Ok(Some(candidate))
}
fn candidate_from_obligation_no_cache<'o>(
&mut self,
stack: &TraitObligationStack<'o, 'tcx>,
) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
if stack.obligation.predicate.references_error() {
// If we encounter a `Error`, we generally prefer the
// most "optimistic" result in response -- that is, the
// one least likely to report downstream errors. But
// because this routine is shared by coherence and by
// trait selection, there isn't an obvious "right" choice
// here in that respect, so we opt to just return
// ambiguity and let the upstream clients sort it out.
return Ok(None);
}
if let Some(conflict) = self.is_knowable(stack) {
debug!("coherence stage: not knowable");
if self.intercrate_ambiguity_causes.is_some() {
debug!("evaluate_stack: intercrate_ambiguity_causes is some");
// Heuristics: show the diagnostics when there are no candidates in crate.
if let Ok(candidate_set) = self.assemble_candidates(stack) {
let mut no_candidates_apply = true;
{
let evaluated_candidates = candidate_set
.vec
.iter()
.map(|c| self.evaluate_candidate(stack, &c));
for ec in evaluated_candidates {
match ec {
Ok(c) => {
if c.may_apply() {
no_candidates_apply = false;
break;
}
}
Err(e) => return Err(e.into()),
}
}
}
if !candidate_set.ambiguous && no_candidates_apply {
let trait_ref = stack.obligation.predicate.skip_binder().trait_ref;
let self_ty = trait_ref.self_ty();
let trait_desc = trait_ref.to_string();
let self_desc = if self_ty.has_concrete_skeleton() {
Some(self_ty.to_string())
} else {
None
};
let cause = if let Conflict::Upstream = conflict {
IntercrateAmbiguityCause::UpstreamCrateUpdate {
trait_desc,
self_desc,
}
} else {
IntercrateAmbiguityCause::DownstreamCrate {
trait_desc,
self_desc,
}
};
debug!("evaluate_stack: pushing cause = {:?}", cause);
self.intercrate_ambiguity_causes
.as_mut()
.unwrap()
.push(cause);
}
}
}
return Ok(None);
}
let candidate_set = self.assemble_candidates(stack)?;
if candidate_set.ambiguous {
debug!("candidate set contains ambig");
return Ok(None);
}
let mut candidates = candidate_set.vec;
debug!(
"assembled {} candidates for {:?}: {:?}",
candidates.len(),
stack,
candidates
);
// At this point, we know that each of the entries in the
// candidate set is *individually* applicable. Now we have to
// figure out if they contain mutual incompatibilities. This
// frequently arises if we have an unconstrained input type --
// for example, we are looking for $0:Eq where $0 is some
// unconstrained type variable. In that case, we'll get a
// candidate which assumes $0 == int, one that assumes $0 ==
// usize, etc. This spells an ambiguity.
// If there is more than one candidate, first winnow them down
// by considering extra conditions (nested obligations and so
// forth). We don't winnow if there is exactly one
// candidate. This is a relatively minor distinction but it
// can lead to better inference and error-reporting. An
// example would be if there was an impl:
//
// impl<T:Clone> Vec<T> { fn push_clone(...) { ... } }
//
        // and we were to see some code `foo.push_clone()` where `foo`
// is a `Vec<Bar>` and `Bar` does not implement `Clone`. If
// we were to winnow, we'd wind up with zero candidates.
// Instead, we select the right impl now but report `Bar does
// not implement Clone`.
if candidates.len() == 1 {
return self.filter_negative_and_reservation_impls(candidates.pop().unwrap());
}
// Winnow, but record the exact outcome of evaluation, which
// is needed for specialization. Propagate overflow if it occurs.
let mut candidates = candidates
.into_iter()
.map(|c| match self.evaluate_candidate(stack, &c) {
Ok(eval) if eval.may_apply() => Ok(Some(EvaluatedCandidate {
candidate: c,
evaluation: eval,
})),
Ok(_) => Ok(None),
Err(OverflowError) => Err(Overflow),
})
.flat_map(Result::transpose)
.collect::<Result<Vec<_>, _>>()?;
debug!(
"winnowed to {} candidates for {:?}: {:?}",
candidates.len(),
stack,
candidates
);
// If there are STILL multiple candidates, we can further
// reduce the list by dropping duplicates -- including
// resolving specializations.
if candidates.len() > 1 {
let mut i = 0;
while i < candidates.len() {
let is_dup = (0..candidates.len()).filter(|&j| i != j).any(|j| {
self.candidate_should_be_dropped_in_favor_of(&candidates[i], &candidates[j])
});
if is_dup {
debug!(
"Dropping candidate #{}/{}: {:?}",
i,
candidates.len(),
candidates[i]
);
candidates.swap_remove(i);
} else {
debug!(
"Retaining candidate #{}/{}: {:?}",
i,
candidates.len(),
candidates[i]
);
i += 1;
// If there are *STILL* multiple candidates, give up
// and report ambiguity.
if i > 1 {
debug!("multiple matches, ambig");
return Ok(None);
}
}
}
}
// If there are *NO* candidates, then there are no impls --
// that we know of, anyway. Note that in the case where there
// are unbound type variables within the obligation, it might
// be the case that you could still satisfy the obligation
// from another crate by instantiating the type variables with
// a type from another crate that does have an impl. This case
// is checked for in `evaluate_stack` (and hence users
// who might care about this case, like coherence, should use
// that function).
if candidates.is_empty() {
return Err(Unimplemented);
}
// Just one candidate left.
self.filter_negative_and_reservation_impls(candidates.pop().unwrap().candidate)
}
fn is_knowable<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> Option<Conflict> {
debug!("is_knowable(intercrate={:?})", self.intercrate);
        if self.intercrate.is_none() {
return None;
}
let obligation = &stack.obligation;
let predicate = self.infcx()
.resolve_vars_if_possible(&obligation.predicate);
// Okay to skip binder because of the nature of the
// trait-ref-is-knowable check, which does not care about
// bound regions.
let trait_ref = predicate.skip_binder().trait_ref;
let result = coherence::trait_ref_is_knowable(self.tcx(), trait_ref);
if let (
Some(Conflict::Downstream {
used_to_be_broken: true,
}),
Some(IntercrateMode::Issue43355),
) = (result, self.intercrate)
{
debug!("is_knowable: IGNORING conflict to be bug-compatible with #43355");
None
} else {
result
}
}
/// Returns `true` if the global caches can be used.
/// Do note that if the type itself is not in the
/// global tcx, the local caches will be used.
fn can_use_global_caches(&self, param_env: ty::ParamEnv<'tcx>) -> bool {
// If there are any where-clauses in scope, then we always use
// a cache local to this particular scope. Otherwise, we
// switch to a global cache. We used to try and draw
        // finer-grained distinctions, but that led to a series of
// annoying and weird bugs like #22019 and #18290. This simple
// rule seems to be pretty clearly safe and also still retains
// a very high hit rate (~95% when compiling rustc).
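        // For example, inside `fn foo<T: Clone>(..)` the param env carries
        // `T: Clone` as a caller bound, so selection results go only into
        // the infcx-local cache; with an empty param env they can be shared
        // through the global tcx cache.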
if !param_env.caller_bounds.is_empty() {
return false;
}
// Avoid using the master cache during coherence and just rely
// on the local cache. This effectively disables caching
// during coherence. It is really just a simplification to
// avoid us having to fear that coherence results "pollute"
// the master cache. Since coherence executes pretty quickly,
// it's not worth going to more trouble to increase the
// hit-rate I don't think.
if self.intercrate.is_some() {
return false;
}
// Otherwise, we can use the global cache.
true
}
fn check_candidate_cache(
&mut self,
param_env: ty::ParamEnv<'tcx>,
cache_fresh_trait_pred: &ty::PolyTraitPredicate<'tcx>,
) -> Option<SelectionResult<'tcx, SelectionCandidate<'tcx>>> {
let tcx = self.tcx();
let trait_ref = &cache_fresh_trait_pred.skip_binder().trait_ref;
if self.can_use_global_caches(param_env) {
let cache = tcx.selection_cache.hashmap.borrow();
if let Some(cached) = cache.get(&trait_ref) {
return Some(cached.get(tcx));
}
}
self.infcx
.selection_cache
.hashmap
.borrow()
.get(trait_ref)
.map(|v| v.get(tcx))
}
    /// Determines whether we can safely cache the result
    /// of selecting an obligation. This is almost always `true`,
/// except when dealing with certain ParamCandidates.
///
/// Ordinarily, a ParamCandidate will contain no inference variables,
    /// since it was usually produced directly from a `DefId`. However, in
    /// certain cases (currently only librustdoc's blanket impl finder),
/// a ParamEnv may be explicitly constructed with inference types.
/// When this is the case, we do *not* want to cache the resulting selection
/// candidate. This is due to the fact that it might not always be possible
/// to equate the obligation's trait ref and the candidate's trait ref,
/// if more constraints end up getting added to an inference variable.
///
/// Because of this, we always want to re-run the full selection
/// process for our obligation the next time we see it, since
    /// we might end up picking a different `SelectionCandidate` (or none at all).
fn can_cache_candidate(&self,
result: &SelectionResult<'tcx, SelectionCandidate<'tcx>>
) -> bool {
match result {
Ok(Some(SelectionCandidate::ParamCandidate(trait_ref))) => {
!trait_ref.skip_binder().input_types().any(|t| t.walk().any(|t_| t_.is_ty_infer()))
},
_ => true
}
}
fn insert_candidate_cache(
&mut self,
param_env: ty::ParamEnv<'tcx>,
cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
dep_node: DepNodeIndex,
candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>,
) {
let tcx = self.tcx();
let trait_ref = cache_fresh_trait_pred.skip_binder().trait_ref;
if !self.can_cache_candidate(&candidate) {
debug!("insert_candidate_cache(trait_ref={:?}, candidate={:?} -\
candidate is not cacheable", trait_ref, candidate);
return;
}
if self.can_use_global_caches(param_env) {
if let Err(Overflow) = candidate {
// Don't cache overflow globally; we only produce this
// in certain modes.
} else if !trait_ref.has_local_value() {
if !candidate.has_local_value() {
debug!(
"insert_candidate_cache(trait_ref={:?}, candidate={:?}) global",
trait_ref, candidate,
);
// This may overwrite the cache with the same value
tcx.selection_cache
.hashmap
.borrow_mut()
.insert(trait_ref, WithDepNode::new(dep_node, candidate));
return;
}
}
}
debug!(
"insert_candidate_cache(trait_ref={:?}, candidate={:?}) local",
trait_ref, candidate,
);
self.infcx
.selection_cache
.hashmap
.borrow_mut()
.insert(trait_ref, WithDepNode::new(dep_node, candidate));
}
fn assemble_candidates<'o>(
&mut self,
stack: &TraitObligationStack<'o, 'tcx>,
) -> Result<SelectionCandidateSet<'tcx>, SelectionError<'tcx>> {
let TraitObligationStack { obligation, .. } = *stack;
let ref obligation = Obligation {
param_env: obligation.param_env,
cause: obligation.cause.clone(),
recursion_depth: obligation.recursion_depth,
predicate: self.infcx()
.resolve_vars_if_possible(&obligation.predicate),
};
if obligation.predicate.skip_binder().self_ty().is_ty_var() {
// Self is a type variable (e.g., `_: AsRef<str>`).
//
            // This is somewhat problematic, as the current scheme can't really
            // handle it turning out to be a projection. This does end up as truly
// ambiguous in most cases anyway.
//
// Take the fast path out - this also improves
// performance by preventing assemble_candidates_from_impls from
// matching every impl for this trait.
return Ok(SelectionCandidateSet {
vec: vec![],
ambiguous: true,
});
}
let mut candidates = SelectionCandidateSet {
vec: Vec::new(),
ambiguous: false,
};
self.assemble_candidates_for_trait_alias(obligation, &mut candidates)?;
// Other bounds. Consider both in-scope bounds from fn decl
// and applicable impls. There is a certain set of precedence rules here.
let def_id = obligation.predicate.def_id();
let lang_items = self.tcx().lang_items();
if lang_items.copy_trait() == Some(def_id) {
debug!(
"obligation self ty is {:?}",
obligation.predicate.skip_binder().self_ty()
);
// User-defined copy impls are permitted, but only for
// structs and enums.
self.assemble_candidates_from_impls(obligation, &mut candidates)?;
// For other types, we'll use the builtin rules.
let copy_conditions = self.copy_clone_conditions(obligation);
self.assemble_builtin_bound_candidates(copy_conditions, &mut candidates)?;
} else if lang_items.sized_trait() == Some(def_id) {
// Sized is never implementable by end-users, it is
// always automatically computed.
let sized_conditions = self.sized_conditions(obligation);
self.assemble_builtin_bound_candidates(sized_conditions, &mut candidates)?;
} else if lang_items.unsize_trait() == Some(def_id) {
self.assemble_candidates_for_unsizing(obligation, &mut candidates);
} else {
if lang_items.clone_trait() == Some(def_id) {
// Same builtin conditions as `Copy`, i.e., every type which has builtin support
// for `Copy` also has builtin support for `Clone`, + tuples and arrays of `Clone`
// types have builtin support for `Clone`.
let clone_conditions = self.copy_clone_conditions(obligation);
self.assemble_builtin_bound_candidates(clone_conditions, &mut candidates)?;
}
self.assemble_generator_candidates(obligation, &mut candidates)?;
self.assemble_closure_candidates(obligation, &mut candidates)?;
self.assemble_fn_pointer_candidates(obligation, &mut candidates)?;
self.assemble_candidates_from_impls(obligation, &mut candidates)?;
self.assemble_candidates_from_object_ty(obligation, &mut candidates);
}
self.assemble_candidates_from_projected_tys(obligation, &mut candidates);
self.assemble_candidates_from_caller_bounds(stack, &mut candidates)?;
// Auto implementations have lower priority, so we only
// consider triggering a default if there is no other impl that can apply.
if candidates.vec.is_empty() {
self.assemble_candidates_from_auto_impls(obligation, &mut candidates)?;
}
debug!("candidate list size: {}", candidates.vec.len());
Ok(candidates)
}
fn assemble_candidates_from_projected_tys(
&mut self,
obligation: &TraitObligation<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) {
debug!("assemble_candidates_for_projected_tys({:?})", obligation);
// before we go into the whole placeholder thing, just
// quickly check if the self-type is a projection at all.
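        // (e.g., `Self` is `<T as Iterator>::Item` or an `impl Trait`
        // opaque type).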
match obligation.predicate.skip_binder().trait_ref.self_ty().kind {
ty::Projection(_) | ty::Opaque(..) => {}
ty::Infer(ty::TyVar(_)) => {
span_bug!(
obligation.cause.span,
"Self=_ should have been handled by assemble_candidates"
);
}
_ => return,
}
let result = self.infcx.probe(|snapshot| {
self.match_projection_obligation_against_definition_bounds(
obligation,
snapshot,
)
});
if result {
candidates.vec.push(ProjectionCandidate);
}
}
fn match_projection_obligation_against_definition_bounds(
&mut self,
obligation: &TraitObligation<'tcx>,
snapshot: &CombinedSnapshot<'_, 'tcx>,
) -> bool {
let poly_trait_predicate = self.infcx()
.resolve_vars_if_possible(&obligation.predicate);
let (placeholder_trait_predicate, placeholder_map) = self.infcx()
.replace_bound_vars_with_placeholders(&poly_trait_predicate);
debug!(
"match_projection_obligation_against_definition_bounds: \
placeholder_trait_predicate={:?}",
placeholder_trait_predicate,
);
let (def_id, substs) = match placeholder_trait_predicate.trait_ref.self_ty().kind {
ty::Projection(ref data) => (data.trait_ref(self.tcx()).def_id, data.substs),
ty::Opaque(def_id, substs) => (def_id, substs),
_ => {
span_bug!(
obligation.cause.span,
"match_projection_obligation_against_definition_bounds() called \
but self-ty is not a projection: {:?}",
placeholder_trait_predicate.trait_ref.self_ty()
);
}
};
debug!(
"match_projection_obligation_against_definition_bounds: \
def_id={:?}, substs={:?}",
def_id, substs
);
let predicates_of = self.tcx().predicates_of(def_id);
let bounds = predicates_of.instantiate(self.tcx(), substs);
debug!(
"match_projection_obligation_against_definition_bounds: \
bounds={:?}",
bounds
);
let elaborated_predicates = util::elaborate_predicates(self.tcx(), bounds.predicates);
let matching_bound = elaborated_predicates
.filter_to_traits()
.find(|bound| {
self.infcx.probe(|_| {
self.match_projection(
obligation,
bound.clone(),
placeholder_trait_predicate.trait_ref.clone(),
&placeholder_map,
snapshot,
)
})
});
debug!(
"match_projection_obligation_against_definition_bounds: \
matching_bound={:?}",
matching_bound
);
match matching_bound {
None => false,
Some(bound) => {
// Repeat the successful match, if any, this time outside of a probe.
let result = self.match_projection(
obligation,
bound,
placeholder_trait_predicate.trait_ref.clone(),
&placeholder_map,
snapshot,
);
assert!(result);
true
}
}
}
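    /// Checks whether the placeholder-instantiated trait ref of `obligation`
    /// can be unified with `trait_bound` (one of the bounds declared on the
    /// projection's definition) without leaking placeholder regions out of
    /// the snapshot.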
fn match_projection(
&mut self,
obligation: &TraitObligation<'tcx>,
trait_bound: ty::PolyTraitRef<'tcx>,
placeholder_trait_ref: ty::TraitRef<'tcx>,
placeholder_map: &PlaceholderMap<'tcx>,
snapshot: &CombinedSnapshot<'_, 'tcx>,
) -> bool {
debug_assert!(!placeholder_trait_ref.has_escaping_bound_vars());
self.infcx
.at(&obligation.cause, obligation.param_env)
.sup(ty::Binder::dummy(placeholder_trait_ref), trait_bound)
.is_ok()
&&
self.infcx.leak_check(false, placeholder_map, snapshot).is_ok()
}
/// Given an obligation like `<SomeTrait for T>`, search the obligations that the caller
/// supplied to find out whether it is listed among them.
///
/// Never affects inference environment.
fn assemble_candidates_from_caller_bounds<'o>(
&mut self,
stack: &TraitObligationStack<'o, 'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) -> Result<(), SelectionError<'tcx>> {
debug!(
"assemble_candidates_from_caller_bounds({:?})",
stack.obligation
);
let all_bounds = stack
.obligation
.param_env
.caller_bounds
.iter()
.filter_map(|o| o.to_opt_poly_trait_ref());
// Micro-optimization: filter out predicates relating to different traits.
let matching_bounds =
all_bounds.filter(|p| p.def_id() == stack.obligation.predicate.def_id());
// Keep only those bounds which may apply, and propagate overflow if it occurs.
let mut param_candidates = vec![];
for bound in matching_bounds {
let wc = self.evaluate_where_clause(stack, bound.clone())?;
if wc.may_apply() {
param_candidates.push(ParamCandidate(bound));
}
}
candidates.vec.extend(param_candidates);
Ok(())
}
fn evaluate_where_clause<'o>(
&mut self,
stack: &TraitObligationStack<'o, 'tcx>,
where_clause_trait_ref: ty::PolyTraitRef<'tcx>,
) -> Result<EvaluationResult, OverflowError> {
self.evaluation_probe(|this| {
match this.match_where_clause_trait_ref(stack.obligation, where_clause_trait_ref) {
Ok(obligations) => {
this.evaluate_predicates_recursively(stack.list(), obligations.into_iter())
}
Err(()) => Ok(EvaluatedToErr),
}
})
}
fn assemble_generator_candidates(
&mut self,
obligation: &TraitObligation<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) -> Result<(), SelectionError<'tcx>> {
if self.tcx().lang_items().gen_trait() != Some(obligation.predicate.def_id()) {
return Ok(());
}
// Okay to skip binder because the substs on generator types never
// touch bound regions, they just capture the in-scope
// type/region parameters.
let self_ty = *obligation.self_ty().skip_binder();
match self_ty.kind {
ty::Generator(..) => {
debug!(
"assemble_generator_candidates: self_ty={:?} obligation={:?}",
self_ty, obligation
);
candidates.vec.push(GeneratorCandidate);
}
ty::Infer(ty::TyVar(_)) => {
debug!("assemble_generator_candidates: ambiguous self-type");
candidates.ambiguous = true;
}
_ => {}
}
Ok(())
}
/// Checks for the artificial impl that the compiler will create for an obligation like `X :
/// FnMut<..>` where `X` is a closure type.
///
/// Note: the type parameters on a closure candidate are modeled as *output* type
/// parameters and hence do not affect whether this trait is a match or not. They will be
/// unified during the confirmation step.
fn assemble_closure_candidates(
&mut self,
obligation: &TraitObligation<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) -> Result<(), SelectionError<'tcx>> {
let kind = match self.tcx()
.lang_items()
.fn_trait_kind(obligation.predicate.def_id())
{
Some(k) => k,
None => {
return Ok(());
}
};
// Okay to skip binder because the substs on closure types never
// touch bound regions, they just capture the in-scope
// type/region parameters
match obligation.self_ty().skip_binder().kind {
ty::Closure(closure_def_id, closure_substs) => {
debug!(
"assemble_unboxed_candidates: kind={:?} obligation={:?}",
kind, obligation
);
match self.infcx.closure_kind(closure_def_id, closure_substs) {
Some(closure_kind) => {
debug!(
"assemble_unboxed_candidates: closure_kind = {:?}",
closure_kind
);
if closure_kind.extends(kind) {
candidates.vec.push(ClosureCandidate);
}
}
None => {
debug!("assemble_unboxed_candidates: closure_kind not yet known");
candidates.vec.push(ClosureCandidate);
}
}
}
ty::Infer(ty::TyVar(_)) => {
debug!("assemble_unboxed_closure_candidates: ambiguous self-type");
candidates.ambiguous = true;
}
_ => {}
}
Ok(())
}
/// Implement one of the `Fn()` family for a fn pointer.
fn assemble_fn_pointer_candidates(
&mut self,
obligation: &TraitObligation<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) -> Result<(), SelectionError<'tcx>> {
        // We provide an impl of all fn traits for fn pointers.
if self.tcx()
.lang_items()
.fn_trait_kind(obligation.predicate.def_id())
.is_none()
{
return Ok(());
}
// Okay to skip binder because what we are inspecting doesn't involve bound regions
let self_ty = *obligation.self_ty().skip_binder();
match self_ty.kind {
ty::Infer(ty::TyVar(_)) => {
debug!("assemble_fn_pointer_candidates: ambiguous self-type");
candidates.ambiguous = true; // could wind up being a fn() type
}
// provide an impl, but only for suitable `fn` pointers
ty::FnDef(..) | ty::FnPtr(_) => {
if let ty::FnSig {
unsafety: hir::Unsafety::Normal,
abi: Abi::Rust,
c_variadic: false,
..
} = self_ty.fn_sig(self.tcx()).skip_binder()
{
candidates.vec.push(FnPointerCandidate);
}
}
_ => {}
}
Ok(())
}
/// Search for impls that might apply to `obligation`.
fn assemble_candidates_from_impls(
&mut self,
obligation: &TraitObligation<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) -> Result<(), SelectionError<'tcx>> {
debug!(
"assemble_candidates_from_impls(obligation={:?})",
obligation
);
self.tcx().for_each_relevant_impl(
obligation.predicate.def_id(),
obligation.predicate.skip_binder().trait_ref.self_ty(),
|impl_def_id| {
self.infcx.probe(|snapshot| {
if let Ok(_substs) = self.match_impl(impl_def_id, obligation, snapshot)
{
candidates.vec.push(ImplCandidate(impl_def_id));
}
});
},
);
Ok(())
}
fn assemble_candidates_from_auto_impls(
&mut self,
obligation: &TraitObligation<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) -> Result<(), SelectionError<'tcx>> {
// Okay to skip binder here because the tests we do below do not involve bound regions.
let self_ty = *obligation.self_ty().skip_binder();
debug!("assemble_candidates_from_auto_impls(self_ty={:?})", self_ty);
let def_id = obligation.predicate.def_id();
if self.tcx().trait_is_auto(def_id) {
match self_ty.kind {
ty::Dynamic(..) => {
// For object types, we don't know what the closed
// over types are. This means we conservatively
// say nothing; a candidate may be added by
// `assemble_candidates_from_object_ty`.
}
ty::Foreign(..) => {
// Since the contents of foreign types is unknown,
// we don't add any `..` impl. Default traits could
// still be provided by a manual implementation for
// this trait and type.
}
ty::Param(..) | ty::Projection(..) => {
// In these cases, we don't know what the actual
// type is. Therefore, we cannot break it down
// into its constituent types. So we don't
// consider the `..` impl but instead just add no
// candidates: this means that typeck will only
// succeed if there is another reason to believe
// that this obligation holds. That could be a
// where-clause or, in the case of an object type,
// it could be that the object type lists the
// trait (e.g., `Foo+Send : Send`). See
// `compile-fail/typeck-default-trait-impl-send-param.rs`
// for an example of a test case that exercises
// this path.
}
ty::Infer(ty::TyVar(_)) => {
// the auto impl might apply, we don't know
candidates.ambiguous = true;
}
ty::Generator(_, _, movability)
if self.tcx().lang_items().unpin_trait() == Some(def_id) =>
{
match movability {
hir::GeneratorMovability::Static => {
// Immovable generators are never `Unpin`, so
// suppress the normal auto-impl candidate for it.
}
hir::GeneratorMovability::Movable => {
// Movable generators are always `Unpin`, so add an
// unconditional builtin candidate.
candidates.vec.push(BuiltinCandidate {
has_nested: false,
});
}
}
}
_ => candidates.vec.push(AutoImplCandidate(def_id.clone())),
}
}
Ok(())
}
/// Search for impls that might apply to `obligation`.
fn assemble_candidates_from_object_ty(
&mut self,
obligation: &TraitObligation<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) {
debug!(
"assemble_candidates_from_object_ty(self_ty={:?})",
obligation.self_ty().skip_binder()
);
self.infcx.probe(|_snapshot| {
// The code below doesn't care about regions, and the
// self-ty here doesn't escape this probe, so just erase
// any LBR.
let self_ty = self.tcx().erase_late_bound_regions(&obligation.self_ty());
let poly_trait_ref = match self_ty.kind {
ty::Dynamic(ref data, ..) => {
if data.auto_traits()
.any(|did| did == obligation.predicate.def_id())
{
debug!(
"assemble_candidates_from_object_ty: matched builtin bound, \
pushing candidate"
);
candidates.vec.push(BuiltinObjectCandidate);
return;
}
if let Some(principal) = data.principal() {
principal.with_self_ty(self.tcx(), self_ty)
} else {
// Only auto-trait bounds exist.
return;
}
}
ty::Infer(ty::TyVar(_)) => {
debug!("assemble_candidates_from_object_ty: ambiguous");
candidates.ambiguous = true; // could wind up being an object type
return;
}
_ => return,
};
debug!(
"assemble_candidates_from_object_ty: poly_trait_ref={:?}",
poly_trait_ref
);
// Count only those upcast versions that match the trait-ref
            // we are looking for. Specifically, check not only for the
            // correct trait, but also for the correct type parameters.
// For example, we may be trying to upcast `Foo` to `Bar<i32>`,
// but `Foo` is declared as `trait Foo : Bar<u32>`.
let upcast_trait_refs = util::supertraits(self.tcx(), poly_trait_ref)
.filter(|upcast_trait_ref| {
self.infcx.probe(|_| {
let upcast_trait_ref = upcast_trait_ref.clone();
self.match_poly_trait_ref(obligation, upcast_trait_ref)
.is_ok()
})
})
.count();
if upcast_trait_refs > 1 {
// Can be upcast in many ways; need more type information.
candidates.ambiguous = true;
} else if upcast_trait_refs == 1 {
candidates.vec.push(ObjectCandidate);
}
})
}
/// Search for unsizing that might apply to `obligation`.
fn assemble_candidates_for_unsizing(
&mut self,
obligation: &TraitObligation<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) {
// We currently never consider higher-ranked obligations e.g.
// `for<'a> &'a T: Unsize<Trait+'a>` to be implemented. This is not
        // because they are a priori invalid; we could potentially add support
        // for them later, but there isn't really a strong need for it yet.
// A `T: Unsize<U>` obligation is always used as part of a `T: CoerceUnsize<U>`
// impl, and those are generally applied to concrete types.
//
// That said, one might try to write a fn with a where clause like
// for<'a> Foo<'a, T>: Unsize<Foo<'a, Trait>>
// where the `'a` is kind of orthogonal to the relevant part of the `Unsize`.
// Still, you'd be more likely to write that where clause as
// T: Trait
// so it seems ok if we (conservatively) fail to accept that `Unsize`
// obligation above. Should be possible to extend this in the future.
let source = match obligation.self_ty().no_bound_vars() {
Some(t) => t,
None => {
// Don't add any candidates if there are bound regions.
return;
}
};
let target = obligation
.predicate
.skip_binder()
.trait_ref
.substs
.type_at(1);
debug!(
"assemble_candidates_for_unsizing(source={:?}, target={:?})",
source, target
);
let may_apply = match (&source.kind, &target.kind) {
// Trait+Kx+'a -> Trait+Ky+'b (upcasts).
(&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
// Upcasts permit two things:
//
// 1. Dropping builtin bounds, e.g., `Foo+Send` to `Foo`
// 2. Tightening the region bound, e.g., `Foo+'a` to `Foo+'b` if `'a : 'b`
//
// Note that neither of these changes requires any
// change at runtime. Eventually this will be
// generalized.
//
// We always upcast when we can because of reason
// #2 (region bounds).
data_a.principal_def_id() == data_b.principal_def_id()
&& data_b.auto_traits()
// All of a's auto traits need to be in b's auto traits.
.all(|b| data_a.auto_traits().any(|a| a == b))
}
// T -> Trait.
(_, &ty::Dynamic(..)) => true,
// Ambiguous handling is below T -> Trait, because inference
// variables can still implement Unsize<Trait> and nested
// obligations will have the final say (likely deferred).
(&ty::Infer(ty::TyVar(_)), _) | (_, &ty::Infer(ty::TyVar(_))) => {
debug!("assemble_candidates_for_unsizing: ambiguous");
candidates.ambiguous = true;
false
}
// [T; n] -> [T].
(&ty::Array(..), &ty::Slice(_)) => true,
// Struct<T> -> Struct<U>.
(&ty::Adt(def_id_a, _), &ty::Adt(def_id_b, _)) if def_id_a.is_struct() => {
def_id_a == def_id_b
}
// (.., T) -> (.., U).
(&ty::Tuple(tys_a), &ty::Tuple(tys_b)) => tys_a.len() == tys_b.len(),
_ => false,
};
if may_apply {
candidates.vec.push(BuiltinUnsizeCandidate);
}
}
fn assemble_candidates_for_trait_alias(
&mut self,
obligation: &TraitObligation<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) -> Result<(), SelectionError<'tcx>> {
// Okay to skip binder here because the tests we do below do not involve bound regions.
let self_ty = *obligation.self_ty().skip_binder();
debug!("assemble_candidates_for_trait_alias(self_ty={:?})", self_ty);
let def_id = obligation.predicate.def_id();
if self.tcx().is_trait_alias(def_id) {
candidates.vec.push(TraitAliasCandidate(def_id.clone()));
}
Ok(())
}
///////////////////////////////////////////////////////////////////////////
// WINNOW
//
// Winnowing is the process of attempting to resolve ambiguity by
// probing further. During the winnowing process, we unify all
// type variables and then we also attempt to evaluate recursive
// bounds to see if they are satisfied.
/// Returns `true` if `victim` should be dropped in favor of
/// `other`. Generally speaking we will drop duplicate
/// candidates and prefer where-clause candidates.
///
/// See the comment for "SelectionCandidate" for more details.
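    ///
    /// For example, given
    ///
    ///     fn foo<T: Clone>(t: T) { t.clone(); }
    ///
    /// the obligation `T: Clone` may be satisfiable both through the
    /// where-clause (a `ParamCandidate`) and through impls; the non-global
    /// where-clause candidate takes precedence, so any impl candidate is
    /// dropped in its favor.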
fn candidate_should_be_dropped_in_favor_of(
&mut self,
victim: &EvaluatedCandidate<'tcx>,
other: &EvaluatedCandidate<'tcx>,
) -> bool {
if victim.candidate == other.candidate {
return true;
}
// Check if a bound would previously have been removed when normalizing
// the param_env so that it can be given the lowest priority. See
// #50825 for the motivation for this.
let is_global =
|cand: &ty::PolyTraitRef<'_>| cand.is_global() && !cand.has_late_bound_regions();
match other.candidate {
// Prefer BuiltinCandidate { has_nested: false } to anything else.
// This is a fix for #53123 and prevents winnowing from accidentally extending the
// lifetime of a variable.
BuiltinCandidate { has_nested: false } => true,
ParamCandidate(ref cand) => match victim.candidate {
AutoImplCandidate(..) => {
bug!(
"default implementations shouldn't be recorded \
when there are other valid candidates"
);
}
// Prefer BuiltinCandidate { has_nested: false } to anything else.
// This is a fix for #53123 and prevents winnowing from accidentally extending the
// lifetime of a variable.
BuiltinCandidate { has_nested: false } => false,
ImplCandidate(..)
| ClosureCandidate
| GeneratorCandidate
| FnPointerCandidate
| BuiltinObjectCandidate
| BuiltinUnsizeCandidate
| BuiltinCandidate { .. }
| TraitAliasCandidate(..) => {
// Global bounds from the where clause should be ignored
// here (see issue #50825). Otherwise, we have a where
// clause so don't go around looking for impls.
!is_global(cand)
}
ObjectCandidate | ProjectionCandidate => {
// Arbitrarily give param candidates priority
// over projection and object candidates.
!is_global(cand)
}
ParamCandidate(..) => false,
},
ObjectCandidate | ProjectionCandidate => match victim.candidate {
AutoImplCandidate(..) => {
bug!(
"default implementations shouldn't be recorded \
when there are other valid candidates"
);
}
// Prefer BuiltinCandidate { has_nested: false } to anything else.
// This is a fix for #53123 and prevents winnowing from accidentally extending the
// lifetime of a variable.
BuiltinCandidate { has_nested: false } => false,
ImplCandidate(..)
| ClosureCandidate
| GeneratorCandidate
| FnPointerCandidate
| BuiltinObjectCandidate
| BuiltinUnsizeCandidate
| BuiltinCandidate { .. }
| TraitAliasCandidate(..) => true,
ObjectCandidate | ProjectionCandidate => {
// Arbitrarily give param candidates priority
// over projection and object candidates.
true
}
ParamCandidate(ref cand) => is_global(cand),
},
ImplCandidate(other_def) => {
// See if we can toss out `victim` based on specialization.
// This requires us to know *for sure* that the `other` impl applies
// i.e., EvaluatedToOk:
if other.evaluation.must_apply_modulo_regions() {
match victim.candidate {
ImplCandidate(victim_def) => {
let tcx = self.tcx();
return tcx.specializes((other_def, victim_def))
|| tcx.impls_are_allowed_to_overlap(
other_def, victim_def).is_some();
}
ParamCandidate(ref cand) => {
// Prefer the impl to a global where clause candidate.
return is_global(cand);
}
_ => (),
}
}
false
}
ClosureCandidate
| GeneratorCandidate
| FnPointerCandidate
| BuiltinObjectCandidate
| BuiltinUnsizeCandidate
| BuiltinCandidate { has_nested: true } => {
match victim.candidate {
ParamCandidate(ref cand) => {
// Prefer these to a global where-clause bound
// (see issue #50825)
is_global(cand) && other.evaluation.must_apply_modulo_regions()
}
_ => false,
}
}
_ => false,
}
}
///////////////////////////////////////////////////////////////////////////
// BUILTIN BOUNDS
//
// These cover the traits that are built-in to the language
// itself: `Copy`, `Clone` and `Sized`.
fn assemble_builtin_bound_candidates(
&mut self,
conditions: BuiltinImplConditions<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) -> Result<(), SelectionError<'tcx>> {
match conditions {
BuiltinImplConditions::Where(nested) => {
debug!("builtin_bound: nested={:?}", nested);
candidates.vec.push(BuiltinCandidate {
                    has_nested: !nested.skip_binder().is_empty(),
});
}
BuiltinImplConditions::None => {}
BuiltinImplConditions::Ambiguous => {
debug!("assemble_builtin_bound_candidates: ambiguous builtin");
candidates.ambiguous = true;
}
}
Ok(())
}
fn sized_conditions(
&mut self,
obligation: &TraitObligation<'tcx>,
) -> BuiltinImplConditions<'tcx> {
use self::BuiltinImplConditions::{Ambiguous, None, Where};
// NOTE: binder moved to (*)
let self_ty = self.infcx
.shallow_resolve(obligation.predicate.skip_binder().self_ty());
match self_ty.kind {
ty::Infer(ty::IntVar(_))
| ty::Infer(ty::FloatVar(_))
| ty::Uint(_)
| ty::Int(_)
| ty::Bool
| ty::Float(_)
| ty::FnDef(..)
| ty::FnPtr(_)
| ty::RawPtr(..)
| ty::Char
| ty::Ref(..)
| ty::Generator(..)
| ty::GeneratorWitness(..)
| ty::Array(..)
| ty::Closure(..)
| ty::Never
| ty::Error => {
// safe for everything
Where(ty::Binder::dummy(Vec::new()))
}
ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => None,
ty::Tuple(tys) => {
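                // All tuple elements but the last are required to be `Sized`
                // by construction, so only the last element needs checking.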
Where(ty::Binder::bind(tys.last().into_iter().map(|k| k.expect_ty()).collect()))
}
ty::Adt(def, substs) => {
let sized_crit = def.sized_constraint(self.tcx());
// (*) binder moved here
Where(ty::Binder::bind(
sized_crit
.iter()
.map(|ty| ty.subst(self.tcx(), substs))
.collect(),
))
}
ty::Projection(_) | ty::Param(_) | ty::Opaque(..) => None,
ty::Infer(ty::TyVar(_)) => Ambiguous,
ty::UnnormalizedProjection(..)
| ty::Placeholder(..)
| ty::Bound(..)
| ty::Infer(ty::FreshTy(_))
| ty::Infer(ty::FreshIntTy(_))
| ty::Infer(ty::FreshFloatTy(_)) => {
bug!(
"asked to assemble builtin bounds of unexpected type: {:?}",
self_ty
);
}
}
}
fn copy_clone_conditions(
&mut self,
obligation: &TraitObligation<'tcx>,
) -> BuiltinImplConditions<'tcx> {
// NOTE: binder moved to (*)
let self_ty = self.infcx
.shallow_resolve(obligation.predicate.skip_binder().self_ty());
use self::BuiltinImplConditions::{Ambiguous, None, Where};
match self_ty.kind {
ty::Infer(ty::IntVar(_))
| ty::Infer(ty::FloatVar(_))
| ty::FnDef(..)
| ty::FnPtr(_)
| ty::Error => Where(ty::Binder::dummy(Vec::new())),
ty::Uint(_)
| ty::Int(_)
| ty::Bool
| ty::Float(_)
| ty::Char
| ty::RawPtr(..)
| ty::Never
| ty::Ref(_, _, hir::MutImmutable) => {
// Implementations provided in libcore
None
}
ty::Dynamic(..)
| ty::Str
| ty::Slice(..)
| ty::Generator(..)
| ty::GeneratorWitness(..)
| ty::Foreign(..)
| ty::Ref(_, _, hir::MutMutable) => None,
ty::Array(element_ty, _) => {
// (*) binder moved here
Where(ty::Binder::bind(vec![element_ty]))
}
ty::Tuple(tys) => {
// (*) binder moved here
Where(ty::Binder::bind(tys.iter().map(|k| k.expect_ty()).collect()))
}
ty::Closure(def_id, substs) => {
// (*) binder moved here
Where(ty::Binder::bind(
substs.upvar_tys(def_id, self.tcx()).collect(),
))
}
ty::Adt(..) | ty::Projection(..) | ty::Param(..) | ty::Opaque(..) => {
// Fallback to whatever user-defined impls exist in this case.
None
}
ty::Infer(ty::TyVar(_)) => {
// Unbound type variable. Might or might not have
// applicable impls and so forth, depending on what
// those type variables wind up being bound to.
Ambiguous
}
ty::UnnormalizedProjection(..)
| ty::Placeholder(..)
| ty::Bound(..)
| ty::Infer(ty::FreshTy(_))
| ty::Infer(ty::FreshIntTy(_))
| ty::Infer(ty::FreshFloatTy(_)) => {
bug!(
"asked to assemble builtin bounds of unexpected type: {:?}",
self_ty
);
}
}
}
/// For default impls, we need to break apart a type into its
/// "constituent types" -- meaning, the types that it contains.
///
/// Here are some (simple) examples:
///
/// ```
/// (i32, u32) -> [i32, u32]
/// Foo where struct Foo { x: i32, y: u32 } -> [i32, u32]
/// Bar<i32> where struct Bar<T> { x: T, y: u32 } -> [i32, u32]
    /// Zed<i32> where enum Zed<T> { A(T), B(u32) } -> [i32, u32]
/// ```
fn constituent_types_for_ty(&self, t: Ty<'tcx>) -> Vec<Ty<'tcx>> {
match t.kind {
ty::Uint(_)
| ty::Int(_)
| ty::Bool
| ty::Float(_)
| ty::FnDef(..)
| ty::FnPtr(_)
| ty::Str
| ty::Error
| ty::Infer(ty::IntVar(_))
| ty::Infer(ty::FloatVar(_))
| ty::Never
| ty::Char => Vec::new(),
ty::UnnormalizedProjection(..)
| ty::Placeholder(..)
| ty::Dynamic(..)
| ty::Param(..)
| ty::Foreign(..)
| ty::Projection(..)
| ty::Bound(..)
| ty::Infer(ty::TyVar(_))
| ty::Infer(ty::FreshTy(_))
| ty::Infer(ty::FreshIntTy(_))
| ty::Infer(ty::FreshFloatTy(_)) => {
bug!(
"asked to assemble constituent types of unexpected type: {:?}",
t
);
}
ty::RawPtr(ty::TypeAndMut { ty: element_ty, .. }) | ty::Ref(_, element_ty, _) => {
vec![element_ty]
}
ty::Array(element_ty, _) | ty::Slice(element_ty) => vec![element_ty],
ty::Tuple(ref tys) => {
// (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
tys.iter().map(|k| k.expect_ty()).collect()
}
ty::Closure(def_id, ref substs) => substs.upvar_tys(def_id, self.tcx()).collect(),
ty::Generator(def_id, ref substs, _) => {
let witness = substs.witness(def_id, self.tcx());
substs
.upvar_tys(def_id, self.tcx())
.chain(iter::once(witness))
.collect()
}
ty::GeneratorWitness(types) => {
// This is sound because no regions in the witness can refer to
                // the binder outside the witness. So we'll effectively reuse
// the implicit binder around the witness.
types.skip_binder().to_vec()
}
// for `PhantomData<T>`, we pass `T`
ty::Adt(def, substs) if def.is_phantom_data() => substs.types().collect(),
ty::Adt(def, substs) => def.all_fields().map(|f| f.ty(self.tcx(), substs)).collect(),
ty::Opaque(def_id, substs) => {
// We can resolve the `impl Trait` to its concrete type,
// which enforces a DAG between the functions requiring
// the auto trait bounds in question.
vec![self.tcx().type_of(def_id).subst(self.tcx(), substs)]
}
}
}
fn collect_predicates_for_types(
&mut self,
param_env: ty::ParamEnv<'tcx>,
cause: ObligationCause<'tcx>,
recursion_depth: usize,
trait_def_id: DefId,
types: ty::Binder<Vec<Ty<'tcx>>>,
) -> Vec<PredicateObligation<'tcx>> {
// Because the types were potentially derived from
// higher-ranked obligations they may reference late-bound
// regions. For example, `for<'a> Foo<&'a int> : Copy` would
// yield a type like `for<'a> &'a int`. In general, we
// maintain the invariant that we never manipulate bound
// regions, so we have to process these bound regions somehow.
//
// The strategy is to:
//
// 1. Instantiate those regions to placeholder regions (e.g.,
// `for<'a> &'a int` becomes `&0 int`.
// 2. Produce something like `&'0 int : Copy`
// 3. Re-bind the regions back to `for<'a> &'a int : Copy`
types
.skip_binder()
.into_iter()
.flat_map(|ty| {
// binder moved -\
let ty: ty::Binder<Ty<'tcx>> = ty::Binder::bind(ty); // <----/
self.infcx.in_snapshot(|_| {
let (skol_ty, _) = self.infcx
.replace_bound_vars_with_placeholders(&ty);
let Normalized {
value: normalized_ty,
mut obligations,
} = project::normalize_with_depth(
self,
param_env,
cause.clone(),
recursion_depth,
&skol_ty,
);
let skol_obligation = self.tcx().predicate_for_trait_def(
param_env,
cause.clone(),
trait_def_id,
recursion_depth,
normalized_ty,
&[],
);
obligations.push(skol_obligation);
obligations
})
})
.collect()
}
///////////////////////////////////////////////////////////////////////////
// CONFIRMATION
//
// Confirmation unifies the output type parameters of the trait
// with the values found in the obligation, possibly yielding a
// type error. See the [rustc guide] for more details.
//
// [rustc guide]:
// https://rust-lang.github.io/rustc-guide/traits/resolution.html#confirmation
fn confirm_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
candidate: SelectionCandidate<'tcx>,
) -> Result<Selection<'tcx>, SelectionError<'tcx>> {
debug!("confirm_candidate({:?}, {:?})", obligation, candidate);
match candidate {
BuiltinCandidate { has_nested } => {
let data = self.confirm_builtin_candidate(obligation, has_nested);
Ok(VtableBuiltin(data))
}
ParamCandidate(param) => {
let obligations = self.confirm_param_candidate(obligation, param);
Ok(VtableParam(obligations))
}
ImplCandidate(impl_def_id) => Ok(VtableImpl(self.confirm_impl_candidate(
obligation,
impl_def_id,
))),
AutoImplCandidate(trait_def_id) => {
let data = self.confirm_auto_impl_candidate(obligation, trait_def_id);
Ok(VtableAutoImpl(data))
}
ProjectionCandidate => {
self.confirm_projection_candidate(obligation);
Ok(VtableParam(Vec::new()))
}
ClosureCandidate => {
let vtable_closure = self.confirm_closure_candidate(obligation)?;
Ok(VtableClosure(vtable_closure))
}
GeneratorCandidate => {
let vtable_generator = self.confirm_generator_candidate(obligation)?;
Ok(VtableGenerator(vtable_generator))
}
FnPointerCandidate => {
let data = self.confirm_fn_pointer_candidate(obligation)?;
Ok(VtableFnPointer(data))
}
TraitAliasCandidate(alias_def_id) => {
let data = self.confirm_trait_alias_candidate(obligation, alias_def_id);
Ok(VtableTraitAlias(data))
}
ObjectCandidate => {
let data = self.confirm_object_candidate(obligation);
Ok(VtableObject(data))
}
BuiltinObjectCandidate => {
// This indicates something like `(Trait+Send) :
// Send`. In this case, we know that this holds
// because that's what the object type is telling us,
// and there's really no additional obligations to
// prove and no types in particular to unify etc.
Ok(VtableParam(Vec::new()))
}
BuiltinUnsizeCandidate => {
let data = self.confirm_builtin_unsize_candidate(obligation)?;
Ok(VtableBuiltin(data))
}
}
}
fn confirm_projection_candidate(&mut self, obligation: &TraitObligation<'tcx>) {
self.infcx.in_snapshot(|snapshot| {
let result =
self.match_projection_obligation_against_definition_bounds(
obligation,
snapshot,
);
assert!(result);
})
}
fn confirm_param_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
param: ty::PolyTraitRef<'tcx>,
) -> Vec<PredicateObligation<'tcx>> {
debug!("confirm_param_candidate({:?},{:?})", obligation, param);
// During evaluation, we already checked that this
// where-clause trait-ref could be unified with the obligation
// trait-ref. Repeat that unification now without any
// transactional boundary; it should not fail.
match self.match_where_clause_trait_ref(obligation, param.clone()) {
Ok(obligations) => obligations,
Err(()) => {
bug!(
"Where clause `{:?}` was applicable to `{:?}` but now is not",
param,
obligation
);
}
}
}
fn confirm_builtin_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
has_nested: bool,
) -> VtableBuiltinData<PredicateObligation<'tcx>> {
debug!(
"confirm_builtin_candidate({:?}, {:?})",
obligation, has_nested
);
let lang_items = self.tcx().lang_items();
let obligations = if has_nested {
let trait_def = obligation.predicate.def_id();
let conditions = if Some(trait_def) == lang_items.sized_trait() {
self.sized_conditions(obligation)
} else if Some(trait_def) == lang_items.copy_trait() {
self.copy_clone_conditions(obligation)
} else if Some(trait_def) == lang_items.clone_trait() {
self.copy_clone_conditions(obligation)
} else {
bug!("unexpected builtin trait {:?}", trait_def)
};
let nested = match conditions {
BuiltinImplConditions::Where(nested) => nested,
_ => bug!(
"obligation {:?} had matched a builtin impl but now doesn't",
obligation
),
};
let cause = obligation.derived_cause(BuiltinDerivedObligation);
self.collect_predicates_for_types(
obligation.param_env,
cause,
obligation.recursion_depth + 1,
trait_def,
nested,
)
} else {
vec![]
};
debug!("confirm_builtin_candidate: obligations={:?}", obligations);
VtableBuiltinData {
nested: obligations,
}
}
    /// This handles the case where an `auto trait Foo` impl is being used.
/// The idea is that the impl applies to `X : Foo` if the following conditions are met:
///
/// 1. For each constituent type `Y` in `X`, `Y : Foo` holds
/// 2. For each where-clause `C` declared on `Foo`, `[Self => X] C` holds.
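    ///
    /// For example (an illustrative sketch, not taken from the surrounding
    /// code): given `struct Wrapper<T> { inner: T }` and an obligation
    /// `Wrapper<u32>: Send`, condition (1) requires that `u32: Send` holds,
    /// and condition (2) instantiates any where-clauses declared on the
    /// auto trait with `Self = Wrapper<u32>`.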
fn confirm_auto_impl_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
trait_def_id: DefId,
) -> VtableAutoImplData<PredicateObligation<'tcx>> {
debug!(
"confirm_auto_impl_candidate({:?}, {:?})",
obligation, trait_def_id
);
let types = obligation.predicate.map_bound(|inner| {
let self_ty = self.infcx.shallow_resolve(inner.self_ty());
self.constituent_types_for_ty(self_ty)
});
self.vtable_auto_impl(obligation, trait_def_id, types)
}
/// See `confirm_auto_impl_candidate`.
fn vtable_auto_impl(
&mut self,
obligation: &TraitObligation<'tcx>,
trait_def_id: DefId,
nested: ty::Binder<Vec<Ty<'tcx>>>,
) -> VtableAutoImplData<PredicateObligation<'tcx>> {
debug!("vtable_auto_impl: nested={:?}", nested);
let cause = obligation.derived_cause(BuiltinDerivedObligation);
let mut obligations = self.collect_predicates_for_types(
obligation.param_env,
cause,
obligation.recursion_depth + 1,
trait_def_id,
nested,
);
let trait_obligations: Vec<PredicateObligation<'_>> = self.infcx.in_snapshot(|_| {
let poly_trait_ref = obligation.predicate.to_poly_trait_ref();
let (trait_ref, _) = self.infcx
.replace_bound_vars_with_placeholders(&poly_trait_ref);
let cause = obligation.derived_cause(ImplDerivedObligation);
self.impl_or_trait_obligations(
cause,
obligation.recursion_depth + 1,
obligation.param_env,
trait_def_id,
&trait_ref.substs,
)
});
// Adds the predicates from the trait. Note that this contains a `Self: Trait`
// predicate as usual. It won't have any effect since auto traits are coinductive.
obligations.extend(trait_obligations);
debug!("vtable_auto_impl: obligations={:?}", obligations);
VtableAutoImplData {
trait_def_id,
nested: obligations,
}
}
fn confirm_impl_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
impl_def_id: DefId,
) -> VtableImplData<'tcx, PredicateObligation<'tcx>> {
debug!("confirm_impl_candidate({:?},{:?})", obligation, impl_def_id);
// First, create the substitutions by matching the impl again,
// this time not in a probe.
self.infcx.in_snapshot(|snapshot| {
let substs = self.rematch_impl(impl_def_id, obligation, snapshot);
debug!("confirm_impl_candidate: substs={:?}", substs);
let cause = obligation.derived_cause(ImplDerivedObligation);
self.vtable_impl(
impl_def_id,
substs,
cause,
obligation.recursion_depth + 1,
obligation.param_env,
)
})
}
fn vtable_impl(
&mut self,
impl_def_id: DefId,
mut substs: Normalized<'tcx, SubstsRef<'tcx>>,
cause: ObligationCause<'tcx>,
recursion_depth: usize,
param_env: ty::ParamEnv<'tcx>,
) -> VtableImplData<'tcx, PredicateObligation<'tcx>> {
debug!(
"vtable_impl(impl_def_id={:?}, substs={:?}, recursion_depth={})",
impl_def_id, substs, recursion_depth,
);
let mut impl_obligations = self.impl_or_trait_obligations(
cause,
recursion_depth,
param_env,
impl_def_id,
&substs.value,
);
debug!(
"vtable_impl: impl_def_id={:?} impl_obligations={:?}",
impl_def_id, impl_obligations
);
// Because of RFC447, the impl-trait-ref and obligations
// are sufficient to determine the impl substs, without
// relying on projections in the impl-trait-ref.
//
// e.g., `impl<U: Tr, V: Iterator<Item=U>> Foo<<U as Tr>::T> for V`
impl_obligations.append(&mut substs.obligations);
VtableImplData {
impl_def_id,
substs: substs.value,
nested: impl_obligations,
}
}
fn confirm_object_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
) -> VtableObjectData<'tcx, PredicateObligation<'tcx>> {
debug!("confirm_object_candidate({:?})", obligation);
// FIXME(nmatsakis) skipping binder here seems wrong -- we should
// probably flatten the binder from the obligation and the binder
        // from the object. We still need to construct a test case that
        // actually breaks as a result.
let self_ty = self.infcx
.shallow_resolve(*obligation.self_ty().skip_binder());
let poly_trait_ref = match self_ty.kind {
ty::Dynamic(ref data, ..) =>
data.principal().unwrap_or_else(|| {
span_bug!(obligation.cause.span, "object candidate with no principal")
}).with_self_ty(self.tcx(), self_ty),
_ => span_bug!(obligation.cause.span, "object candidate with non-object"),
};
let mut upcast_trait_ref = None;
let mut nested = vec![];
let vtable_base;
{
let tcx = self.tcx();
// We want to find the first supertrait in the list of
// supertraits that we can unify with, and do that
// unification. We know that there is exactly one in the list
// where we can unify because otherwise select would have
// reported an ambiguity. (When we do find a match, also
// record it for later.)
let nonmatching = util::supertraits(tcx, poly_trait_ref).take_while(
|&t| match self.infcx.commit_if_ok(|_| self.match_poly_trait_ref(obligation, t)) {
Ok(obligations) => {
upcast_trait_ref = Some(t);
nested.extend(obligations);
false
}
Err(_) => true,
},
);
// Additionally, for each of the nonmatching predicates that
            // we pass over, we sum up the number of vtable
// entries, so that we can compute the offset for the selected
// trait.
vtable_base = nonmatching.map(|t| tcx.count_own_vtable_entries(t)).sum();
}
VtableObjectData {
upcast_trait_ref: upcast_trait_ref.unwrap(),
vtable_base,
nested,
}
}
fn confirm_fn_pointer_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
) -> Result<VtableFnPointerData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> {
debug!("confirm_fn_pointer_candidate({:?})", obligation);
// Okay to skip binder; it is reintroduced below.
let self_ty = self.infcx
.shallow_resolve(*obligation.self_ty().skip_binder());
let sig = self_ty.fn_sig(self.tcx());
let trait_ref = self.tcx()
.closure_trait_ref_and_return_type(
obligation.predicate.def_id(),
self_ty,
sig,
util::TupleArgumentsFlag::Yes,
)
.map_bound(|(trait_ref, _)| trait_ref);
let Normalized {
value: trait_ref,
obligations,
} = project::normalize_with_depth(
self,
obligation.param_env,
obligation.cause.clone(),
obligation.recursion_depth + 1,
&trait_ref,
);
self.confirm_poly_trait_refs(
obligation.cause.clone(),
obligation.param_env,
obligation.predicate.to_poly_trait_ref(),
trait_ref,
)?;
Ok(VtableFnPointerData {
fn_ty: self_ty,
nested: obligations,
})
}
fn confirm_trait_alias_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
alias_def_id: DefId,
) -> VtableTraitAliasData<'tcx, PredicateObligation<'tcx>> {
debug!(
"confirm_trait_alias_candidate({:?}, {:?})",
obligation, alias_def_id
);
self.infcx.in_snapshot(|_| {
let (predicate, _) = self.infcx()
.replace_bound_vars_with_placeholders(&obligation.predicate);
let trait_ref = predicate.trait_ref;
let trait_def_id = trait_ref.def_id;
let substs = trait_ref.substs;
let trait_obligations = self.impl_or_trait_obligations(
obligation.cause.clone(),
obligation.recursion_depth,
obligation.param_env,
trait_def_id,
&substs,
);
debug!(
"confirm_trait_alias_candidate: trait_def_id={:?} trait_obligations={:?}",
trait_def_id, trait_obligations
);
VtableTraitAliasData {
alias_def_id,
                substs,
nested: trait_obligations,
}
})
}
fn confirm_generator_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
) -> Result<VtableGeneratorData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> {
// Okay to skip binder because the substs on generator types never
// touch bound regions, they just capture the in-scope
// type/region parameters.
let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder());
let (generator_def_id, substs) = match self_ty.kind {
ty::Generator(id, substs, _) => (id, substs),
_ => bug!("closure candidate for non-closure {:?}", obligation),
};
debug!(
"confirm_generator_candidate({:?},{:?},{:?})",
obligation, generator_def_id, substs
);
let trait_ref = self.generator_trait_ref_unnormalized(obligation, generator_def_id, substs);
let Normalized {
value: trait_ref,
mut obligations,
} = normalize_with_depth(
self,
obligation.param_env,
obligation.cause.clone(),
obligation.recursion_depth + 1,
&trait_ref,
);
debug!(
"confirm_generator_candidate(generator_def_id={:?}, \
trait_ref={:?}, obligations={:?})",
generator_def_id, trait_ref, obligations
);
obligations.extend(self.confirm_poly_trait_refs(
obligation.cause.clone(),
obligation.param_env,
obligation.predicate.to_poly_trait_ref(),
trait_ref,
)?);
Ok(VtableGeneratorData {
            generator_def_id,
substs: substs.clone(),
nested: obligations,
})
}
fn confirm_closure_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
) -> Result<VtableClosureData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> {
debug!("confirm_closure_candidate({:?})", obligation);
let kind = self.tcx()
.lang_items()
.fn_trait_kind(obligation.predicate.def_id())
.unwrap_or_else(|| bug!("closure candidate for non-fn trait {:?}", obligation));
// Okay to skip binder because the substs on closure types never
// touch bound regions, they just capture the in-scope
// type/region parameters.
let self_ty = self.infcx.shallow_resolve(*obligation.self_ty().skip_binder());
let (closure_def_id, substs) = match self_ty.kind {
ty::Closure(id, substs) => (id, substs),
_ => bug!("closure candidate for non-closure {:?}", obligation),
};
let trait_ref = self.closure_trait_ref_unnormalized(obligation, closure_def_id, substs);
let Normalized {
value: trait_ref,
mut obligations,
} = normalize_with_depth(
self,
obligation.param_env,
obligation.cause.clone(),
obligation.recursion_depth + 1,
&trait_ref,
);
debug!(
"confirm_closure_candidate(closure_def_id={:?}, trait_ref={:?}, obligations={:?})",
closure_def_id, trait_ref, obligations
);
obligations.extend(self.confirm_poly_trait_refs(
obligation.cause.clone(),
obligation.param_env,
obligation.predicate.to_poly_trait_ref(),
trait_ref,
)?);
// FIXME: chalk
if !self.tcx().sess.opts.debugging_opts.chalk {
obligations.push(Obligation::new(
obligation.cause.clone(),
obligation.param_env,
ty::Predicate::ClosureKind(closure_def_id, substs, kind),
));
}
Ok(VtableClosureData {
closure_def_id,
substs: substs.clone(),
nested: obligations,
})
}
/// In the case of closure types and fn pointers,
/// we currently treat the input type parameters on the trait as
/// outputs. This means that when we have a match we have only
/// considered the self type, so we have to go back and make sure
/// to relate the argument types too. This is kind of wrong, but
/// since we control the full set of impls, also not that wrong,
/// and it DOES yield better error messages (since we don't report
/// errors as if there is no applicable impl, but rather report
    /// errors about mismatched argument types).
///
/// Here is an example. Imagine we have a closure expression
/// and we desugared it so that the type of the expression is
/// `Closure`, and `Closure` expects an int as argument. Then it
/// is "as if" the compiler generated this impl:
///
/// impl Fn(int) for Closure { ... }
///
/// Now imagine our obligation is `Fn(usize) for Closure`. So far
/// we have matched the self type `Closure`. At this point we'll
/// compare the `int` to `usize` and generate an error.
///
/// Note that this checking occurs *after* the impl has selected,
/// because these output type parameters should not affect the
/// selection of the impl. Therefore, if there is a mismatch, we
/// report an error to the user.
fn confirm_poly_trait_refs(
&mut self,
obligation_cause: ObligationCause<'tcx>,
obligation_param_env: ty::ParamEnv<'tcx>,
obligation_trait_ref: ty::PolyTraitRef<'tcx>,
expected_trait_ref: ty::PolyTraitRef<'tcx>,
) -> Result<Vec<PredicateObligation<'tcx>>, SelectionError<'tcx>> {
let obligation_trait_ref = obligation_trait_ref.clone();
self.infcx
.at(&obligation_cause, obligation_param_env)
.sup(obligation_trait_ref, expected_trait_ref)
.map(|InferOk { obligations, .. }| obligations)
.map_err(|e| OutputTypeParameterMismatch(expected_trait_ref, obligation_trait_ref, e))
}
fn confirm_builtin_unsize_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
) -> Result<VtableBuiltinData<PredicateObligation<'tcx>>, SelectionError<'tcx>> {
let tcx = self.tcx();
// assemble_candidates_for_unsizing should ensure there are no late bound
// regions here. See the comment there for more details.
let source = self.infcx
.shallow_resolve(obligation.self_ty().no_bound_vars().unwrap());
let target = obligation
.predicate
.skip_binder()
.trait_ref
.substs
.type_at(1);
let target = self.infcx.shallow_resolve(target);
debug!(
"confirm_builtin_unsize_candidate(source={:?}, target={:?})",
source, target
);
let mut nested = vec![];
match (&source.kind, &target.kind) {
// Trait+Kx+'a -> Trait+Ky+'b (upcasts).
(&ty::Dynamic(ref data_a, r_a), &ty::Dynamic(ref data_b, r_b)) => {
// See assemble_candidates_for_unsizing for more info.
let existential_predicates = data_a.map_bound(|data_a| {
let iter =
                        data_a.principal().map(ty::ExistentialPredicate::Trait)
.into_iter().chain(
data_a
.projection_bounds()
                                    .map(ty::ExistentialPredicate::Projection),
)
.chain(
data_b
.auto_traits()
.map(ty::ExistentialPredicate::AutoTrait),
);
tcx.mk_existential_predicates(iter)
});
let source_trait = tcx.mk_dynamic(existential_predicates, r_b);
// Require that the traits involved in this upcast are **equal**;
// only the **lifetime bound** is changed.
//
// FIXME: This condition is arguably too strong -- it
// would suffice for the source trait to be a
// *subtype* of the target trait. In particular
// changing from something like `for<'a, 'b> Foo<'a,
// 'b>` to `for<'a> Foo<'a, 'a>` should be
            // permitted. And, indeed, in commit
// 904a0bde93f0348f69914ee90b1f8b6e4e0d7cbc, this
// condition was loosened. However, when the leak check was added
            // back, using subtype here actually guides the coercion code in
// such a way that it accepts `old-lub-glb-object.rs`. This is probably
// a good thing, but I've modified this to `.eq` because I want
// to continue rejecting that test (as we have done for quite some time)
// before we are firmly comfortable with what our behavior
// should be there. -nikomatsakis
let InferOk { obligations, .. } = self.infcx
.at(&obligation.cause, obligation.param_env)
.eq(target, source_trait) // FIXME -- see below
.map_err(|_| Unimplemented)?;
nested.extend(obligations);
// Register one obligation for 'a: 'b.
let cause = ObligationCause::new(
obligation.cause.span,
obligation.cause.body_id,
ObjectCastObligation(target),
);
let outlives = ty::OutlivesPredicate(r_a, r_b);
nested.push(Obligation::with_depth(
cause,
obligation.recursion_depth + 1,
obligation.param_env,
ty::Binder::bind(outlives).to_predicate(),
));
}
// T -> Trait.
(_, &ty::Dynamic(ref data, r)) => {
let mut object_dids = data.auto_traits()
.chain(data.principal_def_id());
if let Some(did) = object_dids.find(|did| !tcx.is_object_safe(*did)) {
return Err(TraitNotObjectSafe(did));
}
let cause = ObligationCause::new(
obligation.cause.span,
obligation.cause.body_id,
ObjectCastObligation(target),
);
let predicate_to_obligation = |predicate| {
Obligation::with_depth(
cause.clone(),
obligation.recursion_depth + 1,
obligation.param_env,
predicate,
)
};
// Create obligations:
// - Casting T to Trait
// - For all the various builtin bounds attached to the object cast. (In other
// words, if the object type is Foo+Send, this would create an obligation for the
// Send check.)
// - Projection predicates
nested.extend(
data.iter()
.map(|d| predicate_to_obligation(d.with_self_ty(tcx, source))),
);
// We can only make objects from sized types.
let tr = ty::TraitRef {
def_id: tcx.require_lang_item(lang_items::SizedTraitLangItem, None),
substs: tcx.mk_substs_trait(source, &[]),
};
nested.push(predicate_to_obligation(tr.to_predicate()));
// If the type is `Foo+'a`, ensures that the type
// being cast to `Foo+'a` outlives `'a`:
let outlives = ty::OutlivesPredicate(source, r);
nested.push(predicate_to_obligation(
ty::Binder::dummy(outlives).to_predicate(),
));
}
// [T; n] -> [T].
(&ty::Array(a, _), &ty::Slice(b)) => {
let InferOk { obligations, .. } = self.infcx
.at(&obligation.cause, obligation.param_env)
.eq(b, a)
.map_err(|_| Unimplemented)?;
nested.extend(obligations);
}
// Struct<T> -> Struct<U>.
(&ty::Adt(def, substs_a), &ty::Adt(_, substs_b)) => {
let fields = def.all_fields()
.map(|f| tcx.type_of(f.did))
.collect::<Vec<_>>();
// The last field of the structure has to exist and contain type parameters.
let field = if let Some(&field) = fields.last() {
field
} else {
return Err(Unimplemented);
};
let mut ty_params = GrowableBitSet::new_empty();
let mut found = false;
for ty in field.walk() {
if let ty::Param(p) = ty.kind {
ty_params.insert(p.index as usize);
found = true;
}
}
if !found {
return Err(Unimplemented);
}
// Replace type parameters used in unsizing with
// Error and ensure they do not affect any other fields.
// This could be checked after type collection for any struct
// with a potentially unsized trailing field.
let params = substs_a.iter().enumerate().map(|(i, &k)| {
if ty_params.contains(i) {
tcx.types.err.into()
} else {
k
}
});
let substs = tcx.mk_substs(params);
for &ty in fields.split_last().unwrap().1 {
if ty.subst(tcx, substs).references_error() {
return Err(Unimplemented);
}
}
// Extract Field<T> and Field<U> from Struct<T> and Struct<U>.
let inner_source = field.subst(tcx, substs_a);
let inner_target = field.subst(tcx, substs_b);
// Check that the source struct with the target's
// unsized parameters is equal to the target.
let params = substs_a.iter().enumerate().map(|(i, &k)| {
if ty_params.contains(i) {
substs_b.type_at(i).into()
} else {
k
}
});
let new_struct = tcx.mk_adt(def, tcx.mk_substs(params));
let InferOk { obligations, .. } = self.infcx
.at(&obligation.cause, obligation.param_env)
.eq(target, new_struct)
.map_err(|_| Unimplemented)?;
nested.extend(obligations);
// Construct the nested Field<T>: Unsize<Field<U>> predicate.
nested.push(tcx.predicate_for_trait_def(
obligation.param_env,
obligation.cause.clone(),
obligation.predicate.def_id(),
obligation.recursion_depth + 1,
inner_source,
&[inner_target.into()],
));
}
// (.., T) -> (.., U).
(&ty::Tuple(tys_a), &ty::Tuple(tys_b)) => {
assert_eq!(tys_a.len(), tys_b.len());
// The last field of the tuple has to exist.
let (&a_last, a_mid) = if let Some(x) = tys_a.split_last() {
x
} else {
return Err(Unimplemented);
};
let &b_last = tys_b.last().unwrap();
// Check that the source tuple with the target's
// last element is equal to the target.
let new_tuple = tcx.mk_tup(
a_mid.iter().map(|k| k.expect_ty()).chain(iter::once(b_last.expect_ty())),
);
let InferOk { obligations, .. } = self.infcx
.at(&obligation.cause, obligation.param_env)
.eq(target, new_tuple)
.map_err(|_| Unimplemented)?;
nested.extend(obligations);
// Construct the nested T: Unsize<U> predicate.
nested.push(tcx.predicate_for_trait_def(
obligation.param_env,
obligation.cause.clone(),
obligation.predicate.def_id(),
obligation.recursion_depth + 1,
a_last.expect_ty(),
&[b_last.into()],
));
}
_ => bug!(),
};
Ok(VtableBuiltinData { nested })
}
///////////////////////////////////////////////////////////////////////////
// Matching
//
// Matching is a common path used for both evaluation and
// confirmation. It basically unifies types that appear in impls
// and traits. This does affect the surrounding environment;
// therefore, when used during evaluation, match routines must be
// run inside of a `probe()` so that their side-effects are
// contained.
fn rematch_impl(
&mut self,
impl_def_id: DefId,
obligation: &TraitObligation<'tcx>,
snapshot: &CombinedSnapshot<'_, 'tcx>,
) -> Normalized<'tcx, SubstsRef<'tcx>> {
match self.match_impl(impl_def_id, obligation, snapshot) {
Ok(substs) => substs,
Err(()) => {
bug!(
"Impl {:?} was matchable against {:?} but now is not",
impl_def_id,
obligation
);
}
}
}
fn match_impl(
&mut self,
impl_def_id: DefId,
obligation: &TraitObligation<'tcx>,
snapshot: &CombinedSnapshot<'_, 'tcx>,
) -> Result<Normalized<'tcx, SubstsRef<'tcx>>, ()> {
let impl_trait_ref = self.tcx().impl_trait_ref(impl_def_id).unwrap();
// Before we create the substitutions and everything, first
// consider a "quick reject". This avoids creating more types
// and so forth that we need to.
if self.fast_reject_trait_refs(obligation, &impl_trait_ref) {
return Err(());
}
let (skol_obligation, placeholder_map) = self.infcx()
.replace_bound_vars_with_placeholders(&obligation.predicate);
let skol_obligation_trait_ref = skol_obligation.trait_ref;
let impl_substs = self.infcx
.fresh_substs_for_item(obligation.cause.span, impl_def_id);
let impl_trait_ref = impl_trait_ref.subst(self.tcx(), impl_substs);
let Normalized {
value: impl_trait_ref,
obligations: mut nested_obligations,
} = project::normalize_with_depth(
self,
obligation.param_env,
obligation.cause.clone(),
obligation.recursion_depth + 1,
&impl_trait_ref,
);
debug!(
"match_impl(impl_def_id={:?}, obligation={:?}, \
impl_trait_ref={:?}, skol_obligation_trait_ref={:?})",
impl_def_id, obligation, impl_trait_ref, skol_obligation_trait_ref
);
let InferOk { obligations, .. } = self.infcx
.at(&obligation.cause, obligation.param_env)
.eq(skol_obligation_trait_ref, impl_trait_ref)
.map_err(|e| debug!("match_impl: failed eq_trait_refs due to `{}`", e))?;
nested_obligations.extend(obligations);
if let Err(e) = self.infcx.leak_check(false, &placeholder_map, snapshot) {
debug!("match_impl: failed leak check due to `{}`", e);
return Err(());
}
if self.intercrate.is_none()
&& self.tcx().impl_polarity(impl_def_id) == ty::ImplPolarity::Reservation
{
debug!("match_impl: reservation impls only apply in intercrate mode");
return Err(());
}
debug!("match_impl: success impl_substs={:?}", impl_substs);
Ok(Normalized {
value: impl_substs,
obligations: nested_obligations,
})
}
fn fast_reject_trait_refs(
&mut self,
obligation: &TraitObligation<'_>,
impl_trait_ref: &ty::TraitRef<'_>,
) -> bool {
// We can avoid creating type variables and doing the full
// substitution if we find that any of the input types, when
// simplified, do not match.
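        //
        // For example (illustrative only): an obligation like `Vec<u32>: Foo`
        // can be rejected against an `impl Foo for HashMap<K, V>` without
        // instantiating the impl, because the simplified self types
        // (`Vec` vs. `HashMap`) already differ.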
obligation
.predicate
.skip_binder()
.input_types()
.zip(impl_trait_ref.input_types())
.any(|(obligation_ty, impl_ty)| {
let simplified_obligation_ty =
fast_reject::simplify_type(self.tcx(), obligation_ty, true);
let simplified_impl_ty = fast_reject::simplify_type(self.tcx(), impl_ty, false);
simplified_obligation_ty.is_some()
&& simplified_impl_ty.is_some()
&& simplified_obligation_ty != simplified_impl_ty
})
}
/// Normalize `where_clause_trait_ref` and try to match it against
/// `obligation`. If successful, return any predicates that
/// result from the normalization. Normalization is necessary
/// because where-clauses are stored in the parameter environment
/// unnormalized.
fn match_where_clause_trait_ref(
&mut self,
obligation: &TraitObligation<'tcx>,
where_clause_trait_ref: ty::PolyTraitRef<'tcx>,
) -> Result<Vec<PredicateObligation<'tcx>>, ()> {
self.match_poly_trait_ref(obligation, where_clause_trait_ref)
}
/// Returns `Ok` if `poly_trait_ref` being true implies that the
/// obligation is satisfied.
fn match_poly_trait_ref(
&mut self,
obligation: &TraitObligation<'tcx>,
poly_trait_ref: ty::PolyTraitRef<'tcx>,
) -> Result<Vec<PredicateObligation<'tcx>>, ()> {
debug!(
"match_poly_trait_ref: obligation={:?} poly_trait_ref={:?}",
obligation, poly_trait_ref
);
self.infcx
.at(&obligation.cause, obligation.param_env)
.sup(obligation.predicate.to_poly_trait_ref(), poly_trait_ref)
.map(|InferOk { obligations, .. }| obligations)
.map_err(|_| ())
}
///////////////////////////////////////////////////////////////////////////
// Miscellany
fn match_fresh_trait_refs(
&self,
previous: &ty::PolyTraitRef<'tcx>,
current: &ty::PolyTraitRef<'tcx>,
param_env: ty::ParamEnv<'tcx>,
) -> bool {
let mut matcher = ty::_match::Match::new(self.tcx(), param_env);
matcher.relate(previous, current).is_ok()
}
fn push_stack<'o>(
&mut self,
previous_stack: TraitObligationStackList<'o, 'tcx>,
obligation: &'o TraitObligation<'tcx>,
) -> TraitObligationStack<'o, 'tcx> {
let fresh_trait_ref = obligation
.predicate
.to_poly_trait_ref()
.fold_with(&mut self.freshener);
let dfn = previous_stack.cache.next_dfn();
let depth = previous_stack.depth() + 1;
TraitObligationStack {
obligation,
fresh_trait_ref,
reached_depth: Cell::new(depth),
previous: previous_stack,
dfn,
depth,
}
}
fn closure_trait_ref_unnormalized(
&mut self,
obligation: &TraitObligation<'tcx>,
closure_def_id: DefId,
substs: ty::ClosureSubsts<'tcx>,
) -> ty::PolyTraitRef<'tcx> {
debug!(
"closure_trait_ref_unnormalized(obligation={:?}, closure_def_id={:?}, substs={:?})",
obligation, closure_def_id, substs,
);
let closure_type = self.infcx.closure_sig(closure_def_id, substs);
debug!(
"closure_trait_ref_unnormalized: closure_type = {:?}",
closure_type
);
// (1) Feels icky to skip the binder here, but OTOH we know
// that the self-type is an unboxed closure type and hence is
// in fact unparameterized (or at least does not reference any
// regions bound in the obligation). Still probably some
// refactoring could make this nicer.
self.tcx()
.closure_trait_ref_and_return_type(
obligation.predicate.def_id(),
obligation.predicate.skip_binder().self_ty(), // (1)
closure_type,
util::TupleArgumentsFlag::No,
)
.map_bound(|(trait_ref, _)| trait_ref)
}
fn generator_trait_ref_unnormalized(
&mut self,
obligation: &TraitObligation<'tcx>,
closure_def_id: DefId,
substs: ty::GeneratorSubsts<'tcx>,
) -> ty::PolyTraitRef<'tcx> {
let gen_sig = substs.poly_sig(closure_def_id, self.tcx());
// (1) Feels icky to skip the binder here, but OTOH we know
        // that the self-type is a generator type and hence is
// in fact unparameterized (or at least does not reference any
// regions bound in the obligation). Still probably some
// refactoring could make this nicer.
self.tcx()
.generator_trait_ref_and_outputs(
obligation.predicate.def_id(),
obligation.predicate.skip_binder().self_ty(), // (1)
gen_sig,
)
.map_bound(|(trait_ref, ..)| trait_ref)
}
/// Returns the obligations that are implied by instantiating an
/// impl or trait. The obligations are substituted and fully
/// normalized. This is used when confirming an impl or default
/// impl.
fn impl_or_trait_obligations(
&mut self,
cause: ObligationCause<'tcx>,
recursion_depth: usize,
param_env: ty::ParamEnv<'tcx>,
def_id: DefId, // of impl or trait
substs: SubstsRef<'tcx>, // for impl or trait
) -> Vec<PredicateObligation<'tcx>> {
debug!("impl_or_trait_obligations(def_id={:?})", def_id);
let tcx = self.tcx();
// To allow for one-pass evaluation of the nested obligation,
// each predicate must be preceded by the obligations required
// to normalize it.
        // For example, if we have:
// impl<U: Iterator<Item: Copy>, V: Iterator<Item = U>> Foo for V
// the impl will have the following predicates:
// <V as Iterator>::Item = U,
// U: Iterator, U: Sized,
// V: Iterator, V: Sized,
// <U as Iterator>::Item: Copy
// When we substitute, say, `V => IntoIter<u32>, U => $0`, the last
// obligation will normalize to `<$0 as Iterator>::Item = $1` and
// `$1: Copy`, so we must ensure the obligations are emitted in
// that order.
let predicates = tcx.predicates_of(def_id);
assert_eq!(predicates.parent, None);
let mut predicates: Vec<_> = predicates
.predicates
.iter()
.flat_map(|(predicate, _)| {
let predicate = normalize_with_depth(
self,
param_env,
cause.clone(),
recursion_depth,
&predicate.subst(tcx, substs),
);
predicate.obligations.into_iter().chain(Some(Obligation {
cause: cause.clone(),
recursion_depth,
param_env,
predicate: predicate.value,
}))
})
.collect();
// We are performing deduplication here to avoid exponential blowups
// (#38528) from happening, but the real cause of the duplication is
// unknown. What we know is that the deduplication avoids exponential
        // amounts of predicates being propagated when processing deeply nested
// types.
//
// This code is hot enough that it's worth avoiding the allocation
// required for the FxHashSet when possible. Special-casing lengths 0,
// 1 and 2 covers roughly 75--80% of the cases.
if predicates.len() <= 1 {
// No possibility of duplicates.
} else if predicates.len() == 2 {
// Only two elements. Drop the second if they are equal.
if predicates[0] == predicates[1] {
predicates.truncate(1);
}
} else {
// Three or more elements. Use a general deduplication process.
let mut seen = FxHashSet::default();
predicates.retain(|i| seen.insert(i.clone()));
}
predicates
}
}
impl<'tcx> TraitObligation<'tcx> {
#[allow(unused_comparisons)]
pub fn derived_cause(
&self,
variant: fn(DerivedObligationCause<'tcx>) -> ObligationCauseCode<'tcx>,
) -> ObligationCause<'tcx> {
/*!
* Creates a cause for obligations that are derived from
* `obligation` by a recursive search (e.g., for a builtin
         * bound, or eventually an `auto trait Foo`). If `obligation`
* is itself a derived obligation, this is just a clone, but
* otherwise we create a "derived obligation" cause so as to
* keep track of the original root obligation for error
* reporting.
*/
let obligation = self;
// NOTE(flaper87): As of now, it keeps track of the whole error
// chain. Ideally, we should have a way to configure this either
// by using -Z verbose or just a CLI argument.
if obligation.recursion_depth >= 0 {
let derived_cause = DerivedObligationCause {
parent_trait_ref: obligation.predicate.to_poly_trait_ref(),
parent_code: Rc::new(obligation.cause.code.clone()),
};
let derived_code = variant(derived_cause);
ObligationCause::new(
obligation.cause.span,
obligation.cause.body_id,
derived_code,
)
} else {
obligation.cause.clone()
}
}
}
impl<'tcx> SelectionCache<'tcx> {
/// Actually frees the underlying memory in contrast to what stdlib containers do on `clear`
pub fn clear(&self) {
*self.hashmap.borrow_mut() = Default::default();
}
}
impl<'tcx> EvaluationCache<'tcx> {
/// Actually frees the underlying memory in contrast to what stdlib containers do on `clear`
pub fn clear(&self) {
*self.hashmap.borrow_mut() = Default::default();
}
}
impl<'o, 'tcx> TraitObligationStack<'o, 'tcx> {
fn list(&'o self) -> TraitObligationStackList<'o, 'tcx> {
TraitObligationStackList::with(self)
}
fn cache(&self) -> &'o ProvisionalEvaluationCache<'tcx> {
self.previous.cache
}
fn iter(&'o self) -> TraitObligationStackList<'o, 'tcx> {
self.list()
}
/// Indicates that attempting to evaluate this stack entry
/// required accessing something from the stack at depth `reached_depth`.
fn update_reached_depth(&self, reached_depth: usize) {
assert!(
self.depth > reached_depth,
"invoked `update_reached_depth` with something under this stack: \
self.depth={} reached_depth={}",
self.depth,
reached_depth,
);
debug!("update_reached_depth(reached_depth={})", reached_depth);
let mut p = self;
while reached_depth < p.depth {
debug!("update_reached_depth: marking {:?} as cycle participant", p.fresh_trait_ref);
p.reached_depth.set(p.reached_depth.get().min(reached_depth));
p = p.previous.head.unwrap();
}
}
}
/// The "provisional evaluation cache" is used to store intermediate cache results
/// when solving auto traits. Auto traits are unusual in that they can support
/// cycles. So, for example, a "proof tree" like this would be ok:
///
/// - `Foo<T>: Send` :-
/// - `Bar<T>: Send` :-
/// - `Foo<T>: Send` -- cycle, but ok
/// - `Baz<T>: Send`
///
/// Here, to prove `Foo<T>: Send`, we have to prove `Bar<T>: Send` and
/// `Baz<T>: Send`. Proving `Bar<T>: Send` in turn required `Foo<T>: Send`.
/// For non-auto traits, this cycle would be an error, but for auto traits (because
/// they are coinductive) it is considered ok.
///
/// However, there is a complication: at the point where we have
/// "proven" `Bar<T>: Send`, we have in fact only proven it
/// *provisionally*. In particular, we proved that `Bar<T>: Send`
/// *under the assumption* that `Foo<T>: Send`. But what if we later
/// find out this assumption is wrong? Specifically, we could
/// encounter some kind of error proving `Baz<T>: Send`. In that case,
/// `Bar<T>: Send` didn't turn out to be true.
///
/// In Issue #60010, we found a bug in rustc where it would cache
/// these intermediate results. This was fixed in #60444 by disabling
/// *all* caching for things involved in a cycle -- in our example,
/// that would mean we don't cache that `Bar<T>: Send`. But this led
/// to large slowdowns.
///
/// Specifically, imagine this scenario, where proving `Baz<T>: Send`
/// first requires proving `Bar<T>: Send` (which is true):
///
/// - `Foo<T>: Send` :-
/// - `Bar<T>: Send` :-
/// - `Foo<T>: Send` -- cycle, but ok
/// - `Baz<T>: Send`
/// - `Bar<T>: Send` -- would be nice for this to be a cache hit!
/// - `*const T: Send` -- but what if we later encounter an error?
///
/// The *provisional evaluation cache* resolves this issue. It stores
/// cache results that we've proven but which were involved in a cycle
/// in some way. We track the minimal stack depth (i.e., the
/// farthest from the top of the stack) that we are dependent on.
/// The idea is that the cache results within are all valid -- so long as
/// none of the nodes in between the current node and the node at that minimum
/// depth result in an error (in which case the cached results are just thrown away).
///
/// During evaluation, we consult this provisional cache and rely on
/// it. Accessing a cached value is considered equivalent to accessing
/// a result at `reached_depth`, so it marks the *current* solution as
/// provisional as well. If an error is encountered, we toss out any
/// provisional results added from the subtree that encountered the
/// error. When we pop the node at `reached_depth` from the stack, we
/// can commit all the things that remain in the provisional cache.
struct ProvisionalEvaluationCache<'tcx> {
/// next "depth first number" to issue -- just a counter
dfn: Cell<usize>,
/// Stores the "coldest" depth (bottom of stack) reached by any of
/// the evaluation entries. The idea here is that all things in the provisional
/// cache are always dependent on *something* that is colder in the stack:
/// therefore, if we add a new entry that is dependent on something *colder still*,
/// we have to modify the depth for all entries at once.
///
/// Example:
///
/// Imagine we have a stack `A B C D E` (with `E` being the top of
/// the stack). We cache something with depth 2, which means that
/// it was dependent on C. Then we pop E but go on and process a
/// new node F: A B C D F. Now F adds something to the cache with
/// depth 1, meaning it is dependent on B. Our original cache
/// entry is also dependent on B, because there is a path from E
/// to C and then from C to F and from F to B.
reached_depth: Cell<usize>,
/// Map from cache key to the provisionally evaluated thing.
/// The cache entries contain the result but also the DFN in which they
/// were added. The DFN is used to clear out values on failure.
///
/// Imagine we have a stack like:
///
/// - `A B C` and we add a cache for the result of C (DFN 2)
/// - Then we have a stack `A B D` where `D` has DFN 3
/// - We try to solve D by evaluating E: `A B D E` (DFN 4)
/// - `E` generates various cache entries which have cyclic dependencies on `B`
/// - `A B D E F` and so forth
/// - the DFN of `F` for example would be 5
/// - then we determine that `E` is in error -- we will then clear
/// all cache values whose DFN is >= 4 -- in this case, that
/// means the cached value for `F`.
map: RefCell<FxHashMap<ty::PolyTraitRef<'tcx>, ProvisionalEvaluation>>,
}
/// A cache value for the provisional cache: contains the depth-first
/// number (DFN) and result.
#[derive(Copy, Clone, Debug)]
struct ProvisionalEvaluation {
from_dfn: usize,
result: EvaluationResult,
}
impl<'tcx> Default for ProvisionalEvaluationCache<'tcx> {
fn default() -> Self {
Self {
dfn: Cell::new(0),
reached_depth: Cell::new(std::usize::MAX),
map: Default::default(),
}
}
}
impl<'tcx> ProvisionalEvaluationCache<'tcx> {
/// Get the next DFN in sequence (basically a counter).
fn next_dfn(&self) -> usize {
let result = self.dfn.get();
self.dfn.set(result + 1);
result
}
/// Check the provisional cache for any result for
/// `fresh_trait_ref`. If there is a hit, then you must consider
/// it an access to the stack slots at depth
/// `self.current_reached_depth()` and above.
fn get_provisional(&self, fresh_trait_ref: ty::PolyTraitRef<'tcx>) -> Option<EvaluationResult> {
debug!(
"get_provisional(fresh_trait_ref={:?}) = {:#?} with reached-depth {}",
fresh_trait_ref,
self.map.borrow().get(&fresh_trait_ref),
self.reached_depth.get(),
);
Some(self.map.borrow().get(&fresh_trait_ref)?.result)
}
/// Current value of the `reached_depth` counter -- all the
/// provisional cache entries are dependent on the item at this
/// depth.
fn current_reached_depth(&self) -> usize {
self.reached_depth.get()
}
/// Insert a provisional result into the cache. The result came
/// from the node with the given DFN. It accessed a minimum depth
/// of `reached_depth` to compute. It evaluated `fresh_trait_ref`
/// and resulted in `result`.
fn insert_provisional(
&self,
from_dfn: usize,
reached_depth: usize,
fresh_trait_ref: ty::PolyTraitRef<'tcx>,
result: EvaluationResult,
) {
debug!(
"insert_provisional(from_dfn={}, reached_depth={}, fresh_trait_ref={:?}, result={:?})",
from_dfn,
reached_depth,
fresh_trait_ref,
result,
);
let r_d = self.reached_depth.get();
self.reached_depth.set(r_d.min(reached_depth));
debug!("insert_provisional: reached_depth={:?}", self.reached_depth.get());
self.map.borrow_mut().insert(fresh_trait_ref, ProvisionalEvaluation { from_dfn, result });
}
/// Invoked when the node with dfn `dfn` does not get a successful
/// result. This will clear out any provisional cache entries
/// that were added since `dfn` was created. This is because the
/// provisional entries are things which must assume that the
/// things on the stack at the time of their creation succeeded --
/// since the failing node is presently at the top of the stack,
/// these provisional entries must either depend on it or some
/// ancestor of it.
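    ///
    /// For example (illustrative): if provisional entries were added at
    /// DFNs 2, 5, and 7, then `on_failure(5)` removes the entries from
    /// DFNs 5 and 7 but keeps the one from DFN 2.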
fn on_failure(&self, dfn: usize) {
debug!(
"on_failure(dfn={:?})",
dfn,
);
self.map.borrow_mut().retain(|key, eval| {
            if eval.from_dfn >= dfn {
debug!("on_failure: removing {:?}", key);
false
} else {
true
}
});
}
/// Invoked when the node at depth `depth` completed without
/// depending on anything higher in the stack (if that completion
/// was a failure, then `on_failure` should have been invoked
/// already). The callback `op` will be invoked for each
/// provisional entry that we can now confirm.
fn on_completion(
&self,
depth: usize,
mut op: impl FnMut(ty::PolyTraitRef<'tcx>, EvaluationResult),
) {
debug!(
"on_completion(depth={}, reached_depth={})",
depth,
self.reached_depth.get(),
);
if self.reached_depth.get() < depth {
debug!("on_completion: did not yet reach depth to complete");
return;
}
for (fresh_trait_ref, eval) in self.map.borrow_mut().drain() {
debug!(
"on_completion: fresh_trait_ref={:?} eval={:?}",
fresh_trait_ref,
eval,
);
op(fresh_trait_ref, eval.result);
}
self.reached_depth.set(std::usize::MAX);
}
}
#[derive(Copy, Clone)]
struct TraitObligationStackList<'o, 'tcx> {
cache: &'o ProvisionalEvaluationCache<'tcx>,
head: Option<&'o TraitObligationStack<'o, 'tcx>>,
}
impl<'o, 'tcx> TraitObligationStackList<'o, 'tcx> {
fn empty(cache: &'o ProvisionalEvaluationCache<'tcx>) -> TraitObligationStackList<'o, 'tcx> {
TraitObligationStackList { cache, head: None }
}
fn with(r: &'o TraitObligationStack<'o, 'tcx>) -> TraitObligationStackList<'o, 'tcx> {
TraitObligationStackList { cache: r.cache(), head: Some(r) }
}
fn head(&self) -> Option<&'o TraitObligationStack<'o, 'tcx>> {
self.head
}
fn depth(&self) -> usize {
if let Some(head) = self.head {
head.depth
} else {
0
}
}
}
impl<'o, 'tcx> Iterator for TraitObligationStackList<'o, 'tcx> {
type Item = &'o TraitObligationStack<'o, 'tcx>;
fn next(&mut self) -> Option<&'o TraitObligationStack<'o, 'tcx>> {
match self.head {
Some(o) => {
*self = o.previous;
Some(o)
}
None => None,
}
}
}
impl<'o, 'tcx> fmt::Debug for TraitObligationStack<'o, 'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "TraitObligationStack({:?})", self.obligation)
}
}
#[derive(Clone, Eq, PartialEq)]
pub struct WithDepNode<T> {
dep_node: DepNodeIndex,
cached_value: T,
}
impl<T: Clone> WithDepNode<T> {
pub fn new(dep_node: DepNodeIndex, cached_value: T) -> Self {
WithDepNode {
dep_node,
cached_value,
}
}
pub fn get(&self, tcx: TyCtxt<'_>) -> T {
tcx.dep_graph.read_index(self.dep_node);
self.cached_value.clone()
}
}
| 39.495086 | 100 | 0.552575 |
c17cc2f5cfe621c0f2011e2bb635596b1adeb1e6 | 6,040 | // Copyright 2020 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Basic TUI to better output the overall system status and status
//! of various subsystems
use chrono::prelude::Utc;
use cursive::direction::Orientation;
use cursive::theme::BaseColor::{Black, Blue, Cyan, White};
use cursive::theme::Color::Dark;
use cursive::theme::PaletteColor::{
Background, Highlight, HighlightInactive, Primary, Shadow, View,
};
use cursive::theme::{BaseColor, BorderStyle, Color, Theme};
use cursive::traits::Boxable;
use cursive::traits::Identifiable;
use cursive::utils::markup::StyledString;
use cursive::views::{CircularFocus, Dialog, LinearLayout, Panel, StackView, TextView, ViewBox};
use cursive::Cursive;
use std::sync::mpsc;
use crate::built_info;
use crate::servers::Server;
use crate::tui::constants::ROOT_STACK;
use crate::tui::types::{TUIStatusListener, UIMessage};
use crate::tui::{logs, menu, mining, peers, status, version};
use grin_util::logger::LogEntry;
pub struct UI {
cursive: Cursive,
ui_rx: mpsc::Receiver<UIMessage>,
ui_tx: mpsc::Sender<UIMessage>,
controller_tx: mpsc::Sender<ControllerMessage>,
logs_rx: mpsc::Receiver<LogEntry>,
}
fn modify_theme(theme: &mut Theme) {
theme.shadow = false;
theme.borders = BorderStyle::Simple;
theme.palette[Background] = Dark(Black);
theme.palette[Shadow] = Dark(Black);
theme.palette[View] = Dark(Black);
theme.palette[Primary] = Dark(White);
theme.palette[Highlight] = Dark(Cyan);
theme.palette[HighlightInactive] = Dark(Blue);
// also secondary, tertiary, TitlePrimary, TitleSecondary
}
impl UI {
/// Create a new UI
pub fn new(
controller_tx: mpsc::Sender<ControllerMessage>,
logs_rx: mpsc::Receiver<LogEntry>,
) -> UI {
let (ui_tx, ui_rx) = mpsc::channel::<UIMessage>();
let mut grin_ui = UI {
cursive: Cursive::default(),
ui_tx,
ui_rx,
controller_tx,
logs_rx,
};
// Create UI objects, etc
let status_view = status::TUIStatusView::create();
let mining_view = mining::TUIMiningView::create();
let peer_view = peers::TUIPeerView::create();
let logs_view = logs::TUILogsView::create();
let version_view = version::TUIVersionView::create();
let main_menu = menu::create();
let root_stack = StackView::new()
.layer(version_view)
.layer(mining_view)
.layer(peer_view)
.layer(logs_view)
.layer(status_view)
.with_id(ROOT_STACK)
.full_height();
let mut title_string = StyledString::new();
title_string.append(StyledString::styled(
format!(
"Grin Version {} (proto: {})",
built_info::PKG_VERSION,
Server::protocol_version()
),
Color::Dark(BaseColor::Green),
));
let main_layer = LinearLayout::new(Orientation::Vertical)
.child(Panel::new(TextView::new(title_string).full_width()))
.child(
LinearLayout::new(Orientation::Horizontal)
.child(Panel::new(ViewBox::new(main_menu)))
.child(Panel::new(root_stack)),
);
//set theme
let mut theme = grin_ui.cursive.current_theme().clone();
modify_theme(&mut theme);
grin_ui.cursive.set_theme(theme);
grin_ui.cursive.add_fullscreen_layer(main_layer);
// Configure a callback (shutdown, for the first test)
let controller_tx_clone = grin_ui.controller_tx.clone();
grin_ui.cursive.add_global_callback('q', move |c| {
let content = StyledString::styled("Shutting down...", Color::Light(BaseColor::Yellow));
c.add_layer(CircularFocus::wrap_tab(Dialog::around(TextView::new(
content,
))));
controller_tx_clone
.send(ControllerMessage::Shutdown)
.unwrap();
});
grin_ui.cursive.set_fps(3);
grin_ui
}
/// Step the UI by calling into Cursive's step function, then
/// processing any UI messages
pub fn step(&mut self) -> bool {
if !self.cursive.is_running() {
return false;
}
while let Some(message) = self.logs_rx.try_iter().next() {
logs::TUILogsView::update(&mut self.cursive, message);
}
// Process any pending UI messages
while let Some(message) = self.ui_rx.try_iter().next() {
match message {
UIMessage::UpdateStatus(update) => {
status::TUIStatusView::update(&mut self.cursive, &update);
mining::TUIMiningView::update(&mut self.cursive, &update);
peers::TUIPeerView::update(&mut self.cursive, &update);
version::TUIVersionView::update(&mut self.cursive, &update);
}
}
}
// Step the UI
self.cursive.step();
true
}
/// Stop the UI
pub fn stop(&mut self) {
self.cursive.quit();
}
}
pub struct Controller {
rx: mpsc::Receiver<ControllerMessage>,
ui: UI,
}
pub enum ControllerMessage {
Shutdown,
}
impl Controller {
/// Create a new controller
pub fn new(logs_rx: mpsc::Receiver<LogEntry>) -> Result<Controller, String> {
let (tx, rx) = mpsc::channel::<ControllerMessage>();
Ok(Controller {
rx,
ui: UI::new(tx, logs_rx),
})
}
/// Run the controller
pub fn run(&mut self, server: Server) {
let stat_update_interval = 1;
let mut next_stat_update = Utc::now().timestamp() + stat_update_interval;
while self.ui.step() {
while let Some(message) = self.rx.try_iter().next() {
match message {
ControllerMessage::Shutdown => {
warn!("Shutdown in progress, please wait");
self.ui.stop();
server.stop();
return;
}
}
}
if Utc::now().timestamp() > next_stat_update {
next_stat_update = Utc::now().timestamp() + stat_update_interval;
if let Ok(stats) = server.get_server_stats() {
self.ui.ui_tx.send(UIMessage::UpdateStatus(stats)).unwrap();
}
}
}
server.stop();
}
}
| 28.490566 | 95 | 0.693709 |
228b33219415b5d566cf48621474911124ba1f94 | 5,548 | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Core data types.
use std::fmt::{self, Display, Formatter};
use std::hash::{Hash, Hasher};
use std::u64;
use util::codec::bytes;
use util::codec::number::{self, NumberEncoder};
use util::{codec, escape};
use storage::mvcc::{Lock, Write};
/// Value type which is essentially raw bytes.
pub type Value = Vec<u8>;
/// Key-value pair type.
///
/// The value is simply raw bytes; the key is a little bit trickier: it is
/// encoded bytes.
pub type KvPair = (Vec<u8>, Value);
/// `MvccInfo` stores all MVCC information of a given key.
/// Used by `MvccGetByKey` and `MvccGetByStartTs`.
#[derive(Debug, Default)]
pub struct MvccInfo {
pub lock: Option<Lock>,
/// commit_ts and write
pub writes: Vec<(u64, Write)>,
/// start_ts and value
pub values: Vec<(u64, Value)>,
}
/// The caller should ensure the key is a timestamped key.
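///
/// ```ignore
/// // A minimal sketch using only the API defined in this file: strip the
/// // trailing 8-byte (`number::U64_SIZE`) timestamp from an encoded key.
/// let key = Key::from_raw(b"k").append_ts(1);
/// assert_eq!(truncate_ts(key.encoded()), Key::from_raw(b"k").encoded().as_slice());
/// ```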
pub fn truncate_ts(key: &[u8]) -> &[u8] {
&key[..key.len() - number::U64_SIZE]
}
/// Key type.
///
/// Keys have 2 types of binary representation - raw and encoded. The raw
/// representation is for public interface, the encoded representation is for
/// internal storage. We can get both representations from an instance of this
/// type.
///
/// Orthogonal to binary representation, keys may or may not embed a timestamp,
/// but this information is transparent to this type; the caller must use it
/// consistently.
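///
/// ```ignore
/// // A minimal sketch of the two representations: raw bytes round-trip
/// // through the encoded form.
/// let key = Key::from_raw(b"key");
/// assert_eq!(key.raw().unwrap(), b"key".to_vec());
/// ```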
#[derive(Debug, Clone)]
pub struct Key(Vec<u8>);
/// Core functions for `Key`.
impl Key {
/// Creates a key from raw bytes.
pub fn from_raw(key: &[u8]) -> Key {
Key(codec::bytes::encode_bytes(key))
}
/// Gets the raw representation of this key.
pub fn raw(&self) -> Result<Vec<u8>, codec::Error> {
bytes::decode_bytes(&mut self.0.as_slice(), false)
}
/// Creates a key from encoded bytes.
pub fn from_encoded(encoded_key: Vec<u8>) -> Key {
Key(encoded_key)
}
/// Gets the encoded representation of this key.
pub fn encoded(&self) -> &Vec<u8> {
&self.0
}
/// Creates a new key by appending a `u64` timestamp to this key.
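    ///
    /// ```ignore
    /// // A minimal sketch (the unit test at the bottom of this file checks
    /// // the same round-trip via `split_encoded_key_on_ts`):
    /// let key = Key::from_raw(b"k").append_ts(123);
    /// assert_eq!(key.decode_ts().unwrap(), 123);
    /// ```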
pub fn append_ts(&self, ts: u64) -> Key {
let mut encoded = self.0.clone();
encoded.encode_u64_desc(ts).unwrap();
Key(encoded)
}
/// Gets the timestamp contained in this key.
///
/// Preconditions: the caller must ensure this is actually a timestamped
/// key.
pub fn decode_ts(&self) -> Result<u64, codec::Error> {
let len = self.0.len();
if len < number::U64_SIZE {
// TODO: IMHO, this should be an assertion failure instead of
// returning an error. If this happens, it indicates a bug in
            // the caller module, and a code change would be needed to fix it.
//
// Even if it passed the length check, it still could be buggy,
// a better way is to introduce a type `TimestampedKey`, and
// functions to convert between `TimestampedKey` and `Key`.
// `TimestampedKey` is in a higher (MVCC) layer, while `Key` is
// in the core storage engine layer.
Err(codec::Error::KeyLength)
} else {
let mut ts = &self.0[len - number::U64_SIZE..];
Ok(number::decode_u64_desc(&mut ts)?)
}
}
/// Creates a new key by truncating the timestamp from this key.
///
/// Preconditions: the caller must ensure this is actually a timestamped key.
pub fn truncate_ts(&self) -> Result<Key, codec::Error> {
let len = self.0.len();
if len < number::U64_SIZE {
// TODO: (the same as above)
return Err(codec::Error::KeyLength);
}
Ok(Key::from_encoded(truncate_ts(&self.0).to_vec()))
}
}
/// Hash for `Key`.
impl Hash for Key {
fn hash<H: Hasher>(&self, state: &mut H) {
self.encoded().hash(state)
}
}
/// Display for `Key`.
impl Display for Key {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "{}", escape(&self.0))
}
}
/// Partial equality for `Key`.
impl PartialEq for Key {
fn eq(&self, other: &Key) -> bool {
self.0 == other.0
}
}
/// Creates a new key from raw bytes.
pub fn make_key(k: &[u8]) -> Key {
Key::from_raw(k)
}
/// Splits encoded key on timestamp.
/// Returns the split key and timestamp.
pub fn split_encoded_key_on_ts(key: &[u8]) -> Result<(&[u8], u64), codec::Error> {
if key.len() < number::U64_SIZE {
Err(codec::Error::KeyLength)
} else {
let pos = key.len() - number::U64_SIZE;
let k = &key[..pos];
let mut ts = &key[pos..];
Ok((k, number::decode_u64_desc(&mut ts)?))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_split_ts() {
let k = b"k";
let ts = 123;
assert!(split_encoded_key_on_ts(k).is_err());
let enc = Key::from_encoded(k.to_vec()).append_ts(ts);
let res = split_encoded_key_on_ts(enc.encoded()).unwrap();
assert_eq!(res, (k.as_ref(), ts));
}
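    // A round-trip sketch added for illustration (not from the original
    // source); it relies only on the inverses defined above: `from_raw`
    // vs. `raw`, and `append_ts` vs. `decode_ts`/`truncate_ts`.
    #[test]
    fn test_key_round_trip() {
        let raw = b"k";
        let ts = 123;
        let key = Key::from_raw(raw);
        // Decoding the encoded form must return the original raw bytes.
        assert_eq!(key.raw().unwrap(), raw.to_vec());
        // Appending a timestamp and reading it back yields the same value.
        let ts_key = key.append_ts(ts);
        assert_eq!(ts_key.decode_ts().unwrap(), ts);
        // Truncating the timestamp restores the original encoded key.
        assert_eq!(ts_key.truncate_ts().unwrap(), key);
    }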
}
| 30.483516 | 82 | 0.612833 |
87b3d978098d13c19d1c6d68ee1d4f863cc0bf23 | 1,707 | use std::fmt::{Display, Formatter};
use camp_files::Span;
#[derive(Debug, Hash, Eq, PartialEq)]
pub enum Token {
BeginDelim(TokenBeginDelim),
EndDelim(TokenEndDelim),
Ident(TokenIdent),
Punct(TokenPunct),
Literal(TokenLiteral),
}
impl Token {
pub fn span(&self) -> Span {
match self {
Token::BeginDelim(t) => t.span,
Token::EndDelim(t) => t.span,
Token::Ident(t) => t.span,
Token::Punct(t) => t.span,
Token::Literal(t) => t.span,
}
}
}
#[derive(Debug, Hash, Eq, PartialEq)]
pub struct TokenBeginDelim {
pub delimiter: TokenDelim,
pub span: Span,
}
#[derive(Debug, Hash, Eq, PartialEq)]
pub struct TokenEndDelim {
pub delimiter: TokenDelim,
pub span: Span,
}
#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)]
pub enum TokenDelim {
Curly,
Sq,
Paren,
}
impl Display for TokenDelim {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
TokenDelim::Curly => write!(f, "brace"),
TokenDelim::Sq => write!(f, "bracket"),
TokenDelim::Paren => write!(f, "parenthesis"),
}
}
}
#[derive(Debug, Hash, Eq, PartialEq)]
pub struct TokenIdent {
pub ident: String,
pub span: Span,
}
#[derive(Debug, Hash, Eq, PartialEq)]
pub struct TokenPunct {
pub punct: char,
pub span: Span,
pub trailing_whitespace: bool,
}
#[derive(Debug, Hash, Eq, PartialEq)]
pub struct TokenLiteral {
pub literal: TokenLiteralKind,
pub span: Span,
}
#[derive(Debug, Hash, Eq, PartialEq)]
pub enum TokenLiteralKind {
Number(String),
Char(char),
Lifetime(String),
StringLit(String),
}
| 21.074074 | 62 | 0.600469 |
90146ff8827cde4327ef04f537fd45d7af808340 | 4,902 | //! Test registration and login
mod common;
use common::*;
use rocket::http::{ContentType, Status};
use rocket::local::blocking::LocalResponse;
#[test]
/// Register new user, handling repeated registration as well.
fn test_register() {
let client = test_client().lock().unwrap();
let response = client
.post("/api/users")
.header(ContentType::JSON)
.body(json_string!({"user": {"username": USERNAME, "email": EMAIL, "password": PASSWORD}}))
.dispatch();
let status = response.status();
    // If the user was already created we should get an UnprocessableEntity, or Ok otherwise.
    //
    // As tests are run in an independent order, `login()` has probably already created the smoketest user,
    // so we gracefully handle the "user already exists" error here.
if status == Status::Ok {
check_user_response(response);
} else if status == Status::UnprocessableEntity {
check_user_validation_errors(response);
} else {
panic!("Got status: {}", status);
}
}
#[test]
/// Registration with the same email must fail
fn test_register_with_duplicated_email() {
let client = test_client().lock().unwrap();
register(&client, "clone", "[email protected]", PASSWORD);
let response = client
.post("/api/users")
.header(ContentType::JSON)
.body(json_string!({
"user": {
"username": "clone_1",
"email": "[email protected]",
"password": PASSWORD,
},
}))
.dispatch();
assert_eq!(response.status(), Status::UnprocessableEntity);
let value = response_json_value(response);
let error = value
.get("errors")
.and_then(|errors| errors.get("email"))
.and_then(|errors| errors.get(0))
.and_then(|error| error.as_str());
assert_eq!(error, Some("has already been taken"));
}
#[test]
/// Login with wrong password must fail.
fn test_incorrect_login() {
let client = test_client().lock().unwrap();
let response = client
.post("/api/users/login")
.header(ContentType::JSON)
.body(json_string!({"user": {"email": EMAIL, "password": "foo"}}))
.dispatch();
assert_eq!(response.status(), Status::UnprocessableEntity);
let value = response_json_value(response);
let login_error = value
.get("errors")
.expect("must have a 'errors' field")
.get("email or password")
.expect("must have 'email or password' errors")
.get(0)
        .expect("must have non-empty 'email or password' errors")
.as_str();
assert_eq!(login_error, Some("is invalid"));
}
#[test]
/// Try logging checking that access Token is present.
fn test_login() {
let client = test_client().lock().unwrap();
let response = client
.post("/api/users/login")
.header(ContentType::JSON)
.body(json_string!({"user": {"email": EMAIL, "password": PASSWORD}}))
.dispatch();
let value = response_json_value(response);
value
.get("user")
.expect("must have a 'user' field")
.get("token")
.expect("user has token")
.as_str()
.expect("token must be a string");
}
#[test]
/// Check that `/user` endpoint returns expected data.
fn test_get_user() {
let client = test_client().lock().unwrap();
let token = login(&client);
let response = client
.get("/api/user")
.header(token_header(token))
.dispatch();
check_user_response(response);
}
#[test]
/// Test user updating.
fn test_put_user() {
let client = test_client().lock().unwrap();
let token = login(&client);
let response = client
.put("/api/user")
.header(token_header(token))
.header(ContentType::JSON)
.body(json_string!({"user": {"bio": "I'm doing Rust!"}}))
.dispatch();
check_user_response(response);
}
// Utility functions
/// Assert that body contains "user" response with expected fields.
fn check_user_response(response: LocalResponse) {
let value = response_json_value(response);
let user = value.get("user").expect("must have a 'user' field");
assert_eq!(user.get("email").expect("user has email"), EMAIL);
assert_eq!(user.get("username").expect("user has username"), USERNAME);
assert!(user.get("bio").is_some());
assert!(user.get("image").is_some());
assert!(user.get("token").is_some());
}
fn check_user_validation_errors(response: LocalResponse) {
let value = response_json_value(response);
let username_error = value
.get("errors")
.expect("must have a 'errors' field")
.get("username")
.expect("must have 'username' errors")
.get(0)
.expect("must have non-empty 'username' errors")
.as_str();
assert_eq!(username_error, Some("has already been taken"))
}
| 30.07362 | 99 | 0.614035 |
22debe2f046426e7a92a3a98917d2971c018e054 | 1,560 | // Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#[fuchsia::component]
async fn main() {
monotonic::monotonic_examples();
utc::utc_examples().await;
}
mod monotonic {
// [START monotonic]
use fuchsia_zircon as zx;
pub fn monotonic_examples() {
// Read monotonic time.
let monotonic_time = zx::Time::get_monotonic();
println!("The monotonic time is {:?}.", monotonic_time);
}
// [END monotonic]
}
mod utc {
// [START utc]
use fuchsia_async as fasync;
use fuchsia_runtime::duplicate_utc_clock_handle;
use fuchsia_zircon as zx;
pub async fn utc_examples() {
// Obtain a UTC handle.
let utc_clock = duplicate_utc_clock_handle(zx::Rights::SAME_RIGHTS)
.expect("Failed to duplicate UTC clock handle.");
// Wait for the UTC clock to start.
fasync::OnSignals::new(&utc_clock, zx::Signals::CLOCK_STARTED)
.await
.expect("Failed to wait for ZX_CLOCK_STARTED.");
println!("UTC clock is started.");
// Read the UTC clock.
let utc_time = utc_clock.read().expect("Failed to read UTC clock.");
println!("The UTC time is {:?}.", utc_time);
// Read UTC clock details.
let clock_details = utc_clock.get_details().expect("Failed to read UTC clock details.");
println!("The UTC clock's backstop time is {:?}.", clock_details.backstop);
}
// [END utc]
}
| 31.2 | 96 | 0.632692 |
f4b7e4d314e49f59455a2a2d0272a5b9b4952ef3 | 2,065 | use lazy_static::lazy_static;
use onedrive_api::*;
use serde::Deserialize;
#[derive(Debug, Deserialize)]
struct Env {
client_id: String,
client_secret: Option<String>,
redirect_uri: String,
refresh_token: String,
}
async fn login() -> String {
let env: Env = envy::prefixed("ONEDRIVE_API_TEST_").from_env().unwrap();
let auth = Auth::new(
env.client_id,
Permission::new_read().write(true).offline_access(true),
env.redirect_uri,
);
auth.login_with_refresh_token(&env.refresh_token, env.client_secret.as_deref())
.await
.expect("Login failed")
.access_token
}
lazy_static! {
pub static ref TOKEN: tokio::sync::Mutex<Option<String>> = Default::default();
}
pub async fn get_logined_onedrive() -> OneDrive {
let mut guard = TOKEN.lock().await;
let token = match &*guard {
Some(token) => token.clone(),
None => {
let token = login().await;
*guard = Some(token.clone());
token
}
};
OneDrive::new(token, DriveLocation::me())
}
pub fn gen_filename() -> &'static FileName {
use std::sync::atomic::{AtomicU64, Ordering};
// Randomly initialized counter.
lazy_static! {
static ref COUNTER: AtomicU64 = {
use rand::{rngs::StdRng, Rng, SeedableRng};
// Avoid overflow to keep it monotonic.
AtomicU64::new(u64::from(StdRng::from_entropy().gen::<u32>()))
};
}
let id = COUNTER.fetch_add(1, Ordering::SeqCst);
let s = Box::leak(format!("$onedrive_api_tests.{}", id).into_boxed_str());
FileName::new(s).unwrap()
}
pub fn rooted_location(name: &FileName) -> ItemLocation<'static> {
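    // Intentionally leaks the formatted path: `ItemLocation<'static>` needs a
    // `&'static str`, and leaking is acceptable in these test helpers.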
let s = Box::leak(format!("/{}", name.as_str()).into_boxed_str());
ItemLocation::from_path(s).unwrap()
}
pub async fn download(url: &str) -> Vec<u8> {
reqwest::get(url)
.await
.expect("Failed to request for downloading file")
.bytes()
.await
.expect("Failed to download file")
.to_vec()
}
| 27.171053 | 83 | 0.60678 |
9b4d288278a349068bffcdfd2d66ef15c3ce85c3 | 3,324 | // Copyright 2018-2021 Cargill Incorporated
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Provides the "fetch service" operation for the `DieselAdminServiceStore`.
use diesel::prelude::*;
use super::AdminServiceStoreOperations;
use crate::admin::store::{
diesel::{
models::{ServiceArgumentModel, ServiceModel},
schema::{service, service_argument},
},
error::AdminServiceStoreError,
Service, ServiceBuilder, ServiceId,
};
pub(in crate::admin::store::diesel) trait AdminServiceStoreFetchServiceOperation {
fn get_service(
&self,
service_id: &ServiceId,
) -> Result<Option<Service>, AdminServiceStoreError>;
}
impl<'a, C> AdminServiceStoreFetchServiceOperation for AdminServiceStoreOperations<'a, C>
where
C: diesel::Connection,
String: diesel::deserialize::FromSql<diesel::sql_types::Text, C::Backend>,
i64: diesel::deserialize::FromSql<diesel::sql_types::BigInt, C::Backend>,
i32: diesel::deserialize::FromSql<diesel::sql_types::Integer, C::Backend>,
{
fn get_service(
&self,
service_id: &ServiceId,
) -> Result<Option<Service>, AdminServiceStoreError> {
self.conn.transaction::<Option<Service>, _, _>(|| {
// Fetch the `service` entry with the matching `service_id`.
// return None if the `service` does not exist
let service: ServiceModel = match service::table
.filter(service::circuit_id.eq(&service_id.circuit_id))
.filter(service::service_id.eq(&service_id.service_id))
.first::<ServiceModel>(self.conn)
.optional()?
{
Some(service) => service,
None => return Ok(None),
};
// Collect the `service_argument` entries with the associated `circuit_id` found
// in the `service` entry previously fetched and the provided `service_id`.
let arguments: Vec<(String, String)> = service_argument::table
.filter(service_argument::circuit_id.eq(&service_id.circuit_id))
.filter(service_argument::service_id.eq(&service_id.service_id))
.order(service_argument::position)
.load::<ServiceArgumentModel>(self.conn)?
.iter()
.map(|arg| (arg.key.to_string(), arg.value.to_string()))
.collect();
let return_service = ServiceBuilder::new()
.with_service_id(&service.service_id)
.with_service_type(&service.service_type)
.with_arguments(&arguments)
.with_node_id(&service.node_id)
.build()
.map_err(AdminServiceStoreError::InvalidStateError)?;
Ok(Some(return_service))
})
}
}
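// Usage sketch (illustrative, not part of the original module): with a Diesel
// connection wrapped in `AdminServiceStoreOperations`, a fetch might look like
//
//     let id = ServiceId::new("circuit-01".into(), "svc-a".into());
//     let service = ops.get_service(&id)?; // -> Option<Service>
//
// `ServiceId::new` is assumed here for illustration; construct the id however
// the `ServiceId` type in `crate::admin::store` actually provides.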
| 40.048193 | 92 | 0.638688 |
aca517b80c5342b80bb6aa2686c2bfa2b8b432d4 | 3,482 | use std::io::Write;
// We don't want to define these consts in two files (and have to maintain them), so we'll define them here and also
// write the ones that are used in the runtime code to the autogen file.
const WINDOW_SIZE: usize = 16; // This implementation uses a hard-coded window size of 16 bytes.
const TWICE_WINDOW_SIZE: usize = 2 * WINDOW_SIZE;
const WINDOW_MASK: usize = WINDOW_SIZE - 1; // 0x0F
const BITS_PER_BYTE: u8 = 8;
// The rolling hash implementation in this file uses the Rabin fingerprinting method, based on irreducible polynomials
// over a finite field. The Rabin fingerprint is NOT considered cryptographically secure, but it is a fast algorithm
// that can be used to cut a file into chunks without leaking information about the contents of the file.
// The default irreducible polynomial is x^64 + x^4 + x^3 + x + 1. This would normally require 65 bits to store, but in
// this implementation we assume that bit 64 (0 indexed) is set and do not store it. 0x1B == 00011011 which is read as:
// bit 0: constant 1 (always set)
// bit 1: x (set)
// bit 2: x^2 (not set)
// bit 3: x^3 (set)
// bit 4: x^4 (set)
// bit 5: x^5 (not set)
// etc...
// The value for this polynomial is taken from "Table of Low-Weight Binary Irreducible Polynomials" published by Hewlett
// Packard at: https://www.hpl.hp.com/techreports/98/HPL-98-135.pdf
const DEFAULT_IRREDUCIBLE_POLYNOMIAL_64: u64 = 0x1B;
// This is the basis of the Rabin fingerprint, where overflow when shifting results in dividing by an irreducible
// polynomial and using the remainder.
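// Worked example (illustrative): for number = 0x8000000000000001 and n = 1,
// bit 63 is set, so the shift would carry into the virtual bit 64. The result
// is (0x8000000000000001 << 1) ^ 0x1B = 0x0000000000000002 ^ 0x1B = 0x19,
// i.e. the remainder after division by the irreducible polynomial.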
fn shift_left_n_bits_with_mod_64(mut number: u64, n: u8) -> u64 {
for _ in 0..n {
// We will need to mod the irreducible poly if shifting left would leave bit 65 set. Check bit 64 now to see if
// we need to do that
let needs_mod = number & 0x8000000000000000 == 0x8000000000000000;
// Do the shift
number <<= 1;
// In polynomial math of this nature, XOR is equivalent to mod
if needs_mod {
number ^= DEFAULT_IRREDUCIBLE_POLYNOMIAL_64;
}
}
number
}
fn main() {
let out_dir = std::env::var("OUT_DIR").unwrap();
let dest_path = std::path::Path::new(&out_dir).join("static_rolling_hash_autogen.rs");
let mut f = std::fs::File::create(&dest_path).unwrap();
// These consts are also used at runtime
writeln!(f, "const WINDOW_SIZE: usize = {};", WINDOW_SIZE).unwrap();
writeln!(f, "const TWICE_WINDOW_SIZE: usize = {};", TWICE_WINDOW_SIZE).unwrap();
writeln!(f, "const WINDOW_MASK: usize = {};", WINDOW_MASK).unwrap();
writeln!(f, "").unwrap();
// Create the push table by pre-computing what happens to every possible top byte when it gets modded
writeln!(f, "static ROLLING_HASH_PUSH_TABLE: [u64; 256] = [").unwrap();
for i in 0u64..256u64 {
let number = i << 56;
writeln!(f, " {},", shift_left_n_bits_with_mod_64(number, BITS_PER_BYTE)).unwrap();
}
writeln!(f, "];").unwrap();
    // Create the pop table by pre-computing the same value, except that we also need to account for the size of the window
writeln!(f, "static ROLLING_HASH_POP_TABLE: [u64; 256] = [").unwrap();
for i in 0u64..256u64 {
writeln!(f, " {},", shift_left_n_bits_with_mod_64(i, BITS_PER_BYTE * WINDOW_SIZE as u8)).unwrap();
}
writeln!(f, "];").unwrap();
} | 48.361111 | 120 | 0.659391 |
8a395867b8166bcfef93128fb15ac72183994f42 | 37,001 | #![doc = "generated by AutoRust"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use super::models;
#[derive(Clone)]
pub struct Client {
endpoint: String,
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
scopes: Vec<String>,
pipeline: azure_core::Pipeline,
}
#[derive(Clone)]
pub struct ClientBuilder {
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
endpoint: Option<String>,
scopes: Option<Vec<String>>,
}
pub const DEFAULT_ENDPOINT: &str = azure_core::resource_manager_endpoint::AZURE_PUBLIC_CLOUD;
impl ClientBuilder {
pub fn new(credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>) -> Self {
Self {
credential,
endpoint: None,
scopes: None,
}
}
pub fn endpoint(mut self, endpoint: impl Into<String>) -> Self {
self.endpoint = Some(endpoint.into());
self
}
pub fn scopes(mut self, scopes: &[&str]) -> Self {
self.scopes = Some(scopes.iter().map(|scope| (*scope).to_owned()).collect());
self
}
pub fn build(self) -> Client {
let endpoint = self.endpoint.unwrap_or_else(|| DEFAULT_ENDPOINT.to_owned());
let scopes = self.scopes.unwrap_or_else(|| vec![format!("{}/", endpoint)]);
Client::new(endpoint, self.credential, scopes)
}
}
impl Client {
pub(crate) fn endpoint(&self) -> &str {
self.endpoint.as_str()
}
pub(crate) fn token_credential(&self) -> &dyn azure_core::auth::TokenCredential {
self.credential.as_ref()
}
pub(crate) fn scopes(&self) -> Vec<&str> {
self.scopes.iter().map(String::as_str).collect()
}
pub(crate) async fn send(&self, request: impl Into<azure_core::Request>) -> azure_core::error::Result<azure_core::Response> {
let mut context = azure_core::Context::default();
let mut request = request.into();
self.pipeline.send(&mut context, &mut request).await
}
pub fn new(
endpoint: impl Into<String>,
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
scopes: Vec<String>,
) -> Self {
let endpoint = endpoint.into();
let pipeline = azure_core::Pipeline::new(
option_env!("CARGO_PKG_NAME"),
option_env!("CARGO_PKG_VERSION"),
azure_core::ClientOptions::default(),
Vec::new(),
Vec::new(),
);
Self {
endpoint,
credential,
scopes,
pipeline,
}
}
pub fn dedicated_hsm(&self) -> dedicated_hsm::Client {
dedicated_hsm::Client(self.clone())
}
pub fn operations(&self) -> operations::Client {
operations::Client(self.clone())
}
}
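// Usage sketch (illustrative; `credential` is an `Arc<dyn TokenCredential>`
// supplied by the caller, not shown here):
//
//     let client = ClientBuilder::new(credential).build();
//     let hsm = client
//         .dedicated_hsm()
//         .get("my-resource-group", "my-hsm", "my-subscription-id")
//         .into_future()
//         .await?;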
#[non_exhaustive]
#[derive(Debug, thiserror :: Error)]
#[allow(non_camel_case_types)]
pub enum Error {
#[error(transparent)]
Operations_List(#[from] operations::list::Error),
#[error(transparent)]
DedicatedHsm_Get(#[from] dedicated_hsm::get::Error),
#[error(transparent)]
DedicatedHsm_CreateOrUpdate(#[from] dedicated_hsm::create_or_update::Error),
#[error(transparent)]
DedicatedHsm_Update(#[from] dedicated_hsm::update::Error),
#[error(transparent)]
DedicatedHsm_Delete(#[from] dedicated_hsm::delete::Error),
#[error(transparent)]
DedicatedHsm_ListByResourceGroup(#[from] dedicated_hsm::list_by_resource_group::Error),
#[error(transparent)]
DedicatedHsm_ListBySubscription(#[from] dedicated_hsm::list_by_subscription::Error),
}
pub mod operations {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(&self) -> list::Builder {
list::Builder { client: self.0.clone() }
}
}
pub mod list {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::DedicatedHsmError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::DedicatedHsmOperationListResult, Error>> {
Box::pin(async move {
let url_str = &format!("{}/providers/Microsoft.HardwareSecurityModules/operations", self.client.endpoint(),);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2018-10-31-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHsmOperationListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHsmError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod dedicated_hsm {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
resource_group_name: impl Into<String>,
name: impl Into<String>,
subscription_id: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
name: name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn create_or_update(
&self,
resource_group_name: impl Into<String>,
name: impl Into<String>,
parameters: impl Into<models::DedicatedHsm>,
subscription_id: impl Into<String>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
name: name.into(),
parameters: parameters.into(),
subscription_id: subscription_id.into(),
}
}
pub fn update(
&self,
resource_group_name: impl Into<String>,
name: impl Into<String>,
parameters: impl Into<models::DedicatedHsmPatchParameters>,
subscription_id: impl Into<String>,
) -> update::Builder {
update::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
name: name.into(),
parameters: parameters.into(),
subscription_id: subscription_id.into(),
}
}
pub fn delete(
&self,
resource_group_name: impl Into<String>,
name: impl Into<String>,
subscription_id: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
name: name.into(),
subscription_id: subscription_id.into(),
}
}
pub fn list_by_resource_group(
&self,
resource_group_name: impl Into<String>,
subscription_id: impl Into<String>,
) -> list_by_resource_group::Builder {
list_by_resource_group::Builder {
client: self.0.clone(),
resource_group_name: resource_group_name.into(),
subscription_id: subscription_id.into(),
top: None,
}
}
pub fn list_by_subscription(&self, subscription_id: impl Into<String>) -> list_by_subscription::Builder {
list_by_subscription::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
top: None,
}
}
}
pub mod get {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::DedicatedHsmError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::DedicatedHsm, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.HardwareSecurityModules/dedicatedHSMs/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2018-10-31-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHsm =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHsmError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
#[derive(Debug)]
pub enum Response {
Created201(models::DedicatedHsm),
Ok200(models::DedicatedHsm),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::DedicatedHsmError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) name: String,
pub(crate) parameters: models::DedicatedHsm,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.HardwareSecurityModules/dedicatedHSMs/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2018-10-31-preview");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHsm =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHsm =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHsmError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod update {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::DedicatedHsmError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) name: String,
pub(crate) parameters: models::DedicatedHsmPatchParameters,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::DedicatedHsm, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.HardwareSecurityModules/dedicatedHSMs/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2018-10-31-preview");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&self.parameters).map_err(Error::Serialize)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHsm =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHsmError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::models;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::DedicatedHsmError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) name: String,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.HardwareSecurityModules/dedicatedHSMs/{}",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name,
&self.name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2018-10-31-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHsmError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list_by_resource_group {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::DedicatedHsmError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) resource_group_name: String,
pub(crate) subscription_id: String,
pub(crate) top: Option<i32>,
}
impl Builder {
pub fn top(mut self, top: i32) -> Self {
self.top = Some(top);
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::DedicatedHsmListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.HardwareSecurityModules/dedicatedHSMs",
self.client.endpoint(),
&self.subscription_id,
&self.resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2018-10-31-preview");
if let Some(top) = &self.top {
url.query_pairs_mut().append_pair("$top", &top.to_string());
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHsmListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHsmError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list_by_subscription {
use super::models;
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::DedicatedHsmError,
},
#[error("Failed to parse request URL")]
ParseUrl(#[source] url::ParseError),
#[error("Failed to build request")]
BuildRequest(#[source] http::Error),
#[error("Failed to serialize request body")]
Serialize(#[source] serde_json::Error),
#[error("Failed to get access token")]
GetToken(#[source] azure_core::Error),
#[error("Failed to execute request")]
SendRequest(#[source] azure_core::error::Error),
#[error("Failed to get response bytes")]
ResponseBytes(#[source] azure_core::error::Error),
#[error("Failed to deserialize response, body: {1:?}")]
Deserialize(#[source] serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) top: Option<i32>,
}
impl Builder {
pub fn top(mut self, top: i32) -> Self {
self.top = Some(top);
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::DedicatedHsmListResult, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.HardwareSecurityModules/dedicatedHSMs",
self.client.endpoint(),
&self.subscription_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2018-10-31-preview");
if let Some(top) = &self.top {
url.query_pairs_mut().append_pair("$top", &top.to_string());
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHsmListResult =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::DedicatedHsmError =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
| 49.138114 | 137 | 0.520148 |
1ab2ce46fe1e55c6aee40e0ba467a4559dbb634d | 6,225 | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use crate::AppInfo;
use crate::File;
use glib::object::Cast;
use glib::object::IsA;
use glib::signal::connect_raw;
use glib::signal::SignalHandlerId;
use glib::translate::*;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem::transmute;
glib::glib_wrapper! {
pub struct AppLaunchContext(Object<ffi::GAppLaunchContext, ffi::GAppLaunchContextClass>);
match fn {
get_type => || ffi::g_app_launch_context_get_type(),
}
}
impl AppLaunchContext {
#[doc(alias = "g_app_launch_context_new")]
pub fn new() -> AppLaunchContext {
unsafe { from_glib_full(ffi::g_app_launch_context_new()) }
}
}
impl Default for AppLaunchContext {
fn default() -> Self {
Self::new()
}
}
pub const NONE_APP_LAUNCH_CONTEXT: Option<&AppLaunchContext> = None;
pub trait AppLaunchContextExt: 'static {
#[doc(alias = "g_app_launch_context_get_display")]
fn get_display<P: IsA<AppInfo>>(&self, info: &P, files: &[File]) -> Option<glib::GString>;
#[doc(alias = "g_app_launch_context_get_environment")]
fn get_environment(&self) -> Vec<std::ffi::OsString>;
#[doc(alias = "g_app_launch_context_get_startup_notify_id")]
fn get_startup_notify_id<P: IsA<AppInfo>>(
&self,
info: &P,
files: &[File],
) -> Option<glib::GString>;
#[doc(alias = "g_app_launch_context_launch_failed")]
fn launch_failed(&self, startup_notify_id: &str);
#[doc(alias = "g_app_launch_context_setenv")]
fn setenv<P: AsRef<std::ffi::OsStr>, Q: AsRef<std::ffi::OsStr>>(&self, variable: P, value: Q);
#[doc(alias = "g_app_launch_context_unsetenv")]
fn unsetenv<P: AsRef<std::ffi::OsStr>>(&self, variable: P);
fn connect_launch_failed<F: Fn(&Self, &str) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_launched<F: Fn(&Self, &AppInfo, &glib::Variant) + 'static>(
&self,
f: F,
) -> SignalHandlerId;
}
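// Usage sketch (illustrative): create a context and adjust the environment
// that launched applications will inherit. The context is then passed to an
// `AppInfo` launch call (for example `app_info.launch(&[], Some(&ctx))`,
// where `app_info` is any `IsA<AppInfo>`; it is assumed here, not defined in
// this file).
//
//     let ctx = AppLaunchContext::new();
//     ctx.setenv("G_MESSAGES_DEBUG", "all");
//     ctx.unsetenv("TMPDIR");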
impl<O: IsA<AppLaunchContext>> AppLaunchContextExt for O {
fn get_display<P: IsA<AppInfo>>(&self, info: &P, files: &[File]) -> Option<glib::GString> {
unsafe {
from_glib_full(ffi::g_app_launch_context_get_display(
self.as_ref().to_glib_none().0,
info.as_ref().to_glib_none().0,
files.to_glib_none().0,
))
}
}
fn get_environment(&self) -> Vec<std::ffi::OsString> {
unsafe {
FromGlibPtrContainer::from_glib_full(ffi::g_app_launch_context_get_environment(
self.as_ref().to_glib_none().0,
))
}
}
fn get_startup_notify_id<P: IsA<AppInfo>>(
&self,
info: &P,
files: &[File],
) -> Option<glib::GString> {
unsafe {
from_glib_full(ffi::g_app_launch_context_get_startup_notify_id(
self.as_ref().to_glib_none().0,
info.as_ref().to_glib_none().0,
files.to_glib_none().0,
))
}
}
fn launch_failed(&self, startup_notify_id: &str) {
unsafe {
ffi::g_app_launch_context_launch_failed(
self.as_ref().to_glib_none().0,
startup_notify_id.to_glib_none().0,
);
}
}
fn setenv<P: AsRef<std::ffi::OsStr>, Q: AsRef<std::ffi::OsStr>>(&self, variable: P, value: Q) {
unsafe {
ffi::g_app_launch_context_setenv(
self.as_ref().to_glib_none().0,
variable.as_ref().to_glib_none().0,
value.as_ref().to_glib_none().0,
);
}
}
fn unsetenv<P: AsRef<std::ffi::OsStr>>(&self, variable: P) {
unsafe {
ffi::g_app_launch_context_unsetenv(
self.as_ref().to_glib_none().0,
variable.as_ref().to_glib_none().0,
);
}
}
fn connect_launch_failed<F: Fn(&Self, &str) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn launch_failed_trampoline<P, F: Fn(&P, &str) + 'static>(
this: *mut ffi::GAppLaunchContext,
startup_notify_id: *mut libc::c_char,
f: glib::ffi::gpointer,
) where
P: IsA<AppLaunchContext>,
{
let f: &F = &*(f as *const F);
f(
&AppLaunchContext::from_glib_borrow(this).unsafe_cast_ref(),
&glib::GString::from_glib_borrow(startup_notify_id),
)
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"launch-failed\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
launch_failed_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
fn connect_launched<F: Fn(&Self, &AppInfo, &glib::Variant) + 'static>(
&self,
f: F,
) -> SignalHandlerId {
unsafe extern "C" fn launched_trampoline<P, F: Fn(&P, &AppInfo, &glib::Variant) + 'static>(
this: *mut ffi::GAppLaunchContext,
info: *mut ffi::GAppInfo,
platform_data: *mut glib::ffi::GVariant,
f: glib::ffi::gpointer,
) where
P: IsA<AppLaunchContext>,
{
let f: &F = &*(f as *const F);
f(
&AppLaunchContext::from_glib_borrow(this).unsafe_cast_ref(),
&from_glib_borrow(info),
&from_glib_borrow(platform_data),
)
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"launched\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
launched_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
}
impl fmt::Display for AppLaunchContext {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("AppLaunchContext")
}
}
| 31.760204 | 99 | 0.551807 |
90573ec9ff0cbea9929e472c95e4d2a07b46f40b | 954 | //! Example to demonstrate how to remove the default ICC profile
//! Look at the file size (compared to the other tests!)
extern crate printpdf;
use printpdf::*;
use std::fs::File;
use std::io::BufWriter;
fn main() {
    // This code creates a minimal PDF file of about 1.2 KB.
    // Currently, text requires an embedded font, so if you need to write something, the file size
    // will still be bloated (because of the embedded font).
    // Also, OCG content is still enabled, even if you disable it here.
let mut doc = PdfDocument::new("printpdf no_icc test");
let mut page = PdfPage::new(Mm(297.0), Mm(210.0));
let layer = PdfLayer::new("Layer 1");
doc.set_conformance(PdfConformance::Custom(CustomPdfConformance {
requires_icc_profile: false,
requires_xmp_metadata: false,
..Default::default()
}));
page.add_layer(layer);
doc.add_page(page);
doc.save(&mut BufWriter::new(
File::create("test_no_icc.pdf").unwrap(),
))
.unwrap();
}
| 28.909091 | 99 | 0.71174 |
50078651c29fd0b61a344de4b76fa175af402ed0 | 9,233 | #[derive(Debug, Clone, PartialEq)]
pub enum Operator {
/// ADD `+`
ADD,
/// SUB `-`
SUB,
/// MUL `*`
MUL,
/// DIV `/`
DIV,
/// REM `%`
REM,
/// AND `&&`
AND,
/// OR `||`
OR,
/// NOT `!`
NOT,
/// EQ `==`
EQ,
/// NEQ `!=`
NEQ,
/// LT `<`
LT,
/// GT `>`
GT,
/// LE `<=`
LE,
/// GE `>=`
GE,
}
#[derive(Debug, Clone, PartialEq)]
pub enum Expression {
/// `Number(f64)` stores a `f64` number
Number(f64),
/// `Unary` stores unary operation, like `!` or `-`
Unary(Operator, Box<Expression>),
/// `Binary` stores binary operation, like `+` `-` `*` `/` and so on
Binary(Box<Expression>, Operator, Box<Expression>),
/// `Ternary` stores question mark expression: `cond ? true-expr : false-expr`
Ternary(Box<Expression>, Box<Expression>, Box<Expression>),
/// `Variable` stores variable, not implemented yet
Variable(String),
/// `Function` stores function, and will used to call f64 function of Rust
Function(String, Vec<Box<Expression>>),
}
impl Expression {
pub fn eval(&self) -> f64 {
match self {
Expression::Number(x) => *x,
Expression::Unary(Operator::SUB, expr) => -expr.eval(),
            Expression::Unary(Operator::NOT, expr) => {
                // Logical NOT: a non-zero (truthy) operand negates to 0.0,
                // and a zero (falsy) operand negates to 1.0.
                if expr.eval() != 0.0 {
                    0.0
                } else {
                    1.0
                }
            }
Expression::Binary(lexpr, Operator::ADD, rexpr) => lexpr.eval() + rexpr.eval(),
Expression::Binary(lexpr, Operator::SUB, rexpr) => lexpr.eval() - rexpr.eval(),
Expression::Binary(lexpr, Operator::MUL, rexpr) => lexpr.eval() * rexpr.eval(),
Expression::Binary(lexpr, Operator::DIV, rexpr) => lexpr.eval() / rexpr.eval(),
Expression::Binary(lexpr, Operator::REM, rexpr) => lexpr.eval() % rexpr.eval(),
Expression::Binary(lexpr, Operator::AND, rexpr) => {
if lexpr.eval() != 0.0 && rexpr.eval() != 0.0 {
1.0
} else {
0.0
}
}
Expression::Binary(lexpr, Operator::OR, rexpr) => {
if lexpr.eval() != 0.0 || rexpr.eval() != 0.0 {
1.0
} else {
0.0
}
}
Expression::Binary(lexpr, Operator::EQ, rexpr) => {
if lexpr.eval() == rexpr.eval() {
1.0
} else {
0.0
}
}
Expression::Binary(lexpr, Operator::NEQ, rexpr) => {
if lexpr.eval() != rexpr.eval() {
1.0
} else {
0.0
}
}
Expression::Binary(lexpr, Operator::LT, rexpr) => {
if lexpr.eval() < rexpr.eval() {
1.0
} else {
0.0
}
}
Expression::Binary(lexpr, Operator::LE, rexpr) => {
if lexpr.eval() <= rexpr.eval() {
1.0
} else {
0.0
}
}
Expression::Binary(lexpr, Operator::GT, rexpr) => {
if lexpr.eval() > rexpr.eval() {
1.0
} else {
0.0
}
}
Expression::Binary(lexpr, Operator::GE, rexpr) => {
if lexpr.eval() >= rexpr.eval() {
1.0
} else {
0.0
}
}
Expression::Ternary(cond, texpr, fexpr) => {
if cond.eval() != 0.0 {
texpr.eval()
} else {
fexpr.eval()
}
}
Expression::Variable(_) => 0.0,
Expression::Function(fun, exprs) => match fun.as_str() {
"floor" => f64::floor(exprs[0].eval()),
"ceil" => f64::ceil(exprs[0].eval()),
"round" => f64::round(exprs[0].eval()),
"trunc" => f64::trunc(exprs[0].eval()),
"fract" => f64::fract(exprs[0].eval()),
"abs" => f64::abs(exprs[0].eval()),
"signum" => f64::signum(exprs[0].eval()),
"mul_add" => f64::mul_add(exprs[0].eval(), exprs[1].eval(), exprs[2].eval()),
"div_euclid" => f64::div_euclid(exprs[0].eval(), exprs[1].eval()),
"rem_euclid" => f64::rem_euclid(exprs[0].eval(), exprs[1].eval()),
"powf" => f64::powf(exprs[0].eval(), exprs[1].eval()),
"sqrt" => f64::sqrt(exprs[0].eval()),
"exp" => f64::exp(exprs[0].eval()),
"exp2" => f64::exp2(exprs[0].eval()),
"ln" => f64::ln(exprs[0].eval()),
"log" => f64::log(exprs[0].eval(), exprs[1].eval()),
"log2" => f64::log2(exprs[0].eval()),
"log10" => f64::log10(exprs[0].eval()),
"cbrt" => f64::cbrt(exprs[0].eval()),
"hypot" => f64::hypot(exprs[0].eval(), exprs[1].eval()),
"sin" => f64::sin(exprs[0].eval()),
"cos" => f64::cos(exprs[0].eval()),
"tan" => f64::tan(exprs[0].eval()),
"asin" => f64::asin(exprs[0].eval()),
"acos" => f64::acos(exprs[0].eval()),
"atan" => f64::atan(exprs[0].eval()),
"atan2" => f64::atan2(exprs[0].eval(), exprs[1].eval()),
"exp_m1" => f64::exp_m1(exprs[0].eval()),
"ln_1p" => f64::ln_1p(exprs[0].eval()),
"sinh" => f64::sinh(exprs[0].eval()),
"cosh" => f64::cosh(exprs[0].eval()),
"tanh" => f64::tanh(exprs[0].eval()),
"asinh" => f64::asinh(exprs[0].eval()),
"acosh" => f64::acosh(exprs[0].eval()),
"atanh" => f64::atanh(exprs[0].eval()),
_ => panic!("Not support function"),
},
_ => 0.0,
}
}
}
#[cfg(test)]
mod test {
use crate::{ast::*, calculator};
#[test]
fn test_factor() {
assert_eq!(
calculator::FactorParser::new()
.parse("2.5e10")
.unwrap()
.eval(),
2.5e10
);
        // `!` applied to a non-zero (truthy) number yields 0.0.
        assert_eq!(
            calculator::FactorParser::new()
                .parse("!2.5e10")
                .unwrap()
                .eval(),
            0.0
        );
assert_eq!(
calculator::FactorParser::new().parse("abc").unwrap(),
Box::new(Expression::Variable("abc".to_string()))
);
assert_eq!(
calculator::FactorParser::new().parse("abc(1, 2)").unwrap(),
Box::new(Expression::Function(
"abc".to_string(),
vec![
Box::new(Expression::Number(1.0)),
Box::new(Expression::Number(2.0))
]
))
);
assert_eq!(
calculator::FactorParser::new()
.parse("abs(-1)")
.unwrap()
.eval(),
1.0
)
}
#[test]
fn test_multiplicative() {
assert_eq!(
calculator::MultiplicativeParser::new()
.parse("1 * 2")
.unwrap()
.eval(),
2.0
)
}
#[test]
fn test_additive() {
assert_eq!(
calculator::AdditiveParser::new()
.parse("1 * 2 + 3")
.unwrap()
.eval(),
5.0
)
}
#[test]
fn test_relational() {
assert_eq!(
calculator::RelationalParser::new()
.parse("4 < 1 * 2 + 3")
.unwrap()
.eval(),
1.0
)
}
#[test]
fn test_equality() {
assert_eq!(
calculator::EqualityParser::new()
.parse("1 * 2 + 3 == 5")
.unwrap()
.eval(),
1.0
)
}
#[test]
fn test_logic_and() {
assert_eq!(
calculator::LogicAndParser::new()
.parse("1 * 2 + 3 && 4")
.unwrap()
.eval(),
1.0
)
}
#[test]
fn test_logic_or() {
assert_eq!(
calculator::LogicOrParser::new()
.parse("1 * 2 + 3 || 0")
.unwrap()
.eval(),
1.0
)
}
#[test]
fn test_conditional() {
assert_eq!(
calculator::ConditionalParser::new()
.parse("1 ? 2 : 3")
.unwrap()
.eval(),
2.0
)
}
#[test]
fn test_expression() {
assert_eq!(
calculator::ExprParser::new()
.parse("1 * 2 + 3 == 5 ? (6 < 4 || 7 >= 7) : (8 != 8 && 9 >= 10)")
.unwrap()
.eval(),
1.0
)
}
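    // A direct-evaluation sketch (added for illustration): builds the AST for
    // `1 + 2 * 3` by hand and checks `eval`, showing the tree shape the
    // parsers above are expected to produce.
    #[test]
    fn test_eval_direct() {
        let expr = Expression::Binary(
            Box::new(Expression::Number(1.0)),
            Operator::ADD,
            Box::new(Expression::Binary(
                Box::new(Expression::Number(2.0)),
                Operator::MUL,
                Box::new(Expression::Number(3.0)),
            )),
        );
        assert_eq!(expr.eval(), 7.0);
    }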
}
| 30.371711 | 93 | 0.389256 |
de600ee68caad2fbb127ba6629f4655023c994d3 | 5,879 | use NVML;
use error::{nvml_try, Result};
use ffi::bindings::*;
use std::io;
use std::io::Write;
use std::marker::PhantomData;
use std::mem;
use struct_wrappers::event::EventData;
/**
Handle to a set of events.
**Operations on a set are not thread-safe.** It does not, therefore, implement `Sync`.
You can get yourself an `EventSet` via `NVML.create_event_set()`. Once again, Rust's
lifetimes will ensure that it does not outlive the `NVML` instance that it was created
from.
*/
// Checked against local
#[derive(Debug)]
pub struct EventSet<'nvml> {
set: nvmlEventSet_t,
_phantom: PhantomData<&'nvml NVML>
}
unsafe impl<'nvml> Send for EventSet<'nvml> {}
impl<'nvml> From<nvmlEventSet_t> for EventSet<'nvml> {
fn from(set: nvmlEventSet_t) -> Self {
EventSet {
set,
_phantom: PhantomData
}
}
}
impl<'nvml> EventSet<'nvml> {
/**
Use this to release the set's events if you care about handling
potential errors (*the `Drop` implementation ignores errors!*).
# Errors
* `Uninitialized`, if the library has not been successfully initialized
* `Unknown`, on any unexpected error
*/
// Checked against local
#[inline]
pub fn release_events(self) -> Result<()> {
unsafe {
nvml_try(nvmlEventSetFree(self.set))?;
}
Ok(mem::forget(self))
}
/**
Waits on events for the given timeout (in ms) and delivers one when it arrives.
See the `high_level::event_loop` module for an abstracted version of this.
This method returns immediately if an event is ready to be delivered when it
is called. If no events are ready it will sleep until an event arrives, but
not longer than the specified timeout. In certain conditions, this method
could return before the timeout passes (e.g. when an interrupt arrives).
In the case of an XID error, the function returns the most recent XID error
type seen by the system. If there are multiple XID errors generated before
this method is called, the last seen XID error type will be returned for
all XID error events.
# Errors
* `Uninitialized`, if the library has not been successfully initialized
* `Timeout`, if no event arrived in the specified timeout or an interrupt
arrived
* `GpuLost`, if a GPU has fallen off the bus or is otherwise inaccessible
* `Unknown`, on any unexpected error
# Device Support
Supports Fermi and newer fully supported devices.
*/
// Checked against local
#[inline]
pub fn wait(&self, timeout_ms: u32) -> Result<EventData<'nvml>> {
unsafe {
let mut data: nvmlEventData_t = mem::zeroed();
nvml_try(nvmlEventSetWait(self.set, &mut data, timeout_ms))?;
Ok(data.into())
}
}
/// Consume the struct and obtain the raw set handle that it contains.
#[inline]
pub fn into_raw(self) -> nvmlEventSet_t {
let set = self.set;
mem::forget(self);
set
}
/// Obtain a reference to the raw set handle contained in the struct.
#[inline]
pub fn as_raw(&self) -> &nvmlEventSet_t {
&(self.set)
}
/// Obtain a mutable reference to the raw set handle contained in the
/// struct.
#[inline]
pub fn as_mut_raw(&mut self) -> &mut nvmlEventSet_t {
&mut (self.set)
}
#[inline]
/// Sometimes necessary for C interop. Use carefully.
pub unsafe fn unsafe_raw(&self) -> nvmlEventSet_t {
self.set
}
}
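// Usage sketch (illustrative; it mirrors the `wait` test below): obtain a set
// via `NVML.create_event_set()`, register event types on a device with
// `register_events(..)`, then call `set.wait(timeout_ms)` and treat a
// `Timeout` error as "no event arrived" rather than as a failure.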
/// This `Drop` implementation ignores errors! Use the `.release_events()`
/// method on the `EventSet` struct if you care about handling them.
impl<'nvml> Drop for EventSet<'nvml> {
fn drop(&mut self) {
#[allow(unused_must_use)]
unsafe {
match nvml_try(nvmlEventSetFree(self.set)) {
Ok(()) => (),
Err(e) => {
                    io::stderr().write_all(
format!(
"WARNING: Error returned by `nvmlEventSetFree()` in Drop \
implementation: {:?}",
e
).as_bytes()
);
},
}
}
}
}
#[cfg(test)]
mod test {
use super::EventSet;
#[cfg(target_os = "linux")]
use bitmasks::event::*;
use test_utils::*;
// Ensuring that double-free issues don't crop up here.
#[test]
fn into_raw() {
let nvml = nvml();
let raw;
{
let set = nvml.create_event_set().expect("set");
raw = set.into_raw();
}
EventSet::from(raw);
}
#[cfg(target_os = "linux")]
#[test]
fn release_events() {
let nvml = nvml();
test_with_device(3, &nvml, |device| {
let set = nvml.create_event_set()?;
let set = device.register_events(
EventTypes::PSTATE_CHANGE |
EventTypes::CRITICAL_XID_ERROR |
EventTypes::CLOCK_CHANGE,
set
)?;
set.release_events()
})
}
#[cfg(target_os = "linux")]
#[cfg(feature = "test-local")]
#[test]
fn wait() {
use error::*;
let nvml = nvml();
let device = device(&nvml);
let set = nvml.create_event_set().expect("event set");
let set = device.register_events(
EventTypes::PSTATE_CHANGE |
EventTypes::CRITICAL_XID_ERROR |
EventTypes::CLOCK_CHANGE,
set
).expect("registration");
let data = match set.wait(10_000) {
Err(Error(ErrorKind::Timeout, _)) => return (),
Ok(d) => d,
_ => panic!("An error other than `Timeout` occurred"),
};
print!("{:?} ...", data);
}
}
| 28.264423 | 86 | 0.576969 |
db66240d2a51e00815346865668c81e2239be38d | 9,411 | use config::{parse_key, parse_mac, ConfigMount, ConfigMountFsType, ConfigMountOptions};
use rcore_fs_mountfs::MNode;
use std::convert::TryFrom;
use std::path::PathBuf;
use std::sync::Once;
use util::host_file_util::{write_host_file, HostFile};
use util::mem_util::from_user;
use super::rootfs::{mount_nonroot_fs_according_to, open_root_fs_according_to, umount_nonroot_fs};
use super::*;
lazy_static! {
static ref MOUNT_ONCE: Once = Once::new();
}
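/// Mounts the root file system described by `user_config` and then seeds the
/// mounted tree with host-provided files (resolv.conf, hostname, hosts).
/// May only be called once; later calls fail with `EPERM`.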
pub fn do_mount_rootfs(
user_config: &config::Config,
user_key: &Option<sgx_key_128bit_t>,
) -> Result<()> {
debug!("mount rootfs");
if MOUNT_ONCE.is_completed() {
return_errno!(EPERM, "rootfs cannot be mounted more than once");
}
let new_rootfs = open_root_fs_according_to(&user_config.mount, user_key)?;
mount_nonroot_fs_according_to(&new_rootfs.root_inode(), &user_config.mount, user_key, true)?;
MOUNT_ONCE.call_once(|| {
let mut rootfs = ROOT_FS.write().unwrap();
rootfs.sync().expect("failed to sync old rootfs");
*rootfs = new_rootfs;
*ENTRY_POINTS.write().unwrap() = user_config.entry_points.to_owned();
});
// Write resolv.conf file into mounted file system
write_host_file(HostFile::RESOLV_CONF)?;
*RESOLV_CONF_STR.write().unwrap() = None;
// Write hostname file into mounted file system
write_host_file(HostFile::HOSTNAME)?;
*HOSTNAME_STR.write().unwrap() = None;
// Write hosts file into mounted file system
write_host_file(HostFile::HOSTS)?;
*HOSTS_STR.write().unwrap() = None;
Ok(())
}
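/// Handles the `mount` syscall: resolves `target` to an absolute path and
/// mounts a new file system of the kind described by `options` onto it.
/// Remount, bind, move, and mount-propagation flags are rejected.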
pub fn do_mount(
source: &str,
target: &str,
flags: MountFlags,
options: MountOptions,
) -> Result<()> {
debug!(
"mount: source: {}, target: {}, flags: {:?}, options: {:?}",
source, target, flags, options
);
let target = if target == "/" {
        return_errno!(EPERM, "cannot mount on root");
} else {
let fs_path = FsPath::try_from(target)?;
let thread = current!();
let fs = thread.fs().read().unwrap();
PathBuf::from(fs.convert_fspath_to_abs(&fs_path)?)
};
if flags.contains(MountFlags::MS_REMOUNT)
|| flags.contains(MountFlags::MS_BIND)
|| flags.contains(MountFlags::MS_SHARED)
|| flags.contains(MountFlags::MS_PRIVATE)
|| flags.contains(MountFlags::MS_SLAVE)
|| flags.contains(MountFlags::MS_UNBINDABLE)
|| flags.contains(MountFlags::MS_MOVE)
{
        return_errno!(EINVAL, "only creating a new mount is supported");
}
let (mount_configs, user_key) = match options {
MountOptions::UnionFS(unionfs_options) => {
let mc = {
let image_mc = ConfigMount {
type_: ConfigMountFsType::TYPE_SEFS,
target: target.clone(),
source: Some(unionfs_options.lower_dir.clone()),
options: Default::default(),
};
let container_mc = ConfigMount {
type_: ConfigMountFsType::TYPE_SEFS,
target: target.clone(),
source: Some(unionfs_options.upper_dir.clone()),
options: Default::default(),
};
ConfigMount {
type_: ConfigMountFsType::TYPE_UNIONFS,
target,
source: None,
options: ConfigMountOptions {
layers: Some(vec![image_mc, container_mc]),
..Default::default()
},
}
};
(vec![mc], unionfs_options.key)
}
MountOptions::SEFS(sefs_options) => {
let mc = ConfigMount {
type_: ConfigMountFsType::TYPE_SEFS,
target,
source: Some(sefs_options.dir.clone()),
options: ConfigMountOptions {
mac: sefs_options.mac,
..Default::default()
},
};
(vec![mc], sefs_options.key)
}
MountOptions::HostFS(dir) => {
let mc = ConfigMount {
type_: ConfigMountFsType::TYPE_HOSTFS,
target,
source: Some(dir.clone()),
options: Default::default(),
};
(vec![mc], None)
}
MountOptions::RamFS => {
let mc = ConfigMount {
type_: ConfigMountFsType::TYPE_RAMFS,
target,
source: None,
options: Default::default(),
};
(vec![mc], None)
}
};
let mut rootfs = ROOT_FS.write().unwrap();
// Should we sync the fs before mount?
rootfs.sync()?;
let follow_symlink = !flags.contains(MountFlags::MS_NOSYMFOLLOW);
mount_nonroot_fs_according_to(
&rootfs.root_inode(),
&mount_configs,
&user_key,
follow_symlink,
)?;
Ok(())
}
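/// Handles the `umount` syscall: resolves `target` to an absolute path and
/// unmounts the non-root file system mounted there.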
pub fn do_umount(target: &str, flags: UmountFlags) -> Result<()> {
debug!("umount: target: {}, flags: {:?}", target, flags);
let target = if target == "/" {
return_errno!(EPERM, "cannot umount rootfs");
} else {
let fs_path = FsPath::try_from(target)?;
let thread = current!();
let fs = thread.fs().read().unwrap();
fs.convert_fspath_to_abs(&fs_path)?
};
let mut rootfs = ROOT_FS.write().unwrap();
// Should we sync the fs before umount?
rootfs.sync()?;
let follow_symlink = !flags.contains(UmountFlags::UMOUNT_NOFOLLOW);
umount_nonroot_fs(&rootfs.root_inode(), &target, follow_symlink)?;
Ok(())
}
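/// Parsed form of the raw `options` string passed to `do_mount`.
/// A sketch of the comma-separated option strings each variant expects
/// (paths and values below are illustrative placeholders):
/// - `UnionFS`: `lowerdir=<path>,upperdir=<path>[,key=<key>]`
/// - `SEFS`:    `dir=<path>[,key=<key>][,mac=<mac>]`
/// - `HostFS`:  `dir=<path>`
/// - `RamFS`:   no options required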
#[derive(Debug)]
pub enum MountOptions {
UnionFS(UnionFSMountOptions),
SEFS(SEFSMountOptions),
HostFS(PathBuf),
RamFS,
}
impl MountOptions {
pub fn from_fs_type_and_options(type_: &ConfigMountFsType, options: *const i8) -> Result<Self> {
Ok(match type_ {
ConfigMountFsType::TYPE_SEFS => {
let sefs_mount_options = {
let options = from_user::clone_cstring_safely(options)?
.to_string_lossy()
.into_owned();
SEFSMountOptions::from_input(options.as_str())?
};
Self::SEFS(sefs_mount_options)
}
ConfigMountFsType::TYPE_UNIONFS => {
let unionfs_mount_options = {
let options = from_user::clone_cstring_safely(options)?
.to_string_lossy()
.into_owned();
UnionFSMountOptions::from_input(options.as_str())?
};
Self::UnionFS(unionfs_mount_options)
}
ConfigMountFsType::TYPE_HOSTFS => {
let options = from_user::clone_cstring_safely(options)?
.to_string_lossy()
.into_owned();
let dir = {
let options: Vec<&str> = options.split(",").collect();
let dir = options
.iter()
.find_map(|s| s.strip_prefix("dir="))
.ok_or_else(|| errno!(EINVAL, "no dir options"))?;
PathBuf::from(dir)
};
Self::HostFS(dir)
}
ConfigMountFsType::TYPE_RAMFS => Self::RamFS,
_ => {
return_errno!(EINVAL, "unsupported fs type");
}
})
}
}
#[derive(Debug)]
pub struct UnionFSMountOptions {
lower_dir: PathBuf,
upper_dir: PathBuf,
key: Option<sgx_key_128bit_t>,
}
impl UnionFSMountOptions {
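    /// Parses a comma-separated option string of the form
    /// `lowerdir=<path>,upperdir=<path>[,key=<key>]` (placeholders illustrative).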
pub fn from_input(input: &str) -> Result<Self> {
let options: Vec<&str> = input.split(",").collect();
let lower_dir = options
.iter()
.find_map(|s| s.strip_prefix("lowerdir="))
.ok_or_else(|| errno!(EINVAL, "no lowerdir options"))?;
let upper_dir = options
.iter()
.find_map(|s| s.strip_prefix("upperdir="))
.ok_or_else(|| errno!(EINVAL, "no upperdir options"))?;
let key = match options.iter().find_map(|s| s.strip_prefix("key=")) {
Some(key_str) => Some(parse_key(key_str)?),
None => None,
};
Ok(Self {
lower_dir: PathBuf::from(lower_dir),
upper_dir: PathBuf::from(upper_dir),
key,
})
}
}
#[derive(Debug)]
pub struct SEFSMountOptions {
dir: PathBuf,
key: Option<sgx_key_128bit_t>,
mac: Option<sgx_aes_gcm_128bit_tag_t>,
}
impl SEFSMountOptions {
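    /// Parses a comma-separated option string of the form
    /// `dir=<path>[,key=<key>][,mac=<mac>]` (placeholders illustrative).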
pub fn from_input(input: &str) -> Result<Self> {
let options: Vec<&str> = input.split(",").collect();
let dir = options
.iter()
.find_map(|s| s.strip_prefix("dir="))
.ok_or_else(|| errno!(EINVAL, "no dir options"))?;
let key = match options.iter().find_map(|s| s.strip_prefix("key=")) {
Some(key_str) => Some(parse_key(key_str)?),
None => None,
};
let mac = match options.iter().find_map(|s| s.strip_prefix("mac=")) {
Some(mac_str) => Some(parse_mac(mac_str)?),
None => None,
};
Ok(Self {
dir: PathBuf::from(dir),
key,
mac,
})
}
}
| 32.790941 | 100 | 0.540113 |
ede9202c7d2cf5181a7b2bdff72389a98ba7e970 | 7,589 | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! The Archivist collects and stores diagnostic data from components.
#![warn(missing_docs)]
use {
anyhow::{Context, Error},
archivist_lib::{
archivist, configs, diagnostics, events::sources::LogConnectorEventSource, logs,
},
argh::FromArgs,
fdio::service_connect,
fidl_fuchsia_sys2::EventSourceMarker,
fidl_fuchsia_sys_internal::{ComponentEventProviderMarker, LogConnectorMarker},
fuchsia_async::{self as fasync, LocalExecutor, SendExecutor},
fuchsia_component::client::connect_to_protocol,
fuchsia_component::server::MissingStartupHandle,
fuchsia_syslog, fuchsia_zircon as zx,
std::path::PathBuf,
tracing::{debug, error, info, warn},
};
/// Monitor, collect, and store diagnostics from components.
// TODO(fxbug.dev/67983) make flags positive rather than negative
#[derive(Debug, Default, FromArgs)]
pub struct Args {
/// disables proxying kernel logger
#[argh(switch)]
disable_klog: bool,
    /// disables the log connector so that individual instances of
    /// the observer don't compete for the log connector listener.
#[argh(switch)]
disable_log_connector: bool,
    /// whether to connect to the event source or not. This can be set to true when the archivist
    /// shouldn't consume events from the Component Framework v2, to remove log spam.
#[argh(switch)]
disable_event_source: bool,
    /// whether to connect to the component event provider or not. This can be set to true when the
    /// archivist shouldn't consume events from the Component Framework v1, to remove log spam.
#[argh(switch)]
disable_component_event_provider: bool,
    // TODO(fxbug.dev/72046) delete when netemul no longer uses this
/// initializes syslog library with a log socket to itself
#[argh(switch)]
consume_own_logs: bool,
/// initializes logging to debuglog via fuchsia.boot.WriteOnlyLog
#[argh(switch)]
log_to_debuglog: bool,
/// serve fuchsia.diagnostics.test.Controller
#[argh(switch)]
install_controller: bool,
/// retrieve a fuchsia.process.Lifecycle handle from the runtime and listen to shutdown events
#[argh(switch)]
listen_to_lifecycle: bool,
/// path to a JSON configuration file
#[argh(option)]
config_path: PathBuf,
/// path to additional configuration for services to connect to
#[argh(option)]
service_config_path: Option<PathBuf>,
}
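// Example invocation (paths are hypothetical; in practice the archivist is
// launched by the component framework rather than by hand):
//     archivist --config-path /config/data/archivist_config.json --listen-to-lifecycle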
fn main() -> Result<(), Error> {
let opt: Args = argh::from_env();
let log_server = init_diagnostics(&opt).context("initializing diagnostics")?;
let config = configs::parse_config(&opt.config_path).context("parsing configuration")?;
debug!("Configuration parsed.");
let num_threads = config.num_threads;
debug!("Running executor with {} threads.", num_threads);
SendExecutor::new(num_threads)?
.run(async_main(opt, config, log_server))
.context("async main")?;
debug!("Exiting.");
Ok(())
}
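/// Initializes logging per the flags: to a self-consumed socket
/// (`--consume-own-logs`), to the kernel debuglog (`--log-to-debuglog`), or
/// otherwise to syslog with the "embedded" tag. Returns the server end of the
/// log socket when the archivist consumes its own logs.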
fn init_diagnostics(opt: &Args) -> Result<Option<zx::Socket>, Error> {
let mut log_server = None;
if opt.consume_own_logs {
assert!(!opt.log_to_debuglog, "cannot specify both consume-own-logs and log-to-debuglog");
let (log_client, server) = zx::Socket::create(zx::SocketOpts::DATAGRAM)?;
log_server = Some(server);
fuchsia_syslog::init_with_socket_and_name(log_client, "archivist")?;
} else if opt.log_to_debuglog {
assert!(!opt.consume_own_logs, "cannot specify both consume-own-logs and log-to-debuglog");
LocalExecutor::new()?.run_singlethreaded(stdout_to_debuglog::init()).unwrap();
log::set_logger(&STDOUT_LOGGER).unwrap();
log::set_max_level(log::LevelFilter::Info);
} else {
fuchsia_syslog::init_with_tags(&["embedded"])?;
}
if opt.consume_own_logs || opt.log_to_debuglog {
info!("Logging started.");
// Always emit the log redaction canary during startup.
logs::redact::emit_canary();
}
diagnostics::init();
Ok(log_server)
}
async fn async_main(
opt: Args,
archivist_configuration: configs::Config,
log_server: Option<zx::Socket>,
) -> Result<(), Error> {
let mut archivist = archivist::ArchivistBuilder::new(archivist_configuration)?;
debug!("Archivist initialized from configuration.");
archivist
.install_log_services(archivist::LogOpts { ingest_v2_logs: !opt.disable_event_source })
.await;
if !opt.disable_component_event_provider {
let legacy_event_provider = connect_to_protocol::<ComponentEventProviderMarker>()
.context("failed to connect to event provider")?;
archivist.add_event_source("v1", Box::new(legacy_event_provider)).await;
}
if !opt.disable_event_source {
let event_source = connect_to_protocol::<EventSourceMarker>()
.context("failed to connect to event source")?;
archivist.add_event_source("v2", Box::new(event_source)).await;
}
if let Some(socket) = log_server {
archivist.consume_own_logs(socket);
}
assert!(
!(opt.install_controller && opt.listen_to_lifecycle),
"only one shutdown mechanism can be specified."
);
if opt.install_controller {
archivist.install_controller_service();
}
if opt.listen_to_lifecycle {
archivist.install_lifecycle_listener();
}
if !opt.disable_log_connector {
let connector = connect_to_protocol::<LogConnectorMarker>()?;
archivist
.add_event_source("log_connector", Box::new(LogConnectorEventSource::new(connector)))
.await;
}
if !opt.disable_klog {
let debuglog = logs::KernelDebugLog::new().await.context("Failed to read kernel logs")?;
fasync::Task::spawn(archivist.data_repo().clone().drain_debuglog(debuglog)).detach();
}
let mut services = vec![];
if let Some(service_config_path) = &opt.service_config_path {
match configs::parse_service_config(service_config_path) {
Err(e) => {
error!("Couldn't parse service config: {}", e);
}
Ok(config) => {
for name in config.service_list.iter() {
info!("Connecting to service {}", name);
let (local, remote) = zx::Channel::create().expect("cannot create channels");
match service_connect(&format!("/svc/{}", name), remote) {
Ok(_) => {
services.push(local);
}
Err(e) => {
error!("Couldn't connect to service {}: {:?}", name, e);
}
}
}
}
}
}
let startup_handle =
fuchsia_runtime::take_startup_handle(fuchsia_runtime::HandleType::DirectoryRequest.into())
.ok_or(MissingStartupHandle)?;
archivist.run(zx::Channel::from(startup_handle)).await?;
Ok(())
}
static STDOUT_LOGGER: StdoutLogger = StdoutLogger;
struct StdoutLogger;
impl log::Log for StdoutLogger {
fn enabled(&self, metadata: &log::Metadata<'_>) -> bool {
metadata.level() <= log::Level::Info
}
fn log(&self, record: &log::Record<'_>) {
if self.enabled(record.metadata()) {
println!("[archivist] {}: {}", record.level(), record.args());
}
}
fn flush(&self) {}
}
| 34.97235 | 100 | 0.646857 |
ef7783a89ea061e9cd2f3c3b66bdf0515dd14987 | 64,590 | use crate::Element;
use crate::{UncertainFloat, Isotope, AtomicScatteringFactor, XrayScatteringFactor, NeutronScatteringFactor};
pub fn load() -> Element {
Element {
atomic_number: 27,
name: "Cobalt",
symbol: "Co",
mass: 58.933_2_f64,
common_ions: vec![2, 3],
uncommon_ions: vec![-3, -1, 1, 4, 5],
xray_scattering: Some(XrayScatteringFactor {
table: vec![
AtomicScatteringFactor { energy: 0.01_f64, f1: None, f2: Some(1.420_71_f64) },
AtomicScatteringFactor { energy: 0.010_161_7_f64, f1: None, f2: Some(1.459_25_f64) },
AtomicScatteringFactor { energy: 0.010_326_1_f64, f1: None, f2: Some(1.498_84_f64) },
AtomicScatteringFactor { energy: 0.010_493_1_f64, f1: None, f2: Some(1.539_49_f64) },
AtomicScatteringFactor { energy: 0.010_662_8_f64, f1: None, f2: Some(1.581_25_f64) },
AtomicScatteringFactor { energy: 0.010_835_3_f64, f1: None, f2: Some(1.624_15_f64) },
AtomicScatteringFactor { energy: 0.011_010_6_f64, f1: None, f2: Some(1.668_2_f64) },
AtomicScatteringFactor { energy: 0.011_188_6_f64, f1: None, f2: Some(1.713_45_f64) },
AtomicScatteringFactor { energy: 0.011_369_6_f64, f1: None, f2: Some(1.759_93_f64) },
AtomicScatteringFactor { energy: 0.011_553_5_f64, f1: None, f2: Some(1.807_67_f64) },
AtomicScatteringFactor { energy: 0.011_740_4_f64, f1: None, f2: Some(1.856_71_f64) },
AtomicScatteringFactor { energy: 0.011_930_3_f64, f1: None, f2: Some(1.907_07_f64) },
AtomicScatteringFactor { energy: 0.012_123_2_f64, f1: None, f2: Some(1.946_82_f64) },
AtomicScatteringFactor { energy: 0.012_319_3_f64, f1: None, f2: Some(1.986_f64) },
AtomicScatteringFactor { energy: 0.012_518_6_f64, f1: None, f2: Some(2.025_96_f64) },
AtomicScatteringFactor { energy: 0.012_721_f64, f1: None, f2: Some(2.066_74_f64) },
AtomicScatteringFactor { energy: 0.012_926_8_f64, f1: None, f2: Some(2.108_33_f64) },
AtomicScatteringFactor { energy: 0.013_135_9_f64, f1: None, f2: Some(2.150_76_f64) },
AtomicScatteringFactor { energy: 0.013_348_3_f64, f1: None, f2: Some(2.194_04_f64) },
AtomicScatteringFactor { energy: 0.013_564_2_f64, f1: None, f2: Some(2.238_19_f64) },
AtomicScatteringFactor { energy: 0.013_783_6_f64, f1: None, f2: Some(2.283_23_f64) },
AtomicScatteringFactor { energy: 0.014_006_6_f64, f1: None, f2: Some(2.329_18_f64) },
AtomicScatteringFactor { energy: 0.014_233_1_f64, f1: None, f2: Some(2.376_05_f64) },
AtomicScatteringFactor { energy: 0.014_463_3_f64, f1: None, f2: Some(2.423_71_f64) },
AtomicScatteringFactor { energy: 0.014_697_3_f64, f1: None, f2: Some(2.471_99_f64) },
AtomicScatteringFactor { energy: 0.014_935_f64, f1: None, f2: Some(2.521_23_f64) },
AtomicScatteringFactor { energy: 0.015_176_5_f64, f1: None, f2: Some(2.571_46_f64) },
AtomicScatteringFactor { energy: 0.015_422_f64, f1: None, f2: Some(2.622_68_f64) },
AtomicScatteringFactor { energy: 0.015_671_4_f64, f1: None, f2: Some(2.674_92_f64) },
AtomicScatteringFactor { energy: 0.015_924_9_f64, f1: None, f2: Some(2.728_21_f64) },
AtomicScatteringFactor { energy: 0.016_182_5_f64, f1: None, f2: Some(2.782_55_f64) },
AtomicScatteringFactor { energy: 0.016_444_2_f64, f1: None, f2: Some(2.837_98_f64) },
AtomicScatteringFactor { energy: 0.016_710_2_f64, f1: None, f2: Some(2.894_52_f64) },
AtomicScatteringFactor { energy: 0.016_980_5_f64, f1: None, f2: Some(2.952_17_f64) },
AtomicScatteringFactor { energy: 0.017_255_1_f64, f1: None, f2: Some(3.002_03_f64) },
AtomicScatteringFactor { energy: 0.017_534_2_f64, f1: None, f2: Some(3.044_55_f64) },
AtomicScatteringFactor { energy: 0.017_817_8_f64, f1: None, f2: Some(3.087_68_f64) },
AtomicScatteringFactor { energy: 0.018_106_f64, f1: None, f2: Some(3.131_42_f64) },
AtomicScatteringFactor { energy: 0.018_398_9_f64, f1: None, f2: Some(3.175_78_f64) },
AtomicScatteringFactor { energy: 0.018_696_4_f64, f1: None, f2: Some(3.220_76_f64) },
AtomicScatteringFactor { energy: 0.018_998_8_f64, f1: None, f2: Some(3.266_39_f64) },
AtomicScatteringFactor { energy: 0.019_306_1_f64, f1: None, f2: Some(3.312_66_f64) },
AtomicScatteringFactor { energy: 0.019_618_4_f64, f1: None, f2: Some(3.359_58_f64) },
AtomicScatteringFactor { energy: 0.019_935_7_f64, f1: None, f2: Some(3.407_f64) },
AtomicScatteringFactor { energy: 0.020_258_2_f64, f1: None, f2: Some(3.451_66_f64) },
AtomicScatteringFactor { energy: 0.020_585_8_f64, f1: None, f2: Some(3.496_9_f64) },
AtomicScatteringFactor { energy: 0.020_918_8_f64, f1: None, f2: Some(3.542_74_f64) },
AtomicScatteringFactor { energy: 0.021_257_1_f64, f1: None, f2: Some(3.589_17_f64) },
AtomicScatteringFactor { energy: 0.021_600_9_f64, f1: None, f2: Some(3.636_22_f64) },
AtomicScatteringFactor { energy: 0.021_950_3_f64, f1: None, f2: Some(3.683_88_f64) },
AtomicScatteringFactor { energy: 0.022_305_3_f64, f1: None, f2: Some(3.732_17_f64) },
AtomicScatteringFactor { energy: 0.022_666_1_f64, f1: None, f2: Some(3.781_09_f64) },
AtomicScatteringFactor { energy: 0.023_032_7_f64, f1: None, f2: Some(3.830_65_f64) },
AtomicScatteringFactor { energy: 0.023_405_3_f64, f1: None, f2: Some(3.880_86_f64) },
AtomicScatteringFactor { energy: 0.023_783_8_f64, f1: None, f2: Some(3.931_73_f64) },
AtomicScatteringFactor { energy: 0.024_168_5_f64, f1: None, f2: Some(3.983_27_f64) },
AtomicScatteringFactor { energy: 0.024_559_4_f64, f1: None, f2: Some(4.027_21_f64) },
AtomicScatteringFactor { energy: 0.024_956_6_f64, f1: None, f2: Some(4.064_35_f64) },
AtomicScatteringFactor { energy: 0.025_360_3_f64, f1: None, f2: Some(4.101_83_f64) },
AtomicScatteringFactor { energy: 0.025_770_5_f64, f1: None, f2: Some(4.139_66_f64) },
AtomicScatteringFactor { energy: 0.026_187_3_f64, f1: None, f2: Some(4.177_84_f64) },
AtomicScatteringFactor { energy: 0.026_610_9_f64, f1: None, f2: Some(4.216_37_f64) },
AtomicScatteringFactor { energy: 0.027_041_3_f64, f1: None, f2: Some(4.255_26_f64) },
AtomicScatteringFactor { energy: 0.027_478_6_f64, f1: None, f2: Some(4.294_5_f64) },
AtomicScatteringFactor { energy: 0.027_923_1_f64, f1: None, f2: Some(4.334_11_f64) },
AtomicScatteringFactor { energy: 0.028_374_7_f64, f1: None, f2: Some(4.363_66_f64) },
AtomicScatteringFactor { energy: 0.028_833_7_f64, f1: None, f2: Some(4.358_57_f64) },
AtomicScatteringFactor { energy: 0.029_3_f64, f1: Some(3.982_51_f64), f2: Some(4.353_48_f64) },
AtomicScatteringFactor { energy: 0.029_773_9_f64, f1: Some(4.050_81_f64), f2: Some(4.348_4_f64) },
AtomicScatteringFactor { energy: 0.030_255_5_f64, f1: Some(4.114_38_f64), f2: Some(4.343_33_f64) },
AtomicScatteringFactor { energy: 0.030_744_9_f64, f1: Some(4.174_42_f64), f2: Some(4.338_26_f64) },
AtomicScatteringFactor { energy: 0.031_242_1_f64, f1: Some(4.231_78_f64), f2: Some(4.333_2_f64) },
AtomicScatteringFactor { energy: 0.031_747_5_f64, f1: Some(4.287_23_f64), f2: Some(4.328_14_f64) },
AtomicScatteringFactor { energy: 0.032_260_9_f64, f1: Some(4.341_62_f64), f2: Some(4.323_08_f64) },
AtomicScatteringFactor { energy: 0.032_782_7_f64, f1: Some(4.398_19_f64), f2: Some(4.318_04_f64) },
AtomicScatteringFactor { energy: 0.033_313_f64, f1: Some(4.461_59_f64), f2: Some(4.302_f64) },
AtomicScatteringFactor { energy: 0.033_851_8_f64, f1: Some(4.510_57_f64), f2: Some(4.277_1_f64) },
AtomicScatteringFactor { energy: 0.034_399_3_f64, f1: Some(4.551_07_f64), f2: Some(4.252_34_f64) },
AtomicScatteringFactor { energy: 0.034_955_7_f64, f1: Some(4.585_79_f64), f2: Some(4.227_73_f64) },
AtomicScatteringFactor { energy: 0.035_521_1_f64, f1: Some(4.615_16_f64), f2: Some(4.203_26_f64) },
AtomicScatteringFactor { energy: 0.036_095_6_f64, f1: Some(4.639_25_f64), f2: Some(4.178_93_f64) },
AtomicScatteringFactor { energy: 0.036_679_4_f64, f1: Some(4.657_65_f64), f2: Some(4.154_73_f64) },
AtomicScatteringFactor { energy: 0.037_272_7_f64, f1: Some(4.668_4_f64), f2: Some(4.130_69_f64) },
AtomicScatteringFactor { energy: 0.037_875_5_f64, f1: Some(4.674_46_f64), f2: Some(4.115_92_f64) },
AtomicScatteringFactor { energy: 0.038_488_2_f64, f1: Some(4.680_6_f64), f2: Some(4.101_81_f64) },
AtomicScatteringFactor { energy: 0.039_110_7_f64, f1: Some(4.684_68_f64), f2: Some(4.087_75_f64) },
AtomicScatteringFactor { energy: 0.039_743_2_f64, f1: Some(4.685_56_f64), f2: Some(4.073_74_f64) },
AtomicScatteringFactor { energy: 0.040_386_1_f64, f1: Some(4.682_76_f64), f2: Some(4.059_77_f64) },
AtomicScatteringFactor { energy: 0.041_039_3_f64, f1: Some(4.675_85_f64), f2: Some(4.045_86_f64) },
AtomicScatteringFactor { energy: 0.041_703_1_f64, f1: Some(4.664_34_f64), f2: Some(4.031_99_f64) },
AtomicScatteringFactor { energy: 0.042_377_6_f64, f1: Some(4.647_8_f64), f2: Some(4.018_53_f64) },
AtomicScatteringFactor { energy: 0.043_063_f64, f1: Some(4.626_11_f64), f2: Some(4.005_28_f64) },
AtomicScatteringFactor { energy: 0.043_759_5_f64, f1: Some(4.598_55_f64), f2: Some(3.992_06_f64) },
AtomicScatteringFactor { energy: 0.044_467_3_f64, f1: Some(4.564_4_f64), f2: Some(3.978_89_f64) },
AtomicScatteringFactor { energy: 0.045_186_5_f64, f1: Some(4.522_8_f64), f2: Some(3.965_76_f64) },
AtomicScatteringFactor { energy: 0.045_917_4_f64, f1: Some(4.472_71_f64), f2: Some(3.952_68_f64) },
AtomicScatteringFactor { energy: 0.046_66_f64, f1: Some(4.412_79_f64), f2: Some(3.939_68_f64) },
AtomicScatteringFactor { energy: 0.047_414_7_f64, f1: Some(4.341_77_f64), f2: Some(3.927_25_f64) },
AtomicScatteringFactor { energy: 0.048_181_6_f64, f1: Some(4.257_82_f64), f2: Some(3.914_87_f64) },
AtomicScatteringFactor { energy: 0.048_960_9_f64, f1: Some(4.158_2_f64), f2: Some(3.902_52_f64) },
AtomicScatteringFactor { energy: 0.049_752_8_f64, f1: Some(4.038_7_f64), f2: Some(3.890_21_f64) },
AtomicScatteringFactor { energy: 0.050_557_6_f64, f1: Some(3.892_04_f64), f2: Some(3.877_94_f64) },
AtomicScatteringFactor { energy: 0.051_375_3_f64, f1: Some(3.713_61_f64), f2: Some(3.879_3_f64) },
AtomicScatteringFactor { energy: 0.052_206_2_f64, f1: Some(3.504_55_f64), f2: Some(3.885_47_f64) },
AtomicScatteringFactor { energy: 0.053_050_6_f64, f1: Some(3.231_45_f64), f2: Some(3.898_69_f64) },
AtomicScatteringFactor { energy: 0.053_908_7_f64, f1: Some(2.924_56_f64), f2: Some(3.990_62_f64) },
AtomicScatteringFactor { energy: 0.054_780_6_f64, f1: Some(2.578_71_f64), f2: Some(4.084_73_f64) },
AtomicScatteringFactor { energy: 0.055_666_7_f64, f1: Some(2.120_07_f64), f2: Some(4.216_66_f64) },
AtomicScatteringFactor { energy: 0.056_567_f64, f1: Some(1.507_81_f64), f2: Some(4.403_44_f64) },
AtomicScatteringFactor { energy: 0.057_482_f64, f1: Some(0.481_044_f64), f2: Some(5.046_5_f64) },
AtomicScatteringFactor { energy: 0.058_411_7_f64, f1: Some(-0.135_5_f64), f2: Some(6.268_67_f64) },
AtomicScatteringFactor { energy: 0.059_356_4_f64, f1: Some(-0.348_01_f64), f2: Some(7.786_83_f64) },
AtomicScatteringFactor { energy: 0.060_316_5_f64, f1: Some(0.174_934_f64), f2: Some(9.672_73_f64) },
AtomicScatteringFactor { energy: 0.061_292_1_f64, f1: Some(1.785_64_f64), f2: Some(10.653_7_f64) },
AtomicScatteringFactor { energy: 0.062_283_4_f64, f1: Some(3.050_37_f64), f2: Some(11.049_1_f64) },
AtomicScatteringFactor { energy: 0.063_290_8_f64, f1: Some(4.178_87_f64), f2: Some(10.944_f64) },
AtomicScatteringFactor { energy: 0.064_314_5_f64, f1: Some(4.941_2_f64), f2: Some(10.695_f64) },
AtomicScatteringFactor { energy: 0.065_354_7_f64, f1: Some(5.472_62_f64), f2: Some(10.472_2_f64) },
AtomicScatteringFactor { energy: 0.066_411_8_f64, f1: Some(5.920_47_f64), f2: Some(10.275_7_f64) },
AtomicScatteringFactor { energy: 0.067_485_9_f64, f1: Some(6.301_38_f64), f2: Some(10.082_8_f64) },
AtomicScatteringFactor { energy: 0.068_577_5_f64, f1: Some(6.623_07_f64), f2: Some(9.893_49_f64) },
AtomicScatteringFactor { energy: 0.069_686_7_f64, f1: Some(6.891_14_f64), f2: Some(9.707_77_f64) },
AtomicScatteringFactor { energy: 0.070_813_8_f64, f1: Some(7.097_57_f64), f2: Some(9.542_02_f64) },
AtomicScatteringFactor { energy: 0.071_959_1_f64, f1: Some(7.291_07_f64), f2: Some(9.409_58_f64) },
AtomicScatteringFactor { energy: 0.073_123_f64, f1: Some(7.466_32_f64), f2: Some(9.278_99_f64) },
AtomicScatteringFactor { energy: 0.074_305_7_f64, f1: Some(7.617_56_f64), f2: Some(9.150_2_f64) },
AtomicScatteringFactor { energy: 0.075_507_6_f64, f1: Some(7.735_33_f64), f2: Some(9.023_21_f64) },
AtomicScatteringFactor { energy: 0.076_728_9_f64, f1: Some(7.787_07_f64), f2: Some(8.899_86_f64) },
AtomicScatteringFactor { energy: 0.077_969_9_f64, f1: Some(7.841_23_f64), f2: Some(8.898_39_f64) },
AtomicScatteringFactor { energy: 0.079_231_f64, f1: Some(7.934_3_f64), f2: Some(8.896_92_f64) },
AtomicScatteringFactor { energy: 0.080_512_5_f64, f1: Some(8.040_89_f64), f2: Some(8.895_45_f64) },
AtomicScatteringFactor { energy: 0.081_814_7_f64, f1: Some(8.153_64_f64), f2: Some(8.893_97_f64) },
AtomicScatteringFactor { energy: 0.083_138_f64, f1: Some(8.269_45_f64), f2: Some(8.892_51_f64) },
AtomicScatteringFactor { energy: 0.084_482_7_f64, f1: Some(8.386_56_f64), f2: Some(8.891_03_f64) },
AtomicScatteringFactor { energy: 0.085_849_1_f64, f1: Some(8.503_8_f64), f2: Some(8.889_57_f64) },
AtomicScatteringFactor { energy: 0.087_237_7_f64, f1: Some(8.620_13_f64), f2: Some(8.888_1_f64) },
AtomicScatteringFactor { energy: 0.088_648_7_f64, f1: Some(8.733_91_f64), f2: Some(8.886_63_f64) },
AtomicScatteringFactor { energy: 0.090_082_5_f64, f1: Some(8.840_08_f64), f2: Some(8.885_16_f64) },
AtomicScatteringFactor { energy: 0.091_539_5_f64, f1: Some(8.943_02_f64), f2: Some(8.901_85_f64) },
AtomicScatteringFactor { energy: 0.093_020_1_f64, f1: Some(9.057_32_f64), f2: Some(8.924_56_f64) },
AtomicScatteringFactor { energy: 0.094_524_6_f64, f1: Some(9.178_02_f64), f2: Some(8.947_31_f64) },
AtomicScatteringFactor { energy: 0.096_053_5_f64, f1: Some(9.303_36_f64), f2: Some(8.970_14_f64) },
AtomicScatteringFactor { energy: 0.097_607_1_f64, f1: Some(9.433_02_f64), f2: Some(8.993_01_f64) },
AtomicScatteringFactor { energy: 0.099_185_8_f64, f1: Some(9.567_27_f64), f2: Some(9.015_95_f64) },
AtomicScatteringFactor { energy: 0.100_79_f64, f1: Some(9.707_15_f64), f2: Some(9.038_94_f64) },
AtomicScatteringFactor { energy: 0.102_42_f64, f1: Some(9.855_58_f64), f2: Some(9.061_99_f64) },
AtomicScatteringFactor { energy: 0.104_077_f64, f1: Some(10.024_4_f64), f2: Some(9.076_17_f64) },
AtomicScatteringFactor { energy: 0.105_76_f64, f1: Some(10.182_1_f64), f2: Some(9.059_84_f64) },
AtomicScatteringFactor { energy: 0.107_471_f64, f1: Some(10.328_9_f64), f2: Some(9.043_53_f64) },
AtomicScatteringFactor { energy: 0.109_209_f64, f1: Some(10.469_6_f64), f2: Some(9.027_26_f64) },
AtomicScatteringFactor { energy: 0.110_975_f64, f1: Some(10.607_f64), f2: Some(9.011_02_f64) },
AtomicScatteringFactor { energy: 0.112_77_f64, f1: Some(10.741_9_f64), f2: Some(8.994_79_f64) },
AtomicScatteringFactor { energy: 0.114_594_f64, f1: Some(10.875_1_f64), f2: Some(8.978_6_f64) },
AtomicScatteringFactor { energy: 0.116_448_f64, f1: Some(11.006_8_f64), f2: Some(8.962_45_f64) },
AtomicScatteringFactor { energy: 0.118_331_f64, f1: Some(11.137_5_f64), f2: Some(8.946_33_f64) },
AtomicScatteringFactor { energy: 0.120_245_f64, f1: Some(11.267_4_f64), f2: Some(8.930_22_f64) },
AtomicScatteringFactor { energy: 0.122_19_f64, f1: Some(11.396_7_f64), f2: Some(8.914_15_f64) },
AtomicScatteringFactor { energy: 0.124_166_f64, f1: Some(11.525_6_f64), f2: Some(8.898_11_f64) },
AtomicScatteringFactor { energy: 0.126_175_f64, f1: Some(11.653_3_f64), f2: Some(8.882_1_f64) },
AtomicScatteringFactor { energy: 0.128_215_f64, f1: Some(11.768_9_f64), f2: Some(8.876_6_f64) },
AtomicScatteringFactor { energy: 0.130_289_f64, f1: Some(11.924_3_f64), f2: Some(8.887_91_f64) },
AtomicScatteringFactor { energy: 0.132_397_f64, f1: Some(12.079_1_f64), f2: Some(8.851_18_f64) },
AtomicScatteringFactor { energy: 0.134_538_f64, f1: Some(12.220_9_f64), f2: Some(8.814_53_f64) },
AtomicScatteringFactor { energy: 0.136_714_f64, f1: Some(12.355_3_f64), f2: Some(8.778_04_f64) },
AtomicScatteringFactor { energy: 0.138_925_f64, f1: Some(12.486_2_f64), f2: Some(8.741_72_f64) },
AtomicScatteringFactor { energy: 0.141_172_f64, f1: Some(12.614_7_f64), f2: Some(8.705_59_f64) },
AtomicScatteringFactor { energy: 0.143_456_f64, f1: Some(12.741_6_f64), f2: Some(8.669_63_f64) },
AtomicScatteringFactor { energy: 0.145_776_f64, f1: Some(12.867_4_f64), f2: Some(8.633_79_f64) },
AtomicScatteringFactor { energy: 0.148_134_f64, f1: Some(12.992_6_f64), f2: Some(8.598_04_f64) },
AtomicScatteringFactor { energy: 0.150_53_f64, f1: Some(13.117_5_f64), f2: Some(8.562_4_f64) },
AtomicScatteringFactor { energy: 0.152_964_f64, f1: Some(13.243_f64), f2: Some(8.527_1_f64) },
AtomicScatteringFactor { energy: 0.155_439_f64, f1: Some(13.370_5_f64), f2: Some(8.491_88_f64) },
AtomicScatteringFactor { energy: 0.157_953_f64, f1: Some(13.503_6_f64), f2: Some(8.456_77_f64) },
AtomicScatteringFactor { energy: 0.160_507_f64, f1: Some(13.637_7_f64), f2: Some(8.409_11_f64) },
AtomicScatteringFactor { energy: 0.163_103_f64, f1: Some(13.772_3_f64), f2: Some(8.356_f64) },
AtomicScatteringFactor { energy: 0.165_742_f64, f1: Some(13.897_3_f64), f2: Some(8.295_84_f64) },
AtomicScatteringFactor { energy: 0.168_422_f64, f1: Some(14.016_7_f64), f2: Some(8.236_02_f64) },
AtomicScatteringFactor { energy: 0.171_146_f64, f1: Some(14.132_3_f64), f2: Some(8.176_63_f64) },
AtomicScatteringFactor { energy: 0.173_915_f64, f1: Some(14.245_3_f64), f2: Some(8.117_67_f64) },
AtomicScatteringFactor { energy: 0.176_727_f64, f1: Some(14.356_2_f64), f2: Some(8.059_21_f64) },
AtomicScatteringFactor { energy: 0.179_586_f64, f1: Some(14.465_7_f64), f2: Some(8.001_18_f64) },
AtomicScatteringFactor { energy: 0.182_491_f64, f1: Some(14.574_5_f64), f2: Some(7.943_57_f64) },
AtomicScatteringFactor { energy: 0.185_442_f64, f1: Some(14.683_5_f64), f2: Some(7.886_32_f64) },
AtomicScatteringFactor { energy: 0.188_442_f64, f1: Some(14.794_7_f64), f2: Some(7.827_54_f64) },
AtomicScatteringFactor { energy: 0.191_489_f64, f1: Some(14.908_6_f64), f2: Some(7.765_16_f64) },
AtomicScatteringFactor { energy: 0.194_587_f64, f1: Some(15.016_2_f64), f2: Some(7.693_06_f64) },
AtomicScatteringFactor { energy: 0.197_734_f64, f1: Some(15.118_3_f64), f2: Some(7.621_72_f64) },
AtomicScatteringFactor { energy: 0.200_932_f64, f1: Some(15.217_f64), f2: Some(7.551_1_f64) },
AtomicScatteringFactor { energy: 0.204_182_f64, f1: Some(15.313_6_f64), f2: Some(7.481_f64) },
AtomicScatteringFactor { energy: 0.207_485_f64, f1: Some(15.409_8_f64), f2: Some(7.410_66_f64) },
AtomicScatteringFactor { energy: 0.210_84_f64, f1: Some(15.506_f64), f2: Some(7.339_87_f64) },
AtomicScatteringFactor { energy: 0.214_251_f64, f1: Some(15.598_8_f64), f2: Some(7.261_83_f64) },
AtomicScatteringFactor { energy: 0.217_716_f64, f1: Some(15.686_4_f64), f2: Some(7.184_23_f64) },
AtomicScatteringFactor { energy: 0.221_237_f64, f1: Some(15.77_f64), f2: Some(7.107_47_f64) },
AtomicScatteringFactor { energy: 0.224_816_f64, f1: Some(15.851_f64), f2: Some(7.031_72_f64) },
AtomicScatteringFactor { energy: 0.228_452_f64, f1: Some(15.93_f64), f2: Some(6.956_7_f64) },
AtomicScatteringFactor { energy: 0.232_147_f64, f1: Some(16.007_3_f64), f2: Some(6.882_36_f64) },
AtomicScatteringFactor { energy: 0.235_902_f64, f1: Some(16.084_f64), f2: Some(6.808_89_f64) },
AtomicScatteringFactor { energy: 0.239_717_f64, f1: Some(16.161_4_f64), f2: Some(6.733_46_f64) },
AtomicScatteringFactor { energy: 0.243_595_f64, f1: Some(16.235_4_f64), f2: Some(6.655_5_f64) },
AtomicScatteringFactor { energy: 0.247_535_f64, f1: Some(16.305_9_f64), f2: Some(6.575_87_f64) },
AtomicScatteringFactor { energy: 0.251_538_f64, f1: Some(16.372_5_f64), f2: Some(6.496_9_f64) },
AtomicScatteringFactor { energy: 0.255_607_f64, f1: Some(16.436_4_f64), f2: Some(6.418_91_f64) },
AtomicScatteringFactor { energy: 0.259_741_f64, f1: Some(16.498_2_f64), f2: Some(6.341_75_f64) },
AtomicScatteringFactor { energy: 0.263_942_f64, f1: Some(16.558_4_f64), f2: Some(6.265_51_f64) },
AtomicScatteringFactor { energy: 0.268_211_f64, f1: Some(16.619_1_f64), f2: Some(6.190_16_f64) },
AtomicScatteringFactor { energy: 0.272_549_f64, f1: Some(16.677_9_f64), f2: Some(6.110_7_f64) },
AtomicScatteringFactor { energy: 0.276_957_f64, f1: Some(16.732_7_f64), f2: Some(6.030_7_f64) },
AtomicScatteringFactor { energy: 0.281_437_f64, f1: Some(16.783_9_f64), f2: Some(5.951_19_f64) },
AtomicScatteringFactor { energy: 0.285_989_f64, f1: Some(16.832_3_f64), f2: Some(5.872_52_f64) },
AtomicScatteringFactor { energy: 0.290_615_f64, f1: Some(16.878_2_f64), f2: Some(5.794_87_f64) },
AtomicScatteringFactor { energy: 0.295_315_f64, f1: Some(16.922_7_f64), f2: Some(5.718_31_f64) },
AtomicScatteringFactor { energy: 0.300_092_f64, f1: Some(16.965_5_f64), f2: Some(5.641_28_f64) },
AtomicScatteringFactor { energy: 0.304_945_f64, f1: Some(17.006_8_f64), f2: Some(5.563_94_f64) },
AtomicScatteringFactor { energy: 0.309_878_f64, f1: Some(17.044_6_f64), f2: Some(5.486_05_f64) },
AtomicScatteringFactor { energy: 0.314_89_f64, f1: Some(17.079_3_f64), f2: Some(5.409_04_f64) },
AtomicScatteringFactor { energy: 0.319_983_f64, f1: Some(17.111_5_f64), f2: Some(5.333_16_f64) },
AtomicScatteringFactor { energy: 0.325_158_f64, f1: Some(17.142_5_f64), f2: Some(5.258_52_f64) },
AtomicScatteringFactor { energy: 0.330_418_f64, f1: Some(17.171_5_f64), f2: Some(5.182_98_f64) },
AtomicScatteringFactor { energy: 0.335_762_f64, f1: Some(17.198_2_f64), f2: Some(5.107_62_f64) },
AtomicScatteringFactor { energy: 0.341_192_f64, f1: Some(17.221_8_f64), f2: Some(5.032_29_f64) },
AtomicScatteringFactor { energy: 0.346_711_f64, f1: Some(17.242_4_f64), f2: Some(4.957_93_f64) },
AtomicScatteringFactor { energy: 0.352_319_f64, f1: Some(17.260_6_f64), f2: Some(4.884_64_f64) },
AtomicScatteringFactor { energy: 0.358_017_f64, f1: Some(17.277_4_f64), f2: Some(4.812_43_f64) },
AtomicScatteringFactor { energy: 0.363_808_f64, f1: Some(17.291_9_f64), f2: Some(4.739_13_f64) },
AtomicScatteringFactor { energy: 0.369_692_f64, f1: Some(17.303_1_f64), f2: Some(4.666_42_f64) },
AtomicScatteringFactor { energy: 0.375_672_f64, f1: Some(17.311_3_f64), f2: Some(4.594_58_f64) },
AtomicScatteringFactor { energy: 0.381_748_f64, f1: Some(17.317_f64), f2: Some(4.523_85_f64) },
AtomicScatteringFactor { energy: 0.387_922_f64, f1: Some(17.320_7_f64), f2: Some(4.453_74_f64) },
AtomicScatteringFactor { energy: 0.394_197_f64, f1: Some(17.321_5_f64), f2: Some(4.383_67_f64) },
AtomicScatteringFactor { energy: 0.400_573_f64, f1: Some(17.319_4_f64), f2: Some(4.313_98_f64) },
AtomicScatteringFactor { energy: 0.407_052_f64, f1: Some(17.314_f64), f2: Some(4.245_1_f64) },
AtomicScatteringFactor { energy: 0.413_635_f64, f1: Some(17.305_9_f64), f2: Some(4.177_33_f64) },
AtomicScatteringFactor { energy: 0.420_326_f64, f1: Some(17.294_9_f64), f2: Some(4.109_64_f64) },
AtomicScatteringFactor { energy: 0.427_124_f64, f1: Some(17.280_9_f64), f2: Some(4.042_63_f64) },
AtomicScatteringFactor { energy: 0.434_032_f64, f1: Some(17.263_1_f64), f2: Some(3.975_9_f64) },
AtomicScatteringFactor { energy: 0.441_052_f64, f1: Some(17.241_7_f64), f2: Some(3.910_23_f64) },
AtomicScatteringFactor { energy: 0.448_186_f64, f1: Some(17.217_2_f64), f2: Some(3.845_46_f64) },
AtomicScatteringFactor { energy: 0.455_435_f64, f1: Some(17.188_9_f64), f2: Some(3.780_9_f64) },
AtomicScatteringFactor { energy: 0.462_802_f64, f1: Some(17.156_5_f64), f2: Some(3.717_17_f64) },
AtomicScatteringFactor { energy: 0.470_287_f64, f1: Some(17.12_f64), f2: Some(3.654_38_f64) },
AtomicScatteringFactor { energy: 0.477_894_f64, f1: Some(17.08_f64), f2: Some(3.592_62_f64) },
AtomicScatteringFactor { energy: 0.485_623_f64, f1: Some(17.035_5_f64), f2: Some(3.530_09_f64) },
AtomicScatteringFactor { energy: 0.493_478_f64, f1: Some(16.985_2_f64), f2: Some(3.467_97_f64) },
AtomicScatteringFactor { energy: 0.501_459_f64, f1: Some(16.929_1_f64), f2: Some(3.406_95_f64) },
AtomicScatteringFactor { energy: 0.509_57_f64, f1: Some(16.867_3_f64), f2: Some(3.347_01_f64) },
AtomicScatteringFactor { energy: 0.517_812_f64, f1: Some(16.799_f64), f2: Some(3.288_28_f64) },
AtomicScatteringFactor { energy: 0.526_187_f64, f1: Some(16.724_3_f64), f2: Some(3.231_15_f64) },
AtomicScatteringFactor { energy: 0.534_698_f64, f1: Some(16.643_1_f64), f2: Some(3.176_12_f64) },
AtomicScatteringFactor { energy: 0.543_346_f64, f1: Some(16.555_8_f64), f2: Some(3.122_57_f64) },
AtomicScatteringFactor { energy: 0.552_134_f64, f1: Some(16.462_2_f64), f2: Some(3.069_98_f64) },
AtomicScatteringFactor { energy: 0.561_065_f64, f1: Some(16.360_7_f64), f2: Some(3.015_58_f64) },
AtomicScatteringFactor { energy: 0.570_139_f64, f1: Some(16.247_8_f64), f2: Some(2.961_12_f64) },
AtomicScatteringFactor { energy: 0.579_361_f64, f1: Some(16.123_6_f64), f2: Some(2.909_27_f64) },
AtomicScatteringFactor { energy: 0.588_732_f64, f1: Some(15.988_1_f64), f2: Some(2.858_82_f64) },
AtomicScatteringFactor { energy: 0.598_254_f64, f1: Some(15.840_1_f64), f2: Some(2.809_54_f64) },
AtomicScatteringFactor { energy: 0.607_93_f64, f1: Some(15.677_9_f64), f2: Some(2.761_06_f64) },
AtomicScatteringFactor { energy: 0.617_763_f64, f1: Some(15.499_4_f64), f2: Some(2.713_46_f64) },
AtomicScatteringFactor { energy: 0.627_755_f64, f1: Some(15.302_8_f64), f2: Some(2.666_56_f64) },
AtomicScatteringFactor { energy: 0.637_908_f64, f1: Some(15.084_7_f64), f2: Some(2.618_48_f64) },
AtomicScatteringFactor { energy: 0.648_226_f64, f1: Some(14.839_6_f64), f2: Some(2.568_38_f64) },
AtomicScatteringFactor { energy: 0.658_711_f64, f1: Some(14.559_6_f64), f2: Some(2.517_37_f64) },
AtomicScatteringFactor { energy: 0.669_365_f64, f1: Some(14.237_6_f64), f2: Some(2.467_41_f64) },
AtomicScatteringFactor { energy: 0.680_191_f64, f1: Some(13.865_1_f64), f2: Some(2.423_81_f64) },
AtomicScatteringFactor { energy: 0.691_193_f64, f1: Some(13.435_2_f64), f2: Some(2.385_06_f64) },
AtomicScatteringFactor { energy: 0.702_372_f64, f1: Some(12.932_6_f64), f2: Some(2.347_79_f64) },
AtomicScatteringFactor { energy: 0.713_733_f64, f1: Some(12.325_9_f64), f2: Some(2.306_98_f64) },
AtomicScatteringFactor { energy: 0.725_277_f64, f1: Some(11.564_7_f64), f2: Some(2.264_06_f64) },
AtomicScatteringFactor { energy: 0.737_008_f64, f1: Some(10.567_7_f64), f2: Some(2.221_89_f64) },
AtomicScatteringFactor { energy: 0.748_928_f64, f1: Some(9.163_23_f64), f2: Some(2.180_51_f64) },
AtomicScatteringFactor { energy: 0.761_042_f64, f1: Some(6.884_03_f64), f2: Some(2.139_89_f64) },
AtomicScatteringFactor { energy: 0.773_351_f64, f1: Some(1.200_92_f64), f2: Some(2.100_04_f64) },
AtomicScatteringFactor { energy: 0.778_f64, f1: Some(-16.669_7_f64), f2: Some(2.085_34_f64) },
AtomicScatteringFactor { energy: 0.778_2_f64, f1: Some(-16.657_4_f64), f2: Some(16.755_4_f64) },
AtomicScatteringFactor { energy: 0.785_859_f64, f1: Some(3.900_51_f64), f2: Some(16.566_5_f64) },
AtomicScatteringFactor { energy: 0.798_57_f64, f1: Some(8.677_26_f64), f2: Some(16.261_8_f64) },
AtomicScatteringFactor { energy: 0.811_486_f64, f1: Some(11.142_4_f64), f2: Some(15.962_8_f64) },
AtomicScatteringFactor { energy: 0.824_611_f64, f1: Some(12.829_f64), f2: Some(15.669_2_f64) },
AtomicScatteringFactor { energy: 0.837_949_f64, f1: Some(14.111_5_f64), f2: Some(15.380_9_f64) },
AtomicScatteringFactor { energy: 0.851_502_f64, f1: Some(15.144_7_f64), f2: Some(15.098_8_f64) },
AtomicScatteringFactor { energy: 0.865_274_f64, f1: Some(16.001_9_f64), f2: Some(14.799_1_f64) },
AtomicScatteringFactor { energy: 0.879_269_f64, f1: Some(16.697_5_f64), f2: Some(14.485_f64) },
AtomicScatteringFactor { energy: 0.893_491_f64, f1: Some(17.123_6_f64), f2: Some(14.165_f64) },
AtomicScatteringFactor { energy: 0.907_943_f64, f1: Some(17.357_f64), f2: Some(14.128_1_f64) },
AtomicScatteringFactor { energy: 0.922_628_f64, f1: Some(17.803_4_f64), f2: Some(14.397_9_f64) },
AtomicScatteringFactor { energy: 0.937_551_f64, f1: Some(18.511_8_f64), f2: Some(14.672_8_f64) },
AtomicScatteringFactor { energy: 0.952_715_f64, f1: Some(19.468_3_f64), f2: Some(14.630_3_f64) },
AtomicScatteringFactor { energy: 0.968_124_f64, f1: Some(20.257_1_f64), f2: Some(14.281_4_f64) },
AtomicScatteringFactor { energy: 0.983_783_f64, f1: Some(20.818_f64), f2: Some(13.941_2_f64) },
AtomicScatteringFactor { energy: 0.999_695_f64, f1: Some(21.302_2_f64), f2: Some(13.612_2_f64) },
AtomicScatteringFactor { energy: 1.015_86_f64, f1: Some(21.715_1_f64), f2: Some(13.294_3_f64) },
AtomicScatteringFactor { energy: 1.032_29_f64, f1: Some(22.092_f64), f2: Some(13.013_6_f64) },
AtomicScatteringFactor { energy: 1.048_99_f64, f1: Some(22.450_6_f64), f2: Some(12.734_f64) },
AtomicScatteringFactor { energy: 1.065_96_f64, f1: Some(22.780_2_f64), f2: Some(12.454_8_f64) },
AtomicScatteringFactor { energy: 1.083_2_f64, f1: Some(23.082_2_f64), f2: Some(12.181_9_f64) },
AtomicScatteringFactor { energy: 1.100_72_f64, f1: Some(23.361_7_f64), f2: Some(11.914_9_f64) },
AtomicScatteringFactor { energy: 1.118_52_f64, f1: Some(23.621_4_f64), f2: Some(11.653_9_f64) },
AtomicScatteringFactor { energy: 1.136_61_f64, f1: Some(23.863_1_f64), f2: Some(11.398_4_f64) },
AtomicScatteringFactor { energy: 1.155_f64, f1: Some(24.088_5_f64), f2: Some(11.148_6_f64) },
AtomicScatteringFactor { energy: 1.173_68_f64, f1: Some(24.299_4_f64), f2: Some(10.904_3_f64) },
AtomicScatteringFactor { energy: 1.192_66_f64, f1: Some(24.497_2_f64), f2: Some(10.665_3_f64) },
AtomicScatteringFactor { energy: 1.211_95_f64, f1: Some(24.683_f64), f2: Some(10.431_5_f64) },
AtomicScatteringFactor { energy: 1.231_55_f64, f1: Some(24.859_8_f64), f2: Some(10.202_9_f64) },
AtomicScatteringFactor { energy: 1.251_47_f64, f1: Some(25.027_f64), f2: Some(9.976_41_f64) },
AtomicScatteringFactor { energy: 1.271_72_f64, f1: Some(25.185_1_f64), f2: Some(9.751_68_f64) },
AtomicScatteringFactor { energy: 1.292_29_f64, f1: Some(25.331_3_f64), f2: Some(9.528_08_f64) },
AtomicScatteringFactor { energy: 1.313_19_f64, f1: Some(25.465_7_f64), f2: Some(9.309_75_f64) },
AtomicScatteringFactor { energy: 1.334_43_f64, f1: Some(25.591_f64), f2: Some(9.096_4_f64) },
AtomicScatteringFactor { energy: 1.356_01_f64, f1: Some(25.708_f64), f2: Some(8.887_87_f64) },
AtomicScatteringFactor { energy: 1.377_94_f64, f1: Some(25.817_4_f64), f2: Some(8.684_15_f64) },
AtomicScatteringFactor { energy: 1.400_23_f64, f1: Some(25.92_f64), f2: Some(8.485_11_f64) },
AtomicScatteringFactor { energy: 1.422_88_f64, f1: Some(26.016_4_f64), f2: Some(8.290_65_f64) },
AtomicScatteringFactor { energy: 1.445_89_f64, f1: Some(26.107_5_f64), f2: Some(8.100_59_f64) },
AtomicScatteringFactor { energy: 1.469_28_f64, f1: Some(26.193_8_f64), f2: Some(7.914_22_f64) },
AtomicScatteringFactor { energy: 1.493_04_f64, f1: Some(26.276_8_f64), f2: Some(7.731_12_f64) },
AtomicScatteringFactor { energy: 1.517_19_f64, f1: Some(26.353_9_f64), f2: Some(7.547_75_f64) },
AtomicScatteringFactor { energy: 1.541_73_f64, f1: Some(26.423_6_f64), f2: Some(7.368_08_f64) },
AtomicScatteringFactor { energy: 1.566_67_f64, f1: Some(26.487_7_f64), f2: Some(7.192_61_f64) },
AtomicScatteringFactor { energy: 1.592_01_f64, f1: Some(26.546_9_f64), f2: Some(7.021_42_f64) },
AtomicScatteringFactor { energy: 1.617_76_f64, f1: Some(26.601_9_f64), f2: Some(6.854_3_f64) },
AtomicScatteringFactor { energy: 1.643_92_f64, f1: Some(26.653_f64), f2: Some(6.691_18_f64) },
AtomicScatteringFactor { energy: 1.670_51_f64, f1: Some(26.700_9_f64), f2: Some(6.532_01_f64) },
AtomicScatteringFactor { energy: 1.697_53_f64, f1: Some(26.746_2_f64), f2: Some(6.376_59_f64) },
AtomicScatteringFactor { energy: 1.724_99_f64, f1: Some(26.790_8_f64), f2: Some(6.224_8_f64) },
AtomicScatteringFactor { energy: 1.752_89_f64, f1: Some(26.832_4_f64), f2: Some(6.071_19_f64) },
AtomicScatteringFactor { energy: 1.781_24_f64, f1: Some(26.869_4_f64), f2: Some(5.919_59_f64) },
AtomicScatteringFactor { energy: 1.810_05_f64, f1: Some(26.901_f64), f2: Some(5.770_49_f64) },
AtomicScatteringFactor { energy: 1.839_32_f64, f1: Some(26.928_6_f64), f2: Some(5.625_17_f64) },
AtomicScatteringFactor { energy: 1.869_07_f64, f1: Some(26.952_8_f64), f2: Some(5.483_47_f64) },
AtomicScatteringFactor { energy: 1.899_3_f64, f1: Some(26.974_1_f64), f2: Some(5.345_28_f64) },
AtomicScatteringFactor { energy: 1.930_02_f64, f1: Some(26.992_6_f64), f2: Some(5.210_58_f64) },
AtomicScatteringFactor { energy: 1.961_24_f64, f1: Some(27.008_9_f64), f2: Some(5.079_32_f64) },
AtomicScatteringFactor { energy: 1.992_96_f64, f1: Some(27.022_3_f64), f2: Some(4.951_48_f64) },
AtomicScatteringFactor { energy: 2.025_2_f64, f1: Some(27.036_1_f64), f2: Some(4.829_06_f64) },
AtomicScatteringFactor { energy: 2.057_95_f64, f1: Some(27.048_8_f64), f2: Some(4.704_87_f64) },
AtomicScatteringFactor { energy: 2.091_24_f64, f1: Some(27.058_5_f64), f2: Some(4.583_52_f64) },
AtomicScatteringFactor { energy: 2.125_06_f64, f1: Some(27.065_7_f64), f2: Some(4.464_56_f64) },
AtomicScatteringFactor { energy: 2.159_43_f64, f1: Some(27.070_5_f64), f2: Some(4.348_37_f64) },
AtomicScatteringFactor { energy: 2.194_36_f64, f1: Some(27.073_7_f64), f2: Some(4.235_09_f64) },
AtomicScatteringFactor { energy: 2.229_85_f64, f1: Some(27.075_1_f64), f2: Some(4.124_25_f64) },
AtomicScatteringFactor { energy: 2.265_92_f64, f1: Some(27.074_8_f64), f2: Some(4.015_92_f64) },
AtomicScatteringFactor { energy: 2.302_57_f64, f1: Some(27.072_9_f64), f2: Some(3.910_18_f64) },
AtomicScatteringFactor { energy: 2.339_81_f64, f1: Some(27.069_5_f64), f2: Some(3.806_84_f64) },
AtomicScatteringFactor { energy: 2.377_66_f64, f1: Some(27.064_7_f64), f2: Some(3.706_09_f64) },
AtomicScatteringFactor { energy: 2.416_11_f64, f1: Some(27.058_6_f64), f2: Some(3.607_55_f64) },
AtomicScatteringFactor { energy: 2.455_19_f64, f1: Some(27.051_3_f64), f2: Some(3.511_69_f64) },
AtomicScatteringFactor { energy: 2.494_9_f64, f1: Some(27.042_9_f64), f2: Some(3.417_89_f64) },
AtomicScatteringFactor { energy: 2.535_26_f64, f1: Some(27.033_3_f64), f2: Some(3.326_45_f64) },
AtomicScatteringFactor { energy: 2.576_26_f64, f1: Some(27.022_7_f64), f2: Some(3.237_28_f64) },
AtomicScatteringFactor { energy: 2.617_93_f64, f1: Some(27.011_2_f64), f2: Some(3.150_31_f64) },
AtomicScatteringFactor { energy: 2.660_27_f64, f1: Some(26.998_7_f64), f2: Some(3.065_47_f64) },
AtomicScatteringFactor { energy: 2.703_3_f64, f1: Some(26.985_4_f64), f2: Some(2.982_78_f64) },
AtomicScatteringFactor { energy: 2.747_03_f64, f1: Some(26.971_3_f64), f2: Some(2.902_13_f64) },
AtomicScatteringFactor { energy: 2.791_46_f64, f1: Some(26.956_4_f64), f2: Some(2.823_54_f64) },
AtomicScatteringFactor { energy: 2.836_61_f64, f1: Some(26.940_9_f64), f2: Some(2.746_95_f64) },
AtomicScatteringFactor { energy: 2.882_49_f64, f1: Some(26.924_7_f64), f2: Some(2.672_29_f64) },
AtomicScatteringFactor { energy: 2.929_11_f64, f1: Some(26.907_8_f64), f2: Some(2.599_56_f64) },
AtomicScatteringFactor { energy: 2.976_48_f64, f1: Some(26.890_4_f64), f2: Some(2.528_68_f64) },
AtomicScatteringFactor { energy: 3.024_63_f64, f1: Some(26.872_4_f64), f2: Some(2.459_62_f64) },
AtomicScatteringFactor { energy: 3.073_55_f64, f1: Some(26.853_9_f64), f2: Some(2.392_38_f64) },
AtomicScatteringFactor { energy: 3.123_26_f64, f1: Some(26.834_9_f64), f2: Some(2.326_86_f64) },
AtomicScatteringFactor { energy: 3.173_78_f64, f1: Some(26.815_5_f64), f2: Some(2.263_06_f64) },
AtomicScatteringFactor { energy: 3.225_11_f64, f1: Some(26.795_6_f64), f2: Some(2.200_96_f64) },
AtomicScatteringFactor { energy: 3.277_27_f64, f1: Some(26.775_3_f64), f2: Some(2.140_49_f64) },
AtomicScatteringFactor { energy: 3.330_28_f64, f1: Some(26.754_6_f64), f2: Some(2.081_6_f64) },
AtomicScatteringFactor { energy: 3.384_15_f64, f1: Some(26.733_6_f64), f2: Some(2.024_26_f64) },
AtomicScatteringFactor { energy: 3.438_88_f64, f1: Some(26.712_2_f64), f2: Some(1.968_45_f64) },
AtomicScatteringFactor { energy: 3.494_5_f64, f1: Some(26.690_4_f64), f2: Some(1.914_17_f64) },
AtomicScatteringFactor { energy: 3.551_02_f64, f1: Some(26.668_4_f64), f2: Some(1.861_31_f64) },
AtomicScatteringFactor { energy: 3.608_46_f64, f1: Some(26.646_f64), f2: Some(1.809_87_f64) },
AtomicScatteringFactor { energy: 3.666_82_f64, f1: Some(26.623_3_f64), f2: Some(1.759_83_f64) },
AtomicScatteringFactor { energy: 3.726_13_f64, f1: Some(26.600_4_f64), f2: Some(1.711_12_f64) },
AtomicScatteringFactor { energy: 3.786_4_f64, f1: Some(26.577_1_f64), f2: Some(1.663_73_f64) },
AtomicScatteringFactor { energy: 3.847_64_f64, f1: Some(26.553_5_f64), f2: Some(1.617_63_f64) },
AtomicScatteringFactor { energy: 3.909_87_f64, f1: Some(26.529_7_f64), f2: Some(1.572_78_f64) },
AtomicScatteringFactor { energy: 3.973_11_f64, f1: Some(26.505_5_f64), f2: Some(1.529_14_f64) },
AtomicScatteringFactor { energy: 4.037_38_f64, f1: Some(26.481_f64), f2: Some(1.486_72_f64) },
AtomicScatteringFactor { energy: 4.102_68_f64, f1: Some(26.456_3_f64), f2: Some(1.445_46_f64) },
AtomicScatteringFactor { energy: 4.169_03_f64, f1: Some(26.431_2_f64), f2: Some(1.405_33_f64) },
AtomicScatteringFactor { energy: 4.236_46_f64, f1: Some(26.405_8_f64), f2: Some(1.366_31_f64) },
AtomicScatteringFactor { energy: 4.304_98_f64, f1: Some(26.38_f64), f2: Some(1.328_38_f64) },
AtomicScatteringFactor { energy: 4.374_62_f64, f1: Some(26.353_9_f64), f2: Some(1.291_45_f64) },
AtomicScatteringFactor { energy: 4.445_37_f64, f1: Some(26.327_3_f64), f2: Some(1.255_6_f64) },
AtomicScatteringFactor { energy: 4.517_27_f64, f1: Some(26.300_4_f64), f2: Some(1.220_69_f64) },
AtomicScatteringFactor { energy: 4.590_33_f64, f1: Some(26.273_f64), f2: Some(1.186_82_f64) },
AtomicScatteringFactor { energy: 4.664_58_f64, f1: Some(26.245_1_f64), f2: Some(1.153_83_f64) },
AtomicScatteringFactor { energy: 4.740_03_f64, f1: Some(26.216_6_f64), f2: Some(1.121_78_f64) },
AtomicScatteringFactor { energy: 4.816_69_f64, f1: Some(26.187_6_f64), f2: Some(1.090_64_f64) },
AtomicScatteringFactor { energy: 4.894_6_f64, f1: Some(26.158_f64), f2: Some(1.060_35_f64) },
AtomicScatteringFactor { energy: 4.973_77_f64, f1: Some(26.127_6_f64), f2: Some(1.030_91_f64) },
AtomicScatteringFactor { energy: 5.054_21_f64, f1: Some(26.096_5_f64), f2: Some(1.002_35_f64) },
AtomicScatteringFactor { energy: 5.135_96_f64, f1: Some(26.064_6_f64), f2: Some(0.974_532_f64) },
AtomicScatteringFactor { energy: 5.219_03_f64, f1: Some(26.031_7_f64), f2: Some(0.947_518_f64) },
AtomicScatteringFactor { energy: 5.303_44_f64, f1: Some(25.997_8_f64), f2: Some(0.921_275_f64) },
AtomicScatteringFactor { energy: 5.389_22_f64, f1: Some(25.962_7_f64), f2: Some(0.895_781_f64) },
AtomicScatteringFactor { energy: 5.476_39_f64, f1: Some(25.926_3_f64), f2: Some(0.871_002_f64) },
AtomicScatteringFactor { energy: 5.564_97_f64, f1: Some(25.888_4_f64), f2: Some(0.846_905_f64) },
AtomicScatteringFactor { energy: 5.654_98_f64, f1: Some(25.848_9_f64), f2: Some(0.823_513_f64) },
AtomicScatteringFactor { energy: 5.746_44_f64, f1: Some(25.807_5_f64), f2: Some(0.800_73_f64) },
AtomicScatteringFactor { energy: 5.839_39_f64, f1: Some(25.763_9_f64), f2: Some(0.778_663_f64) },
AtomicScatteringFactor { energy: 5.933_83_f64, f1: Some(25.717_9_f64), f2: Some(0.757_205_f64) },
AtomicScatteringFactor { energy: 6.029_81_f64, f1: Some(25.669_1_f64), f2: Some(0.736_352_f64) },
AtomicScatteringFactor { energy: 6.127_33_f64, f1: Some(25.617_f64), f2: Some(0.716_096_f64) },
AtomicScatteringFactor { energy: 6.226_44_f64, f1: Some(25.561_f64), f2: Some(0.696_42_f64) },
AtomicScatteringFactor { energy: 6.327_15_f64, f1: Some(25.500_5_f64), f2: Some(0.677_309_f64) },
AtomicScatteringFactor { energy: 6.429_48_f64, f1: Some(25.434_5_f64), f2: Some(0.658_738_f64) },
AtomicScatteringFactor { energy: 6.533_48_f64, f1: Some(25.361_9_f64), f2: Some(0.640_705_f64) },
AtomicScatteringFactor { energy: 6.639_15_f64, f1: Some(25.281_3_f64), f2: Some(0.623_185_f64) },
AtomicScatteringFactor { energy: 6.746_54_f64, f1: Some(25.190_5_f64), f2: Some(0.606_17_f64) },
AtomicScatteringFactor { energy: 6.855_65_f64, f1: Some(25.086_8_f64), f2: Some(0.589_643_f64) },
AtomicScatteringFactor { energy: 6.966_54_f64, f1: Some(24.965_8_f64), f2: Some(0.573_591_f64) },
AtomicScatteringFactor { energy: 7.079_22_f64, f1: Some(24.821_2_f64), f2: Some(0.557_998_f64) },
AtomicScatteringFactor { energy: 7.193_72_f64, f1: Some(24.642_5_f64), f2: Some(0.542_853_f64) },
AtomicScatteringFactor { energy: 7.310_07_f64, f1: Some(24.410_3_f64), f2: Some(0.528_143_f64) },
AtomicScatteringFactor { energy: 7.428_31_f64, f1: Some(24.083_8_f64), f2: Some(0.513_866_f64) },
AtomicScatteringFactor { energy: 7.548_45_f64, f1: Some(23.548_6_f64), f2: Some(0.499_994_f64) },
AtomicScatteringFactor { energy: 7.670_54_f64, f1: Some(22.113_6_f64), f2: Some(0.486_51_f64) },
AtomicScatteringFactor { energy: 7.708_8_f64, f1: Some(15.879_5_f64), f2: Some(0.482_403_f64) },
AtomicScatteringFactor { energy: 7.709_f64, f1: Some(15.879_9_f64), f2: Some(3.793_98_f64) },
AtomicScatteringFactor { energy: 7.794_61_f64, f1: Some(23.064_9_f64), f2: Some(3.733_39_f64) },
AtomicScatteringFactor { energy: 7.920_68_f64, f1: Some(24.078_4_f64), f2: Some(3.647_08_f64) },
AtomicScatteringFactor { energy: 8.048_79_f64, f1: Some(24.621_3_f64), f2: Some(3.561_98_f64) },
AtomicScatteringFactor { energy: 8.178_98_f64, f1: Some(24.997_4_f64), f2: Some(3.478_1_f64) },
AtomicScatteringFactor { energy: 8.311_26_f64, f1: Some(25.285_7_f64), f2: Some(3.395_47_f64) },
AtomicScatteringFactor { energy: 8.445_69_f64, f1: Some(25.519_f64), f2: Some(3.314_28_f64) },
AtomicScatteringFactor { energy: 8.582_29_f64, f1: Some(25.714_3_f64), f2: Some(3.234_28_f64) },
AtomicScatteringFactor { energy: 8.721_11_f64, f1: Some(25.881_6_f64), f2: Some(3.155_69_f64) },
AtomicScatteringFactor { energy: 8.862_16_f64, f1: Some(26.027_2_f64), f2: Some(3.078_41_f64) },
AtomicScatteringFactor { energy: 9.005_5_f64, f1: Some(26.155_5_f64), f2: Some(3.002_55_f64) },
AtomicScatteringFactor { energy: 9.151_16_f64, f1: Some(26.269_6_f64), f2: Some(2.928_11_f64) },
AtomicScatteringFactor { energy: 9.299_17_f64, f1: Some(26.371_7_f64), f2: Some(2.854_96_f64) },
AtomicScatteringFactor { energy: 9.449_58_f64, f1: Some(26.463_7_f64), f2: Some(2.783_22_f64) },
AtomicScatteringFactor { energy: 9.602_42_f64, f1: Some(26.546_9_f64), f2: Some(2.712_87_f64) },
AtomicScatteringFactor { energy: 9.757_73_f64, f1: Some(26.622_3_f64), f2: Some(2.643_82_f64) },
AtomicScatteringFactor { energy: 9.915_55_f64, f1: Some(26.690_9_f64), f2: Some(2.576_31_f64) },
AtomicScatteringFactor { energy: 10.075_9_f64, f1: Some(26.753_6_f64), f2: Some(2.510_07_f64) },
AtomicScatteringFactor { energy: 10.238_9_f64, f1: Some(26.810_9_f64), f2: Some(2.445_21_f64) },
AtomicScatteringFactor { energy: 10.404_5_f64, f1: Some(26.863_3_f64), f2: Some(2.381_69_f64) },
AtomicScatteringFactor { energy: 10.572_8_f64, f1: Some(26.911_3_f64), f2: Some(2.319_52_f64) },
AtomicScatteringFactor { energy: 10.743_8_f64, f1: Some(26.955_3_f64), f2: Some(2.258_69_f64) },
AtomicScatteringFactor { energy: 10.917_6_f64, f1: Some(26.995_6_f64), f2: Some(2.199_18_f64) },
AtomicScatteringFactor { energy: 11.094_2_f64, f1: Some(27.032_6_f64), f2: Some(2.140_98_f64) },
AtomicScatteringFactor { energy: 11.273_6_f64, f1: Some(27.066_5_f64), f2: Some(2.084_07_f64) },
AtomicScatteringFactor { energy: 11.455_9_f64, f1: Some(27.097_6_f64), f2: Some(2.028_44_f64) },
AtomicScatteringFactor { energy: 11.641_2_f64, f1: Some(27.126_f64), f2: Some(1.974_08_f64) },
AtomicScatteringFactor { energy: 11.829_5_f64, f1: Some(27.152_f64), f2: Some(1.920_96_f64) },
AtomicScatteringFactor { energy: 12.020_8_f64, f1: Some(27.175_8_f64), f2: Some(1.869_07_f64) },
AtomicScatteringFactor { energy: 12.215_3_f64, f1: Some(27.197_5_f64), f2: Some(1.818_39_f64) },
AtomicScatteringFactor { energy: 12.412_8_f64, f1: Some(27.217_3_f64), f2: Some(1.768_91_f64) },
AtomicScatteringFactor { energy: 12.613_6_f64, f1: Some(27.235_3_f64), f2: Some(1.720_6_f64) },
AtomicScatteringFactor { energy: 12.817_6_f64, f1: Some(27.251_6_f64), f2: Some(1.673_45_f64) },
AtomicScatteringFactor { energy: 13.025_f64, f1: Some(27.266_4_f64), f2: Some(1.627_44_f64) },
AtomicScatteringFactor { energy: 13.235_6_f64, f1: Some(27.279_7_f64), f2: Some(1.582_55_f64) },
AtomicScatteringFactor { energy: 13.449_7_f64, f1: Some(27.291_6_f64), f2: Some(1.538_76_f64) },
AtomicScatteringFactor { energy: 13.667_2_f64, f1: Some(27.302_3_f64), f2: Some(1.496_04_f64) },
AtomicScatteringFactor { energy: 13.888_3_f64, f1: Some(27.311_8_f64), f2: Some(1.454_39_f64) },
AtomicScatteringFactor { energy: 14.112_9_f64, f1: Some(27.320_3_f64), f2: Some(1.413_78_f64) },
AtomicScatteringFactor { energy: 14.341_2_f64, f1: Some(27.327_6_f64), f2: Some(1.374_19_f64) },
AtomicScatteringFactor { energy: 14.573_1_f64, f1: Some(27.334_1_f64), f2: Some(1.335_6_f64) },
AtomicScatteringFactor { energy: 14.808_9_f64, f1: Some(27.339_6_f64), f2: Some(1.297_99_f64) },
AtomicScatteringFactor { energy: 15.048_4_f64, f1: Some(27.344_3_f64), f2: Some(1.261_34_f64) },
AtomicScatteringFactor { energy: 15.291_8_f64, f1: Some(27.348_2_f64), f2: Some(1.225_64_f64) },
AtomicScatteringFactor { energy: 15.539_1_f64, f1: Some(27.351_3_f64), f2: Some(1.190_85_f64) },
AtomicScatteringFactor { energy: 15.790_4_f64, f1: Some(27.353_8_f64), f2: Some(1.156_97_f64) },
AtomicScatteringFactor { energy: 16.045_8_f64, f1: Some(27.355_7_f64), f2: Some(1.123_98_f64) },
AtomicScatteringFactor { energy: 16.305_4_f64, f1: Some(27.356_9_f64), f2: Some(1.091_85_f64) },
AtomicScatteringFactor { energy: 16.569_1_f64, f1: Some(27.357_6_f64), f2: Some(1.060_56_f64) },
AtomicScatteringFactor { energy: 16.837_1_f64, f1: Some(27.357_8_f64), f2: Some(1.030_11_f64) },
AtomicScatteringFactor { energy: 17.109_4_f64, f1: Some(27.357_5_f64), f2: Some(1.000_46_f64) },
AtomicScatteringFactor { energy: 17.386_1_f64, f1: Some(27.356_8_f64), f2: Some(0.971_604_f64) },
AtomicScatteringFactor { energy: 17.667_4_f64, f1: Some(27.355_6_f64), f2: Some(0.943_521_f64) },
AtomicScatteringFactor { energy: 17.953_1_f64, f1: Some(27.354_1_f64), f2: Some(0.916_193_f64) },
AtomicScatteringFactor { energy: 18.243_5_f64, f1: Some(27.352_2_f64), f2: Some(0.889_602_f64) },
AtomicScatteringFactor { energy: 18.538_6_f64, f1: Some(27.35_f64), f2: Some(0.863_733_f64) },
AtomicScatteringFactor { energy: 18.838_4_f64, f1: Some(27.347_4_f64), f2: Some(0.838_566_f64) },
AtomicScatteringFactor { energy: 19.143_1_f64, f1: Some(27.344_6_f64), f2: Some(0.814_087_f64) },
AtomicScatteringFactor { energy: 19.452_7_f64, f1: Some(27.341_6_f64), f2: Some(0.790_277_f64) },
AtomicScatteringFactor { energy: 19.767_4_f64, f1: Some(27.338_2_f64), f2: Some(0.767_122_f64) },
AtomicScatteringFactor { energy: 20.087_1_f64, f1: Some(27.334_7_f64), f2: Some(0.744_605_f64) },
AtomicScatteringFactor { energy: 20.412_f64, f1: Some(27.331_f64), f2: Some(0.722_71_f64) },
AtomicScatteringFactor { energy: 20.742_1_f64, f1: Some(27.327_1_f64), f2: Some(0.701_422_f64) },
AtomicScatteringFactor { energy: 21.077_6_f64, f1: Some(27.323_f64), f2: Some(0.680_726_f64) },
AtomicScatteringFactor { energy: 21.418_5_f64, f1: Some(27.318_8_f64), f2: Some(0.660_608_f64) },
AtomicScatteringFactor { energy: 21.765_f64, f1: Some(27.314_4_f64), f2: Some(0.641_052_f64) },
AtomicScatteringFactor { energy: 22.117_f64, f1: Some(27.309_9_f64), f2: Some(0.622_045_f64) },
AtomicScatteringFactor { energy: 22.474_7_f64, f1: Some(27.305_3_f64), f2: Some(0.603_573_f64) },
AtomicScatteringFactor { energy: 22.838_2_f64, f1: Some(27.300_6_f64), f2: Some(0.585_621_f64) },
AtomicScatteringFactor { energy: 23.207_6_f64, f1: Some(27.295_8_f64), f2: Some(0.568_177_f64) },
AtomicScatteringFactor { energy: 23.583_f64, f1: Some(27.290_9_f64), f2: Some(0.551_227_f64) },
AtomicScatteringFactor { energy: 23.964_4_f64, f1: Some(27.286_f64), f2: Some(0.534_759_f64) },
AtomicScatteringFactor { energy: 24.352_f64, f1: Some(27.281_f64), f2: Some(0.518_759_f64) },
AtomicScatteringFactor { energy: 24.745_9_f64, f1: Some(27.276_f64), f2: Some(0.503_217_f64) },
AtomicScatteringFactor { energy: 25.146_2_f64, f1: Some(27.270_9_f64), f2: Some(0.488_119_f64) },
AtomicScatteringFactor { energy: 25.552_9_f64, f1: Some(27.265_8_f64), f2: Some(0.473_454_f64) },
AtomicScatteringFactor { energy: 25.966_2_f64, f1: Some(27.260_7_f64), f2: Some(0.459_21_f64) },
AtomicScatteringFactor { energy: 26.386_1_f64, f1: Some(27.255_5_f64), f2: Some(0.445_376_f64) },
AtomicScatteringFactor { energy: 26.812_9_f64, f1: Some(27.250_4_f64), f2: Some(0.431_942_f64) },
AtomicScatteringFactor { energy: 27.246_6_f64, f1: Some(27.245_2_f64), f2: Some(0.418_896_f64) },
AtomicScatteringFactor { energy: 27.687_3_f64, f1: Some(27.240_1_f64), f2: Some(0.406_228_f64) },
AtomicScatteringFactor { energy: 28.135_1_f64, f1: Some(27.234_9_f64), f2: Some(0.393_927_f64) },
AtomicScatteringFactor { energy: 28.590_2_f64, f1: Some(27.229_8_f64), f2: Some(0.381_985_f64) },
AtomicScatteringFactor { energy: 29.052_6_f64, f1: Some(27.224_8_f64), f2: Some(0.370_39_f64) },
AtomicScatteringFactor { energy: 29.522_5_f64, f1: Some(27.220_8_f64), f2: Some(0.359_134_f64) },
AtomicScatteringFactor { energy: 30.0_f64, f1: Some(27.211_3_f64), f2: Some(0.348_207_f64) },
]
}),
neutron_scattering: Some(NeutronScatteringFactor {
b_c: UncertainFloat::new(2.49_f64, 0.02_f64),
b_p: Some(UncertainFloat::new(-9.21_f64, 0.10_f64)),
b_m: Some(UncertainFloat::new(3.58_f64, 0.10_f64)),
bound_coherent_scattering_xs: Some(UncertainFloat::new(0.779_f64, 0.013_f64)),
bound_incoherent_scattering_xs: Some(UncertainFloat::new(4.8_f64, 0.3_f64)),
total_bound_scattering_xs: Some(UncertainFloat::new(5.6_f64, 0.3_f64)),
absorption_xs: Some(UncertainFloat::new(37.18_f64, 0.06_f64)),
}),
isotopes: vec![
Isotope {
mass_number: 48,
mass: UncertainFloat::new(48.001_76_f64, 0.000_43_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 49,
mass: UncertainFloat::new(48.989_72_f64, 0.000_28_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 50,
mass: UncertainFloat::new(49.981_54_f64, 0.000_18_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 51,
mass: UncertainFloat::new(50.970_72_f64, 0.000_16_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 52,
mass: UncertainFloat::new(51.963_59_f64, 0.000_70_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 53,
mass: UncertainFloat::new(52.954_225_f64, 0.000_019_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 54,
mass: UncertainFloat::new(53.948_464_1_f64, 0.000_001_4_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 55,
mass: UncertainFloat::new(54.942_003_1_f64, 0.000_001_5_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 56,
mass: UncertainFloat::new(55.939_843_9_f64, 0.000_002_6_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 57,
mass: UncertainFloat::new(56.936_296_2_f64, 0.000_001_5_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 58,
mass: UncertainFloat::new(57.935_757_6_f64, 0.000_001_9_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 59,
mass: UncertainFloat::new(58.933_200_2_f64, 0.000_001_5_f64),
abundance: UncertainFloat::new(100.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 60,
mass: UncertainFloat::new(59.933_822_2_f64, 0.000_001_5_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 61,
mass: UncertainFloat::new(60.932_479_4_f64, 0.000_001_7_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 62,
mass: UncertainFloat::new(61.934_054_f64, 0.000_022_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 63,
mass: UncertainFloat::new(62.933_615_f64, 0.000_022_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 64,
mass: UncertainFloat::new(63.935_814_f64, 0.000_022_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 65,
mass: UncertainFloat::new(64.936_485_f64, 0.000_014_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 66,
mass: UncertainFloat::new(65.939_83_f64, 0.000_29_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 67,
mass: UncertainFloat::new(66.940_61_f64, 0.000_30_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 68,
mass: UncertainFloat::new(67.944_36_f64, 0.000_35_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 69,
mass: UncertainFloat::new(68.945_2_f64, 0.004_0_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 70,
mass: UncertainFloat::new(69.949_81_f64, 0.000_75_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 71,
mass: UncertainFloat::new(70.951_73_f64, 0.000_86_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
Isotope {
mass_number: 72,
mass: UncertainFloat::new(71.956_41_f64, 0.000_86_f64),
abundance: UncertainFloat::new(0.0, 0.0),
xray_scattering: None,
neutron_scattering: None
},
]
}
}
| 91.228814 | 116 | 0.627775 |
33c89bc1c67524bc5f4415cc1efc553ecfef7bd4 | 810 | // disable console on windows for release builds
#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]
#[cfg(target_arch = "wasm32")]
use bevy_webgl2;
use bevy::prelude::{App, ClearColor, Color, WindowDescriptor};
use bevy::DefaultPlugins;
use game_plugin::GamePlugin;
fn main() {
let mut app = App::new();
app
// .insert_resource(Msaa { samples: 4 })
.insert_resource(ClearColor(Color::rgb(0.4, 0.4, 0.4)))
.insert_resource(WindowDescriptor {
width: 800.,
height: 600.,
title: "Bevy game".to_string(), // ToDo
..Default::default()
})
.add_plugins(DefaultPlugins)
.add_plugin(GamePlugin);
#[cfg(target_arch = "wasm32")]
app.add_plugin(bevy_webgl2::WebGL2Plugin);
app.run();
}
| 27 | 66 | 0.619753 |
c14dd0471f91e4505564ab5a87a66fd166e5252d | 1,924 | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Example from lkuper's intern talk, August 2012 -- now with static
// methods!
trait Equal: Sized {
    fn is_eq(a: Self, b: Self) -> bool;
}

#[derive(Clone, Copy)]
enum Color { Cyan, Magenta, Yellow, Black }

impl Equal for Color {
    fn is_eq(a: Color, b: Color) -> bool {
        match (a, b) {
            (Color::Cyan, Color::Cyan) => true,
            (Color::Magenta, Color::Magenta) => true,
            (Color::Yellow, Color::Yellow) => true,
            (Color::Black, Color::Black) => true,
            _ => false,
        }
    }
}

enum ColorTree {
    Leaf(Color),
    Branch(Box<ColorTree>, Box<ColorTree>),
}

impl Equal for ColorTree {
    fn is_eq(a: ColorTree, b: ColorTree) -> bool {
        match (a, b) {
            (ColorTree::Leaf(x), ColorTree::Leaf(y)) => Equal::is_eq(x, y),
            (ColorTree::Branch(l1, r1), ColorTree::Branch(l2, r2)) => {
                Equal::is_eq(*l1, *l2) && Equal::is_eq(*r1, *r2)
            }
            _ => false,
        }
    }
}

pub fn main() {
    assert!(Equal::is_eq(Color::Cyan, Color::Cyan));
    assert!(Equal::is_eq(Color::Magenta, Color::Magenta));
    assert!(!Equal::is_eq(Color::Cyan, Color::Yellow));
    assert!(!Equal::is_eq(Color::Magenta, Color::Cyan));
    assert!(Equal::is_eq(ColorTree::Leaf(Color::Cyan), ColorTree::Leaf(Color::Cyan)));
    assert!(!Equal::is_eq(ColorTree::Leaf(Color::Cyan), ColorTree::Leaf(Color::Yellow)));
    assert!(Equal::is_eq(
        ColorTree::Branch(Box::new(ColorTree::Leaf(Color::Magenta)), Box::new(ColorTree::Leaf(Color::Cyan))),
        ColorTree::Branch(Box::new(ColorTree::Leaf(Color::Magenta)), Box::new(ColorTree::Leaf(Color::Cyan)))
    ));
    assert!(!Equal::is_eq(
        ColorTree::Branch(Box::new(ColorTree::Leaf(Color::Magenta)), Box::new(ColorTree::Leaf(Color::Cyan))),
        ColorTree::Branch(Box::new(ColorTree::Leaf(Color::Magenta)), Box::new(ColorTree::Leaf(Color::Magenta)))
    ));
    println!("Assertions all succeeded!");
}
| 29.151515 | 68 | 0.567568 |
72652897b4538e2c7ea16d78265dba090f281914 | 12,922 | // This file is generated by rust-protobuf 2.6.2. Do not edit
// @generated
// https://github.com/Manishearth/rust-clippy/issues/702
#![allow(unknown_lints)]
#![allow(clippy)]
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(box_pointers)]
#![allow(dead_code)]
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(trivial_casts)]
#![allow(unsafe_code)]
#![allow(unused_imports)]
#![allow(unused_results)]
use protobuf::Message as Message_imported_for_functions;
use protobuf::ProtobufEnum as ProtobufEnum_imported_for_functions;
#[derive(PartialEq,Clone,Default)]
pub struct Chat {
// message fields
pub query: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a Chat {
fn default() -> &'a Chat {
<Chat as ::protobuf::Message>::default_instance()
}
}
impl Chat {
pub fn new() -> Chat {
::std::default::Default::default()
}
// string query = 1;
pub fn get_query(&self) -> &str {
&self.query
}
pub fn clear_query(&mut self) {
self.query.clear();
}
// Param is passed by value, moved
pub fn set_query(&mut self, v: ::std::string::String) {
self.query = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_query(&mut self) -> &mut ::std::string::String {
&mut self.query
}
// Take field
pub fn take_query(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.query, ::std::string::String::new())
}
}
impl ::protobuf::Message for Chat {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.query)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.query.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.query);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> {
if !self.query.is_empty() {
os.write_string(1, &self.query)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &::std::any::Any {
self as &::std::any::Any
}
fn as_any_mut(&mut self) -> &mut ::std::any::Any {
self as &mut ::std::any::Any
}
fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> Chat {
Chat::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::reflect::MessageDescriptor,
};
unsafe {
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"query",
|m: &Chat| { &m.query },
|m: &mut Chat| { &mut m.query },
));
::protobuf::reflect::MessageDescriptor::new::<Chat>(
"Chat",
fields,
file_descriptor_proto()
)
})
}
}
fn default_instance() -> &'static Chat {
static mut instance: ::protobuf::lazy::Lazy<Chat> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const Chat,
};
unsafe {
instance.get(Chat::new)
}
}
}
impl ::protobuf::Clear for Chat {
fn clear(&mut self) {
self.query.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for Chat {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for Chat {
fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef {
::protobuf::reflect::ProtobufValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct ChatReply {
// message fields
pub result: ::std::string::String,
pub info: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a ChatReply {
fn default() -> &'a ChatReply {
<ChatReply as ::protobuf::Message>::default_instance()
}
}
impl ChatReply {
pub fn new() -> ChatReply {
::std::default::Default::default()
}
// string result = 1;
pub fn get_result(&self) -> &str {
&self.result
}
pub fn clear_result(&mut self) {
self.result.clear();
}
// Param is passed by value, moved
pub fn set_result(&mut self, v: ::std::string::String) {
self.result = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_result(&mut self) -> &mut ::std::string::String {
&mut self.result
}
// Take field
pub fn take_result(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.result, ::std::string::String::new())
}
// string info = 2;
pub fn get_info(&self) -> &str {
&self.info
}
pub fn clear_info(&mut self) {
self.info.clear();
}
// Param is passed by value, moved
pub fn set_info(&mut self, v: ::std::string::String) {
self.info = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_info(&mut self) -> &mut ::std::string::String {
&mut self.info
}
// Take field
pub fn take_info(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.info, ::std::string::String::new())
}
}
impl ::protobuf::Message for ChatReply {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.result)?;
},
2 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.info)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.result.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.result);
}
if !self.info.is_empty() {
my_size += ::protobuf::rt::string_size(2, &self.info);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream) -> ::protobuf::ProtobufResult<()> {
if !self.result.is_empty() {
os.write_string(1, &self.result)?;
}
if !self.info.is_empty() {
os.write_string(2, &self.info)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &::std::any::Any {
self as &::std::any::Any
}
fn as_any_mut(&mut self) -> &mut ::std::any::Any {
self as &mut ::std::any::Any
}
fn into_any(self: Box<Self>) -> ::std::boxed::Box<::std::any::Any> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> ChatReply {
ChatReply::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::reflect::MessageDescriptor,
};
unsafe {
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"result",
|m: &ChatReply| { &m.result },
|m: &mut ChatReply| { &mut m.result },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"info",
|m: &ChatReply| { &m.info },
|m: &mut ChatReply| { &mut m.info },
));
::protobuf::reflect::MessageDescriptor::new::<ChatReply>(
"ChatReply",
fields,
file_descriptor_proto()
)
})
}
}
fn default_instance() -> &'static ChatReply {
static mut instance: ::protobuf::lazy::Lazy<ChatReply> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ChatReply,
};
unsafe {
instance.get(ChatReply::new)
}
}
}
impl ::protobuf::Clear for ChatReply {
fn clear(&mut self) {
self.result.clear();
self.info.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for ChatReply {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for ChatReply {
fn as_ref(&self) -> ::protobuf::reflect::ProtobufValueRef {
::protobuf::reflect::ProtobufValueRef::Message(self)
}
}
static file_descriptor_proto_data: &'static [u8] = b"\
\n\ngame.proto\"\x1c\n\x04Chat\x12\x14\n\x05query\x18\x01\x20\x01(\tR\
\x05query\"7\n\tChatReply\x12\x16\n\x06result\x18\x01\x20\x01(\tR\x06res\
ult\x12\x12\n\x04info\x18\x02\x20\x01(\tR\x04infob\x06proto3\
";
static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy {
lock: ::protobuf::lazy::ONCE_INIT,
ptr: 0 as *const ::protobuf::descriptor::FileDescriptorProto,
};
fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto {
::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap()
}
pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto {
unsafe {
file_descriptor_proto_lazy.get(|| {
parse_descriptor_proto()
})
}
}
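
// Usage sketch (not part of the generated output above): round-tripping a
// `Chat` message with the rust-protobuf 2.x `Message` API, shown here as a
// comment so the generated file itself stays untouched.
//
//     let mut chat = Chat::new();
//     chat.set_query("hello".to_string());
//     let bytes = chat.write_to_bytes().unwrap();
//     let decoded = protobuf::parse_from_bytes::<Chat>(&bytes).unwrap();
//     assert_eq!(decoded.get_query(), "hello");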
| 30.333333 | 133 | 0.565083 |
d976ef77a778810c48ff25dd4ddb34b327093f5f | 24,577 | //! Image manipulation effects in HSL, LCh and HSV.
extern crate image;
extern crate rand;
use image::{GenericImageView};
use palette::{Hsl, Lch, Shade, Pixel, Saturate, Srgba, Hue, Hsv};
use crate::{PhotonImage, Rgb, helpers};
extern crate wasm_bindgen;
use wasm_bindgen::prelude::*;
/// Apply gamma correction.
// #[wasm_bindgen]
// pub fn gamma_correction(mut photon_image: &mut PhotonImage, red: f32, green: f32, blue: f32) {
// let img = helpers::dyn_image_from_raw(&photon_image);
// let (width, height) = img.dimensions();
// let mut img = img.to_rgba();
// // Initialize gamma arrays
// let mut gammaR: Vec<u8> = vec![];
// let mut gammaG: Vec<u8> = vec![];
// let mut gammaB: Vec<u8> = vec![];
// let MAX_VALUE_INT = 255;
// let MAX_VALUE_FLT = 255.0;
// let REVERSE = 1.0;
// // Set values within gamma arrays
// for i in 0..256 {
// gammaR[i] = min(MAX_VALUE_INT, ((MAX_VALUE_FLT * ((i as f32 / MAX_VALUE_FLT) as u32).powf(REVERSE / red) + 0.5 ) as u8));
// gammaG[i] = min(MAX_VALUE_INT, ((MAX_VALUE_FLT * ((i as f32 / MAX_VALUE_FLT) as u32).powf(REVERSE / green) + 0.5 ) as u8);
// gammaB[i] = min(MAX_VALUE_INT, ((MAX_VALUE_FLT * ((i as f32 / MAX_VALUE_FLT) as u32).powf(REVERSE / blue) + 0.5 ) as u8);
// }
// for x in 0..width {
// for y in 0..height {
// let px_data = img.get_pixel(x, y).data;
// let r_val = px_data[0];
// let g_val = px_data[1];
// let b_val = px_data[2];
// px_data[0] = gammaR[r_val as usize];
// px_data[1] = gammaG[g_val as usize];
// px_data[2] = gammaB[b_val as usize];
// img.put_pixel(x, y, px);
// }
// }
// photon_image.raw_pixels = img.to_vec();
// }
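
// The commented-out draft above cannot compile: the lookup tables are indexed
// while still empty, several parentheses are unbalanced, and the normalised
// value is cast to `u32` before `powf`, which truncates it to 0 or 1.
// Below is a minimal working sketch of the same per-channel lookup-table
// approach, using only the `PhotonImage`/`helpers` API already used elsewhere
// in this file. It is an illustration, not photon's official implementation.
#[wasm_bindgen]
pub fn gamma_correction(photon_image: &mut PhotonImage, red: f32, green: f32, blue: f32) {
    let img = helpers::dyn_image_from_raw(&photon_image);
    let (width, height) = img.dimensions();
    let mut img = img.to_rgba();
    // Build one 256-entry lookup table per channel: v -> 255 * (v / 255)^(1 / gamma).
    let build_table = |gamma: f32| -> Vec<u8> {
        (0..256)
            .map(|i| {
                let normalised = i as f32 / 255.0;
                (255.0 * normalised.powf(1.0 / gamma) + 0.5).min(255.0) as u8
            })
            .collect()
    };
    let gamma_r = build_table(red);
    let gamma_g = build_table(green);
    let gamma_b = build_table(blue);
    for x in 0..width {
        for y in 0..height {
            let mut px = *img.get_pixel(x, y);
            px.data[0] = gamma_r[px.data[0] as usize];
            px.data[1] = gamma_g[px.data[1] as usize];
            px.data[2] = gamma_b[px.data[2] as usize];
            img.put_pixel(x, y, px);
        }
    }
    photon_image.raw_pixels = img.to_vec();
}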
/// Image manipulation effects in the LCh colour space
///
/// Effects include:
/// * **saturate** - Saturation increase.
/// * **desaturate** - Desaturate the image.
/// * **shift_hue** - Hue rotation by a specified number of degrees.
/// * **darken** - Decrease the brightness.
/// * **lighten** - Increase the brightness.
///
/// # Arguments
/// * `photon_image` - A PhotonImage.
/// * `mode` - The effect desired to be applied. Choose from: `saturate`, `desaturate`, `shift_hue`, `darken`, `lighten`
/// * `amt` - A float value from 0 to 1 which represents the amount the effect should be increased by.
/// # Example
/// ```
/// // For example to increase the saturation by 10%:
/// use photon::colour_spaces::lch;
///
/// // Open the image. A PhotonImage is returned.
/// let mut img: PhotonImage = open_image("images/flowers.PNG");
///
/// lch(&mut img, "saturate", 0.1);
/// ```
#[wasm_bindgen]
pub fn lch(photon_image: &mut PhotonImage, mode: &str, amt: f32) {
let img = helpers::dyn_image_from_raw(&photon_image);
let (width, height) = img.dimensions();
let mut img = img.to_rgba();
for x in 0..width {
for y in 0..height {
let px_data = img.get_pixel(x, y).data;
let lch_colour: Lch = Srgba::from_raw(&px_data)
.into_format()
.into_linear()
.into();
let new_color = match mode {
// Match a single value
"desaturate" => lch_colour.desaturate(amt),
"saturate" => lch_colour.saturate(amt),
"lighten" => lch_colour.lighten(amt),
"darken" => lch_colour.darken(amt),
"shift_hue" => lch_colour.shift_hue(amt * 360.0),
_ => lch_colour.saturate(amt),
};
img.put_pixel(x, y, image::Rgba {
data: Srgba::from_linear(new_color.into()).into_format().into_raw()
});
}
}
photon_image.raw_pixels = img.to_vec();
}
/// Image manipulation effects in the HSL colour space.
///
/// Effects include:
/// * **saturate** - Saturation increase.
/// * **desaturate** - Desaturate the image.
/// * **shift_hue** - Hue rotation by a specified number of degrees.
/// * **darken** - Decrease the brightness.
/// * **lighten** - Increase the brightness.
///
/// # Arguments
/// * `photon_image` - A PhotonImage.
/// * `mode` - The effect desired to be applied. Choose from: `saturate`, `desaturate`, `shift_hue`, `darken`, `lighten`
/// * `amt` - A float value from 0 to 1 which represents the amount the effect should be increased by.
/// # Example
/// ```
/// // For example to increase the saturation by 10%:
/// use photon::colour_spaces::hsl;
///
/// // Open the image. A PhotonImage is returned.
/// let mut img: PhotonImage = open_image("images/flowers.PNG");
///
/// hsl(&mut img, "saturate", 0.1);
/// ```
#[wasm_bindgen]
pub fn hsl(photon_image: &mut PhotonImage, mode: &str, amt: f32) {
// The function logic is kept separate from other colour spaces for now,
// since other HSL-specific logic may be implemented here, which isn't available in other colour spaces
let mut img = helpers::dyn_image_from_raw(&photon_image).to_rgba();
let (width, height) = img.dimensions();
for x in 0..width {
for y in 0..height {
let px_data = img.get_pixel(x, y).data;
let colour = Srgba::from_raw(&px_data).into_format();
let hsl_colour = Hsl::from(colour);
let new_color = match mode {
// Match a single value
"desaturate" => hsl_colour.desaturate(amt),
"saturate" => hsl_colour.saturate(amt),
"lighten" => hsl_colour.lighten(amt),
"darken" => hsl_colour.darken(amt),
"shift_hue" => hsl_colour.shift_hue(amt * 360.0),
_ => hsl_colour.saturate(amt),
};
img.put_pixel(x, y, image::Rgba {
data: Srgba::from_linear(new_color.into()).into_format().into_raw()
});
}
}
photon_image.raw_pixels = img.to_vec();
}
/// Image manipulation in the HSV colour space.
///
/// Effects include:
/// * **saturate** - Saturation increase.
/// * **desaturate** - Desaturate the image.
/// * **shift_hue** - Hue rotation by a specified number of degrees.
/// * **darken** - Decrease the brightness.
/// * **lighten** - Increase the brightness.
///
/// # Arguments
/// * `photon_image` - A PhotonImage.
/// * `mode` - The effect desired to be applied. Choose from: `saturate`, `desaturate`, `shift_hue`, `darken`, `lighten`
/// * `amt` - A float value from 0 to 1 which represents the amount the effect should be increased by.
///
/// # Example
/// ```
/// // For example to increase the saturation by 10%:
/// use photon::colour_spaces::hsv;
///
/// // Open the image. A PhotonImage is returned.
/// let mut img: PhotonImage = open_image("images/flowers.PNG");
///
/// hsv(&mut img, "saturate", 0.1);
/// ```
#[wasm_bindgen]
pub fn hsv(photon_image: &mut PhotonImage, mode: &str, amt: f32) {
let img = helpers::dyn_image_from_raw(&photon_image);
let mut img = img.to_rgba();
let (width, height) = img.dimensions();
for x in 0..width {
for y in 0..height {
let px_data = img.get_pixel(x, y).data;
let color = Srgba::from_raw(&px_data).into_format();
let hsv_colour = Hsv::from(color);
let new_color = match mode {
// Match a single value
"desaturate" => hsv_colour.desaturate(amt),
"saturate" => hsv_colour.saturate(amt),
"lighten" => hsv_colour.lighten(amt),
"darken" => hsv_colour.darken(amt),
"shift_hue" => hsv_colour.shift_hue(amt * 360.0),
_ => hsv_colour.saturate(amt),
};
img.put_pixel(x, y, image::Rgba {
data: Srgba::from_linear(new_color.into()).into_format().into_raw()
});
}
}
photon_image.raw_pixels = img.to_vec();
}
/// Shift hue by a specified number of degrees in the HSL colour space.
/// # Arguments
/// * `img` - A PhotonImage.
/// * `degrees` - The number of degrees to rotate the hue by.
///
/// # Example
/// ```
/// // For example to hue rotate/shift the hue by 120 degrees in the HSL colour space:
/// use photon::colour_spaces::hue_rotate_hsl;
///
/// // Open the image. A PhotonImage is returned.
/// let mut img: PhotonImage = open_image("images/flowers.PNG");
///
/// hue_rotate_hsl(&mut img, 120.0);
/// ```
#[wasm_bindgen]
pub fn hue_rotate_hsl(img: &mut PhotonImage, degrees: f32) {
    // `hsl` expects a 0-to-1 fraction and scales it by 360 internally,
    // so convert from degrees here.
    hsl(img, "shift_hue", degrees / 360.0);
}
/// Shift hue by a specified number of degrees in the HSV colour space.
/// # Arguments
/// * `img` - A PhotonImage.
/// * `degrees` - The number of degrees to rotate the hue by.
///
/// # Example
/// ```
/// // For example to hue rotate/shift the hue by 120 degrees in the HSV colour space:
/// use photon::colour_spaces::hue_rotate_hsv;
///
/// // Open the image. A PhotonImage is returned.
/// let mut img: PhotonImage = open_image("images/flowers.PNG");
///
/// hue_rotate_hsv(&mut img, 120.0);
/// ```
#[wasm_bindgen]
pub fn hue_rotate_hsv(img: &mut PhotonImage, degrees: f32) {
    // `hsv` expects a 0-to-1 fraction and scales it by 360 internally,
    // so convert from degrees here.
    hsv(img, "shift_hue", degrees / 360.0);
}
/// Shift hue by a specified number of degrees in the LCh colour space.
/// # Arguments
/// * `img` - A PhotonImage.
/// * `degrees` - The number of degrees to rotate the hue by.
///
/// # Example
/// ```
/// // For example to hue rotate/shift the hue by 120 degrees in the LCh colour space:
/// use photon::colour_spaces::hue_rotate_lch;
///
/// // Open the image. A PhotonImage is returned.
/// let mut img: PhotonImage = open_image("images/flowers.PNG");
///
/// hue_rotate_lch(&mut img, 120.0);
/// ```
#[wasm_bindgen]
pub fn hue_rotate_lch(img: &mut PhotonImage, degrees: f32) {
    // `lch` expects a 0-to-1 fraction and scales it by 360 internally,
    // so convert from degrees here.
    lch(img, "shift_hue", degrees / 360.0);
}
/// Increase the image's saturation by converting each pixel's colour to the HSL colour space
/// and increasing the colour's saturation.
/// # Arguments
/// * `img` - A PhotonImage.
/// * `level` - Float value from 0 to 1 representing the level to which to increase the saturation by.
/// The `level` must be from 0 to 1 in floating-point, `f32` format.
/// Increasing saturation by 80% would be represented by a `level` of 0.8
///
/// # Example
/// ```
/// // For example to increase saturation by 10% in the HSL colour space:
/// use photon::colour_spaces::saturate_hsl;
///
/// // Open the image. A PhotonImage is returned.
/// let mut img: PhotonImage = open_image("images/flowers.PNG");
///
/// saturate_hsl(&mut img, 0.1);
/// ```
#[wasm_bindgen]
pub fn saturate_hsl(img: &mut PhotonImage, level: f32) {
    hsl(img, "saturate", level);
}
/// Increase the image's saturation in the LCh colour space.
/// # Arguments
/// * `img` - A PhotonImage.
/// * `level` - Float value from 0 to 1 representing the level to which to increase the saturation by.
/// The `level` must be from 0 to 1 in floating-point, `f32` format.
/// Increasing saturation by 80% would be represented by a `level` of 0.8
///
/// # Example
/// ```
/// // For example to increase saturation by 40% in the Lch colour space:
/// use photon::colour_spaces::saturate_lch;
///
/// // Open the image. A PhotonImage is returned.
/// let mut img: PhotonImage = open_image("images/flowers.PNG");
///
/// saturate_lch(&mut img, 0.4);
/// ```
#[wasm_bindgen]
pub fn saturate_lch(img: &mut PhotonImage, level: f32) {
    lch(img, "saturate", level);
}
/// Increase the image's saturation in the HSV colour space.
/// # Arguments
/// * `img` - A PhotonImage.
/// * `level` - Float value from 0 to 1 representing the level by which to increase the saturation by.
/// The `level` must be from 0 to 1 in floating-point, `f32` format.
/// Increasing saturation by 80% would be represented by a `level` of 0.8
///
/// # Example
/// ```
/// // For example to increase saturation by 30% in the HSV colour space:
/// use photon::colour_spaces::saturate_hsv;
///
/// // Open the image. A PhotonImage is returned.
/// let mut img: PhotonImage = open_image("images/flowers.PNG");
///
/// saturate_hsv(&mut img, 0.3);
/// ```
#[wasm_bindgen]
pub fn saturate_hsv(img: &mut PhotonImage, level: f32) {
    hsv(img, "saturate", level);
}
/// Lighten an image by a specified amount in the LCh colour space.
///
/// # Arguments
/// * `img` - A PhotonImage.
/// * `level` - Float value from 0 to 1 representing the level to which to lighten the image by.
/// The `level` must be from 0 to 1 in floating-point, `f32` format.
/// Lightening by 80% would be represented by a `level` of 0.8
///
/// # Example
/// ```
/// // For example to lighten an image by 10% in the LCh colour space:
/// use photon::colour_spaces::lighten_lch;
///
/// // Open the image. A PhotonImage is returned.
/// let mut img: PhotonImage = open_image("images/flowers.PNG");
///
/// lighten_lch(&mut img, 0.1);
/// ```
#[wasm_bindgen]
pub fn lighten_lch(img: &mut PhotonImage, level: f32) {
    lch(img, "lighten", level);
}
/// Lighten an image by a specified amount in the HSL colour space.
/// # Arguments
/// * `img` - A PhotonImage.
/// * `level` - Float value from 0 to 1 representing the level to which to lighten the image by.
/// The `level` must be from 0 to 1 in floating-point, `f32` format.
/// Lightening by 80% would be represented by a `level` of 0.8
///
/// # Example
/// ```
/// // For example to lighten an image by 10% in the HSL colour space:
/// use photon::colour_spaces::lighten_hsl;
///
/// // Open the image. A PhotonImage is returned.
/// let mut img: PhotonImage = open_image("images/flowers.PNG");
///
/// lighten_hsl(&mut img, 0.1);
/// ```
#[wasm_bindgen]
pub fn lighten_hsl(img: &mut PhotonImage, level: f32) {
    hsl(img, "lighten", level);
}
/// Lighten an image by a specified amount in the HSV colour space.
///
/// # Arguments
/// * `img` - A PhotonImage.
/// * `level` - Float value from 0 to 1 representing the level to which to lighten the image by.
/// The `level` must be from 0 to 1 in floating-point, `f32` format.
/// Lightening by 80% would be represented by a `level` of 0.8
///
/// # Example
/// ```
/// // For example to lighten an image by 10% in the HSV colour space:
/// use photon::colour_spaces::lighten_hsv;
///
/// // Open the image. A PhotonImage is returned.
/// let mut img: PhotonImage = open_image("images/flowers.PNG");
///
/// lighten_hsv(&mut img, 0.1);
/// ```
#[wasm_bindgen]
pub fn lighten_hsv(img: &mut PhotonImage, level: f32) {
    hsv(img, "lighten", level);
}
/// Darken the image by a specified amount in the LCh colour space.
///
/// # Arguments
/// * `img` - A PhotonImage.
/// * `level` - Float value from 0 to 1 representing the level to which to darken the image by.
/// The `level` must be from 0 to 1 in floating-point, `f32` format.
/// Darkening by 80% would be represented by a `level` of 0.8
///
/// # Example
/// ```
/// // For example to darken an image by 10% in the LCh colour space:
/// use photon::colour_spaces::darken_lch;
///
/// // Open the image. A PhotonImage is returned.
/// let mut img: PhotonImage = open_image("images/flowers.PNG");
///
/// darken_lch(&mut img, 0.1);
/// ```
#[wasm_bindgen]
pub fn darken_lch(img: &mut PhotonImage, level: f32) {
    lch(img, "darken", level);
}
/// Darken the image by a specified amount in the HSL colour space.
///
/// # Arguments
/// * `img` - A PhotonImage.
/// * `level` - Float value from 0 to 1 representing the level to which to darken the image by.
/// The `level` must be from 0 to 1 in floating-point, `f32` format.
/// Darkening by 80% would be represented by a `level` of 0.8
///
/// # Example
/// ```
/// // For example to darken an image by 10% in the HSL colour space:
/// use photon::colour_spaces::darken_hsl;
///
/// // Open the image. A PhotonImage is returned.
/// let mut img: PhotonImage = open_image("images/flowers.PNG");
///
/// darken_hsl(&mut img, 0.1);
/// ```
#[wasm_bindgen]
pub fn darken_hsl(img: &mut PhotonImage, level: f32) {
    hsl(img, "darken", level);
}
/// Darken the image's colours by a specified amount in the HSV colour space.
///
/// # Arguments
/// * `img` - A PhotonImage.
/// * `level` - Float value from 0 to 1 representing the level to which to darken the image by.
/// The `level` must be from 0 to 1 in floating-point, `f32` format.
/// Darkening by 80% would be represented by a `level` of 0.8
///
/// # Example
/// ```
/// // For example to darken an image by 10% in the HSV colour space:
/// use photon::colour_spaces::darken_hsv;
///
/// // Open the image. A PhotonImage is returned.
/// let mut img: PhotonImage = open_image("images/flowers.PNG");
///
/// darken_hsv(&mut img, 0.1);
/// ```
#[wasm_bindgen]
pub fn darken_hsv(img: &mut PhotonImage, level: f32) {
    hsv(img, "darken", level);
}
/// Desaturate the image by a specified amount in the HSV colour space.
///
/// # Arguments
/// * `img` - A PhotonImage.
/// * `level` - Float value from 0 to 1 representing the level to which to desaturate the image by.
/// The `level` must be from 0 to 1 in floating-point, `f32` format.
/// Desaturating by 80% would be represented by a `level` of 0.8
///
/// # Example
/// ```
/// // For example to desaturate an image by 10% in the HSV colour space:
/// use photon::colour_spaces::desaturate_hsv;
///
/// // Open the image. A PhotonImage is returned.
/// let mut img: PhotonImage = open_image("images/mountains.PNG");
///
/// desaturate_hsv(&mut img, 0.1);
/// ```
#[wasm_bindgen]
pub fn desaturate_hsv(img: &mut PhotonImage, level: f32) {
    hsv(img, "desaturate", level);
}
/// Desaturate the image by a specified amount in the HSL colour space.
///
/// # Arguments
/// * `img` - A PhotonImage.
/// * `level` - Float value from 0 to 1 representing the level to which to desaturate the image by.
/// The `level` must be from 0 to 1 in floating-point, `f32` format.
/// Desaturating by 80% would be represented by a `level` of 0.8
///
/// # Example
/// ```
/// // For example to desaturate an image by 10% in the HSL colour space:
/// use photon::colour_spaces::desaturate_hsl;
///
/// // Open the image. A PhotonImage is returned.
/// let mut img: PhotonImage = open_image("images/flowers.PNG");
///
/// desaturate_hsl(&mut img, 0.1);
/// ```
#[wasm_bindgen]
pub fn desaturate_hsl(img: &mut PhotonImage, level: f32) {
    hsl(img, "desaturate", level);
}
/// Desaturate the image by a specified amount in the LCh colour space.
///
/// # Arguments
/// * `img` - A PhotonImage.
/// * `level` - Float value from 0 to 1 representing the level to which to desaturate the image by.
/// The `level` must be from 0 to 1 in floating-point, `f32` format.
/// Desaturating by 80% would be represented by a `level` of 0.8
///
/// # Example
/// ```
/// // For example to desaturate an image by 10% in the LCh colour space:
/// use photon::colour_spaces::desaturate_lch;
///
/// // Open the image. A PhotonImage is returned.
/// let mut img: PhotonImage = open_image("images/flowers.PNG");
///
/// desaturate_lch(&mut img, 0.1);
/// ```
#[wasm_bindgen]
pub fn desaturate_lch(img: &mut PhotonImage, level: f32) {
    lch(img, "desaturate", level);
}
/// Mix image with a single color, supporting passing `opacity`.
/// The algorithm comes from Jimp. See `function mix` and `function colorFn` at following link:
/// https://github.com/oliver-moran/jimp/blob/29679faa597228ff2f20d34c5758e4d2257065a3/packages/plugin-color/src/index.js
/// Specifically, result_value = (mix_color_value - origin_value) * opacity + origin_value =
/// mix_color_value * opacity + (1 - opacity) * origin_value for each
/// of RGB channel.
///
/// # Arguments
/// * `photon_image` - A PhotonImage that contains a view into the image.
/// * `mix_colour` - the colour to be mixed in, as an RGB value.
/// * `opacity` - the opacity of color when mixed to image. Float value from 0 to 1.
/// # Example
///
/// ```
/// // For example, to mix an image with rgb (50, 255, 254) and opacity 0.4:
/// use photon::colour_spaces::mix_with_colour;
///
/// let mut img = open_image("images/flowers.PNG");
/// let mix_colour = Rgb { r: 50, g: 255, b: 254 };
/// mix_with_colour(&mut img, mix_colour, 0.4);
/// ```
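///
/// For a single channel, mixing an original value of 100 with a mix value of 50
/// at opacity 0.4 gives (50 - 100) * 0.4 + 100 = 80.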
#[wasm_bindgen]
pub fn mix_with_colour(photon_image: &mut PhotonImage, mix_colour: Rgb, opacity: f32) {
let img = helpers::dyn_image_from_raw(&photon_image);
    let (width, height) = img.dimensions();
    let mut img = img.to_rgba();
    // Cache (mix_colour_value * opacity) and (1 - opacity) so we don't need to
    // recalculate them for every pixel.
    let mix_red_offset = mix_colour.r as f32 * opacity;
    let mix_green_offset = mix_colour.g as f32 * opacity;
    let mix_blue_offset = mix_colour.b as f32 * opacity;
    let factor = 1.0 - opacity;
    for x in 0..width {
        for y in 0..height {
            let px = img.get_pixel(x, y);
            let r_value = mix_red_offset + (px.data[0] as f32) * factor;
            let g_value = mix_green_offset + (px.data[1] as f32) * factor;
            let b_value = mix_blue_offset + (px.data[2] as f32) * factor;
            let alpha = px.data[3];
            img.put_pixel(x, y, image::Rgba {
                data: [r_value as u8, g_value as u8, b_value as u8, alpha],
            });
}
}
photon_image.raw_pixels = img.to_vec();
}
// #[wasm_bindgen]
// pub fn selective_color_convert(mut photon_image: &mut PhotonImage, ref_color:Rgb, new_color:Rgb, fraction: f32) {
// let img = helpers::dyn_image_from_raw(&photon_image);
// let (_width, _height) = img.dimensions();
// let mut img = img.to_rgba();
// for x in 0.._width {
// for y in 0.._height {
// let mut px = img.get_pixel(x, y);
// // Reference colour to compare the current pixel's colour to
// let lab: Lab = Srgb::new(ref_color.r as f32 / 255.0, ref_color.g as f32 / 255.0, ref_color.b as f32 / 255.0).into();
// // Convert the current pixel's colour to the l*a*b colour space
// let r_val: f32 = px.data[0] as f32 / 255.0;
// let g_val: f32 = px.data[1] as f32 / 255.0;
// let b_val: f32 = px.data[2] as f32 / 255.0;
// let px_lab: Lab = Srgb::new(r_val, g_val, b_val).into();
// let sim = color_sim(lab, px_lab);
// if sim > 0 && sim < 40 {
// let newr = (((new_color.r - ref_color.r) as f32) * fraction + new_color.r as f32) as u8;
// let newg = (((new_color.g - ref_color.g) as f32) * fraction + new_color.g as f32) as u8;
// let newb = (((new_color.b - ref_color.b) as f32) * fraction + new_color.b as f32) as u8;
// img.put_pixel(x, y, image::Rgba([newr, newg, newb, 255]));
// }
// }
// }
// photon_image.raw_pixels = img.to_vec();
// }
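
// A minimal working sketch of the selective-colour idea drafted above, using
// the helper API from this file. `color_sim` is not defined here, so this
// sketch inlines a plain Euclidean distance in L*a*b* space as a stand-in
// similarity metric -- an assumption, not necessarily photon's own metric.
pub fn selective_color_convert(photon_image: &mut PhotonImage, ref_color: Rgb, new_color: Rgb, fraction: f32) {
    let img = helpers::dyn_image_from_raw(&photon_image);
    let (width, height) = img.dimensions();
    let mut img = img.to_rgba();
    // Reference colour in L*a*b*, for perceptual comparison.
    let ref_lab: palette::Lab = palette::Srgb::new(
        ref_color.r as f32 / 255.0,
        ref_color.g as f32 / 255.0,
        ref_color.b as f32 / 255.0,
    )
    .into();
    for x in 0..width {
        for y in 0..height {
            let mut px = *img.get_pixel(x, y);
            let px_lab: palette::Lab = palette::Srgb::new(
                px.data[0] as f32 / 255.0,
                px.data[1] as f32 / 255.0,
                px.data[2] as f32 / 255.0,
            )
            .into();
            let sim = ((ref_lab.l - px_lab.l).powi(2)
                + (ref_lab.a - px_lab.a).powi(2)
                + (ref_lab.b - px_lab.b).powi(2))
            .sqrt();
            // Only convert pixels whose colour is close to the reference.
            if sim < 40.0 {
                // Move each channel towards `new_color` by `fraction`.
                px.data[0] = (px.data[0] as f32 + (new_color.r as f32 - px.data[0] as f32) * fraction) as u8;
                px.data[1] = (px.data[1] as f32 + (new_color.g as f32 - px.data[1] as f32) * fraction) as u8;
                px.data[2] = (px.data[2] as f32 + (new_color.b as f32 - px.data[2] as f32) * fraction) as u8;
                img.put_pixel(x, y, px);
            }
        }
    }
    photon_image.raw_pixels = img.to_vec();
}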
// pub fn correct(img: &DynamicImage, mode: &'static str, colour_space: &'static str, amt: f32) -> DynamicImage {
// let mut img = img.to_rgb();
// let (width, height) = img.dimensions();
// for x in 0..width {
// for y in 0..height {
// let px_data = img.get_pixel(x, y).data;
// let colour_to_cspace;
// if colour_space == "hsv" {
// colour_to_cspace: Hsv = Srgb::from_raw(&px_data).into_format();
// }
// else if colour_space == "hsl" {
// colour_to_cspace = Hsl::from(color);
// }
// else {
// colour_to_cspace = Lch::from(color);
// }
// let new_color = match mode {
// // Match a single value
// "desaturate" => colour_to_cspace.desaturate(amt),
// "saturate" => colour_to_cspace.saturate(amt),
// "lighten" => colour_to_cspace.lighten(amt),
// "darken" => colour_to_cspace.darken(amt),
// _ => colour_to_cspace.saturate(amt),
// };
// img.put_pixel(x, y, image::Rgb {
// data: Srgb::from_linear(new_color.into()).into_format().into_raw()
// });
// }
// }
// let dynimage = image::ImageRgb8(img);
// return dynimage;
// }
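
// The commented-out `correct` draft above cannot compile as written: `Hsv`,
// `Hsl` and `Lch` are distinct types, so a single binding cannot hold any of
// the three. A minimal dispatch sketch over the public functions defined in
// this file (an illustration, not part of photon's API):
pub enum CorrectionSpace {
    Hsv,
    Hsl,
    Lch,
}

pub fn correct(img: &mut PhotonImage, space: CorrectionSpace, mode: &str, amt: f32) {
    match space {
        CorrectionSpace::Hsv => hsv(img, mode, amt),
        CorrectionSpace::Hsl => hsl(img, mode, amt),
        CorrectionSpace::Lch => lch(img, mode, amt),
    }
}
| 36.68209 | 133 | 0.603857 |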
22cc9b821df30a2f4631b16b7fc2a552abca3897 | 2,767 | extern crate chrono;
extern crate chrono_tz;
extern crate futures;
extern crate hyper;
extern crate toml;
#[macro_use]
extern crate serde_derive;
mod config;
use std::fs;
use std::net::{SocketAddr, IpAddr, Ipv4Addr};
use std::path::Path;
use std::thread;
use std::io::Write;
use chrono::Utc;
use chrono_tz::Tz;
use futures::future::Future;
use hyper::header::Referer;
use hyper::server::{Http, Request, Response, Service};
use hyper::Uri;
struct Tracker {
logs_dir: String,
timezone: Tz,
}
impl Service for Tracker {
type Request = Request;
type Response = Response;
type Error = hyper::Error;
type Future = Box<Future<Item=Self::Response, Error=Self::Error>>;
fn call(&self, req: Request) -> Self::Future {
let logs_dir = self.logs_dir.to_owned();
let timezone = self.timezone.to_owned();
thread::spawn(move || {
log_visit(req, logs_dir, timezone);
});
// An empty response will do.
Box::new(futures::future::ok(Response::new()))
}
}
fn log_visit(req: Request, logs_dir_path: String, timezone: Tz) {
let page = match req.headers().get::<Referer>() {
Some(referer) => match referer.parse::<Uri>() {
Ok(uri) => uri.path().to_string(),
// Ignore malformed (and malicious) Referer headers.
Err(_) => return,
},
None => String::from("unknown"),
};
    // Ensure the logs directory exists; keep any creation error around so the
    // failure can be reported precisely below.
    let create_err = fs::create_dir_all(&logs_dir_path).err();
    match fs::metadata(&logs_dir_path) {
        Ok(ref metadata) if !metadata.is_dir() => {
            panic!("'{}' is not a directory.", &logs_dir_path)
        },
        Err(metadata_err) => {
            // Prefer the creation error if there was one; otherwise report the
            // metadata error rather than unwrapping a `None`.
            panic!("Couldn't create logs directory: {:?}", create_err.unwrap_or(metadata_err));
        },
        _ => {},
    };
let time = Utc::now().with_timezone(&timezone);
let file_path = Path::new(&logs_dir_path)
.join(format!("{}.csv", time.format("%Y%m%d")));
let mut file = fs::OpenOptions::new()
.create(true)
.append(true)
.open(&file_path)
.expect(&format!("Couldn't open log file '{}'.", file_path.display()));
writeln!(&mut file, "{},{}", page, time.to_rfc3339())
.expect("Couldn't write to log file.");
}
fn main() {
let config = config::load();
// Fail fast if timezone is invalid.
let timezone: Tz = config.timezone.parse()
.expect("Invalid timezone");
// Listen on all interfaces by default.
let addr = SocketAddr::new(
IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), config.port);
let server = Http::new().bind(&addr, move || {
Ok(Tracker {
logs_dir: config.logs_dir.to_string(),
timezone: timezone
})
}).unwrap();
server.run().unwrap();
}
| 25.385321 | 80 | 0.585833 |