Dataset columns (per record):

hexsha              stringlengths   40 .. 40
size                int64           4 .. 1.05M
content             stringlengths   4 .. 1.05M
avg_line_length     float64         1.33 .. 100
max_line_length     int64           1 .. 1k
alphanum_fraction   float64         0.25 .. 1
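Each record below carries these six fields. For readers who want to consume the dump programmatically, here is a minimal sketch of a matching record type. It assumes a JSON-lines layout (one object per line), which is common for such dumps but not confirmed by this export; the `Record` type and the serde/serde_json usage are illustrative helpers, not part of the dataset itself.

// Hypothetical loader sketch; requires serde (with the "derive" feature) and serde_json.
use serde::Deserialize;

/// One record of the dump, mirroring the schema above.
/// (Hypothetical helper: the dataset itself does not ship this type.)
#[derive(Debug, Deserialize)]
struct Record {
    /// 40-character git blob SHA-1 of the source file.
    hexsha: String,
    /// File size in bytes (4 .. ~1.05M per the schema).
    size: u64,
    /// The full file contents.
    content: String,
    avg_line_length: f64,
    max_line_length: u64,
    alphanum_fraction: f64,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Assumed JSONL line; the content field is elided here.
    let line = r#"{"hexsha":"895f218e2d7d209d730434e2cb9055d501ace2e3","size":555,"content":"...","avg_line_length":30.833333,"max_line_length":79,"alphanum_fraction":0.652252}"#;
    let record: Record = serde_json::from_str(line)?;
    println!("{} ({} bytes)", record.hexsha, record.size);
    Ok(())
}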
hexsha: 895f218e2d7d209d730434e2cb9055d501ace2e3 | size: 555 | avg_line_length: 30.833333 | max_line_length: 79 | alphanum_fraction: 0.652252

use ndarray::Array2;
use num_complex::Complex64;
use std::collections::BTreeMap;

/// Tight-binding model specified by (R, H(R)) pairs.
pub trait Model: Clone {
    /// A collection of (displacement vector, hopping matrix) pairs, (R, H(R)).
    fn hrs(&self) -> &BTreeMap<[i32; 3], Array2<Complex64>>;

    /// The number of bands in the model. Each matrix value in hrs is
    /// nbands x nbands.
    fn bands(&self) -> usize;

    /// A matrix with columns giving the lattice vectors in Cartesian
    /// coordinates.
    fn d(&self) -> &Array2<f64>;
}
hexsha: 0910f02dc29f399753c1edf39d237d3123c2c29d | size: 854 | avg_line_length: 25.117647 | max_line_length: 68 | alphanum_fraction: 0.637002

// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use std::cell::Cell;
use std::gc::GC;

enum newtype {
    newtype(int)
}

pub fn main() {
    // Test that borrowck treats enums with a single variant
    // specially.

    let x = box(GC) Cell::new(5);
    let y = box(GC) Cell::new(newtype(3));

    let z = match y.get() {
        newtype(b) => {
            x.set(x.get() + 1);
            x.get() * b
        }
    };
    assert_eq!(z, 18);
}
hexsha: 0ec6e829139648efc77a18fa940a9b129e4a7da0 | size: 12,218 | avg_line_length: 38.542587 | max_line_length: 120 | alphanum_fraction: 0.576117

// Copyright 2021, The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

use std::{
    fmt::{Display, Formatter},
    io,
};

use integer_encoding::VarIntWriter;
use tari_common_types::types::{Commitment, FixedHash, PublicKey};
use tari_script::TariScript;
use tari_utilities::hex::{to_hex, Hex};

use crate::{
    consensus::{ConsensusDecoding, ConsensusEncoding, MaxSizeBytes},
    covenants::{
        byte_codes,
        covenant::Covenant,
        decoder::{CovenantDecodeError, CovenantReadExt},
        encoder::CovenentWriteExt,
        error::CovenantError,
        fields::{OutputField, OutputFields},
    },
};

const MAX_COVENANT_ARG_SIZE: usize = 4096;
const MAX_BYTES_ARG_SIZE: usize = 4096;

#[derive(Debug, Clone, PartialEq, Eq)]
pub enum CovenantArg {
    Hash(FixedHash),
    PublicKey(PublicKey),
    Commitment(Commitment),
    TariScript(TariScript),
    Covenant(Covenant),
    Uint(u64),
    OutputField(OutputField),
    OutputFields(OutputFields),
    Bytes(Vec<u8>),
}

impl CovenantArg {
    pub fn is_valid_code(code: u8) -> bool {
        byte_codes::is_valid_arg_code(code)
    }

    pub fn read_from<R: io::Read>(reader: &mut R, code: u8) -> Result<Self, CovenantDecodeError> {
        use byte_codes::*;
        match code {
            ARG_HASH => {
                let mut hash = [0u8; 32];
                reader.read_exact(&mut hash)?;
                Ok(CovenantArg::Hash(hash.into()))
            },
            ARG_PUBLIC_KEY => {
                let pk = PublicKey::consensus_decode(reader)?;
                Ok(CovenantArg::PublicKey(pk))
            },
            ARG_COMMITMENT => Ok(CovenantArg::Commitment(Commitment::consensus_decode(reader)?)),
            ARG_TARI_SCRIPT => {
                let script = TariScript::consensus_decode(reader)?;
                Ok(CovenantArg::TariScript(script))
            },
            ARG_COVENANT => {
                let buf = reader.read_variable_length_bytes(MAX_COVENANT_ARG_SIZE)?;
                // Do not use consensus_decoding here because the compiler infinitely recurses to resolve the R generic,
                // R becomes the reader of this call and so on. This impl has an arg limit anyway and so is safe
                // TODO: Impose a limit on depth of covenants within covenants
                let covenant = Covenant::from_bytes(&buf)?;
                Ok(CovenantArg::Covenant(covenant))
            },
            ARG_UINT => {
                let v = u64::consensus_decode(reader)?;
                Ok(CovenantArg::Uint(v))
            },
            ARG_OUTPUT_FIELD => {
                let v = reader
                    .read_next_byte_code()?
                    .ok_or(CovenantDecodeError::UnexpectedEof {
                        expected: "Output field byte code",
                    })?;
                let field = OutputField::from_byte(v)?;
                Ok(CovenantArg::OutputField(field))
            },
            ARG_OUTPUT_FIELDS => {
                // Each field code is a byte
                let fields = OutputFields::read_from(reader)?;
                Ok(CovenantArg::OutputFields(fields))
            },
            ARG_BYTES => {
                let buf = MaxSizeBytes::<MAX_BYTES_ARG_SIZE>::consensus_decode(reader)?;
                Ok(CovenantArg::Bytes(buf.into()))
            },
            _ => Err(CovenantDecodeError::UnknownArgByteCode { code }),
        }
    }

    pub fn write_to<W: io::Write>(&self, writer: &mut W) -> Result<(), io::Error> {
        use byte_codes::*;
        use CovenantArg::{Bytes, Commitment, Covenant, Hash, OutputField, OutputFields, PublicKey, TariScript, Uint};

        match self {
            Hash(hash) => {
                writer.write_u8_fixed(ARG_HASH)?;
                writer.write_all(&hash[..])?;
            },
            PublicKey(pk) => {
                writer.write_u8_fixed(ARG_PUBLIC_KEY)?;
                pk.consensus_encode(writer)?;
            },
            Commitment(commitment) => {
                writer.write_u8_fixed(ARG_COMMITMENT)?;
                commitment.consensus_encode(writer)?;
            },
            TariScript(script) => {
                writer.write_u8_fixed(ARG_TARI_SCRIPT)?;
                script.consensus_encode(writer)?;
            },
            Covenant(covenant) => {
                writer.write_u8_fixed(ARG_COVENANT)?;
                let len = covenant.get_byte_length();
                writer.write_varint(len)?;
                covenant.write_to(writer)?;
            },
            Uint(int) => {
                writer.write_u8_fixed(ARG_UINT)?;
                int.consensus_encode(writer)?;
            },
            OutputField(field) => {
                writer.write_u8_fixed(ARG_OUTPUT_FIELD)?;
                writer.write_u8_fixed(field.as_byte())?;
            },
            OutputFields(fields) => {
                writer.write_u8_fixed(ARG_OUTPUT_FIELDS)?;
                fields.write_to(writer)?;
            },
            Bytes(bytes) => {
                writer.write_u8_fixed(ARG_BYTES)?;
                bytes.consensus_encode(writer)?;
            },
        }

        Ok(())
    }
}

macro_rules! require_x_impl {
    ($name:ident, $output:ident, $expected: expr, $output_type:ident) => {
        #[allow(dead_code)]
        pub(super) fn $name(self) -> Result<$output_type, CovenantError> {
            match self {
                CovenantArg::$output(obj) => Ok(obj),
                got => Err(CovenantError::UnexpectedArgument {
                    expected: $expected,
                    got: got.to_string(),
                }),
            }
        }
    };
    ($name:ident, $output:ident, $expected:expr) => {
        require_x_impl!($name, $output, $expected, $output);
    };
}

impl CovenantArg {
    require_x_impl!(require_hash, Hash, "hash", FixedHash);

    require_x_impl!(require_publickey, PublicKey, "publickey");

    require_x_impl!(require_commitment, Commitment, "commitment");

    require_x_impl!(require_tariscript, TariScript, "script");

    require_x_impl!(require_covenant, Covenant, "covenant");

    require_x_impl!(require_outputfield, OutputField, "outputfield");

    require_x_impl!(require_outputfields, OutputFields, "outputfields");

    pub fn require_bytes(self) -> Result<Vec<u8>, CovenantError> {
        match self {
            CovenantArg::Bytes(val) => Ok(val),
            got => Err(CovenantError::UnexpectedArgument {
                expected: "bytes",
                got: got.to_string(),
            }),
        }
    }

    pub fn require_uint(self) -> Result<u64, CovenantError> {
        match self {
            CovenantArg::Uint(val) => Ok(val),
            got => Err(CovenantError::UnexpectedArgument {
                expected: "uint",
                got: got.to_string(),
            }),
        }
    }
}

impl Display for CovenantArg {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        use CovenantArg::{Bytes, Commitment, Covenant, Hash, OutputField, OutputFields, PublicKey, TariScript, Uint};
        match self {
            Hash(hash) => write!(f, "Hash({})", to_hex(&hash[..])),
            PublicKey(public_key) => write!(f, "PublicKey({})", public_key.to_hex()),
            Commitment(commitment) => write!(f, "Commitment({})", commitment.to_hex()),
            TariScript(_) => write!(f, "TariScript(...)"),
            Covenant(_) => write!(f, "Covenant(...)"),
            Uint(v) => write!(f, "Uint({})", v),
            OutputField(field) => write!(f, "OutputField({})", field.as_byte()),
            OutputFields(fields) => write!(f, "OutputFields({} field(s))", fields.len()),
            Bytes(bytes) => write!(f, "Bytes({} byte(s))", bytes.len()),
        }
    }
}

#[cfg(test)]
mod test {
    use tari_common_types::types::Commitment;
    use tari_script::script;
    use tari_utilities::hex::from_hex;

    use super::*;
    use crate::{covenant, covenants::byte_codes::*};

    mod require_x_impl {
        use super::*;

        #[test]
        fn test() {
            // This is mostly to remove unused function warnings
            let arg = CovenantArg::Uint(123);
            arg.clone().require_bytes().unwrap_err();
            let v = arg.clone().require_uint().unwrap();
            assert_eq!(v, 123);
            arg.clone().require_hash().unwrap_err();
            arg.clone().require_outputfield().unwrap_err();
            arg.clone().require_covenant().unwrap_err();
            arg.clone().require_commitment().unwrap_err();
            arg.clone().require_outputfields().unwrap_err();
            arg.clone().require_publickey().unwrap_err();
            arg.require_tariscript().unwrap_err();
        }
    }

    mod write_to_and_read_from {
        use super::*;

        fn test_case(argument: CovenantArg, mut data: &[u8]) {
            let mut buf = Vec::new();
            argument.write_to(&mut buf).unwrap();
            assert_eq!(buf, data);
            let reader = &mut data;
            let code = reader.read_next_byte_code().unwrap().unwrap();
            let arg = CovenantArg::read_from(&mut data, code).unwrap();
            assert_eq!(arg, argument);
        }

        #[test]
        fn test() {
            test_case(CovenantArg::Uint(2048), &[ARG_UINT, 0x80, 0x10][..]);
            test_case(
                CovenantArg::Covenant(covenant!(identity())),
                &[ARG_COVENANT, 0x01, 0x20][..],
            );
            test_case(
                CovenantArg::Bytes(vec![0x01, 0x02, 0xaa]),
                &[ARG_BYTES, 0x03, 0x01, 0x02, 0xaa][..],
            );
            test_case(
                CovenantArg::Commitment(Commitment::default()),
                &from_hex("030000000000000000000000000000000000000000000000000000000000000000").unwrap(),
            );
            test_case(
                CovenantArg::PublicKey(PublicKey::default()),
                &from_hex("020000000000000000000000000000000000000000000000000000000000000000").unwrap(),
            );
            test_case(
                CovenantArg::Hash(FixedHash::zero()),
                &from_hex("010000000000000000000000000000000000000000000000000000000000000000").unwrap(),
            );
            test_case(CovenantArg::TariScript(script!(Nop)), &[ARG_TARI_SCRIPT, 0x01, 0x73]);
            test_case(CovenantArg::OutputField(OutputField::Covenant), &[
                ARG_OUTPUT_FIELD,
                FIELD_COVENANT,
            ]);
            test_case(
                CovenantArg::OutputFields(OutputFields::from(vec![OutputField::Features, OutputField::Commitment])),
                &[ARG_OUTPUT_FIELDS, 0x02, FIELD_FEATURES, FIELD_COMMITMENT],
            );
        }
    }
}
hexsha: 8a7b9a46db93d78b552ed1b3958af2236638fd99 | size: 6,176 | avg_line_length: 31.510204 | max_line_length: 89 | alphanum_fraction: 0.572539

use bellperson::{
    gadgets::{
        boolean::Boolean, multipack, num::AllocatedNum, sha256::sha256 as sha256_circuit,
        uint32::UInt32,
    },
    ConstraintSystem, SynthesisError,
};
use ff::PrimeField;
use storage_proofs_core::{gadgets::uint64::UInt64, util::reverse_bit_numbering};

use crate::stacked::vanilla::TOTAL_PARENTS;

/// Compute a single label.
pub fn create_label_circuit<Scalar, CS>(
    mut cs: CS,
    replica_id: &[Boolean],
    parents: Vec<Vec<Boolean>>,
    layer_index: UInt32,
    node: UInt64,
) -> Result<AllocatedNum<Scalar>, SynthesisError>
where
    Scalar: PrimeField,
    CS: ConstraintSystem<Scalar>,
{
    assert!(replica_id.len() >= 32, "replica id is too small");
    assert!(replica_id.len() <= 256, "replica id is too large");
    assert_eq!(parents.len(), TOTAL_PARENTS, "invalid sized parents");

    // ciphertexts will become a buffer of the layout
    // id | node | parent_node_0 | parent_node_1 | ...
    let mut ciphertexts = replica_id.to_vec();

    // pad to 32 bytes
    while ciphertexts.len() < 256 {
        ciphertexts.push(Boolean::constant(false));
    }

    ciphertexts.extend_from_slice(&layer_index.into_bits_be());
    ciphertexts.extend_from_slice(&node.to_bits_be());

    // pad to 64 bytes
    while ciphertexts.len() < 512 {
        ciphertexts.push(Boolean::constant(false));
    }

    for parent in parents.iter() {
        ciphertexts.extend_from_slice(parent);

        // pad such that each parents take 32 bytes
        while ciphertexts.len() % 256 != 0 {
            ciphertexts.push(Boolean::constant(false));
        }
    }

    // 32b replica id
    // 32b layer_index + node
    // 37 * 32b = 1184b parents
    assert_eq!(ciphertexts.len(), (1 + 1 + TOTAL_PARENTS) * 32 * 8);

    // Compute Sha256
    let alloc_bits = sha256_circuit(cs.namespace(|| "hash"), &ciphertexts[..])?;

    // Convert the hash result into a single Fr.
    let bits = reverse_bit_numbering(alloc_bits);
    multipack::pack_bits(
        cs.namespace(|| "result_num"),
        &bits[0..(Scalar::CAPACITY as usize)],
    )
}

#[cfg(test)]
mod tests {
    use super::*;

    use bellperson::util_cs::test_cs::TestConstraintSystem;
    use blstrs::Scalar as Fr;
    use ff::Field;
    use filecoin_hashers::sha256::Sha256Hasher;
    use fr32::{bytes_into_fr, fr_into_bytes};
    use rand::SeedableRng;
    use rand_xorshift::XorShiftRng;
    use storage_proofs_core::{
        api_version::ApiVersion,
        drgraph::{Graph, BASE_DEGREE},
        util::{bytes_into_boolean_vec_be, data_at_node, NODE_SIZE},
        TEST_SEED,
    };

    use crate::stacked::vanilla::{create_label, StackedBucketGraph, EXP_DEGREE};

    #[test]
    fn test_create_label() {
        let mut cs = TestConstraintSystem::<Fr>::new();
        let mut rng = XorShiftRng::from_seed(TEST_SEED);

        let size = 64;
        let porep_id = [32; 32];

        let graph = StackedBucketGraph::<Sha256Hasher>::new_stacked(
            size,
            BASE_DEGREE,
            EXP_DEGREE,
            porep_id,
            ApiVersion::V1_1_0,
        )
        .expect("stacked bucket graph new_stacked failed");

        let id_fr = Fr::random(&mut rng);
        let id: Vec<u8> = fr_into_bytes(&id_fr);
        let layer = 3;
        let node = 22;

        let mut data: Vec<u8> = (0..2 * size)
            .flat_map(|_| fr_into_bytes(&Fr::random(&mut rng)))
            .collect();

        let mut parents = vec![0; BASE_DEGREE + EXP_DEGREE];
        graph.parents(node, &mut parents).expect("parents failed");

        let raw_parents_bytes: Vec<Vec<u8>> = parents
            .iter()
            .enumerate()
            .map(|(i, p)| {
                if i < BASE_DEGREE {
                    // base
                    data_at_node(&data[..size * NODE_SIZE], *p as usize)
                        .expect("data_at_node failed")
                        .to_vec()
                } else {
                    // exp
                    data_at_node(&data[size * NODE_SIZE..], *p as usize)
                        .expect("data_at_node failed")
                        .to_vec()
                }
            })
            .collect();

        let mut parents_bytes = raw_parents_bytes.clone(); // 14
        parents_bytes.extend_from_slice(&raw_parents_bytes); // 28
        parents_bytes.extend_from_slice(&raw_parents_bytes[..9]); // 37

        assert_eq!(parents_bytes.len(), TOTAL_PARENTS);

        let parents_bits: Vec<Vec<Boolean>> = parents_bytes
            .iter()
            .enumerate()
            .map(|(i, p)| {
                let mut cs = cs.namespace(|| format!("parents {}", i));
                bytes_into_boolean_vec_be(&mut cs, Some(p), p.len())
                    .expect("bytes_into_boolean_vec_be failed")
            })
            .collect();

        let id_bits: Vec<Boolean> = {
            let mut cs = cs.namespace(|| "id");
            bytes_into_boolean_vec_be(&mut cs, Some(id.as_slice()), id.len())
                .expect("bytes_into_boolean_vec_be failed")
        };

        let layer_alloc = UInt32::constant(layer as u32);
        let node_alloc = UInt64::constant(node as u64);

        let out = create_label_circuit(
            cs.namespace(|| "create_label"),
            &id_bits,
            parents_bits,
            layer_alloc,
            node_alloc,
        )
        .expect("key derivation function failed");

        assert!(cs.is_satisfied(), "constraints not satisfied");
        assert_eq!(cs.num_constraints(), 532_025);

        let (l1, l2) = data.split_at_mut(size * NODE_SIZE);
        create_label::single::create_label_exp(
            &graph,
            None,
            fr_into_bytes(&id_fr),
            &*l2,
            l1,
            layer,
            node,
        )
        .expect("create_label_exp failed");

        let expected_raw = data_at_node(&l1, node).expect("data_at_node failed");
        let expected = bytes_into_fr(expected_raw).expect("bytes_into_fr failed");

        assert_eq!(
            expected,
            out.get_value().expect("get_value failed"),
            "circuit and non circuit do not match"
        );
    }
}
hexsha: 119f9b4235f7e183f17750e8dc5c1226642d3c14 | size: 1,761 | avg_line_length: 21.740741 | max_line_length: 99 | alphanum_fraction: 0.664963

#![no_std]
#![feature(lang_items, const_extern_fn)]
#![deny(
    warnings,
    nonstandard_style,
    unused,
    future_incompatible,
    rust_2018_idioms
)]
#![deny(clippy::all, clippy::nursery, clippy::pedantic)]

use windows_kernel_rs::{
    device::{
        Completion, Device, DeviceDoFlags, DeviceFlags, DeviceOperations, DeviceType, RequestError,
    },
    kernel_module, println,
    request::IoRequest,
    Access, Driver, Error, KernelModule, SymbolicLink,
};

struct MyDevice;

impl DeviceOperations for MyDevice {
    fn create(&mut self, _device: &Device, request: IoRequest) -> Result<Completion, RequestError> {
        println!("userspace opened the device");
        Ok(Completion::Complete(0, request))
    }

    fn close(&mut self, _device: &Device, request: IoRequest) -> Result<Completion, RequestError> {
        println!("userspace closed the device");
        Ok(Completion::Complete(0, request))
    }

    fn cleanup(&mut self, _device: &Device, request: IoRequest) -> Result<Completion, RequestError> {
        println!("device is no longer in use by userspace");
        Ok(Completion::Complete(0, request))
    }
}

struct Module {
    _device: Device,
    _symbolic_link: SymbolicLink,
}

impl KernelModule for Module {
    fn init(mut driver: Driver, _: &str) -> Result<Self, Error> {
        let device = driver.create_device(
            "\\Device\\Example",
            DeviceType::Unknown,
            DeviceFlags::SECURE_OPEN,
            DeviceDoFlags::DO_BUFFERED_IO,
            Access::NonExclusive,
            MyDevice,
        )?;
        let symbolic_link = SymbolicLink::new("\\??\\Example", "\\Device\\Example")?;

        Ok(Module {
            _device: device,
            _symbolic_link: symbolic_link,
        })
    }

    fn cleanup(&mut self, _driver: Driver) {}
}

kernel_module!(Module);
hexsha: bf578b1687f836f72a8238631c445d6699f02c22 | size: 12,817 | avg_line_length: 36.51567 | max_line_length: 100 | alphanum_fraction: 0.650308

use crate::ir::types as ir;
use crate::ir::{
    dispatch_table::{DispatchTable, DispatchableFunction},
    function,
    type_table::TypeTable,
};
use crate::type_info::TypeInfo;
use crate::value::{AsValue, CanInternalize, Global, IrValueContext, IterAsIrValue, Value};
use crate::IrDatabase;
use hir::Ty;
use inkwell::{attributes::Attribute, module::Linkage, AddressSpace};
use std::collections::HashSet;
use std::ffi::CString;

/// Construct a `MunFunctionPrototype` struct for the specified HIR function.
fn gen_prototype_from_function<D: IrDatabase>(
    db: &D,
    context: &IrValueContext,
    function: hir::Function,
) -> ir::FunctionPrototype {
    let module = context.module;
    let name = function.name(db).to_string();

    // Internalize the name of the function prototype
    let name_str = CString::new(name.clone())
        .expect("function prototype name is not a valid CString")
        .intern(format!("fn_sig::<{}>::name", &name), context);

    // Get the `ir::TypeInfo` pointer for the return type of the function
    let fn_sig = function.ty(db).callable_sig(db).unwrap();
    let return_type = gen_signature_return_type(db, context, fn_sig.ret().clone());

    // Construct an array of pointers to `ir::TypeInfo`s for the arguments of the prototype
    let arg_types = fn_sig
        .params()
        .iter()
        .map(|ty| {
            TypeTable::get(module, &db.type_info(ty.clone()))
                .expect("expected a TypeInfo for a prototype argument but it was not found")
                .as_value(context)
        })
        .into_const_private_pointer_or_null(format!("fn_sig::<{}>::arg_types", &name), context);

    ir::FunctionPrototype {
        name: name_str.as_value(context),
        signature: ir::FunctionSignature {
            arg_types,
            return_type,
            num_arg_types: fn_sig.params().len() as u16,
        },
    }
}

/// Construct a `MunFunctionPrototype` struct for the specified dispatch table function.
fn gen_prototype_from_dispatch_entry(
    context: &IrValueContext,
    function: &DispatchableFunction,
) -> ir::FunctionPrototype {
    let module = context.module;

    // Internalize the name of the function prototype
    let name_str = CString::new(function.prototype.name.clone())
        .expect("function prototype name is not a valid CString")
        .intern(
            format!("fn_sig::<{}>::name", function.prototype.name),
            context,
        );

    // Get the `ir::TypeInfo` pointer for the return type of the function
    let return_type =
        gen_signature_return_type_from_type_info(context, function.prototype.ret_type.clone());

    // Construct an array of pointers to `ir::TypeInfo`s for the arguments of the prototype
    let arg_types = function
        .prototype
        .arg_types
        .iter()
        .map(|type_info| {
            TypeTable::get(module, type_info)
                .expect("expected a TypeInfo for a prototype argument but it was not found")
                .as_value(context)
        })
        .into_const_private_pointer_or_null(
            format!("{}_param_types", function.prototype.name),
            context,
        );

    ir::FunctionPrototype {
        name: name_str.as_value(context),
        signature: ir::FunctionSignature {
            arg_types,
            return_type,
            num_arg_types: function.prototype.arg_types.len() as u16,
        },
    }
}

/// Given a function, construct a pointer to an `ir::TypeInfo` global that represents the return
/// type of the function; or `null` if the return type is empty.
fn gen_signature_return_type<D: IrDatabase>(
    db: &D,
    context: &IrValueContext,
    ret_type: Ty,
) -> Value<*const ir::TypeInfo> {
    gen_signature_return_type_from_type_info(
        context,
        if ret_type.is_empty() {
            None
        } else {
            Some(db.type_info(ret_type))
        },
    )
}

/// Given a function, construct a pointer to an `ir::TypeInfo` global that represents the return
/// type of the function; or `null` if the return type is empty.
fn gen_signature_return_type_from_type_info(
    context: &IrValueContext,
    ret_type: Option<TypeInfo>,
) -> Value<*const ir::TypeInfo> {
    ret_type
        .map(|info| {
            TypeTable::get(context.module, &info)
                .expect("could not find TypeInfo that should definitely be there")
                .as_value(context)
        })
        .unwrap_or_else(|| Value::null(context))
}

/// Construct a global that holds a reference to all functions. e.g.:
/// MunFunctionDefinition[] definitions = { ... }
fn get_function_definition_array<'a, D: IrDatabase>(
    db: &D,
    context: &IrValueContext,
    functions: impl Iterator<Item = &'a hir::Function>,
) -> Global<[ir::FunctionDefinition]> {
    let module = context.module;
    functions
        .map(|f| {
            let name = f.name(db).to_string();

            // Get the function from the cloned module and modify the linkage of the function.
            let value = module
                // If a wrapper function exists, use that (required for struct types)
                .get_function(&format!("{}_wrapper", name))
                // Otherwise, use the normal function
                .or_else(|| module.get_function(&name))
                .unwrap();
            value.set_linkage(Linkage::Private);

            // Generate the signature from the function
            let prototype = gen_prototype_from_function(db, context, *f);
            ir::FunctionDefinition {
                prototype,
                fn_ptr: Value::from_raw(value.as_global_value().as_pointer_value()),
            }
        })
        .as_value(context)
        .into_const_private_global("fn.get_info.functions", context)
}

/// Generate the dispatch table information. e.g.:
/// ```c
/// MunDispatchTable dispatchTable = { ... }
/// ```
fn gen_dispatch_table(
    context: &IrValueContext,
    dispatch_table: &DispatchTable,
) -> ir::DispatchTable {
    let module = context.module;

    // Generate an internal array that holds all the function prototypes
    let prototypes = dispatch_table
        .entries()
        .iter()
        .map(|entry| gen_prototype_from_dispatch_entry(context, entry))
        .into_const_private_pointer("fn.get_info.dispatchTable.signatures", context);

    // Get the pointer to the global table (or nullptr if no global table was defined).
    let fn_ptrs = dispatch_table
        .global_value()
        .map(|_g|
            // TODO: This is a hack, the passed module here is a clone of the module with which the
            // dispatch table was created. Because of this we have to lookup the dispatch table
            // global again. There is however not a `GlobalValue::get_name` method so I just
            // hardcoded the name here.
            Value::from_raw(module.get_global("dispatchTable").unwrap().as_pointer_value()))
        .unwrap_or_else(|| Value::null(context));

    ir::DispatchTable {
        prototypes,
        fn_ptrs,
        num_entries: dispatch_table.entries().len() as u32,
    }
}

/// Constructs IR that exposes the types and symbols in the specified module. A function called
/// `get_info` is constructed that returns a struct `MunAssemblyInfo`. See the `mun_abi` crate
/// for the ABI that `get_info` exposes.
pub(super) fn gen_reflection_ir(
    db: &impl IrDatabase,
    context: &IrValueContext,
    api: &HashSet<hir::Function>,
    dispatch_table: &DispatchTable,
    type_table: &TypeTable,
) {
    let module = context.module;

    let num_functions = api.len() as u32;
    let functions = get_function_definition_array(db, context, api.iter());

    // Get the TypeTable global
    let types = TypeTable::find_global(module)
        .map(|g| g.as_value(context))
        .unwrap_or_else(|| Value::null(context));

    // Construct the module info struct
    let module_info = ir::ModuleInfo {
        path: CString::new("")
            .unwrap()
            .intern("module_info::path", context)
            .as_value(context),
        functions: functions.as_value(context),
        num_functions,
        types,
        num_types: type_table.num_types() as u32,
    };

    // Construct the dispatch table struct
    let dispatch_table = gen_dispatch_table(context, dispatch_table);

    // Construct the actual `get_info` function
    gen_get_info_fn(db, context, module_info, dispatch_table);
    gen_set_allocator_handle_fn(db, context);
}

/// Construct the actual `get_info` function.
fn gen_get_info_fn(
    db: &impl IrDatabase,
    context: &IrValueContext,
    module_info: ir::ModuleInfo,
    dispatch_table: ir::DispatchTable,
) {
    let target = db.target();
    let str_type = context.context.i8_type().ptr_type(AddressSpace::Generic);

    // Construct the return type of the `get_info` method. Depending on the C ABI this is either the
    // `MunAssemblyInfo` struct or void. On windows the return argument is passed back to the caller
    // through a pointer to the return type as the first argument. e.g.:
    // On Windows:
    // ```c
    // void get_info(MunModuleInfo* result) {...}
    // ```
    // Whereas on other platforms the signature of the `get_info` function is:
    // ```c
    // MunModuleInfo get_info() { ... }
    // ```
    let get_symbols_type = if target.options.is_like_windows {
        Value::<fn(*mut ir::AssemblyInfo)>::get_ir_type(context.type_context)
    } else {
        Value::<fn() -> ir::AssemblyInfo>::get_ir_type(context.type_context)
    };

    let get_symbols_fn = context
        .module
        .add_function("get_info", get_symbols_type, Some(Linkage::DLLExport));

    if target.options.is_like_windows {
        get_symbols_fn.add_attribute(
            inkwell::attributes::AttributeLoc::Param(0),
            context
                .context
                .create_enum_attribute(Attribute::get_named_enum_kind_id("sret"), 1),
        );
    }

    let builder = db.context().create_builder();
    let body_ir = db.context().append_basic_block(&get_symbols_fn, "body");
    builder.position_at_end(&body_ir);

    // Get a pointer to the IR value that will hold the return value. Again this differs depending
    // on the C ABI.
    let result_ptr = if target.options.is_like_windows {
        get_symbols_fn
            .get_nth_param(0)
            .unwrap()
            .into_pointer_value()
    } else {
        builder.build_alloca(
            Value::<ir::AssemblyInfo>::get_ir_type(context.type_context),
            "",
        )
    };

    // Get access to the structs internals
    let symbols_addr = unsafe { builder.build_struct_gep(result_ptr, 0, "symbols") };
    let dispatch_table_addr = unsafe { builder.build_struct_gep(result_ptr, 1, "dispatch_table") };
    let dependencies_addr = unsafe { builder.build_struct_gep(result_ptr, 2, "dependencies") };
    let num_dependencies_addr =
        unsafe { builder.build_struct_gep(result_ptr, 3, "num_dependencies") };

    // Assign the struct values one by one.
    builder.build_store(symbols_addr, module_info.as_value(context).value);
    builder.build_store(dispatch_table_addr, dispatch_table.as_value(context).value);
    builder.build_store(
        dependencies_addr,
        str_type.ptr_type(AddressSpace::Generic).const_null(),
    );
    builder.build_store(
        num_dependencies_addr,
        context.context.i32_type().const_int(0 as u64, false),
    );

    // Construct the return statement of the function.
    if target.options.is_like_windows {
        builder.build_return(None);
    } else {
        builder.build_return(Some(&builder.build_load(result_ptr, "")));
    }

    // Run the function optimizer on the generated function
    function::create_pass_manager(&context.module, db.optimization_lvl()).run_on(&get_symbols_fn);
}

/// Generates a method `void set_allocator_handle(void*)` that stores the argument into the global
/// `allocatorHandle`. This global is used internally to reference the allocator used by this
/// munlib.
fn gen_set_allocator_handle_fn(db: &impl IrDatabase, context: &IrValueContext) {
    let set_allocator_handle_fn = context.module.add_function(
        "set_allocator_handle",
        Value::<fn(*const u8)>::get_ir_type(context.type_context),
        Some(Linkage::DLLExport),
    );

    let builder = db.context().create_builder();
    let body_ir = db
        .context()
        .append_basic_block(&set_allocator_handle_fn, "body");
    builder.position_at_end(&body_ir);

    if let Some(allocator_handle_global) = context.module.get_global("allocatorHandle") {
        builder.build_store(
            allocator_handle_global.as_pointer_value(),
            set_allocator_handle_fn.get_nth_param(0).unwrap(),
        );
    }

    builder.build_return(None);
}
hexsha: 22aed1db16d6fd53b23e1738e52b87cc649d0265 | size: 5,756 | avg_line_length: 31.801105 | max_line_length: 95 | alphanum_fraction: 0.510598

//! FIXME: write short doc here

use std::mem;

use ra_parser::{ParseError, TreeSink};

use crate::{
    parsing::Token,
    syntax_node::GreenNode,
    SmolStr, SyntaxError,
    SyntaxKind::{self, *},
    SyntaxTreeBuilder, TextRange, TextSize,
};

/// Bridges the parser with our specific syntax tree representation.
///
/// `TextTreeSink` also handles attachment of trivia (whitespace) to nodes.
pub(crate) struct TextTreeSink<'a> {
    text: &'a str,
    tokens: &'a [Token],
    text_pos: TextSize,
    token_pos: usize,
    state: State,
    inner: SyntaxTreeBuilder,
}

enum State {
    PendingStart,
    Normal,
    PendingFinish,
}

impl<'a> TreeSink for TextTreeSink<'a> {
    fn token(&mut self, kind: SyntaxKind, n_tokens: u8) {
        match mem::replace(&mut self.state, State::Normal) {
            State::PendingStart => unreachable!(),
            State::PendingFinish => self.inner.finish_node(),
            State::Normal => (),
        }
        self.eat_trivias();
        let n_tokens = n_tokens as usize;
        let len = self.tokens[self.token_pos..self.token_pos + n_tokens]
            .iter()
            .map(|it| it.len)
            .sum::<TextSize>();
        self.do_token(kind, len, n_tokens);
    }

    fn start_node(&mut self, kind: SyntaxKind) {
        match mem::replace(&mut self.state, State::Normal) {
            State::PendingStart => {
                self.inner.start_node(kind);
                // No need to attach trivias to previous node: there is no
                // previous node.
                return;
            }
            State::PendingFinish => self.inner.finish_node(),
            State::Normal => (),
        }

        let n_trivias =
            self.tokens[self.token_pos..].iter().take_while(|it| it.kind.is_trivia()).count();
        let leading_trivias = &self.tokens[self.token_pos..self.token_pos + n_trivias];
        let mut trivia_end =
            self.text_pos + leading_trivias.iter().map(|it| it.len).sum::<TextSize>();

        let n_attached_trivias = {
            let leading_trivias = leading_trivias.iter().rev().map(|it| {
                let next_end = trivia_end - it.len;
                let range = TextRange::new(next_end, trivia_end);
                trivia_end = next_end;
                (it.kind, &self.text[range])
            });
            n_attached_trivias(kind, leading_trivias)
        };
        self.eat_n_trivias(n_trivias - n_attached_trivias);
        self.inner.start_node(kind);
        self.eat_n_trivias(n_attached_trivias);
    }

    fn finish_node(&mut self) {
        match mem::replace(&mut self.state, State::PendingFinish) {
            State::PendingStart => unreachable!(),
            State::PendingFinish => self.inner.finish_node(),
            State::Normal => (),
        }
    }

    fn error(&mut self, error: ParseError) {
        self.inner.error(error, self.text_pos)
    }
}

impl<'a> TextTreeSink<'a> {
    pub(super) fn new(text: &'a str, tokens: &'a [Token]) -> Self {
        Self {
            text,
            tokens,
            text_pos: 0.into(),
            token_pos: 0,
            state: State::PendingStart,
            inner: SyntaxTreeBuilder::default(),
        }
    }

    pub(super) fn finish(mut self) -> (GreenNode, Vec<SyntaxError>) {
        match mem::replace(&mut self.state, State::Normal) {
            State::PendingFinish => {
                self.eat_trivias();
                self.inner.finish_node()
            }
            State::PendingStart | State::Normal => unreachable!(),
        }

        self.inner.finish_raw()
    }

    fn eat_trivias(&mut self) {
        while let Some(&token) = self.tokens.get(self.token_pos) {
            if !token.kind.is_trivia() {
                break;
            }
            self.do_token(token.kind, token.len, 1);
        }
    }

    fn eat_n_trivias(&mut self, n: usize) {
        for _ in 0..n {
            let token = self.tokens[self.token_pos];
            assert!(token.kind.is_trivia());
            self.do_token(token.kind, token.len, 1);
        }
    }

    fn do_token(&mut self, kind: SyntaxKind, len: TextSize, n_tokens: usize) {
        let range = TextRange::at(self.text_pos, len);
        let text: SmolStr = self.text[range].into();
        self.text_pos += len;
        self.token_pos += n_tokens;
        self.inner.token(kind, text);
    }
}

fn n_attached_trivias<'a>(
    kind: SyntaxKind,
    trivias: impl Iterator<Item = (SyntaxKind, &'a str)>,
) -> usize {
    match kind {
        MACRO_CALL | CONST_DEF | TYPE_ALIAS_DEF | STRUCT_DEF | ENUM_DEF | ENUM_VARIANT | FN_DEF
        | TRAIT_DEF | MODULE | RECORD_FIELD_DEF | STATIC_DEF => {
            let mut res = 0;
            let mut trivias = trivias.enumerate().peekable();

            while let Some((i, (kind, text))) = trivias.next() {
                match kind {
                    WHITESPACE => {
                        if text.contains("\n\n") {
                            // we check whether the next token is a doc-comment
                            // and skip the whitespace in this case
                            if let Some((peek_kind, peek_text)) =
                                trivias.peek().map(|(_, pair)| pair)
                            {
                                if *peek_kind == COMMENT && peek_text.starts_with("///") {
                                    continue;
                                }
                            }
                            break;
                        }
                    }
                    COMMENT => {
                        res = i + 1;
                    }
                    _ => (),
                }
            }
            res
        }
        _ => 0,
    }
}
hexsha: 1c8816ba91d770324fe821cd8de615563dbc761d | size: 8,528 | avg_line_length: 28.713805 | max_line_length: 96 | alphanum_fraction: 0.489447

use crate::dependency_collector::{DependencyDescriptor, DependencyKind};
use crate::hoist::{Collect, Import};
use crate::id;
use crate::utils::{IdentId, SourceLocation};
use data_encoding::{BASE64, HEXLOWER};
use std::collections::HashSet;
use std::path::{Path, PathBuf};
use swc_atoms::JsWord;
use swc_common::{Mark, Span, DUMMY_SP};
use swc_ecmascript::ast::*;
use swc_ecmascript::visit::{Fold, FoldWith, VisitWith};

pub fn inline_fs<'a>(
    filename: &str,
    source_map: swc_common::sync::Lrc<swc_common::SourceMap>,
    decls: HashSet<IdentId>,
    global_mark: Mark,
    project_root: &'a str,
    deps: &'a mut Vec<DependencyDescriptor>,
) -> impl Fold + 'a {
    InlineFS {
        filename: Path::new(filename).to_path_buf(),
        collect: Collect::new(
            source_map,
            decls,
            Mark::fresh(Mark::root()),
            global_mark,
            false,
        ),
        global_mark,
        project_root,
        deps,
    }
}

struct InlineFS<'a> {
    filename: PathBuf,
    collect: Collect,
    global_mark: Mark,
    project_root: &'a str,
    deps: &'a mut Vec<DependencyDescriptor>,
}

impl<'a> Fold for InlineFS<'a> {
    fn fold_module(&mut self, node: Module) -> Module {
        node.visit_with(&mut self.collect);
        node.fold_children_with(self)
    }

    fn fold_expr(&mut self, node: Expr) -> Expr {
        if let Expr::Call(call) = &node {
            if let Callee::Expr(expr) = &call.callee {
                if let Some((source, specifier)) = self.match_module_reference(expr) {
                    if &source == "fs" && &specifier == "readFileSync" {
                        if let Some(arg) = call.args.get(0) {
                            if let Some(res) =
                                self.evaluate_fs_arg(&*arg.expr, call.args.get(1), call.span)
                            {
                                return res;
                            }
                        }
                    }
                }
            }
        }

        node.fold_children_with(self)
    }
}

impl<'a> InlineFS<'a> {
    fn match_module_reference(&self, node: &Expr) -> Option<(JsWord, JsWord)> {
        match node {
            Expr::Ident(ident) => {
                if let Some(Import {
                    source, specifier, ..
                }) = self.collect.imports.get(&id!(ident))
                {
                    return Some((source.clone(), specifier.clone()));
                }
            }
            Expr::Member(member) => {
                let prop = match &member.prop {
                    MemberProp::Ident(ident) => ident.sym.clone(),
                    MemberProp::Computed(ComputedPropName { expr, .. }) => {
                        if let Expr::Lit(Lit::Str(str_)) = &**expr {
                            str_.value.clone()
                        } else {
                            return None;
                        }
                    }
                    _ => return None,
                };

                if let Some(source) = self.collect.match_require(&*member.obj) {
                    return Some((source, prop));
                }

                if let Expr::Ident(ident) = &*member.obj {
                    if let Some(Import {
                        source, specifier, ..
                    }) = self.collect.imports.get(&id!(ident))
                    {
                        if specifier == "default" || specifier == "*" {
                            return Some((source.clone(), prop));
                        }
                    }
                }
            }
            _ => {}
        }

        None
    }

    fn evaluate_fs_arg(
        &mut self,
        node: &Expr,
        encoding: Option<&ExprOrSpread>,
        span: Span,
    ) -> Option<Expr> {
        let mut evaluator = Evaluator { inline: self };
        let res = node.clone().fold_with(&mut evaluator);
        match res {
            Expr::Lit(Lit::Str(str_)) => {
                // Ignore if outside the project root
                let path = match dunce::canonicalize(Path::new(&str_.value.to_string())) {
                    Ok(path) => path,
                    Err(_err) => return None,
                };
                if !path.starts_with(&self.project_root) {
                    return None;
                }

                let encoding = match encoding {
                    Some(e) => match &*e.expr {
                        Expr::Lit(Lit::Str(str_)) => &str_.value,
                        _ => "buffer",
                    },
                    None => "buffer",
                };

                // TODO: this should probably happen in JS so we use Parcel's file system
                // rather than only the real FS. Will need when we convert to WASM.
                let contents = match encoding {
                    "base64" | "buffer" => {
                        if let Ok(contents) = std::fs::read(&path) {
                            BASE64.encode(&contents)
                        } else {
                            return None;
                        }
                    }
                    "hex" => {
                        if let Ok(contents) = std::fs::read(&path) {
                            HEXLOWER.encode(&contents)
                        } else {
                            return None;
                        }
                    }
                    "utf8" | "utf-8" => {
                        if let Ok(contents) = std::fs::read_to_string(&path) {
                            contents
                        } else {
                            return None;
                        }
                    }
                    _ => return None,
                };

                let contents = Expr::Lit(Lit::Str(contents.into()));

                // Add a file dependency so the cache is invalidated when this file changes.
                self.deps.push(DependencyDescriptor {
                    kind: DependencyKind::File,
                    loc: SourceLocation::from(&self.collect.source_map, span),
                    specifier: path.to_str().unwrap().into(),
                    attributes: None,
                    is_optional: false,
                    is_helper: false,
                    source_type: None,
                    placeholder: None,
                });

                // If buffer, wrap in Buffer.from(base64String, 'base64')
                if encoding == "buffer" {
                    Some(Expr::Call(CallExpr {
                        callee: Callee::Expr(Box::new(Expr::Member(MemberExpr {
                            obj: Box::new(Expr::Ident(Ident::new(
                                "Buffer".into(),
                                DUMMY_SP.apply_mark(self.global_mark),
                            ))),
                            prop: MemberProp::Ident(Ident::new("from".into(), DUMMY_SP)),
                            span: DUMMY_SP,
                        }))),
                        args: vec![
                            ExprOrSpread {
                                expr: Box::new(contents),
                                spread: None,
                            },
                            ExprOrSpread {
                                expr: Box::new(Expr::Lit(Lit::Str("base64".into()))),
                                spread: None,
                            },
                        ],
                        span: DUMMY_SP,
                        type_args: None,
                    }))
                } else {
                    Some(contents)
                }
            }
            _ => None,
        }
    }
}

struct Evaluator<'a> {
    inline: &'a InlineFS<'a>,
}

impl<'a> Fold for Evaluator<'a> {
    fn fold_expr(&mut self, node: Expr) -> Expr {
        let node = node.fold_children_with(self);
        match &node {
            Expr::Ident(ident) => match ident.sym.to_string().as_str() {
                "__dirname" => Expr::Lit(Lit::Str(
                    self.inline
                        .filename
                        .parent()
                        .unwrap()
                        .to_str()
                        .unwrap()
                        .into(),
                )),
                "__filename" => Expr::Lit(Lit::Str(self.inline.filename.to_str().unwrap().into())),
                _ => node,
            },
            Expr::Bin(bin) => match bin.op {
                BinaryOp::Add => {
                    let left = match &*bin.left {
                        Expr::Lit(Lit::Str(str_)) => str_.value.clone(),
                        _ => return node,
                    };
                    let right = match &*bin.right {
                        Expr::Lit(Lit::Str(str_)) => str_.value.clone(),
                        _ => return node,
                    };
                    Expr::Lit(Lit::Str(format!("{}{}", left, right).into()))
                }
                _ => node,
            },
            Expr::Call(call) => {
                let callee = match &call.callee {
                    Callee::Expr(expr) => &*expr,
                    _ => return node,
                };

                if let Some((source, specifier)) = self.inline.match_module_reference(callee) {
                    match (source.to_string().as_str(), specifier.to_string().as_str()) {
                        ("path", "join") => {
                            let mut path = PathBuf::new();
                            for arg in call.args.clone() {
                                let s = match &*arg.expr {
                                    Expr::Lit(Lit::Str(str_)) => str_.value.clone(),
                                    _ => return node,
                                };

                                if path.as_os_str().is_empty() {
                                    path.push(s.to_string());
                                } else {
                                    let s = s.to_string();
                                    let mut p = Path::new(s.as_str());

                                    // Node's path.join ignores separators at the start of path components.
                                    // Rust's does not, so we need to strip them.
                                    if let Ok(stripped) = p.strip_prefix("/") {
                                        p = stripped;
                                    }
                                    path.push(p);
                                }
                            }

                            return Expr::Lit(Lit::Str(path.to_str().unwrap().into()));
                        }
                        _ => return node,
                    }
                }

                node
            }
            _ => node,
        }
    }
}
hexsha: 285d8c23648d3802a35594c8144cab2fa30f2a22 | size: 24,774 | avg_line_length: 43.693122 | max_line_length: 100 | alphanum_fraction: 0.342173

use crate::{
    gui::{make_dropdown_list_option, BuildContext, EditorUiNode, Ui, UiMessage, UiNode},
    make_relative_path,
    scene::commands::{
        camera::{
            SetCameraPreviewCommand, SetColorGradingEnabledCommand, SetColorGradingLutCommand,
            SetExposureCommand, SetFovCommand, SetZFarCommand, SetZNearCommand,
        },
        SceneCommand,
    },
    send_sync_message,
    sidebar::{
        make_bool_input_field, make_f32_input_field, make_section, make_text_mark, COLUMN_WIDTH,
        ROW_HEIGHT,
    },
    Message,
};
use rg3d::{
    core::{futures::executor::block_on, pool::Handle, scope_profile},
    engine::resource_manager::{ResourceManager, TextureImportOptions},
    gui::{
        dropdown_list::DropdownListBuilder,
        grid::{Column, GridBuilder, Row},
        image::ImageBuilder,
        message::{
            CheckBoxMessage, DropdownListMessage, ImageMessage, MessageDirection,
            NumericUpDownMessage, UiMessageData, WidgetMessage,
        },
        stack_panel::StackPanelBuilder,
        widget::WidgetBuilder,
    },
    resource::texture::CompressionOptions,
    scene::{
        camera::{ColorGradingLut, Exposure},
        node::Node,
    },
    utils::into_gui_texture,
};
use std::sync::mpsc::Sender;

pub struct CameraSection {
    pub section: Handle<UiNode>,
    fov: Handle<UiNode>,
    z_near: Handle<UiNode>,
    z_far: Handle<UiNode>,
    sender: Sender<Message>,
    preview: Handle<UiNode>,
    exposure_kind: Handle<UiNode>,
    exposure_value: Handle<UiNode>,
    key_value: Handle<UiNode>,
    min_luminance: Handle<UiNode>,
    max_luminance: Handle<UiNode>,
    color_grading_lut: Handle<UiNode>,
    use_color_grading: Handle<UiNode>,
    manual_exposure_section: Handle<UiNode>,
    auto_exposure_section: Handle<UiNode>,
}

impl CameraSection {
    pub fn new(ctx: &mut BuildContext, sender: Sender<Message>) -> Self {
        let fov;
        let z_near;
        let z_far;
        let preview;
        let exposure_kind;
        let exposure_value;
        let key_value;
        let min_luminance;
        let max_luminance;
        let manual_exposure_section;
        let auto_exposure_section;
        let color_grading_lut;
        let use_color_grading;
        let section = make_section(
            "Camera Properties",
            StackPanelBuilder::new(
                WidgetBuilder::new()
                    .with_child(
                        GridBuilder::new(
                            WidgetBuilder::new()
                                .with_child(make_text_mark(ctx, "FOV", 0))
                                .with_child({
                                    fov = make_f32_input_field(
                                        ctx,
                                        0,
                                        0.0,
                                        std::f32::consts::PI,
                                        0.01,
                                    );
                                    fov
                                })
                                .with_child(make_text_mark(ctx, "Z Near", 1))
                                .with_child({
                                    z_near = make_f32_input_field(ctx, 1, 0.0, f32::MAX, 0.01);
                                    z_near
                                })
                                .with_child(make_text_mark(ctx, "Z Far", 2))
                                .with_child({
                                    z_far = make_f32_input_field(ctx, 2, 0.0, f32::MAX, 1.0);
                                    z_far
                                })
                                .with_child(make_text_mark(ctx, "Preview", 3))
                                .with_child({
                                    preview = make_bool_input_field(ctx, 3);
                                    preview
                                })
                                .with_child(make_text_mark(ctx, "Use Color Grading", 4))
                                .with_child({
                                    use_color_grading = make_bool_input_field(ctx, 4);
                                    use_color_grading
                                })
                                .with_child(make_text_mark(ctx, "Color Grading LUT", 5))
                                .with_child({
                                    color_grading_lut = ImageBuilder::new(
                                        WidgetBuilder::new()
                                            .on_row(5)
                                            .on_column(1)
                                            .with_allow_drop(true),
                                    )
                                    .build(ctx);
                                    color_grading_lut
                                })
                                .with_child(make_text_mark(ctx, "Exposure Kind", 6))
                                .with_child({
                                    exposure_kind = DropdownListBuilder::new(
                                        WidgetBuilder::new().on_row(6).on_column(1),
                                    )
                                    .with_close_on_selection(true)
                                    .with_items(vec![
                                        make_dropdown_list_option(ctx, "Auto"),
                                        make_dropdown_list_option(ctx, "Manual"),
                                    ])
                                    .build(ctx);
                                    exposure_kind
                                }),
                        )
                        .add_column(Column::strict(COLUMN_WIDTH))
                        .add_column(Column::stretch())
                        .add_row(Row::strict(ROW_HEIGHT))
                        .add_row(Row::strict(ROW_HEIGHT))
                        .add_row(Row::strict(ROW_HEIGHT))
                        .add_row(Row::strict(ROW_HEIGHT))
                        .add_row(Row::strict(ROW_HEIGHT))
                        .add_row(Row::strict(ROW_HEIGHT))
                        .add_row(Row::strict(ROW_HEIGHT))
                        .build(ctx),
                    )
                    .with_child(
                        StackPanelBuilder::new(
                            WidgetBuilder::new()
                                .with_child({
                                    manual_exposure_section = make_section(
                                        "Manual Exposure",
                                        GridBuilder::new(
                                            WidgetBuilder::new()
                                                .with_child(make_text_mark(
                                                    ctx,
                                                    "Exposure Value",
                                                    0,
                                                ))
                                                .with_child({
                                                    exposure_value = make_f32_input_field(
                                                        ctx,
                                                        0,
                                                        0.0,
                                                        f32::MAX,
                                                        1.0,
                                                    );
                                                    exposure_value
                                                }),
                                        )
                                        .add_column(Column::strict(COLUMN_WIDTH))
                                        .add_column(Column::stretch())
                                        .add_row(Row::strict(ROW_HEIGHT))
                                        .build(ctx),
                                        ctx,
                                    );
                                    manual_exposure_section
                                })
                                .with_child({
                                    auto_exposure_section = make_section(
                                        "Auto Exposure",
                                        GridBuilder::new(
                                            WidgetBuilder::new()
                                                .with_child(make_text_mark(ctx, "Key Value", 0))
                                                .with_child({
                                                    key_value = make_f32_input_field(
                                                        ctx,
                                                        0,
                                                        0.001,
                                                        f32::MAX,
                                                        1.0,
                                                    );
                                                    key_value
                                                })
                                                .with_child(make_text_mark(
                                                    ctx,
                                                    "Min Luminance",
                                                    1,
                                                ))
                                                .with_child({
                                                    min_luminance = make_f32_input_field(
                                                        ctx,
                                                        1,
                                                        0.001,
                                                        f32::MAX,
                                                        1.0,
                                                    );
                                                    min_luminance
                                                })
                                                .with_child(make_text_mark(
                                                    ctx,
                                                    "Max Luminance",
                                                    2,
                                                ))
                                                .with_child({
                                                    max_luminance = make_f32_input_field(
                                                        ctx,
                                                        2,
                                                        0.0,
                                                        f32::MAX,
                                                        1.0,
                                                    );
                                                    max_luminance
                                                }),
                                        )
                                        .add_column(Column::strict(COLUMN_WIDTH))
                                        .add_column(Column::stretch())
                                        .add_row(Row::strict(ROW_HEIGHT))
                                        .add_row(Row::strict(ROW_HEIGHT))
                                        .add_row(Row::strict(ROW_HEIGHT))
                                        .build(ctx),
                                        ctx,
                                    );
                                    auto_exposure_section
                                }),
                        )
                        .build(ctx),
                    ),
            )
            .build(ctx),
            ctx,
        );

        Self {
            section,
            fov,
            z_near,
            z_far,
            sender,
            preview,
            exposure_kind,
            exposure_value,
            key_value,
            min_luminance,
            max_luminance,
            auto_exposure_section,
            manual_exposure_section,
            color_grading_lut,
            use_color_grading,
        }
    }

    pub fn sync_to_model(&mut self, node: &Node, ui: &mut Ui) {
        send_sync_message(
            ui,
            WidgetMessage::visibility(self.section, MessageDirection::ToWidget, node.is_camera()),
        );

        if let Node::Camera(camera) = node {
            send_sync_message(
                ui,
                NumericUpDownMessage::value(self.fov, MessageDirection::ToWidget, camera.fov()),
            );
            send_sync_message(
                ui,
                NumericUpDownMessage::value(
                    self.z_near,
                    MessageDirection::ToWidget,
                    camera.z_near(),
                ),
            );
            send_sync_message(
                ui,
                NumericUpDownMessage::value(self.z_far, MessageDirection::ToWidget, camera.z_far()),
            );
            send_sync_message(
                ui,
                CheckBoxMessage::checked(
                    self.preview,
                    MessageDirection::ToWidget,
                    Some(camera.is_enabled()),
                ),
            );
            send_sync_message(
                ui,
                CheckBoxMessage::checked(
                    self.use_color_grading,
                    MessageDirection::ToWidget,
                    Some(camera.color_grading_enabled()),
                ),
            );
            send_sync_message(
                ui,
                ImageMessage::texture(
                    self.color_grading_lut,
                    MessageDirection::ToWidget,
                    camera
                        .color_grading_lut_ref()
                        .map(|lut| into_gui_texture(lut.unwrapped_lut().clone())),
                ),
            );

            match camera.exposure() {
                Exposure::Auto {
                    key_value,
                    min_luminance,
                    max_luminance,
                } => {
                    send_sync_message(
                        ui,
                        NumericUpDownMessage::value(
                            self.key_value,
                            MessageDirection::ToWidget,
                            key_value,
                        ),
                    );
                    send_sync_message(
                        ui,
                        NumericUpDownMessage::value(
                            self.min_luminance,
                            MessageDirection::ToWidget,
                            min_luminance,
                        ),
                    );
                    send_sync_message(
                        ui,
                        NumericUpDownMessage::value(
                            self.max_luminance,
                            MessageDirection::ToWidget,
                            max_luminance,
                        ),
                    );
                    send_sync_message(
                        ui,
                        DropdownListMessage::selection(
                            self.exposure_kind,
                            MessageDirection::ToWidget,
                            Some(0),
                        ),
                    );
                    send_sync_message(
                        ui,
                        WidgetMessage::visibility(
                            self.auto_exposure_section,
                            MessageDirection::ToWidget,
                            true,
                        ),
                    );
                    send_sync_message(
                        ui,
                        WidgetMessage::visibility(
                            self.manual_exposure_section,
                            MessageDirection::ToWidget,
                            false,
                        ),
                    );
                }
                Exposure::Manual(value) => {
                    send_sync_message(
                        ui,
                        NumericUpDownMessage::value(
                            self.exposure_value,
                            MessageDirection::ToWidget,
                            value,
                        ),
                    );
                    send_sync_message(
                        ui,
                        DropdownListMessage::selection(
                            self.exposure_kind,
                            MessageDirection::ToWidget,
                            Some(1),
                        ),
                    );
                    send_sync_message(
                        ui,
                        WidgetMessage::visibility(
                            self.auto_exposure_section,
                            MessageDirection::ToWidget,
                            false,
                        ),
                    );
                    send_sync_message(
                        ui,
                        WidgetMessage::visibility(
                            self.manual_exposure_section,
                            MessageDirection::ToWidget,
                            true,
                        ),
                    );
                }
            }
        }
    }

    pub fn handle_ui_message(
        &mut self,
        message: &UiMessage,
        node: &Node,
        handle: Handle<Node>,
        ui: &Ui,
        resource_manager: ResourceManager,
    ) {
        scope_profile!();

        if let Node::Camera(camera) = node {
            match *message.data() {
                UiMessageData::NumericUpDown(NumericUpDownMessage::Value(value)) => {
                    if message.destination() == self.fov && camera.fov().ne(&value) {
                        self.sender
                            .send(Message::DoSceneCommand(SceneCommand::SetFov(
                                SetFovCommand::new(handle, value),
                            )))
                            .unwrap();
                    } else if message.destination() == self.z_far && camera.z_far().ne(&value) {
                        self.sender
                            .send(Message::DoSceneCommand(SceneCommand::SetZFar(
                                SetZFarCommand::new(handle, value),
                            )))
                            .unwrap();
                    } else if message.destination() == self.z_near && camera.z_near().ne(&value) {
                        self.sender
                            .send(Message::DoSceneCommand(SceneCommand::SetZNear(
                                SetZNearCommand::new(handle, value),
                            )))
                            .unwrap();
                    } else if message.destination() == self.exposure_value {
                        self.sender
                            .send(Message::DoSceneCommand(SceneCommand::SetExposure(
                                SetExposureCommand::new(handle, Exposure::Manual(value)),
                            )))
                            .unwrap();
                    } else if message.destination() == self.key_value {
                        let mut current_auto_exposure = camera.exposure().clone();
                        if let Exposure::Auto {
                            ref mut key_value, ..
                        } = current_auto_exposure
                        {
                            *key_value = value;
                        }

                        self.sender
                            .send(Message::DoSceneCommand(SceneCommand::SetExposure(
                                SetExposureCommand::new(handle, current_auto_exposure),
                            )))
                            .unwrap();
                    } else if message.destination() == self.min_luminance {
                        let mut current_auto_exposure = camera.exposure().clone();
                        if let Exposure::Auto {
                            ref mut min_luminance,
                            ..
                        } = current_auto_exposure
                        {
                            *min_luminance = value;
                        }

                        self.sender
                            .send(Message::DoSceneCommand(SceneCommand::SetExposure(
                                SetExposureCommand::new(handle, current_auto_exposure),
                            )))
                            .unwrap();
                    } else if message.destination() == self.max_luminance {
                        let mut current_auto_exposure = camera.exposure().clone();
                        if let Exposure::Auto {
                            ref mut max_luminance,
                            ..
                        } = current_auto_exposure
                        {
                            *max_luminance = value;
                        }

                        self.sender
                            .send(Message::DoSceneCommand(SceneCommand::SetExposure(
                                SetExposureCommand::new(handle, current_auto_exposure),
                            )))
                            .unwrap();
                    }
                }
                UiMessageData::CheckBox(CheckBoxMessage::Check(value)) => {
                    if message.destination() == self.preview
                        && camera.is_enabled().ne(&value.unwrap())
                    {
                        self.sender
                            .send(Message::DoSceneCommand(SceneCommand::SetCameraActive(
                                SetCameraPreviewCommand::new(handle, value.unwrap_or(false)),
                            )))
                            .unwrap();
                    } else if message.destination() == self.use_color_grading {
                        self.sender
                            .send(Message::DoSceneCommand(
                                SceneCommand::SetColorGradingEnabled(
                                    SetColorGradingEnabledCommand::new(
                                        handle,
                                        value.unwrap_or_default(),
                                    ),
                                ),
                            ))
                            .unwrap();
                    }
                }
                UiMessageData::DropdownList(DropdownListMessage::SelectionChanged(Some(index))) => {
                    if message.destination() == self.exposure_kind {
                        let exposure = match index {
                            0 => Exposure::default(),
                            1 => Exposure::Manual(1.0),
                            _ => unreachable!(),
                        };

                        self.sender
                            .send(Message::DoSceneCommand(SceneCommand::SetExposure(
                                SetExposureCommand::new(handle, exposure),
                            )))
                            .unwrap();
                    }
                }
                UiMessageData::Widget(WidgetMessage::Drop(dropped)) => {
                    if message.destination() == self.color_grading_lut {
                        if let UiNode::User(EditorUiNode::AssetItem(item)) = ui.node(dropped) {
                            let relative_path = make_relative_path(&item.path);

                            match block_on(ColorGradingLut::new(
                                resource_manager.request_texture(
                                    relative_path,
                                    Some(
                                        TextureImportOptions::default()
                                            .with_compression(CompressionOptions::NoCompression),
                                    ),
                                ),
                            )) {
                                Ok(lut) => {
                                    self.sender
                                        .send(Message::DoSceneCommand(
                                            SceneCommand::SetColorGradingLut(
                                                SetColorGradingLutCommand::new(handle, Some(lut)),
                                            ),
                                        ))
                                        .unwrap();
                                }
                                Err(e) => self
                                    .sender
                                    .send(Message::Log(format!(
                                        "Failed to load color grading look-up texture. Reason: {}",
                                        e
                                    )))
                                    .unwrap(),
                            }
                        }
                    }
                }
                _ => {}
            }
        }
    }
}
hexsha: 1c265127ba3d57f9a441e7c2c518c9efd282243b | size: 1,777 | avg_line_length: 71.08 | max_line_length: 364 | alphanum_fraction: 0.698368

// WARNING: This file was autogenerated by jni-bindgen. Any changes to this file may be lost!!!

#[cfg(any(feature = "all", feature = "android-graphics-PorterDuffXfermode"))]
__jni_bindgen! {
    /// public class [PorterDuffXfermode](https://developer.android.com/reference/android/graphics/PorterDuffXfermode.html)
    ///
    /// Required feature: android-graphics-PorterDuffXfermode
    public class PorterDuffXfermode ("android/graphics/PorterDuffXfermode") extends crate::android::graphics::Xfermode {

        /// [PorterDuffXfermode](https://developer.android.com/reference/android/graphics/PorterDuffXfermode.html#PorterDuffXfermode(android.graphics.PorterDuff.Mode))
        ///
        /// Required features: "android-graphics-PorterDuff_Mode"
        #[cfg(any(feature = "all", all(feature = "android-graphics-PorterDuff_Mode")))]
        pub fn new<'env>(__jni_env: &'env __jni_bindgen::Env, arg0: impl __jni_bindgen::std::convert::Into<__jni_bindgen::std::option::Option<&'env crate::android::graphics::PorterDuff_Mode>>) -> __jni_bindgen::std::result::Result<__jni_bindgen::Local<'env, crate::android::graphics::PorterDuffXfermode>, __jni_bindgen::Local<'env, crate::java::lang::Throwable>> {
            // class.path == "android/graphics/PorterDuffXfermode", java.flags == PUBLIC, .name == "<init>", .descriptor == "(Landroid/graphics/PorterDuff$Mode;)V"
            unsafe {
                let __jni_args = [__jni_bindgen::AsJValue::as_jvalue(&arg0.into())];
                let (__jni_class, __jni_method) = __jni_env.require_class_method("android/graphics/PorterDuffXfermode\0", "<init>\0", "(Landroid/graphics/PorterDuff$Mode;)V\0");
                __jni_env.new_object_a(__jni_class, __jni_method, __jni_args.as_ptr())
            }
        }
    }
}
hexsha: 299ed156a5d2706da9bbda653f2c2a49226e25f8 | size: 3,170 | avg_line_length: 20.855263 | max_line_length: 63 | alphanum_fraction: 0.494322

//! Test for `boxed` mod.

use core::any::Any;
use core::clone::Clone;
use core::convert::TryInto;
use core::ops::Deref;
use core::result::Result::{Err, Ok};

use std::boxed::Box;

#[test]
fn test_owned_clone() {
    let a = Box::new(5);
    let b: Box<i32> = a.clone();
    assert!(a == b);
}

#[derive(PartialEq, Eq)]
struct Test;

#[test]
fn any_move() {
    let a = Box::new(8) as Box<dyn Any>;
    let b = Box::new(Test) as Box<dyn Any>;

    match a.downcast::<i32>() {
        Ok(a) => {
            assert!(a == Box::new(8));
        }
        Err(..) => panic!(),
    }
    match b.downcast::<Test>() {
        Ok(a) => {
            assert!(a == Box::new(Test));
        }
        Err(..) => panic!(),
    }

    let a = Box::new(8) as Box<dyn Any>;
    let b = Box::new(Test) as Box<dyn Any>;

    assert!(a.downcast::<Box<Test>>().is_err());
    assert!(b.downcast::<Box<i32>>().is_err());
}

#[test]
fn test_show() {
    let a = Box::new(8) as Box<dyn Any>;
    let b = Box::new(Test) as Box<dyn Any>;
    let a_str = format!("{a:?}");
    let b_str = format!("{b:?}");
    assert_eq!(a_str, "Any { .. }");
    assert_eq!(b_str, "Any { .. }");

    static EIGHT: usize = 8;
    static TEST: Test = Test;
    let a = &EIGHT as &dyn Any;
    let b = &TEST as &dyn Any;
    let s = format!("{a:?}");
    assert_eq!(s, "Any { .. }");
    let s = format!("{b:?}");
    assert_eq!(s, "Any { .. }");
}

#[test]
fn deref() {
    fn homura<T: Deref<Target = i32>>(_: T) {}
    homura(Box::new(765));
}

#[test]
fn raw_sized() {
    let x = Box::new(17);
    let p = Box::into_raw(x);
    unsafe {
        assert_eq!(17, *p);
        *p = 19;
        let y = Box::from_raw(p);
        assert_eq!(19, *y);
    }
}

#[test]
fn raw_trait() {
    trait Foo {
        fn get(&self) -> u32;
        fn set(&mut self, value: u32);
    }

    struct Bar(u32);

    impl Foo for Bar {
        fn get(&self) -> u32 {
            self.0
        }

        fn set(&mut self, value: u32) {
            self.0 = value;
        }
    }

    let x: Box<dyn Foo> = Box::new(Bar(17));
    let p = Box::into_raw(x);
    unsafe {
        assert_eq!(17, (*p).get());
        (*p).set(19);
        let y: Box<dyn Foo> = Box::from_raw(p);
        assert_eq!(19, y.get());
    }
}

#[test]
fn f64_slice() {
    let slice: &[f64] = &[-1.0, 0.0, 1.0, f64::INFINITY];
    let boxed: Box<[f64]> = Box::from(slice);
    assert_eq!(&*boxed, slice)
}

#[test]
fn i64_slice() {
    let slice: &[i64] = &[i64::MIN, -2, -1, 0, 1, 2, i64::MAX];
    let boxed: Box<[i64]> = Box::from(slice);
    assert_eq!(&*boxed, slice)
}

#[test]
fn str_slice() {
    let s = "Hello, world!";
    let boxed: Box<str> = Box::from(s);
    assert_eq!(&*boxed, s)
}

#[test]
fn boxed_slice_from_iter() {
    let iter = 0..100;
    let boxed: Box<[u32]> = iter.collect();
    assert_eq!(boxed.len(), 100);
    assert_eq!(boxed[7], 7);
}

#[test]
fn test_array_from_slice() {
    let v = vec![1, 2, 3];
    let r: Box<[u32]> = v.into_boxed_slice();

    let a: Result<Box<[u32; 3]>, _> = r.clone().try_into();
    assert!(a.is_ok());

    let a: Result<Box<[u32; 2]>, _> = r.clone().try_into();
    assert!(a.is_err());
}
e58c0c88cc46b42d5ee69206cf647102f975a2d8
4,185
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT use crate::Object; use glib::object::IsA; use glib::translate::*; use std::fmt; use std::mem; glib::wrapper! { #[doc(alias = "AtkTableCell")] pub struct TableCell(Interface<ffi::AtkTableCell, ffi::AtkTableCellIface>) @requires Object; match fn { type_ => || ffi::atk_table_cell_get_type(), } } impl TableCell { pub const NONE: Option<&'static TableCell> = None; } pub trait TableCellExt: 'static { #[doc(alias = "atk_table_cell_get_column_header_cells")] #[doc(alias = "get_column_header_cells")] fn column_header_cells(&self) -> Vec<Object>; #[doc(alias = "atk_table_cell_get_column_span")] #[doc(alias = "get_column_span")] fn column_span(&self) -> i32; #[doc(alias = "atk_table_cell_get_position")] #[doc(alias = "get_position")] fn position(&self) -> Option<(i32, i32)>; #[doc(alias = "atk_table_cell_get_row_column_span")] #[doc(alias = "get_row_column_span")] fn row_column_span(&self) -> Option<(i32, i32, i32, i32)>; #[doc(alias = "atk_table_cell_get_row_header_cells")] #[doc(alias = "get_row_header_cells")] fn row_header_cells(&self) -> Vec<Object>; #[doc(alias = "atk_table_cell_get_row_span")] #[doc(alias = "get_row_span")] fn row_span(&self) -> i32; #[doc(alias = "atk_table_cell_get_table")] #[doc(alias = "get_table")] fn table(&self) -> Option<Object>; } impl<O: IsA<TableCell>> TableCellExt for O { fn column_header_cells(&self) -> Vec<Object> { unsafe { FromGlibPtrContainer::from_glib_full(ffi::atk_table_cell_get_column_header_cells( self.as_ref().to_glib_none().0, )) } } fn column_span(&self) -> i32 { unsafe { ffi::atk_table_cell_get_column_span(self.as_ref().to_glib_none().0) } } fn position(&self) -> Option<(i32, i32)> { unsafe { let mut row = mem::MaybeUninit::uninit(); let mut column = mem::MaybeUninit::uninit(); let ret = from_glib(ffi::atk_table_cell_get_position( self.as_ref().to_glib_none().0, row.as_mut_ptr(), column.as_mut_ptr(), )); let row = row.assume_init(); let column = column.assume_init(); if ret { Some((row, column)) } else { None } } } fn row_column_span(&self) -> Option<(i32, i32, i32, i32)> { unsafe { let mut row = mem::MaybeUninit::uninit(); let mut column = mem::MaybeUninit::uninit(); let mut row_span = mem::MaybeUninit::uninit(); let mut column_span = mem::MaybeUninit::uninit(); let ret = from_glib(ffi::atk_table_cell_get_row_column_span( self.as_ref().to_glib_none().0, row.as_mut_ptr(), column.as_mut_ptr(), row_span.as_mut_ptr(), column_span.as_mut_ptr(), )); let row = row.assume_init(); let column = column.assume_init(); let row_span = row_span.assume_init(); let column_span = column_span.assume_init(); if ret { Some((row, column, row_span, column_span)) } else { None } } } fn row_header_cells(&self) -> Vec<Object> { unsafe { FromGlibPtrContainer::from_glib_full(ffi::atk_table_cell_get_row_header_cells( self.as_ref().to_glib_none().0, )) } } fn row_span(&self) -> i32 { unsafe { ffi::atk_table_cell_get_row_span(self.as_ref().to_glib_none().0) } } fn table(&self) -> Option<Object> { unsafe { from_glib_full(ffi::atk_table_cell_get_table( self.as_ref().to_glib_none().0, )) } } } impl fmt::Display for TableCell { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("TableCell") } }
30.547445
96
0.568459
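A hedged usage sketch for the generated `TableCellExt` trait above: `describe_cell` only exercises the accessors, and it assumes the caller already holds a `TableCell` object from a live accessibility tree (obtaining one is outside this binding).
fn describe_cell<O: IsA<TableCell>>(cell: &O) {
    if let Some((row, column)) = cell.position() {
        println!("cell at row {}, column {}", row, column);
    }
    println!(
        "spans {} row(s) over {} column(s); {} column header cell(s)",
        cell.row_span(),
        cell.column_span(),
        cell.column_header_cells().len()
    );
}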
fb7fcb958e2cbb476a46b65a553708e5767808dd
3,072
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR MIT //! Data structures to represent vtable trait function pointer restrictions // TODO: We currently use `InternedString`, but possibly should only use `String` pub use cbmc::InternedString; use serde::{Deserialize, Serialize}; /// A "trait-defined method" (`Trait::method`) represents the abstract function. /// For example, `Into::into` identifies a trait and a function within this trait, but /// does not identify a concrete function (because it is not applied to a concrete type.) #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct TraitDefinedMethod { /// The canonical trait name (see function `normalized_trait_name` in the Kani compiler) pub trait_name: InternedString, /// Use the index into this vtable, instead of the function name. pub vtable_idx: usize, } /// A call-site is a location in the code that invokes a particular `TraitDefinedMethod`. /// This is identified by: /// 1. The (mangled) name of the function this code is a part of /// 2. The (unique) label we applied #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct CallSite { /// The "Trait::method" being invoked at this location pub trait_method: TraitDefinedMethod, /// The (mangled symbol name of the) function this code is within pub function_name: InternedString, /// The unique label we applied to this function invocation. /// Because of how MIR works, the code being emitted here will always look like this: /// `label: tmp_n = vtable->fn(tmp_1, tmp_2, ...)` /// This label we apply is the means by which we identify the function pointer `vtable->fn` as /// having only certain possible values. pub label: InternedString, } /// A set of possible targets for a vtable entry's function pointer. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct PossibleMethodEntry { /// The `Trait::method` entry we have new possibilities for. pub trait_method: TraitDefinedMethod, /// The (mangled symbol name of the) function this trait-defined method might point to. /// (This is a `Vec` purely for representation efficiency reasons. It could be a single /// possibility, but with more entries in `possible_methods` below.) pub possibilities: Vec<InternedString>, } /// Represents the full set of vtable restrictions visible in this crate. /// Currently corresponds to a `*.restrictions.json` file. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct VtableCtxResults { /// Each call site that is visible in this crate: a call site can have restrictions applied to it. pub call_sites: Vec<CallSite>, /// A set of entries to the map from `TraitDefinedMethod` to function symbol. /// When all of these are aggregated together from all linked crates, these collectively represent /// the only function pointers that might exist in this vtable entry. pub possible_methods: Vec<PossibleMethodEntry>, }
50.360656
102
0.731445
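A hedged sketch of the JSON these records produce, assuming `serde_json` is available and that `InternedString` converts from `&str` (as cbmc's interned strings do); the mangled symbol is a placeholder, not a real entry.
fn example_restriction() -> serde_json::Result<String> {
    let entry = PossibleMethodEntry {
        trait_method: TraitDefinedMethod {
            trait_name: "core::convert::Into".into(),
            vtable_idx: 3,
        },
        // Placeholder mangled symbol; real entries come from codegen.
        possibilities: vec!["_ZN9example4methE".into()],
    };
    serde_json::to_string_pretty(&entry)
}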
337afe2ebbaa23da8426b3fece1931c6e3b96478
1,731
use super::*; use pulldown_cmark::CowStr; pub struct SectionEventHandler<'a> { base_identifier: &'a str, header_section_html: &'a str, next_text_is_section: bool, current_header: String, } impl<'a> SectionEventHandler<'a> { pub fn new(base_identifier: &'a str, header_section_html: &'a str) -> SectionEventHandler<'a> { SectionEventHandler { base_identifier, header_section_html, next_text_is_section: false, current_header: String::new(), } } } impl<'a> EventHandler for SectionEventHandler<'a> { fn handle(&mut self, event: &Event, result: &mut ParseResult, events: &mut Vec<Event>) -> bool { match event { Event::Start(Tag::Heading(_, _, _)) => { self.next_text_is_section = true; } Event::Text(ref text) if self.next_text_is_section => { self.current_header.push_str(&text); } Event::End(Tag::Heading(_, _, _)) => { self.next_text_is_section = false; let header_number = (result.sections.len() as u32) + 1; let text = std::mem::replace(&mut self.current_header, String::new()); result.sections.push((header_number, text)); // we insert a small identifier so that the header can be linked to let string = self .header_section_html .replace("{identifier}", self.base_identifier) .replace("{number}", &header_number.to_string()); events.push(Event::Html(CowStr::Boxed(string.into_boxed_str()))); } _ => (), } true } }
35.326531
100
0.557481
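The handler splices an HTML anchor after each heading by substituting two placeholders in `header_section_html`. The substitution in isolation, with an illustrative template:
fn render_anchor(template: &str, base_identifier: &str, number: u32) -> String {
    template
        .replace("{identifier}", base_identifier)
        .replace("{number}", &number.to_string())
}

#[test]
fn renders_anchor() {
    let template = r#"<a id="{identifier}-{number}"></a>"#;
    assert_eq!(render_anchor(template, "doc", 2), r#"<a id="doc-2"></a>"#);
}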
4ac9d9cd93359187d673dd3c8c4f690e60d04397
846
use { super::GltfLoadingError, crate::renderer::{Context, Texture}, illume::{ImageView, Sampler, SamplerInfo}, }; pub fn load_gltf_texture( texture: gltf::Texture, views: &[ImageView], samplers: &[Sampler], default_sampler: &mut Option<Sampler>, ctx: &mut Context, ) -> Result<Texture, GltfLoadingError> { let image = views[texture.source().index()].clone(); let sampler = match texture.sampler().index() { Some(index) => samplers[index].clone(), None => match default_sampler { Some(default_sampler) => default_sampler.clone(), None => { let sampler = ctx.create_sampler(SamplerInfo::default())?; *default_sampler = Some(sampler.clone()); sampler } }, }; Ok(Texture { image, sampler }) }
30.214286
74
0.583924
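The `default_sampler` argument above is a one-slot cache: the first texture without an explicit sampler creates the default, later ones clone it. The same pattern in miniature, with `String` standing in for `Sampler` (`Option::get_or_insert_with` would be the std shorthand, but the real code cannot use it because sampler creation is fallible):
fn get_or_create_default(cache: &mut Option<String>) -> String {
    match cache {
        Some(existing) => existing.clone(),
        None => {
            let created = String::from("default-sampler");
            *cache = Some(created.clone()); // remember it for the next call
            created
        }
    }
}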
0ec1383c1262d220d775aa7d67600e2dd5daf225
25,602
use vk; use crate::Device; use crate::Pipeline; use crate::PipelineInfo; use crate::RenderPass; use crate::buffer::{UniformData}; use crate::ownage::check_errors; use crate::vkenums::{BlendFactor, Topology, PolygonMode, CullMode, FrontFace, SampleCount, VkBool, ShaderStage, StencilOp, CompareOp, ColourComponent, LogicOp, BlendOp, DynamicState}; use std::mem; use std::ptr; use std::sync::Arc; use std::ffi::CString; pub struct PipelineBuilder { vertex_shader: Option<vk::ShaderModule>, fragment_shader: Option<vk::ShaderModule>, compute_shader: Option<vk::ShaderModule>, render_pass: Option<RenderPass>, subpass: u32, topology: Topology, polygon_mode: PolygonMode, cull_mode: CullMode, front_face: FrontFace, depth_test: u32, depth_write: u32, depth_clamp: u32, depth_bias: u32, rasterizer_discard: u32, blend_constants: [f32; 4], primitive_restart: u32, rasterization_samples: SampleCount, sample_shader: u32, alpha_to_coverage: u32, alpha_to_one: u32, blend_enabled: VkBool, has_push_constant: bool, push_constant_size: u32, push_constant_shader_stage: ShaderStage, specialisation_constants: Vec<(u32, UniformData, u32, ShaderStage)>, //Vec<(id, data, offset, shader stage)> descriptor_set_layouts: Option<Vec<vk::DescriptorSetLayout>>, vertex_binding: Option<Vec<vk::VertexInputBindingDescription>>, vertex_attributes: Option<Vec<vk::VertexInputAttributeDescription>>, } impl PipelineBuilder { pub fn new() -> PipelineBuilder { PipelineBuilder { vertex_shader: None, fragment_shader: None, compute_shader: None, render_pass: None, subpass: 0, topology: Topology::TriangleList, polygon_mode: PolygonMode::Fill, cull_mode: CullMode::Back, front_face: FrontFace::Clockwise, depth_test: vk::FALSE, depth_write: vk::FALSE, depth_clamp: vk::FALSE, depth_bias: vk::FALSE, rasterizer_discard: vk::FALSE, blend_constants: [0.0, 0.0, 0.0, 0.0], primitive_restart: vk::FALSE, rasterization_samples: SampleCount::OneBit, sample_shader: vk::FALSE, alpha_to_coverage: vk::FALSE, alpha_to_one: vk::FALSE, blend_enabled: VkBool::True, has_push_constant: false, push_constant_size: 0, push_constant_shader_stage: ShaderStage::Vertex, specialisation_constants: Vec::new(), descriptor_set_layouts: None, vertex_binding: None, vertex_attributes: None, } } pub fn disable_blend(mut self) -> PipelineBuilder { self.blend_enabled = VkBool::False; self } pub fn push_constants(mut self, shader_stage: ShaderStage, size: u32) -> PipelineBuilder { self.has_push_constant = true; self.push_constant_size = size; self.push_constant_shader_stage = shader_stage; self } pub fn vertex_shader(mut self, shader: vk::ShaderModule) -> PipelineBuilder { self.vertex_shader = Some(shader); self } pub fn fragment_shader(mut self, shader: vk::ShaderModule) -> PipelineBuilder { self.fragment_shader = Some(shader); self } pub fn compute_shader(mut self, shader: vk::ShaderModule) -> PipelineBuilder { self.compute_shader = Some(shader); self } pub fn subpass(mut self, subpass_num: u32) -> PipelineBuilder { self.subpass = subpass_num; self } pub fn add_vertex_specialisation_constant(mut self, id: u32, data: UniformData, offset: u32) -> PipelineBuilder { self.specialisation_constants.push((id, data, offset, ShaderStage::Vertex)); self } pub fn add_fragment_specialisation_constant(mut self, id: u32, data: UniformData, offset: u32) -> PipelineBuilder { self.specialisation_constants.push((id, data, offset, ShaderStage::Fragment)); self } pub fn vertex_binding(mut self, binding: Vec<vk::VertexInputBindingDescription>) -> PipelineBuilder { self.vertex_binding = Some(binding); self 
} pub fn vertex_attributes(mut self, attributes: Vec<vk::VertexInputAttributeDescription>) -> PipelineBuilder { self.vertex_attributes = Some(attributes); self } pub fn render_pass(mut self, render_pass: RenderPass) -> PipelineBuilder { self.render_pass = Some(render_pass); self } pub fn descriptor_set_layout(mut self, layouts: Vec<vk::DescriptorSetLayout>) -> PipelineBuilder { if let Some(descriptor_layout) = &mut self.descriptor_set_layouts { descriptor_layout.push(layouts[0]); } else { self.descriptor_set_layouts = Some(vec!(layouts[0])); } self } pub fn topology_point_list(mut self) -> PipelineBuilder { self.topology = Topology::PointList; self } pub fn topology_line_list(mut self) -> PipelineBuilder { self.topology = Topology::LineList; self } pub fn topology_line_strip(mut self) -> PipelineBuilder { self.topology = Topology::LineStrip; self } pub fn topology_triangle_list(mut self) -> PipelineBuilder { self.topology = Topology::TriangleList; self } pub fn topology_triangle_strip(mut self) -> PipelineBuilder { self.topology = Topology::TriangleStrip; self } pub fn topology_triangle_fan(mut self) -> PipelineBuilder { self.topology = Topology::TriangleFan; self } pub fn topology_line_list_with_adjacency(mut self) -> PipelineBuilder { self.topology = Topology::LineListWithAdjacency; self } pub fn topology_line_strip_with_adjacency(mut self) -> PipelineBuilder { self.topology = Topology::LineStripWithAdjacency; self } pub fn topology_triangle_list_with_adjacency(mut self) -> PipelineBuilder { self.topology = Topology::TriangleListWithAdjacency; self } pub fn topology_triangle_strip_with_adjacency(mut self) -> PipelineBuilder { self.topology = Topology::TriangleStripWithAjacency; self } pub fn topology_patch_list(mut self) -> PipelineBuilder { self.topology = Topology::PatchList; self } pub fn polygon_mode_fill(mut self) -> PipelineBuilder { self.polygon_mode = PolygonMode::Fill; self } pub fn polygon_mode_line(mut self) -> PipelineBuilder { self.polygon_mode = PolygonMode::Line; self } pub fn polygon_mode_point(mut self) -> PipelineBuilder { self.polygon_mode = PolygonMode::Point; self } pub fn cull_mode_none(mut self) -> PipelineBuilder { self.cull_mode = CullMode::None; self } pub fn cull_mode_front(mut self) -> PipelineBuilder { self.cull_mode = CullMode::Front; self } pub fn cull_mode_back(mut self) -> PipelineBuilder { self.cull_mode = CullMode::Back; self } pub fn cull_mode_front_and_back(mut self) -> PipelineBuilder { self.cull_mode = CullMode::FrontAndBack; self } pub fn front_face_clockwise(mut self) -> PipelineBuilder { self.front_face = FrontFace::Clockwise; self } pub fn front_face_counter_clockwise(mut self) -> PipelineBuilder { self.front_face = FrontFace::CounterClockwise; self } pub fn enable_depth_test(mut self) -> PipelineBuilder { self.depth_test = vk::TRUE; self } pub fn enable_depth_write(mut self) -> PipelineBuilder { self.depth_write = vk::TRUE; self } pub fn enable_depth_clamp(mut self) -> PipelineBuilder { self.depth_clamp = vk::TRUE; self } pub fn enable_depth_bias(mut self) -> PipelineBuilder { self.depth_bias = vk::TRUE; self } pub fn discard_rasterizer(mut self) -> PipelineBuilder { self.rasterizer_discard = vk::TRUE; self } pub fn primitive_restart(mut self) -> PipelineBuilder { self.primitive_restart = vk::TRUE; self } pub fn multisample(mut self, samples: &SampleCount) -> PipelineBuilder { self.rasterization_samples = *samples; self } pub fn rasterization_samples_1_bit(mut self) -> PipelineBuilder { self.rasterization_samples = SampleCount::OneBit; self } 
pub fn rasterization_samples_2_bit(mut self) -> PipelineBuilder { self.rasterization_samples = SampleCount::TwoBit; self } pub fn rasterization_samples_4_bit(mut self) -> PipelineBuilder { self.rasterization_samples = SampleCount::FourBit; self } pub fn rasterization_samples_8_bit(mut self) -> PipelineBuilder { self.rasterization_samples = SampleCount::EightBit; self } pub fn rasterization_samples_16_bit(mut self) -> PipelineBuilder { self.rasterization_samples = SampleCount::SixteenBit; self } pub fn sample_shader(mut self) -> PipelineBuilder { self.sample_shader = vk::TRUE; self } pub fn alpha_to_coverage(mut self) -> PipelineBuilder { self.alpha_to_coverage = vk::TRUE; self } pub fn alpha_to_one(mut self) -> PipelineBuilder { self.alpha_to_one = vk::TRUE; self } pub fn build_compute(self, device: Arc<Device>) -> Pipeline { let vk = device.pointers(); let device = device.internal_object(); let mut layout = unsafe { mem::MaybeUninit::uninit().assume_init() }; let mut pipelines: Vec<vk::Pipeline> = Vec::with_capacity(1); let mut compute_pipeline_create_infos: Vec<vk::ComputePipelineCreateInfo> = Vec::with_capacity(2); let shader_stage = vk::PipelineShaderStageCreateInfo { sType: vk::STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, pNext: ptr::null(), flags: 0, stage: ShaderStage::Compute.to_bits(), module: self.compute_shader.unwrap(), pName: CString::new("main").unwrap().into_raw(), pSpecializationInfo: ptr::null(), }; let push_constant_range = { vk::PushConstantRange { stageFlags: self.push_constant_shader_stage.to_bits(), offset: 0, size: self.push_constant_size, } }; let layouts = self.descriptor_set_layouts.expect("PipelineBuilderError: Descriptor set layouts not present."); let pipeline_layout_create_info = { vk::PipelineLayoutCreateInfo { sType: vk::STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, pNext: ptr::null(), flags: 0, setLayoutCount: layouts.len() as u32, pSetLayouts: layouts.as_ptr(), pushConstantRangeCount: if self.push_constant_size == 0 { 0 } else { 1 }, pPushConstantRanges: if self.push_constant_size == 0 { ptr::null() } else { &push_constant_range }, } }; unsafe { vk.CreatePipelineLayout(*device, &pipeline_layout_create_info, ptr::null(), &mut layout); } compute_pipeline_create_infos.push(vk::ComputePipelineCreateInfo { sType: vk::STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, pNext: ptr::null(), flags: 0, stage: shader_stage, layout: layout, basePipelineHandle: 0, basePipelineIndex: 0, // -1 }); unsafe { check_errors(vk.CreateComputePipelines(*device, 0, 1, compute_pipeline_create_infos.as_ptr(), ptr::null(), pipelines.as_mut_ptr())); pipelines.set_len(compute_pipeline_create_infos.len()); } let pipeline_info = PipelineInfo { vertex_shader: 0, fragment_shader: 0, vertex_binding: Vec::new(), vertex_input_attribute_descriptions: Vec::new(), }; Pipeline::new_with_fields(pipeline_info, pipelines, 0, layout) } pub fn build(mut self, device: Arc<Device>) -> Pipeline { if !self.vertex_shader.is_some() { panic!("PipelineBuilder Error: vertex shader missing!"); } if !self.fragment_shader.is_some() { panic!("PipelineBuilder Error: fragment shader missing!"); } if !self.render_pass.is_some() { panic!("PipelineBuilder Error: render_pass missing!"); } if !self.descriptor_set_layouts.is_some() { panic!("PipelineBuilder Error: descriptor_set_layout missing!"); } if !self.vertex_binding.is_some() { panic!("PipelineBuilder Error: vertex bindings missing!"); } if !self.vertex_attributes.is_some() { panic!("PipelineBuilder Error: vertex attributes missing!"); } let mut pipelines: 
Vec<vk::Pipeline> = Vec::with_capacity(1); let mut layout: vk::PipelineLayout = unsafe { mem::MaybeUninit::uninit().assume_init() }; let mut cache: vk::PipelineCache = unsafe { mem::MaybeUninit::uninit().assume_init() }; let mut graphics_pipeline_create_infos: Vec<vk::GraphicsPipelineCreateInfo> = Vec::with_capacity(2); let mut shader_stages: Vec<vk::PipelineShaderStageCreateInfo> = Vec::with_capacity(2); let vertex_specialisation_constants: vk::SpecializationInfo; let fragment_specialisation_constants: vk::SpecializationInfo; let mut vertex_specialisation_map_entry: Vec<vk::SpecializationMapEntry> = Vec::new(); let mut fragment_specialisation_map_entry: Vec<vk::SpecializationMapEntry> = Vec::new(); let mut vertex_specialisation_data: UniformData = UniformData::new(); let mut fragment_specialisation_data: UniformData = UniformData::new(); for (id, data, offset, shader_stage) in &mut self.specialisation_constants { match shader_stage { ShaderStage::Vertex => { vertex_specialisation_map_entry.push( vk::SpecializationMapEntry { constantID: *id, offset: *offset, size: data.size_non_aligned() as usize, } ); let raw_data = data.build_non_aligned(); for float in raw_data.iter() { vertex_specialisation_data = vertex_specialisation_data.add_float(*float); } }, ShaderStage::Fragment => { fragment_specialisation_map_entry.push( vk::SpecializationMapEntry { constantID: *id, offset: *offset, size: data.size_non_aligned() as usize, } ); let raw_data = data.build_non_aligned(); for float in raw_data.iter() { fragment_specialisation_data = fragment_specialisation_data.add_float(*float); } }, _ => {} } } vertex_specialisation_constants = vk::SpecializationInfo { mapEntryCount: vertex_specialisation_map_entry.len() as u32, pMapEntries: vertex_specialisation_map_entry.as_ptr(), dataSize: vertex_specialisation_data.size_non_aligned() as usize, pData: vertex_specialisation_data.build_non_aligned().as_ptr() as *const _, }; fragment_specialisation_constants = vk::SpecializationInfo { mapEntryCount: fragment_specialisation_map_entry.len() as u32, pMapEntries: fragment_specialisation_map_entry.as_ptr(), dataSize: fragment_specialisation_data.size_non_aligned() as usize, pData: fragment_specialisation_data.build_non_aligned().as_ptr() as *const _, }; shader_stages.push( vk::PipelineShaderStageCreateInfo { sType: vk::STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, pNext: ptr::null(), flags: 0, stage: ShaderStage::Vertex.to_bits(), module: self.vertex_shader.unwrap(), pName: CString::new("main").unwrap().into_raw(), pSpecializationInfo: if vertex_specialisation_map_entry.len() == 0 { ptr::null() } else { &vertex_specialisation_constants }, } ); shader_stages.push( vk::PipelineShaderStageCreateInfo { sType: vk::STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, pNext: ptr::null(), flags: 0, stage: ShaderStage::Fragment.to_bits(), module: self.fragment_shader.unwrap(), pName: CString::new("main").unwrap().into_raw(), pSpecializationInfo: if fragment_specialisation_map_entry.len() == 0 { ptr::null() } else { &fragment_specialisation_constants }, } ); /* float: VK_FORMAT_R32_SFLOAT vec2: VK_FORMAT_R32G32_SFLOAT vec3: VK_FORMAT_R32G32B32_SFLOAT vec4: VK_FORMAT_R32G32B32A32_SFLOAT ivec2: VK_FORMAT_R32G32_SINT uvec4: VK_FORMAT_R32G32B32A32_UINT double: VK_FORMAT_R64_SFLOAT */ let vertex_binding = self.vertex_binding.unwrap(); let vertex_attributes = self.vertex_attributes.unwrap(); let pipeline_vertex_input_state_create_info = { vk::PipelineVertexInputStateCreateInfo { sType: 
vk::STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, pNext: ptr::null(), flags: 0, vertexBindingDescriptionCount: vertex_binding.len() as u32, pVertexBindingDescriptions: vertex_binding.as_ptr(), vertexAttributeDescriptionCount: vertex_attributes.len() as u32, pVertexAttributeDescriptions: vertex_attributes.as_ptr(), } }; let pipeline_input_assembly_state_create_info = { vk::PipelineInputAssemblyStateCreateInfo { sType: vk::STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, pNext: ptr::null(), flags: 0, topology: self.topology.to_bits(), primitiveRestartEnable: self.primitive_restart, } }; let pipeline_tessellation_state_create_info = { vk::PipelineTessellationStateCreateInfo { sType: vk::STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, pNext: ptr::null(), flags: 0, patchControlPoints: 0, } }; let pipeline_viewport_state_create_info = { vk::PipelineViewportStateCreateInfo { sType: vk::STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, pNext: ptr::null(), flags: 0, viewportCount: 1, pViewports: ptr::null(),//&viewport, scissorCount: 1, pScissors: ptr::null(),//&scissor, } }; let pipeline_rasterization_state_create_info = { vk::PipelineRasterizationStateCreateInfo { sType: vk::STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, pNext: ptr::null(), flags: 0, depthClampEnable: self.depth_clamp, rasterizerDiscardEnable: self.rasterizer_discard, polygonMode: self.polygon_mode.to_bits(), cullMode: self.cull_mode.to_bits(), frontFace: self.front_face.to_bits(), depthBiasEnable: self.depth_bias, depthBiasConstantFactor: 0.0, depthBiasClamp: 0.0, depthBiasSlopeFactor: 0.0, lineWidth: 1.0, } }; let pipeline_multisample_state_create_info = { vk::PipelineMultisampleStateCreateInfo { sType: vk::STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, pNext: ptr::null(), flags: 0, rasterizationSamples: self.rasterization_samples.to_bits(), sampleShadingEnable: self.sample_shader, minSampleShading: 1.0, pSampleMask: ptr::null(), alphaToCoverageEnable: self.alpha_to_coverage, alphaToOneEnable: self.alpha_to_one, } }; let front_stencil_op_state = { vk::StencilOpState { failOp: StencilOp::Keep.to_bits(), passOp: StencilOp::Keep.to_bits(), depthFailOp: StencilOp::Keep.to_bits(), compareOp: CompareOp::Never.to_bits(), compareMask: 0, writeMask: 0, reference: 0, } }; let back_stencil_op_state = { vk::StencilOpState { failOp: StencilOp::Keep.to_bits(), passOp: StencilOp::Keep.to_bits(), depthFailOp: StencilOp::Keep.to_bits(), compareOp: CompareOp::Never.to_bits(), compareMask: 0, writeMask: 0, reference: 0, } }; let pipeline_depth_stencil_state_create_info = { vk::PipelineDepthStencilStateCreateInfo { sType: vk::STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, pNext: ptr::null(), flags: 0, depthTestEnable: self.depth_test, depthWriteEnable: self.depth_write, depthCompareOp: CompareOp::Less.to_bits(), depthBoundsTestEnable: vk::FALSE, stencilTestEnable: vk::FALSE, front: front_stencil_op_state, back: back_stencil_op_state, minDepthBounds: 0.0, maxDepthBounds: 1.0, } }; let num_attachments = if let Some(renderpass) = &self.render_pass { renderpass.get_num_colour_attachments_in_subpass(self.subpass) } else { 1 }; let mut pipeline_colour_blend_attachments: Vec<vk::PipelineColorBlendAttachmentState> = Vec::with_capacity(num_attachments as usize); for _ in 0..num_attachments { pipeline_colour_blend_attachments.push( vk::PipelineColorBlendAttachmentState { blendEnable: self.blend_enabled.to_bits(), srcColorBlendFactor: BlendFactor::SrcAlpha.to_bits(), dstColorBlendFactor: 
BlendFactor::OneMinusSrcAlpha.to_bits(), colorBlendOp: BlendOp::Add.to_bits(), srcAlphaBlendFactor: BlendFactor::SrcAlpha.to_bits(), dstAlphaBlendFactor: BlendFactor::Zero.to_bits(), alphaBlendOp: BlendOp::Add.to_bits(), colorWriteMask: ColourComponent::R.to_bits() | ColourComponent::G.to_bits() | ColourComponent::B.to_bits() | ColourComponent::A.to_bits(), } ); } let pipeline_colour_blend_state_create_info = { vk::PipelineColorBlendStateCreateInfo { sType: vk::STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, pNext: ptr::null(), flags: 0, logicOpEnable: vk::FALSE, logicOp: LogicOp::Copy.to_bits(), attachmentCount: num_attachments as u32, pAttachments: pipeline_colour_blend_attachments.as_ptr(), blendConstants: self.blend_constants, } }; let mut dynamic_states = Vec::with_capacity(3); dynamic_states.push(DynamicState::Viewport.to_bits()); dynamic_states.push(DynamicState::Scissor.to_bits()); dynamic_states.push(DynamicState::LineWidth.to_bits()); let dynamic_state_create_info = { vk::PipelineDynamicStateCreateInfo { sType: vk::STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO, pNext: ptr::null(), flags: 0, dynamicStateCount: dynamic_states.len() as u32, pDynamicStates: dynamic_states.as_ptr(), } }; let push_constant_range = { vk::PushConstantRange { stageFlags: self.push_constant_shader_stage.to_bits(), offset: 0, size: self.push_constant_size, } }; let layouts = self.descriptor_set_layouts.unwrap(); let pipeline_layout_create_info = { vk::PipelineLayoutCreateInfo { sType: vk::STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, pNext: ptr::null(), flags: 0, setLayoutCount: layouts.len() as u32, pSetLayouts: layouts.as_ptr(), pushConstantRangeCount: if self.push_constant_size == 0 { 0 } else { 1 }, pPushConstantRanges: if self.push_constant_size == 0 { ptr::null() } else { &push_constant_range }, } }; let vk = device.pointers(); let device = device.internal_object(); unsafe { vk.CreatePipelineLayout(*device, &pipeline_layout_create_info, ptr::null(), &mut layout); } graphics_pipeline_create_infos.push( vk::GraphicsPipelineCreateInfo { sType: vk::STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, pNext: ptr::null(), flags: 0, stageCount: shader_stages.len() as u32, pStages: shader_stages.as_ptr(), pVertexInputState: &pipeline_vertex_input_state_create_info, pInputAssemblyState: &pipeline_input_assembly_state_create_info, pTessellationState: &pipeline_tessellation_state_create_info, pViewportState: &pipeline_viewport_state_create_info, pRasterizationState: &pipeline_rasterization_state_create_info, pMultisampleState: &pipeline_multisample_state_create_info, pDepthStencilState: &pipeline_depth_stencil_state_create_info, pColorBlendState: &pipeline_colour_blend_state_create_info, pDynamicState: &dynamic_state_create_info, layout: layout, renderPass: *self.render_pass.unwrap().internal_object(), subpass: self.subpass, basePipelineHandle: 0, basePipelineIndex: -1, } ); let pipeline_cache_create_info = { vk::PipelineCacheCreateInfo { sType: vk::STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO, pNext: ptr::null(), flags: 0, initialDataSize: 0, pInitialData: ptr::null(), } }; unsafe { check_errors(vk.CreatePipelineCache(*device, &pipeline_cache_create_info, ptr::null(), &mut cache)); check_errors(vk.CreateGraphicsPipelines(*device, cache, graphics_pipeline_create_infos.len() as u32, graphics_pipeline_create_infos.as_ptr(), ptr::null(), pipelines.as_mut_ptr())); pipelines.set_len(graphics_pipeline_create_infos.len()); } let pipeline_info = PipelineInfo { vertex_shader: self.vertex_shader.unwrap(), fragment_shader: 
self.fragment_shader.unwrap(), vertex_binding: vertex_binding, vertex_input_attribute_descriptions: vertex_attributes, }; Pipeline::new_with_fields(pipeline_info, pipelines, cache, layout) } }
33.820343
186
0.666589
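A minimal sketch of driving the builder above. The shader modules, render pass, descriptor layout and vertex descriptions are taken as parameters because their creation lives elsewhere in the crate; the chain itself only uses methods defined on `PipelineBuilder`.
fn build_default_pipeline(
    device: Arc<Device>,
    vertex_module: vk::ShaderModule,
    fragment_module: vk::ShaderModule,
    render_pass: RenderPass,
    descriptor_layout: vk::DescriptorSetLayout,
    bindings: Vec<vk::VertexInputBindingDescription>,
    attributes: Vec<vk::VertexInputAttributeDescription>,
) -> Pipeline {
    PipelineBuilder::new()
        .vertex_shader(vertex_module)
        .fragment_shader(fragment_module)
        .render_pass(render_pass)
        .descriptor_set_layout(vec![descriptor_layout])
        .vertex_binding(bindings)
        .vertex_attributes(attributes)
        .topology_triangle_list()
        .cull_mode_back()
        .enable_depth_test()
        .enable_depth_write()
        .build(device)
}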
3aa512370be1624ba040a9d2e549e27635f83597
2,592
mod state; use colored::*; use state::{FsEntry, State}; use std::{cmp::min, fs::read_dir}; use zellij_tile::*; register_tile!(State); impl ZellijTile for State { fn init(&mut self) { refresh_directory(self); } fn draw(&mut self, rows: usize, cols: usize) { for i in 0..rows { if self.selected() < self.scroll() { *self.scroll_mut() = self.selected(); } if self.selected() - self.scroll() + 2 > rows { *self.scroll_mut() = self.selected() + 2 - rows; } let i = self.scroll() + i; if let Some(entry) = self.files.get(i) { let mut path = entry.as_line(cols).normal(); if let FsEntry::Dir(..) = entry { path = path.dimmed().bold(); } if i == self.selected() { println!("{}", path.reversed()); } else { println!("{}", path); } } else { println!(); } } } fn handle_key(&mut self, key: Key) { match key { Key::Up | Key::Char('k') => { *self.selected_mut() = self.selected().saturating_sub(1); } Key::Down | Key::Char('j') => { let next = self.selected().saturating_add(1); *self.selected_mut() = min(self.files.len() - 1, next); } Key::Right | Key::Char('\n') | Key::Char('l') => { match self.files[self.selected()].clone() { FsEntry::Dir(p, _) => { self.path = p; refresh_directory(self); } FsEntry::File(p, _) => open_file(&p), } } Key::Left | Key::Char('h') => { self.path.pop(); refresh_directory(self); } _ => (), }; } } fn refresh_directory(state: &mut State) { state.files = read_dir(&state.path) .unwrap() .filter_map(|res| { res.and_then(|d| { if d.metadata()?.is_dir() { let children = read_dir(d.path())?.count(); Ok(FsEntry::Dir(d.path(), children)) } else { let size = d.metadata()?.len(); Ok(FsEntry::File(d.path(), size)) } }) .ok() }) .collect(); state.files.sort_unstable(); }
29.454545
73
0.405864
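The draw loop keeps the selection visible by clamping the scroll offset from both sides (the `+ 2` leaves one spare row below the selection). The clamping logic restated as a standalone helper; the original applies the two checks sequentially, collapsed to branches here:
fn clamp_scroll(selected: usize, scroll: usize, rows: usize) -> usize {
    if selected < scroll {
        selected // selection moved above the viewport: jump up to it
    } else if selected + 2 > scroll + rows {
        selected + 2 - rows // selection fell below: scroll down just enough
    } else {
        scroll
    }
}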
2ff3da8b6f7124e92a627c2c84d44c1b5a82ab7d
138
#![no_main] #[mock::app] mod app { #[monotonic(binds = Tim1, default = true, default = false)] type Fast = hal::Tim1Monotonic; }
17.25
63
0.615942
e9e69dadd3b8358bf294ec9e582c99dff1b1059e
1,177
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(box_syntax)] fn main() { let _foo = &[1us, 2] as [usize]; //~^ ERROR cast to unsized type: `&[usize; 2]` as `[usize]` //~^^ HELP consider using an implicit coercion to `&[usize]` instead let _bar = box 1us as std::fmt::Show; //~^ ERROR cast to unsized type: `Box<usize>` as `core::fmt::Show` //~^^ HELP did you mean `Box<core::fmt::Show>`? let _baz = 1us as std::fmt::Show; //~^ ERROR cast to unsized type: `usize` as `core::fmt::Show` //~^^ HELP consider using a box or reference as appropriate let _quux = [1us, 2] as [usize]; //~^ ERROR cast to unsized type: `[usize; 2]` as `[usize]` //~^^ HELP consider using a box or reference as appropriate }
43.592593
72
0.658454
21dd506b04b2b3e5a32141108a4d8ebe07e517f6
74
pub(crate) mod event; pub(crate) mod event_map; pub(crate) mod event_dict;
24.666667
26
0.77027
11fa0573b92a7fa94bd2ff777d0ca106a32b96b2
3,276
#[cfg(test)] #[path = "../../tests/unit/validation/objectives_test.rs"] mod objectives_test; use super::*; use crate::format::problem::Objective::*; use std::collections::HashMap; /// Checks that objective is not empty when specified. fn check_e1600_empty_objective(objectives: &[&Objective]) -> Result<(), FormatError> { if objectives.is_empty() { Err(FormatError::new( "E1600".to_string(), "an empty objective specified".to_string(), "remove objectives property completely to use default".to_string(), )) } else { Ok(()) } } /// Checks that each objective type specified only once. fn check_e1601_duplicate_objectives(objectives: &[&Objective]) -> Result<(), FormatError> { let mut duplicates = objectives .iter() .fold(HashMap::new(), |mut acc, objective| { match objective { MinimizeCost => acc.entry("minimize-cost"), MinimizeTours => acc.entry("minimize-tours"), MaximizeTours => acc.entry("maximize-tours"), MinimizeUnassignedJobs => acc.entry("minimize-unassigned"), BalanceMaxLoad { .. } => acc.entry("balance-max-load"), BalanceActivities { .. } => acc.entry("balance-activities"), BalanceDistance { .. } => acc.entry("balance-distance"), BalanceDuration { .. } => acc.entry("balance-duration"), } .and_modify(|count| *count += 1) .or_insert(1_usize); acc }) .iter() .filter_map(|(name, count)| if *count > 1 { Some((*name).to_string()) } else { None }) .collect::<Vec<_>>(); duplicates.sort(); if duplicates.is_empty() { Ok(()) } else { Err(FormatError::new( "E1601".to_string(), "duplicate objective specified".to_string(), "remove duplicate objectives".to_string(), )) } } /// Checks that cost objective is specified. fn check_e1602_no_cost_value_objective(objectives: &[&Objective]) -> Result<(), FormatError> { let min_costs = objectives .iter() .filter(|objective| match objective { MinimizeCost => true, _ => false, }) .count(); if min_costs == 0 { Err(FormatError::new( "E1602".to_string(), "missing cost objective".to_string(), "specify 'minimize-cost' objective".to_string(), )) } else { Ok(()) } } fn get_objectives<'a>(ctx: &'a ValidationContext) -> Option<Vec<&'a Objective>> { ctx.problem.objectives.as_ref().map(|objectives| { Some(&objectives.primary) .iter() .chain(objectives.secondary.as_ref().iter()) .flat_map(|objectives| objectives.iter()) .collect() }) } pub fn validate_objectives(ctx: &ValidationContext) -> Result<(), Vec<FormatError>> { if let Some(objectives) = get_objectives(ctx) { combine_error_results(&[ check_e1600_empty_objective(&objectives), check_e1601_duplicate_objectives(&objectives), check_e1602_no_cost_value_objective(&objectives), ]) } else { Ok(()) } }
32.435644
94
0.569597
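The core of `check_e1601_duplicate_objectives` is a fold into per-name counts followed by a filter on counts above one. The same reduction, standalone:
use std::collections::HashMap;

fn duplicate_names<'a>(names: impl Iterator<Item = &'a str>) -> Vec<&'a str> {
    let counts = names.fold(HashMap::new(), |mut acc, name| {
        *acc.entry(name).or_insert(0_usize) += 1;
        acc
    });
    let mut dup: Vec<_> = counts
        .into_iter()
        .filter_map(|(name, count)| if count > 1 { Some(name) } else { None })
        .collect();
    dup.sort_unstable(); // deterministic order for error reporting
    dup
}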
6739ec627317f39214fb6ee10bd87249f59629c3
9,404
// MakAir // // Copyright: 2020, Makers For Life // License: Public Domain License use std::sync::mpsc::{Receiver, Sender, TryRecvError}; use chrono::offset::Local; use chrono::{DateTime, Duration}; use conrod_core::Ui; use glium::glutin::{ContextBuilder, EventsLoop, WindowBuilder}; use glium::Surface; use image::{buffer::ConvertBuffer, RgbImage, RgbaImage}; use plotters::prelude::*; use telemetry::{self, structures::MachineStateSnapshot, structures::TelemetryMessage}; use crate::APP_ARGS; use super::fonts::Fonts; use super::support::{self, EventLoop}; use super::widgets::{create_widgets, Ids}; lazy_static! { static ref SERIAL_RECEIVE_CHUNK_TIME: Duration = Duration::milliseconds(32); } pub struct DisplayDrawerBuilder; pub struct DisplayDrawer { renderer: conrod_glium::Renderer, display: support::GliumDisplayWinitWrapper, interface: conrod_core::Ui, events_loop: EventsLoop, event_loop: EventLoop, fonts: Fonts, } enum HandleLoopOutcome { Break, Continue, } type DataPressure = Vec<(DateTime<Local>, u16)>; impl DisplayDrawerBuilder { #[allow(clippy::new_ret_no_self)] pub fn new( window: WindowBuilder, context: ContextBuilder, events_loop: EventsLoop, interface: Ui, fonts: Fonts, ) -> DisplayDrawer { // Create display let display = glium::Display::new(window, context, &events_loop).unwrap(); let display = support::GliumDisplayWinitWrapper(display); // Create renderer let renderer = conrod_glium::Renderer::new(&display.0).unwrap(); // Create drawer DisplayDrawer { renderer, display, interface, events_loop, event_loop: EventLoop::new(), fonts, } } } impl DisplayDrawer { pub fn run(&mut self) { // TODO: move more of this into the "serial" module let mut data: DataPressure = Vec::new(); // Start gathering telemetry let rx = self.start_telemetry(); let mut last_machine_snapshot = MachineStateSnapshot::default(); // Start drawer loop // Flow: cycles through telemetry events, and refreshes the view every time there is an \ // update on the machines state. 
'main: loop { // TODO: only update when needed self.event_loop.needs_update(); // Receive telemetry data (from the input serial from the motherboard) if let Some(machine_snapshot) = self.step_loop_telemetry(&rx, &mut data) { last_machine_snapshot = machine_snapshot; } let older = Local::now() - chrono::Duration::seconds(40); data.retain(|d| d.0 > older); // Handle incoming events match self.step_loop_events() { HandleLoopOutcome::Break => break 'main, HandleLoopOutcome::Continue => {} } // Refresh the pressure data interface, if we have any data in the buffer if !data.is_empty() { self.step_loop_refresh(&data, &last_machine_snapshot); } } } // TODO: refactor this #[allow(clippy::ptr_arg)] fn render( &mut self, data_pressure: &DataPressure, machine_snapshot: &MachineStateSnapshot, ) -> conrod_core::image::Map<glium::texture::Texture2d> { let mut buffer = vec![0; (780 * 200 * 4) as usize]; let root = BitMapBackend::with_buffer(&mut buffer, (780, 200)).into_drawing_area(); root.fill(&BLACK).unwrap(); let oldest = data_pressure.first().unwrap().0 - chrono::Duration::seconds(40); let newest = data_pressure.first().unwrap().0; let mut chart = ChartBuilder::on(&root) .margin(10) .x_label_area_size(10) .y_label_area_size(40) .build_ranged(oldest..newest, 0..70) .unwrap(); chart .configure_mesh() .line_style_1(&plotters::style::colors::WHITE.mix(0.5)) .line_style_2(&plotters::style::colors::BLACK) .y_labels(5) .y_label_style( plotters::style::TextStyle::from(("sans-serif", 20).into_font()).color(&WHITE), ) .draw() .unwrap(); chart .draw_series(LineSeries::new( data_pressure.iter().map(|x| (x.0, x.1 as i32)), ShapeStyle::from(&plotters::style::RGBColor(0, 137, 255)) .filled() .stroke_width(1), )) .unwrap(); drop(chart); drop(root); let rgba_image: RgbaImage = RgbImage::from_raw(780, 200, buffer).unwrap().convert(); let image_dimensions = rgba_image.dimensions(); let raw_image = glium::texture::RawImage2d::from_raw_rgba_reversed( &rgba_image.into_raw(), image_dimensions, ); let image_texture = glium::texture::Texture2d::new(&self.display.0, raw_image).unwrap(); let (w, h) = ( image_texture.get_width(), image_texture.get_height().unwrap(), ); let mut image_map = conrod_core::image::Map::new(); let image_id = image_map.insert(image_texture); // The `WidgetId` for our background and `Image` widgets. 
let ids = Ids::new(self.interface.widget_id_generator()); let ui = self.interface.set_widgets(); create_widgets(ui, ids, image_id, (w, h), &self.fonts, &machine_snapshot); image_map } fn start_telemetry(&self) -> Receiver<TelemetryMessage> { // Start gathering telemetry let (tx, rx): (Sender<TelemetryMessage>, Receiver<TelemetryMessage>) = std::sync::mpsc::channel(); std::thread::spawn(move || { telemetry::gather_telemetry(&APP_ARGS.port, tx, None); }); rx } // TODO: refactor, rename and relocate this fn add_pressure(&self, data: &mut DataPressure, new_point: u16) { let now = Local::now(); if !data.is_empty() { let last_point = data.last().unwrap(); let diff_between = now - last_point.0; if diff_between < *SERIAL_RECEIVE_CHUNK_TIME { return; } } let point = new_point / 10; data.insert(0, (now, point)); } // TODO: relocate this fn step_loop_telemetry( &mut self, rx: &Receiver<TelemetryMessage>, data: &mut DataPressure, ) -> Option<MachineStateSnapshot> { let mut machine_snapshot = None; loop { match rx.try_recv() { Ok(message) => { match message { // TODO: add more message types TelemetryMessage::DataSnapshot(snapshot) => { self.add_pressure(data, snapshot.pressure); } TelemetryMessage::MachineStateSnapshot(snapshot) => { machine_snapshot = Some(snapshot); } _ => {} } } Err(TryRecvError::Empty) => { break; } Err(TryRecvError::Disconnected) => { panic!("channel to serial port thread was closed"); } } } machine_snapshot } // TODO: relocate this fn step_loop_events(&mut self) -> HandleLoopOutcome { for event in self.event_loop.next(&mut self.events_loop) { // Use the `winit` backend feature to convert the winit event to a conrod one. if let Some(event) = support::convert_event(event.clone(), &self.display) { self.interface.handle_event(event); self.event_loop.needs_update(); } // Break from the loop upon `Escape` or closed window. if let glium::glutin::Event::WindowEvent { event, .. } = event.clone() { match event { glium::glutin::WindowEvent::CloseRequested | glium::glutin::WindowEvent::KeyboardInput { input: glium::glutin::KeyboardInput { virtual_keycode: Some(glium::glutin::VirtualKeyCode::Escape), .. }, .. } => { return HandleLoopOutcome::Break; } _ => (), } } } HandleLoopOutcome::Continue } #[allow(clippy::ptr_arg)] fn step_loop_refresh(&mut self, data: &DataPressure, snapshot: &MachineStateSnapshot) { let image_map = self.render(data, &snapshot); if let Some(primitives) = self.interface.draw_if_changed() { self.renderer.fill(&self.display.0, primitives, &image_map); let mut target = self.display.0.draw(); target.clear_color(0.0, 0.0, 0.0, 1.0); self.renderer .draw(&self.display.0, &mut target, &image_map) .unwrap(); target.finish().unwrap(); } } }
32.652778
97
0.549872
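The run loop trims the pressure buffer to a 40-second sliding window on every iteration. The pruning in isolation (assumes the `chrono` crate, as above):
use chrono::{DateTime, Duration, Local};

fn prune_older_than(data: &mut Vec<(DateTime<Local>, u16)>, window: Duration) {
    let cutoff = Local::now() - window;
    data.retain(|(timestamp, _)| *timestamp > cutoff);
}
// e.g. prune_older_than(&mut data, Duration::seconds(40));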
148b1f8f69c41c2a65fb8470a27020db954a0e63
6,802
//! ibc-proto library gives the developer access to the Cosmos SDK IBC proto-defined structs. // Todo: automate the creation of this module setup based on the dots in the filenames. // This module setup is necessary because the generated code contains "super::" calls for dependencies. #![cfg_attr(not(feature = "std"), no_std)] #![deny(warnings, trivial_casts, trivial_numeric_casts, unused_import_braces)] #![allow(clippy::large_enum_variant)] #![allow(rustdoc::bare_urls)] #![forbid(unsafe_code)] #![doc(html_root_url = "https://docs.rs/ibc-proto/0.16.0")] extern crate alloc; #[cfg(not(feature = "std"))] #[macro_use] extern crate core as std; macro_rules! include_proto { ($path:literal) => { include!(concat!("prost/", $path)); }; } /// The version (commit hash) of the Cosmos SDK used when generating this library. pub const COSMOS_SDK_COMMIT: &str = include_str!("COSMOS_SDK_COMMIT"); /// The version (commit hash) of IBC Go used when generating this library. pub const IBC_GO_COMMIT: &str = include_str!("IBC_GO_COMMIT"); pub mod cosmos { pub mod auth { pub mod v1beta1 { include_proto!("cosmos.auth.v1beta1.rs"); /// EthAccount defines an Ethermint account. /// TODO: remove when/if a canonical `EthAccount` /// lands in the next Cosmos SDK release /// (note https://github.com/cosmos/cosmos-sdk/pull/9981 /// only adds the PubKey type) #[derive(Clone, PartialEq, ::prost::Message)] pub struct EthAccount { #[prost(message, optional, tag = "1")] pub base_account: ::core::option::Option<BaseAccount>, #[prost(bytes = "vec", tag = "2")] pub code_hash: ::prost::alloc::vec::Vec<u8>, } } } pub mod staking { pub mod v1beta1 { include_proto!("cosmos.staking.v1beta1.rs"); } } pub mod base { pub mod abci { pub mod v1beta1 { include_proto!("cosmos.base.abci.v1beta1.rs"); } } pub mod kv { pub mod v1beta1 { include_proto!("cosmos.base.kv.v1beta1.rs"); } } pub mod query { pub mod v1beta1 { include_proto!("cosmos.base.query.v1beta1.rs"); } pub mod pagination { use super::v1beta1::PageRequest; pub fn all() -> Option<PageRequest> { Some(PageRequest { limit: u64::MAX, ..Default::default() }) } } } pub mod reflection { pub mod v1beta1 { include_proto!("cosmos.base.reflection.v1beta1.rs"); } } pub mod store { pub mod v1beta1 { include_proto!("cosmos.base.store.v1beta1.rs"); } } pub mod v1beta1 { include_proto!("cosmos.base.v1beta1.rs"); } pub mod tendermint { pub mod v1beta1 { include_proto!("cosmos.base.tendermint.v1beta1.rs"); } } } pub mod crypto { pub mod multisig { pub mod v1beta1 { include_proto!("cosmos.crypto.multisig.v1beta1.rs"); } } } pub mod tx { pub mod signing { pub mod v1beta1 { include_proto!("cosmos.tx.signing.v1beta1.rs"); } } pub mod v1beta1 { include_proto!("cosmos.tx.v1beta1.rs"); } } pub mod upgrade { pub mod v1beta1 { include_proto!("cosmos.upgrade.v1beta1.rs"); } } pub mod gov { pub mod v1beta1 { include_proto!("cosmos.gov.v1beta1.rs"); } } } pub mod ibc { #[deprecated(since = "0.15.0", note = "Use `ibc_proto::ibc::applications` instead")] pub mod apps { pub use super::applications::*; } pub mod applications { pub mod transfer { pub mod v1 { include_proto!("ibc.applications.transfer.v1.rs"); } } pub mod interchain_accounts { pub mod v1 { include_proto!("ibc.applications.interchain_accounts.v1.rs"); } pub mod controller { pub mod v1 { include_proto!("ibc.applications.interchain_accounts.controller.v1.rs"); } } pub mod host { pub mod v1 { include_proto!("ibc.applications.interchain_accounts.host.v1.rs"); } } } } pub mod core { pub mod channel { pub mod v1 { include_proto!("ibc.core.channel.v1.rs"); } 
} pub mod client { pub mod v1 { include_proto!("ibc.core.client.v1.rs"); } } pub mod commitment { pub mod v1 { include_proto!("ibc.core.commitment.v1.rs"); } } pub mod connection { pub mod v1 { include_proto!("ibc.core.connection.v1.rs"); } } pub mod types { pub mod v1 { include_proto!("ibc.core.types.v1.rs"); } } } pub mod lightclients { pub mod localhost { pub mod v1 { include_proto!("ibc.lightclients.localhost.v1.rs"); } } pub mod solomachine { pub mod v1 { include_proto!("ibc.lightclients.solomachine.v1.rs"); } } pub mod tendermint { pub mod v1 { include_proto!("ibc.lightclients.tendermint.v1.rs"); } } } pub mod mock { include_proto!("ibc.mock.rs"); } } pub mod ics23 { include_proto!("ics23.rs"); } pub(crate) mod base64 { use alloc::string::String; use alloc::vec::Vec; use serde::{Deserialize, Deserializer, Serialize, Serializer}; pub fn serialize<S: Serializer>(v: &[u8], serializer: S) -> Result<S::Ok, S::Error> { let mut buf = String::new(); base64::encode_config_buf(v, base64::STANDARD, &mut buf); String::serialize(&buf, serializer) } pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result<Vec<u8>, D::Error> { let base64 = String::deserialize(deserializer)?; let mut buf = Vec::new(); base64::decode_config_buf(base64.as_bytes(), base64::STANDARD, &mut buf) .map_err(serde::de::Error::custom)?; Ok(buf) } }
29.318966
103
0.513378
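The crate-private `base64` module above is shaped for serde's `with` attribute. A hedged sketch of the intended wiring from inside the crate; the struct is illustrative, not part of the generated protos, and it assumes serde's derive feature is enabled:
#[derive(serde::Serialize, serde::Deserialize)]
struct Blob {
    // Serialized as a standard-alphabet base64 string.
    #[serde(with = "crate::base64")]
    data: alloc::vec::Vec<u8>,
}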
03f5534fb64c95cdc81b4e3b6a343fda05bc0c81
574
use modelfox_ui as ui; use pinwheel::prelude::*; pub struct ClassSelectField { pub class: String, pub classes: Vec<String>, } impl Component for ClassSelectField { fn into_node(self) -> Node { let options = self .classes .iter() .map(|class_name| ui::SelectFieldOption { text: class_name.clone(), value: class_name.clone(), }) .collect::<Vec<_>>(); ui::SelectField::new() .id("class_select_field".to_owned()) .label("Select Class".to_owned()) .name("class".to_owned()) .options(options) .value(self.class) .into_node() } }
20.5
44
0.651568
11605de563c86b10482aa9cd66352de7e032fa38
4,300
use cranelift_entity::entity_impl; use cranelift_entity::EntityRef; #[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct RegBankIndex(u32); entity_impl!(RegBankIndex); pub struct RegBank { pub name: &'static str, pub first_unit: u8, pub units: u8, pub names: Vec<&'static str>, pub prefix: &'static str, pub pressure_tracking: bool, pub toprcs: Vec<RegClassIndex>, pub classes: Vec<RegClassIndex>, } impl RegBank { pub fn new( name: &'static str, first_unit: u8, units: u8, names: Vec<&'static str>, prefix: &'static str, pressure_tracking: bool, ) -> Self { RegBank { name, first_unit, units, names, prefix, pressure_tracking, toprcs: Vec::new(), classes: Vec::new(), } } } #[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct RegClassIndex(u32); entity_impl!(RegClassIndex); pub struct RegClass { pub name: &'static str, pub index: RegClassIndex, pub width: u8, pub bank: RegBankIndex, pub toprc: RegClassIndex, pub count: u8, pub start: u8, pub subclasses: Vec<RegClassIndex>, } impl RegClass { pub fn new( name: &'static str, index: RegClassIndex, width: u8, bank: RegBankIndex, toprc: RegClassIndex, count: u8, start: u8, ) -> Self { Self { name, index, width, bank, toprc, count, start, subclasses: Vec::new(), } } /// Compute a bit-mask of subclasses, including self. pub fn subclass_mask(&self) -> u64 { let mut m = 1 << self.index.index(); for rc in self.subclasses.iter() { m |= 1 << rc.index(); } m } /// Compute a bit-mask of the register units allocated by this register class. pub fn mask(&self, bank_first_unit: u8) -> Vec<u32> { let mut u = (self.start + bank_first_unit) as usize; let mut out_mask = vec![0, 0, 0]; for _ in 0..self.count { out_mask[u / 32] |= 1 << (u % 32); u += self.width as usize; } out_mask } } pub enum RegClassProto { TopLevel(RegBankIndex), SubClass(RegClassIndex), } pub struct RegClassBuilder { pub name: &'static str, pub width: u8, pub count: u8, pub start: u8, pub proto: RegClassProto, } impl RegClassBuilder { pub fn new_toplevel(name: &'static str, bank: RegBankIndex) -> Self { Self { name, width: 1, count: 0, start: 0, proto: RegClassProto::TopLevel(bank), } } pub fn subclass_of( name: &'static str, parent_index: RegClassIndex, start: u8, stop: u8, ) -> Self { assert!(stop >= start); Self { name, width: 0, count: stop - start, start: start, proto: RegClassProto::SubClass(parent_index), } } pub fn count(mut self, count: u8) -> Self { self.count = count; self } pub fn width(mut self, width: u8) -> Self { match self.proto { RegClassProto::TopLevel(_) => self.width = width, RegClassProto::SubClass(_) => panic!("Subclasses inherit their parent's width."), } self } } pub struct RegBankBuilder { pub name: &'static str, pub units: u8, pub names: Vec<&'static str>, pub prefix: &'static str, pub pressure_tracking: Option<bool>, } impl RegBankBuilder { pub fn new(name: &'static str, prefix: &'static str) -> Self { Self { name, units: 0, names: vec![], prefix, pressure_tracking: None, } } pub fn units(mut self, units: u8) -> Self { self.units = units; self } pub fn names(mut self, names: Vec<&'static str>) -> Self { self.names = names; self } pub fn track_pressure(mut self, track: bool) -> Self { self.pressure_tracking = Some(track); self } }
23.756906
93
0.528605
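A worked example of `RegClass::mask`: width 2 and count 4 starting at unit 8 (bank first unit 0) marks every other unit, i.e. bits 8, 10, 12 and 14 of the first 32-bit word. The indices are built with `EntityRef::new`, which `entity_impl!` provides.
#[test]
fn mask_marks_every_other_unit() {
    let rc = RegClass::new(
        "I16",
        RegClassIndex::new(0),
        2,                     // width: each register covers two units
        RegBankIndex::new(0),
        RegClassIndex::new(0), // toprc: itself, for this sketch
        4,                     // count
        8,                     // start
    );
    assert_eq!(rc.mask(0), vec![0b0101_0101 << 8, 0, 0]);
}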
16a90f83a135a40bad9094cc3a4e1dfd8dd8f9f3
3,035
use crate::Envelope; #[derive(Debug, Copy, Clone)] enum Stage { Attack, Decay, Sustain, Release, None, } #[derive(Debug, Copy, Clone)] pub struct PulseModulator { clock: f32, amplitude: f32, envelope: Envelope, sample_rate: f32, stage: Stage, active: bool, // These are the coefficients from // the envelope function att_coef: f32, dec_coef: f32, rel_coef: f32, } impl PulseModulator { pub fn new(envelope: Envelope, sample_rate: f32) -> Self { let mut pulse_modulator = Self { clock: 0.0, amplitude: 0.0, envelope, sample_rate, stage: Stage::None, active: false, att_coef: 0.0, dec_coef: 0.0, rel_coef: 0.0, }; pulse_modulator.calc_envelope_coef(); return pulse_modulator; } pub fn set_envelope(&mut self, envelope: Envelope) { self.envelope = envelope; self.calc_envelope_coef(); } fn calc_envelope_coef(&mut self) { self.att_coef = 1.0 / (self.sample_rate * self.envelope.attack); self.dec_coef = (1.0 - self.envelope.sustain) / (self.sample_rate * self.envelope.decay); self.rel_coef = self.envelope.sustain / (self.sample_rate * self.envelope.release); } pub fn start(&mut self) { self.clock = 0.0; self.stage = Stage::Attack; self.active = true; } pub fn stop(&mut self) { self.active = false; } pub fn next(&mut self) -> f32 { self.clock += 1.0; match self.stage { Stage::Attack => { self.amplitude += self.att_coef; if self.amplitude > 1.0 { self.amplitude = 1.0; self.stage = Stage::Decay; } }, Stage::Decay => { self.amplitude -= self.dec_coef; if self.amplitude <= self.envelope.sustain { self.amplitude = self.envelope.sustain; self.stage = Stage::Sustain; } }, Stage::Sustain => { if !self.active { self.stage = Stage::Release; } }, Stage::Release => { self.amplitude -= self.rel_coef; if self.amplitude < 0.0 { self.amplitude = 0.0; self.stage = Stage::None; } }, Stage::None => {}, }; return self.amplitude; } } #[cfg(test)] mod tests { use super::*; const ENVELOPE: Envelope = Envelope { attack: 1.0, decay: 0.2, sustain: 0.5, release: 2.0, }; #[test] fn calculates_attack_coef() { let p_m = PulseModulator::new(ENVELOPE, 10.0); assert_eq!(p_m.att_coef, 0.1); } #[test] fn calculates_decay_coef() { let p_m = PulseModulator::new(ENVELOPE, 10.0); assert_eq!(p_m.dec_coef, 0.25); } #[test] fn calculates_release_coef() { let p_m = PulseModulator::new(ENVELOPE, 10.0); assert_eq!(p_m.rel_coef, 0.025); } }
24.475806
95
0.536738
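Stepping the modulator through a note, reusing the `Envelope` shape from the tests above: with a 10 Hz sample rate and a one-second attack, ten calls to `next()` ramp the amplitude to 1.0; `stop()` then lets the Sustain stage fall through to Release.
fn trace_envelope() {
    let envelope = Envelope { attack: 1.0, decay: 0.2, sustain: 0.5, release: 2.0 };
    let mut pm = PulseModulator::new(envelope, 10.0);
    pm.start();
    let rising: Vec<f32> = (0..10).map(|_| pm.next()).collect();
    assert!((rising[9] - 1.0).abs() < 1e-6); // top of the attack ramp
    pm.stop(); // Sustain will now fall through to Release
}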
f5e615e392f52f4c32075eb1cf156ed6415db086
5,205
// Copyright (C) 2017-2018 Baidu, Inc. All Rights Reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in // the documentation and/or other materials provided with the // distribution. // * Neither the name of Baidu, Inc., nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. extern crate sgx_types; extern crate sgx_urts; extern crate dirs; use sgx_types::*; use sgx_urts::SgxEnclave; use std::io::{Read, Write}; use std::fs; use std::path; static ENCLAVE_FILE: &'static str = "enclave.signed.so"; static ENCLAVE_TOKEN: &'static str = "enclave.token"; extern { fn sample_main (eid: sgx_enclave_id_t, retval: *mut sgx_status_t) -> sgx_status_t; } fn init_enclave() -> SgxResult<SgxEnclave> { let mut launch_token: sgx_launch_token_t = [0; 1024]; let mut launch_token_updated: i32 = 0; // Step 1: try to retrieve the launch token saved by last transaction // if there is no token, then create a new one. // // try to get the token saved in $HOME let mut home_dir = path::PathBuf::new(); let use_token = match dirs::home_dir() { Some(path) => { println!("[+] Home dir is {}", path.display()); home_dir = path; true }, None => { println!("[-] Cannot get home dir"); false } }; let token_file: path::PathBuf = home_dir.join(ENCLAVE_TOKEN); if use_token == true { match fs::File::open(&token_file) { Err(_) => { println!("[-] Open token file {} error! Will create one.", token_file.as_path().to_str().unwrap()); }, Ok(mut f) => { println!("[+] Open token file success! 
"); match f.read(&mut launch_token) { Ok(1024) => { println!("[+] Token file valid!"); }, _ => println!("[+] Token file invalid, will create new token file"), } } } } // Step 2: call sgx_create_enclave to initialize an enclave instance // Debug Support: set 2nd parameter to 1 let debug = 1; let mut misc_attr = sgx_misc_attribute_t {secs_attr: sgx_attributes_t { flags:0, xfrm:0}, misc_select:0}; let enclave = try!(SgxEnclave::create(ENCLAVE_FILE, debug, &mut launch_token, &mut launch_token_updated, &mut misc_attr)); // Step 3: save the launch token if it is updated if use_token == true && launch_token_updated != 0 { // reopen the file with write capablity match fs::File::create(&token_file) { Ok(mut f) => { match f.write_all(&launch_token) { Ok(()) => println!("[+] Saved updated launch token!"), Err(_) => println!("[-] Failed to save updated launch token!"), } }, Err(_) => { println!("[-] Failed to save updated enclave token, but doesn't matter"); }, } } Ok(enclave) } fn main() { let enclave = match init_enclave() { Ok(r) => { println!("[+] Init Enclave Successful {}!", r.geteid()); r }, Err(x) => { println!("[-] Init Enclave Failed {}!", x.as_str()); return; }, }; let mut retval = sgx_status_t::SGX_SUCCESS; let result = unsafe { sample_main(enclave.geteid(), &mut retval) }; match result { sgx_status_t::SGX_SUCCESS => {}, _ => { println!("[-] ECALL Enclave Failed {}!", result.as_str()); return; } } println!("[+] say_something success..."); enclave.destroy(); }
35.650685
115
0.579059
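The launch-token handling above is a read-modify-write of a fixed 1024-byte file. The same round-trip in isolation, std only:
use std::fs;
use std::io::{Read, Write};
use std::path::Path;

fn save_token(path: &Path, token: &[u8; 1024]) -> std::io::Result<()> {
    fs::File::create(path)?.write_all(token)
}

fn load_token(path: &Path, token: &mut [u8; 1024]) -> bool {
    match fs::File::open(path) {
        // Only a full 1024-byte read counts as a valid token.
        Ok(mut f) => matches!(f.read(&mut token[..]), Ok(1024)),
        Err(_) => false,
    }
}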
29fee55caf80b70cf4a727399843e9a2a6365ddc
16,488
#[cfg(test)] #[path = "../../../tests/unit/construction/heuristics/evaluators_test.rs"] mod evaluators_test; use std::sync::Arc; use crate::construction::constraints::{ActivityConstraintViolation, ConstraintPipeline}; use crate::construction::heuristics::*; use crate::models::common::Cost; use crate::models::problem::{Job, Multi, Single}; use crate::models::solution::{Activity, Leg, Place}; use crate::utils::Either; use rosomaxa::utils::unwrap_from_result; use std::iter::repeat; /// Specifies evaluation context data. pub struct EvaluationContext<'a> { /// An actual constraint. pub constraint: &'a ConstraintPipeline, /// A job which is about to be inserted. pub job: &'a Job, /// A leg selector. pub leg_selector: &'a (dyn LegSelector + Send + Sync), /// A result selector. pub result_selector: &'a (dyn ResultSelector + Send + Sync), } /// Specifies allowed insertion position in route for the job. #[derive(Copy, Clone)] pub enum InsertionPosition { /// Job can be inserted anywhere in the route. Any, /// Job can be inserted only at the leg with the concrete index. Concrete(usize), /// Job can be inserted only to the end of the route. Last, } /// Evaluates possibility to perform insertion from given insertion context in given route /// at given position constraint. pub fn evaluate_job_insertion_in_route( insertion_ctx: &InsertionContext, eval_ctx: &EvaluationContext, route_ctx: &RouteContext, position: InsertionPosition, alternative: InsertionResult, ) -> InsertionResult { // NOTE do not evaluate unassigned job in unmodified route if it has a positive code match (route_ctx.is_stale(), insertion_ctx.solution.unassigned.get(eval_ctx.job)) { (false, Some(code)) if *code > 0 => return alternative, _ => {} } let constraint = &insertion_ctx.problem.constraint; if let Some(violation) = constraint.evaluate_hard_route(&insertion_ctx.solution, route_ctx, eval_ctx.job) { return eval_ctx.result_selector.select_insertion( insertion_ctx, alternative, InsertionResult::make_failure_with_code(violation.code, true, Some(eval_ctx.job.clone())), ); } let route_costs = constraint.evaluate_soft_route(&insertion_ctx.solution, route_ctx, eval_ctx.job); let best_known_cost = match &alternative { InsertionResult::Success(success) => Some(success.cost), _ => None, }; if let Some(best_known_cost) = best_known_cost { if best_known_cost < route_costs { return alternative; } } eval_ctx.result_selector.select_insertion( insertion_ctx, alternative, evaluate_job_constraint_in_route(eval_ctx, route_ctx, position, route_costs, best_known_cost), ) } /// Evaluates possibility to perform insertion in route context only. /// NOTE: doesn't evaluate constraints on route level. 
pub fn evaluate_job_constraint_in_route( eval_ctx: &EvaluationContext, route_ctx: &RouteContext, position: InsertionPosition, route_costs: Cost, best_known_cost: Option<Cost>, ) -> InsertionResult { match eval_ctx.job { Job::Single(single) => evaluate_single(eval_ctx, route_ctx, single, position, route_costs, best_known_cost), Job::Multi(multi) => evaluate_multi(eval_ctx, route_ctx, multi, position, route_costs, best_known_cost), } } pub(crate) fn evaluate_single_constraint_in_route( insertion_ctx: &InsertionContext, eval_ctx: &EvaluationContext, route_ctx: &RouteContext, single: &Arc<Single>, position: InsertionPosition, route_costs: Cost, best_known_cost: Option<Cost>, ) -> InsertionResult { if let Some(violation) = eval_ctx.constraint.evaluate_hard_route(&insertion_ctx.solution, route_ctx, eval_ctx.job) { InsertionResult::Failure(InsertionFailure { constraint: violation.code, stopped: true, job: Some(eval_ctx.job.clone()), }) } else { evaluate_single(eval_ctx, route_ctx, single, position, route_costs, best_known_cost) } } fn evaluate_single( eval_ctx: &EvaluationContext, route_ctx: &RouteContext, single: &Arc<Single>, position: InsertionPosition, route_costs: Cost, best_known_cost: Option<Cost>, ) -> InsertionResult { let insertion_idx = get_insertion_index(route_ctx, position); let mut activity = Activity::new_with_job(single.clone()); let result = analyze_insertion_in_route( eval_ctx, route_ctx, insertion_idx, single, &mut activity, SingleContext::new(best_known_cost, 0), ); let job = eval_ctx.job.clone(); if result.is_success() { activity.place = result.place.unwrap(); let activities = vec![(activity, result.index)]; InsertionResult::make_success(result.cost.unwrap() + route_costs, job, activities, route_ctx.clone()) } else { let (code, stopped) = result.violation.map_or((0, false), |v| (v.code, v.stopped)); InsertionResult::make_failure_with_code(code, stopped, Some(job)) } } fn evaluate_multi( eval_ctx: &EvaluationContext, route_ctx: &RouteContext, multi: &Arc<Multi>, position: InsertionPosition, route_costs: Cost, best_known_cost: Option<Cost>, ) -> InsertionResult { let insertion_idx = get_insertion_index(route_ctx, position).unwrap_or(0); // 1. analyze permutations let result = unwrap_from_result(multi.permutations().into_iter().try_fold( MultiContext::new(best_known_cost, insertion_idx), |acc_res, services| { let mut shadow = ShadowContext::new(eval_ctx.constraint, route_ctx); let perm_res = unwrap_from_result(repeat(0).try_fold(MultiContext::new(None, insertion_idx), |out, _| { if out.is_failure(route_ctx.route.tour.job_activity_count()) { return Result::Err(out); } shadow.restore(route_ctx); // 2. analyze inner jobs let sq_res = unwrap_from_result(services.iter().try_fold(out.next(), |in1, service| { if in1.violation.is_some() { return Result::Err(in1); } let mut activity = Activity::new_with_job(service.clone()); // 3. analyze legs let srv_res = analyze_insertion_in_route( eval_ctx, &shadow.ctx, None, service, &mut activity, SingleContext::new(None, in1.next_index), ); if srv_res.is_success() { activity.place = srv_res.place.unwrap(); let activity = shadow.insert(activity, srv_res.index); let activities = concat_activities(in1.activities, (activity, srv_res.index)); return MultiContext::success(in1.cost.unwrap_or(0.) 
+ srv_res.cost.unwrap(), activities); } MultiContext::fail(srv_res, in1) })); MultiContext::promote(sq_res, out) })); MultiContext::promote(perm_res, acc_res) }, )); let job = eval_ctx.job.clone(); if result.is_success() { let activities = result.activities.unwrap(); InsertionResult::make_success(result.cost.unwrap() + route_costs, job, activities, route_ctx.clone()) } else { let (code, stopped) = result.violation.map_or((0, false), |v| (v.code, v.stopped)); InsertionResult::make_failure_with_code(code, stopped, Some(job)) } } fn analyze_insertion_in_route( eval_ctx: &EvaluationContext, route_ctx: &RouteContext, insertion_idx: Option<usize>, single: &Single, target: &mut Activity, init: SingleContext, ) -> SingleContext { unwrap_from_result(match insertion_idx { Some(idx) => { if let Some(leg) = route_ctx.route.tour.legs().nth(idx) { analyze_insertion_in_route_leg(eval_ctx, route_ctx, leg, single, target, init) } else { Ok(init) } } None => eval_ctx .leg_selector .get_legs(route_ctx, eval_ctx.job, init.index) .try_fold(init, |out, leg| analyze_insertion_in_route_leg(eval_ctx, route_ctx, leg, single, target, out)), }) } fn analyze_insertion_in_route_leg( eval_ctx: &EvaluationContext, route_ctx: &RouteContext, leg: Leg, single: &Single, target: &mut Activity, out: SingleContext, ) -> Result<SingleContext, SingleContext> { let (items, index) = leg; let (prev, next) = match items { [prev] => (prev, None), [prev, next] => (prev, Some(next)), _ => panic!("Unexpected route leg configuration."), }; let start_time = route_ctx.route.tour.start().unwrap().schedule.departure; // analyze service details single.places.iter().try_fold(out, |in1, detail| { // analyze detail time windows detail.times.iter().try_fold(in1, |in2, time| { target.place = Place { location: detail.location.unwrap_or(prev.place.location), duration: detail.duration, time: time.to_time_window(start_time), }; let activity_ctx = ActivityContext { index, prev, target, next }; if let Some(violation) = eval_ctx.constraint.evaluate_hard_activity(route_ctx, &activity_ctx) { return SingleContext::fail(violation, in2); } let costs = eval_ctx.constraint.evaluate_soft_activity(route_ctx, &activity_ctx); let other_costs = in2.cost.unwrap_or(f64::MAX); match eval_ctx.result_selector.select_cost(route_ctx, costs, other_costs) { Either::Left(_) => SingleContext::success(activity_ctx.index, costs, target.place.clone()), Either::Right(_) => SingleContext::skip(in2), } }) }) } fn get_insertion_index(route_ctx: &RouteContext, position: InsertionPosition) -> Option<usize> { match position { InsertionPosition::Any => None, InsertionPosition::Concrete(idx) => Some(idx), InsertionPosition::Last => Some(route_ctx.route.tour.legs().count().max(1) - 1), } } /// Stores information needed for single insertion. #[derive(Debug)] struct SingleContext { /// Constraint violation. pub violation: Option<ActivityConstraintViolation>, /// Insertion index. pub index: usize, /// Best cost. pub cost: Option<Cost>, /// Activity place. pub place: Option<Place>, } impl SingleContext { /// Creates a new empty context with given cost. 
fn new(cost: Option<Cost>, index: usize) -> Self { Self { violation: None, index, cost, place: None } } fn fail(violation: ActivityConstraintViolation, other: SingleContext) -> Result<Self, Self> { let stopped = violation.stopped; let ctx = Self { violation: Some(violation), index: other.index, cost: other.cost, place: other.place }; if stopped { Result::Err(ctx) } else { Result::Ok(ctx) } } #[allow(clippy::unnecessary_wraps)] fn success(index: usize, cost: Cost, place: Place) -> Result<Self, Self> { Result::Ok(Self { violation: None, index, cost: Some(cost), place: Some(place) }) } #[allow(clippy::unnecessary_wraps)] fn skip(other: SingleContext) -> Result<Self, Self> { Result::Ok(other) } fn is_success(&self) -> bool { self.place.is_some() } } /// Stores information needed for multi-job insertion. struct MultiContext { /// Constraint violation. pub violation: Option<ActivityConstraintViolation>, /// Insertion index for first service. pub start_index: usize, /// Insertion index for next service. pub next_index: usize, /// Cost accumulator. pub cost: Option<Cost>, /// Activities with their indices. pub activities: Option<Vec<(Activity, usize)>>, } impl MultiContext { /// Creates a new empty insertion context. fn new(cost: Option<Cost>, index: usize) -> Self { Self { violation: None, start_index: index, next_index: index, cost, activities: None } } /// Promotes insertion context by best price. fn promote(left: Self, right: Self) -> Result<Self, Self> { let index = left.start_index.max(right.start_index) + 1; let best = match (left.cost, right.cost) { (Some(left_cost), Some(right_cost)) => { if left_cost < right_cost { left } else { right } } (Some(_), None) => left, (None, Some(_)) => right, _ => { if left.violation.is_some() { left } else { right } } }; let result = Self { violation: best.violation, start_index: index, next_index: index, cost: best.cost, activities: best.activities, }; if result.violation.as_ref().map_or_else(|| false, |v| v.stopped) { Result::Err(result) } else { Result::Ok(result) } } /// Creates a failed insertion context with a reason code. fn fail(err_ctx: SingleContext, other_ctx: MultiContext) -> Result<Self, Self> { let (code, stopped) = err_ctx.violation.map_or((0, false), |v| (v.code, v.stopped && other_ctx.activities.is_none())); Result::Err(Self { violation: Some(ActivityConstraintViolation { code, stopped }), start_index: other_ctx.start_index, next_index: other_ctx.start_index, cost: None, activities: None, }) } /// Creates a successful insertion context. #[allow(clippy::unnecessary_wraps)] fn success(cost: Cost, activities: Vec<(Activity, usize)>) -> Result<Self, Self> { Result::Ok(Self { violation: None, start_index: activities.first().unwrap().1, next_index: activities.last().unwrap().1 + 1, cost: Some(cost), activities: Some(activities), }) } /// Creates the next insertion context from an existing one. fn next(&self) -> Self { Self { violation: None, start_index: self.start_index, next_index: self.start_index, cost: None, activities: None, } } /// Checks whether an insertion is found. fn is_success(&self) -> bool { self.violation.is_none() && self.cost.is_some() && self.activities.is_some() } /// Checks whether the insertion failed. fn is_failure(&self, index: usize) -> bool { self.violation.as_ref().map_or(false, |v| v.stopped) || (self.start_index > index) } } /// Provides a way to use a copy-on-write strategy within the route state context. 
struct ShadowContext<'a> { is_mutated: bool, is_dirty: bool, constraint: &'a ConstraintPipeline, ctx: RouteContext, } impl<'a> ShadowContext<'a> { fn new(constraint: &'a ConstraintPipeline, ctx: &RouteContext) -> Self { Self { is_mutated: false, is_dirty: false, constraint, ctx: ctx.clone() } } fn insert(&mut self, activity: Activity, index: usize) -> Activity { if !self.is_mutated { self.ctx = self.ctx.deep_copy(); self.is_mutated = true; } self.ctx.route_mut().tour.insert_at(activity.deep_copy(), index + 1); self.constraint.accept_route_state(&mut self.ctx); self.is_dirty = true; activity } fn restore(&mut self, original: &RouteContext) { if self.is_dirty { self.ctx = original.clone(); self.is_mutated = false; self.is_dirty = false; } } } fn concat_activities( activities: Option<Vec<(Activity, usize)>>, activity: (Activity, usize), ) -> Vec<(Activity, usize)> { let mut activities = activities.unwrap_or_default(); activities.push(activity); activities }
34.493724
120
0.613355
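A pattern worth noting in the evaluator above: `try_fold` plus `unwrap_from_result` turns `Result` into pure control flow, where `Err` short-circuits the scan and either variant carries the accumulated state. A self-contained sketch of that idiom; the helper mirrors `rosomaxa::utils::unwrap_from_result`, while the cost scan itself is purely illustrative:

// `Err` aborts the fold early; both variants hold the same accumulator type.
fn unwrap_from_result<T>(result: Result<T, T>) -> T {
    match result {
        Ok(value) | Err(value) => value,
    }
}

fn best_cost(costs: &[f64]) -> f64 {
    unwrap_from_result(costs.iter().try_fold(f64::MAX, |best, &cost| {
        if cost < 0.0 {
            Err(best) // a negative cost stops the scan, keeping the best so far
        } else {
            Ok(best.min(cost))
        }
    }))
}

fn main() {
    assert_eq!(best_cost(&[3.0, 1.5, 2.0]), 1.5);
    assert_eq!(best_cost(&[3.0, -1.0, 0.5]), 3.0);
}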
87ff91cf093fad48a381ff3b6fec9a096ab23bb3
13,556
#![allow(dead_code)] use criterion::{black_box, criterion_group, criterion_main, Criterion}; use mpstthree::binary::struct_trait::{end::End, recv::Recv, send::Send, session::Session}; use mpstthree::role::broadcast::RoleBroadcast; use mpstthree::role::end::RoleEnd; use mpstthree::{ bundle_struct_fork_close_multi_cancel, create_fn_choose_mpst_multi_to_all_bundle, create_multiple_normal_role_short, create_recv_mpst_session_bundle, create_send_mpst_cancel_bundle, offer_mpst, }; use std::error::Error; use std::time::Duration; // Create the new MeshedChannels for six participants and the close and fork functions bundle_struct_fork_close_multi_cancel!(close_mpst_multi, fork_mpst, MeshedChannelsSix, 6); // Create new roles // normal create_multiple_normal_role_short!(A, B, C, D, E, F); // Create new send functions // A create_send_mpst_cancel_bundle!( send_mpst_a_to_b, RoleB, 1 | send_mpst_a_to_c, RoleC, 2 | send_mpst_a_to_d, RoleD, 3 | send_mpst_a_to_e, RoleE, 4 | send_mpst_a_to_f, RoleF, 5 | => RoleA, MeshedChannelsSix, 6 ); // B create_send_mpst_cancel_bundle!( send_mpst_b_to_a, RoleA, 1 | send_mpst_b_to_c, RoleC, 2 | send_mpst_b_to_d, RoleD, 3 | send_mpst_b_to_e, RoleE, 4 | send_mpst_b_to_f, RoleF, 5 | => RoleB, MeshedChannelsSix, 6 ); // C create_send_mpst_cancel_bundle!( send_mpst_c_to_a, RoleA, 1 | send_mpst_c_to_b, RoleB, 2 | send_mpst_c_to_d, RoleD, 3 | send_mpst_c_to_e, RoleE, 4 | send_mpst_c_to_f, RoleF, 5 | => RoleC, MeshedChannelsSix, 6 ); // D create_send_mpst_cancel_bundle!( send_mpst_d_to_a, RoleA, 1 | send_mpst_d_to_b, RoleB, 2 | send_mpst_d_to_c, RoleC, 3 | send_mpst_d_to_e, RoleE, 4 | send_mpst_d_to_f, RoleF, 5 | => RoleD, MeshedChannelsSix, 6 ); // E create_send_mpst_cancel_bundle!( send_mpst_e_to_a, RoleA, 1 | send_mpst_e_to_b, RoleB, 2 | send_mpst_e_to_c, RoleC, 3 | send_mpst_e_to_d, RoleD, 4 | send_mpst_e_to_f, RoleF, 5 | => RoleE, MeshedChannelsSix, 6 ); // F create_send_mpst_cancel_bundle!( send_mpst_f_to_a, RoleA, 1 | send_mpst_f_to_b, RoleB, 2 | send_mpst_f_to_c, RoleC, 3 | send_mpst_f_to_d, RoleD, 4 | send_mpst_f_to_e, RoleE, 5 | => RoleF, MeshedChannelsSix, 6 ); // Create new recv functions and related types // A create_recv_mpst_session_bundle!( recv_mpst_a_from_b, RoleB, 1 | recv_mpst_a_from_c, RoleC, 2 | recv_mpst_a_from_d, RoleD, 3 | recv_mpst_a_from_e, RoleE, 4 | recv_mpst_a_from_f, RoleF, 5 | => RoleA, MeshedChannelsSix, 6 ); // B create_recv_mpst_session_bundle!( recv_mpst_b_from_a, RoleA, 1 | recv_mpst_b_from_c, RoleC, 2 | recv_mpst_b_from_d, RoleD, 3 | recv_mpst_b_from_e, RoleE, 4 | recv_mpst_b_from_f, RoleF, 5 | => RoleB, MeshedChannelsSix, 6 ); // C create_recv_mpst_session_bundle!( recv_mpst_c_from_a, RoleA, 1 | recv_mpst_c_from_b, RoleB, 2 | recv_mpst_c_from_d, RoleD, 3 | recv_mpst_c_from_e, RoleE, 4 | recv_mpst_c_from_f, RoleF, 5 | => RoleC, MeshedChannelsSix, 6 ); // D create_recv_mpst_session_bundle!( recv_mpst_d_from_a, RoleA, 1 | recv_mpst_d_from_b, RoleB, 2 | recv_mpst_d_from_c, RoleC, 3 | recv_mpst_d_from_e, RoleE, 4 | recv_mpst_d_from_f, RoleF, 5 | => RoleD, MeshedChannelsSix, 6 ); // E create_recv_mpst_session_bundle!( recv_mpst_e_from_a, RoleA, 1 | recv_mpst_e_from_b, RoleB, 2 | recv_mpst_e_from_c, RoleC, 3 | recv_mpst_e_from_d, RoleD, 4 | recv_mpst_e_from_f, RoleF, 5 | => RoleE, MeshedChannelsSix, 6 ); // F create_recv_mpst_session_bundle!( recv_mpst_f_from_a, RoleA, 1 | recv_mpst_f_from_b, RoleB, 2 | recv_mpst_f_from_c, RoleC, 3 | recv_mpst_f_from_d, RoleD, 4 | recv_mpst_f_from_e, RoleE, 5 | => RoleF, MeshedChannelsSix, 6 ); // Names type 
NameA = RoleA<RoleEnd>; type NameB = RoleB<RoleEnd>; type NameC = RoleC<RoleEnd>; type NameD = RoleD<RoleEnd>; type NameE = RoleE<RoleEnd>; type NameF = RoleF<RoleEnd>; // Types // Send/Recv type RS = Recv<(), Send<(), End>>; type SR = Send<(), Recv<(), End>>; // Roles type R2A<R> = RoleA<RoleA<R>>; type R2B<R> = RoleB<RoleB<R>>; type R2C<R> = RoleC<RoleC<R>>; type R2D<R> = RoleD<RoleD<R>>; type R2E<R> = RoleE<RoleE<R>>; type R2F<R> = RoleF<RoleF<R>>; // A enum Branching0fromFtoA { More( MeshedChannelsSix< RS, RS, RS, RS, Recv<(), Send<(), RecursAtoF>>, R2F<R2B<R2C<R2D<R2E<RoleF<RoleEnd>>>>>>, NameA, >, ), Done(MeshedChannelsSix<End, End, End, End, End, RoleEnd, NameA>), } type RecursAtoF = <Choose0fromFtoA as Session>::Dual; // B enum Branching0fromFtoB { More( MeshedChannelsSix< SR, RS, RS, RS, Recv<(), Send<(), RecursBtoF>>, R2F<R2A<R2C<R2D<R2E<RoleF<RoleEnd>>>>>>, NameB, >, ), Done(MeshedChannelsSix<End, End, End, End, End, RoleEnd, NameB>), } type RecursBtoF = <Choose0fromFtoB as Session>::Dual; // C enum Branching0fromFtoC { More( MeshedChannelsSix< SR, SR, RS, RS, Recv<(), Send<(), RecursCtoF>>, R2F<R2A<R2B<R2D<R2E<RoleF<RoleEnd>>>>>>, NameC, >, ), Done(MeshedChannelsSix<End, End, End, End, End, RoleEnd, NameC>), } type RecursCtoF = <Choose0fromFtoC as Session>::Dual; // D enum Branching0fromFtoD { More( MeshedChannelsSix< SR, SR, SR, RS, Recv<(), Send<(), RecursDtoF>>, R2F<R2A<R2B<R2C<R2E<RoleF<RoleEnd>>>>>>, NameD, >, ), Done(MeshedChannelsSix<End, End, End, End, End, RoleEnd, NameD>), } type RecursDtoF = <Choose0fromFtoD as Session>::Dual; // E enum Branching0fromFtoE { More( MeshedChannelsSix< SR, SR, SR, SR, Recv<(), Send<(), RecursEtoF>>, R2F<R2A<R2B<R2C<R2D<RoleF<RoleEnd>>>>>>, NameE, >, ), Done(MeshedChannelsSix<End, End, End, End, End, RoleEnd, NameE>), } type RecursEtoF = <Choose0fromFtoE as Session>::Dual; // F type Choose0fromFtoA = Send<Branching0fromFtoA, End>; type Choose0fromFtoB = Send<Branching0fromFtoB, End>; type Choose0fromFtoC = Send<Branching0fromFtoC, End>; type Choose0fromFtoD = Send<Branching0fromFtoD, End>; type Choose0fromFtoE = Send<Branching0fromFtoE, End>; type EndpointDoneF = MeshedChannelsSix<End, End, End, End, End, RoleEnd, NameF>; type EndpointMoreF = MeshedChannelsSix< Send<(), Recv<(), Choose0fromFtoA>>, Send<(), Recv<(), Choose0fromFtoB>>, Send<(), Recv<(), Choose0fromFtoC>>, Send<(), Recv<(), Choose0fromFtoD>>, Send<(), Recv<(), Choose0fromFtoE>>, R2A<R2B<R2C<R2D<R2E<RoleBroadcast>>>>>, NameF, >; // Creating the MP sessions type EndpointA = MeshedChannelsSix<End, End, End, End, RecursAtoF, RoleF<RoleEnd>, NameA>; type EndpointB = MeshedChannelsSix<End, End, End, End, RecursBtoF, RoleF<RoleEnd>, NameB>; type EndpointC = MeshedChannelsSix<End, End, End, End, RecursCtoF, RoleF<RoleEnd>, NameC>; type EndpointD = MeshedChannelsSix<End, End, End, End, RecursDtoF, RoleF<RoleEnd>, NameD>; type EndpointE = MeshedChannelsSix<End, End, End, End, RecursEtoF, RoleF<RoleEnd>, NameE>; type EndpointF = MeshedChannelsSix< Choose0fromFtoA, Choose0fromFtoB, Choose0fromFtoC, Choose0fromFtoD, Choose0fromFtoE, RoleBroadcast, NameF, >; create_fn_choose_mpst_multi_to_all_bundle!( done_from_f_to_all, more_from_f_to_all, => Done, More, => EndpointDoneF, EndpointMoreF, => Branching0fromFtoA, Branching0fromFtoB, Branching0fromFtoC, Branching0fromFtoD, Branching0fromFtoE, => RoleA, RoleB, RoleC, RoleD, RoleE, => RoleF, MeshedChannelsSix, 6 ); fn endpoint_a(s: EndpointA) -> Result<(), Box<dyn Error>> { offer_mpst!(s, recv_mpst_a_from_f, { Branching0fromFtoA::Done(s) => { 
close_mpst_multi(s) }, Branching0fromFtoA::More(s) => { let (_, s) = recv_mpst_a_from_f(s)?; let s = send_mpst_a_to_f((), s)?; let (_, s) = recv_mpst_a_from_b(s)?; let s = send_mpst_a_to_b((), s)?; let (_, s) = recv_mpst_a_from_c(s)?; let s = send_mpst_a_to_c((), s)?; let (_, s) = recv_mpst_a_from_d(s)?; let s = send_mpst_a_to_d((), s)?; let (_, s) = recv_mpst_a_from_e(s)?; let s = send_mpst_a_to_e((), s)?; endpoint_a(s) }, }) } fn endpoint_b(s: EndpointB) -> Result<(), Box<dyn Error>> { offer_mpst!(s, recv_mpst_b_from_f, { Branching0fromFtoB::Done(s) => { close_mpst_multi(s) }, Branching0fromFtoB::More(s) => { let (_, s) = recv_mpst_b_from_f(s)?; let s = send_mpst_b_to_f((), s)?; let s = send_mpst_b_to_a((), s)?; let (_, s) = recv_mpst_b_from_a(s)?; let (_, s) = recv_mpst_b_from_c(s)?; let s = send_mpst_b_to_c((), s)?; let (_, s) = recv_mpst_b_from_d(s)?; let s = send_mpst_b_to_d((), s)?; let (_, s) = recv_mpst_b_from_e(s)?; let s = send_mpst_b_to_e((), s)?; endpoint_b(s) }, }) } fn endpoint_c(s: EndpointC) -> Result<(), Box<dyn Error>> { offer_mpst!(s, recv_mpst_c_from_f, { Branching0fromFtoC::Done(s) => { close_mpst_multi(s) }, Branching0fromFtoC::More(s) => { let (_, s) = recv_mpst_c_from_f(s)?; let s = send_mpst_c_to_f((), s)?; let s = send_mpst_c_to_a((), s)?; let (_, s) = recv_mpst_c_from_a(s)?; let s = send_mpst_c_to_b((), s)?; let (_, s) = recv_mpst_c_from_b(s)?; let (_, s) = recv_mpst_c_from_d(s)?; let s = send_mpst_c_to_d((), s)?; let (_, s) = recv_mpst_c_from_e(s)?; let s = send_mpst_c_to_e((), s)?; endpoint_c(s) }, }) } fn endpoint_d(s: EndpointD) -> Result<(), Box<dyn Error>> { offer_mpst!(s, recv_mpst_d_from_f, { Branching0fromFtoD::Done(s) => { close_mpst_multi(s) }, Branching0fromFtoD::More(s) => { let (_, s) = recv_mpst_d_from_f(s)?; let s = send_mpst_d_to_f((), s)?; let s = send_mpst_d_to_a((), s)?; let (_, s) = recv_mpst_d_from_a(s)?; let s = send_mpst_d_to_b((), s)?; let (_, s) = recv_mpst_d_from_b(s)?; let s = send_mpst_d_to_c((), s)?; let (_, s) = recv_mpst_d_from_c(s)?; let (_, s) = recv_mpst_d_from_e(s)?; let s = send_mpst_d_to_e((), s)?; endpoint_d(s) }, }) } fn endpoint_e(s: EndpointE) -> Result<(), Box<dyn Error>> { offer_mpst!(s, recv_mpst_e_from_f, { Branching0fromFtoE::Done(s) => { close_mpst_multi(s) }, Branching0fromFtoE::More(s) => { let (_, s) = recv_mpst_e_from_f(s)?; let s = send_mpst_e_to_f((), s)?; let s = send_mpst_e_to_a((), s)?; let (_, s) = recv_mpst_e_from_a(s)?; let s = send_mpst_e_to_b((), s)?; let (_, s) = recv_mpst_e_from_b(s)?; let s = send_mpst_e_to_c((), s)?; let (_, s) = recv_mpst_e_from_c(s)?; let s = send_mpst_e_to_d((), s)?; let (_, s) = recv_mpst_e_from_d(s)?; endpoint_e(s) }, }) } fn endpoint_f(s: EndpointF) -> Result<(), Box<dyn Error>> { recurs_f(s, SIZE) } fn recurs_f(s: EndpointF, index: i64) -> Result<(), Box<dyn Error>> { match index { 0 => { let s = done_from_f_to_all(s); close_mpst_multi(s) } i => { let s = more_from_f_to_all(s); let s = send_mpst_f_to_a((), s)?; let (_, s) = recv_mpst_f_from_a(s)?; let s = send_mpst_f_to_b((), s)?; let (_, s) = recv_mpst_f_from_b(s)?; let s = send_mpst_f_to_c((), s)?; let (_, s) = recv_mpst_f_from_c(s)?; let s = send_mpst_f_to_d((), s)?; let (_, s) = recv_mpst_f_from_d(s)?; let s = send_mpst_f_to_e((), s)?; let (_, s) = recv_mpst_f_from_e(s)?; recurs_f(s, i - 1) } } } fn all_mpst() -> Result<(), Box<dyn std::any::Any + std::marker::Send>> { let (thread_a, thread_b, thread_c, thread_d, thread_e, thread_f) = fork_mpst( black_box(endpoint_a), black_box(endpoint_b), black_box(endpoint_c), 
black_box(endpoint_d), black_box(endpoint_e), black_box(endpoint_f), ); thread_a.join()?; thread_b.join()?; thread_c.join()?; thread_d.join()?; thread_e.join()?; thread_f.join()?; Ok(()) } ///////////////////////// static SIZE: i64 = 100; fn mesh_protocol_mpst(c: &mut Criterion) { c.bench_function(&format!("mesh six cancel protocol MPST {}", SIZE), |b| { b.iter(|| all_mpst()) }); } fn long_warmup() -> Criterion { Criterion::default().measurement_time(Duration::new(1800, 0)) } criterion_group! { name = mesh_six; // config = long_warmup(); config = Criterion::default().significance_level(0.1).sample_size(10100); targets = mesh_protocol_mpst } criterion_main!(mesh_six);
31.020595
138
0.594571
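Under the session-typed surface, each `More` iteration above is a ping-pong between role F and every other role. The same message shape in plain std channels, with none of mpstthree's compile-time protocol checking (this sketch deliberately uses no mpstthree API):

use std::sync::mpsc;
use std::thread;

fn main() {
    let (to_b, from_a) = mpsc::channel::<()>();
    let (to_a, from_b) = mpsc::channel::<()>();
    // Role B's side: answer each ping, mirroring recv_mpst_b_from_* / send_mpst_b_to_*.
    let b = thread::spawn(move || {
        for _ in 0..100 {
            from_a.recv().unwrap();
            to_a.send(()).unwrap();
        }
    });
    // Role F's side: drive 100 rounds, as recurs_f does with SIZE.
    for _ in 0..100 {
        to_b.send(()).unwrap();
        from_b.recv().unwrap();
    }
    b.join().unwrap();
}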
ac9e778b940f022bbf80c9f72477ec7f8c20271e
2,319
#![cfg(feature = "reqwest-backend")] use std::env::{remove_var, set_var}; use std::error::Error; use std::net::TcpListener; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Mutex; use std::thread; use std::time::Duration; use env_proxy::for_url; use lazy_static::lazy_static; use reqwest::{blocking::Client, Proxy}; use url::Url; lazy_static! { static ref SERIALISE_TESTS: Mutex<()> = Mutex::new(()); } fn scrub_env() { remove_var("http_proxy"); remove_var("https_proxy"); remove_var("HTTPS_PROXY"); remove_var("ftp_proxy"); remove_var("FTP_PROXY"); remove_var("all_proxy"); remove_var("ALL_PROXY"); remove_var("no_proxy"); remove_var("NO_PROXY"); } // Tests for correctly retrieving the proxy (host, port) tuple from $https_proxy #[test] fn read_basic_proxy_params() { let _guard = SERIALISE_TESTS .lock() .expect("Unable to lock the test guard"); scrub_env(); set_var("https_proxy", "http://proxy.example.com:8080"); let u = Url::parse("https://www.example.org").ok().unwrap(); assert_eq!( for_url(&u).host_port(), Some(("proxy.example.com".to_string(), 8080)) ); } // Tests to verify if socks feature is available and being used #[test] fn socks_proxy_request() { static CALL_COUNT: AtomicUsize = AtomicUsize::new(0); let _guard = SERIALISE_TESTS .lock() .expect("Unable to lock the test guard"); scrub_env(); set_var("all_proxy", "socks5://127.0.0.1:1080"); thread::spawn(move || { let listener = TcpListener::bind("127.0.0.1:1080").unwrap(); let incoming = listener.incoming(); for _ in incoming { CALL_COUNT.fetch_add(1, Ordering::SeqCst); } }); let env_proxy = |url: &Url| for_url(&url).to_url(); let url = Url::parse("http://192.168.0.1/").unwrap(); let client = Client::builder() .proxy(Proxy::custom(env_proxy)) .timeout(Duration::from_secs(1)) .build() .unwrap(); let res = client.get(url.as_str()).send(); if let Err(e) = res { let s = e.source().unwrap(); assert_eq!(CALL_COUNT.load(Ordering::SeqCst), 1); assert!(s.to_string().contains("socks connect error")); } else { panic!("Socks proxy was ignored") } }
27.607143
80
0.620526
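Outside the test harness, the whole `env_proxy` surface these tests rely on is `for_url` plus its `host_port`/`to_url` accessors. A minimal lookup; note that `set_var`/`remove_var` mutate process-global state, which is exactly why the tests above serialize themselves behind a mutex:

use env_proxy::for_url;
use url::Url;

fn main() {
    std::env::set_var("https_proxy", "http://proxy.example.com:8080");
    let url = Url::parse("https://www.example.org").unwrap();
    // host_port() returns None when no applicable *_proxy variable is set.
    if let Some((host, port)) = for_url(&url).host_port() {
        println!("would proxy via {}:{}", host, port);
    }
}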
0131563d36d35e05838561731a5ca21c156b7fbd
1,485
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #[link(name = "rust_test_helpers")] extern { fn rust_interesting_average(_: u64, ...) -> f64; } pub fn main() { // Call without variadic arguments unsafe { assert!(rust_interesting_average(0).is_nan()); } // Call with direct arguments unsafe { assert_eq!(rust_interesting_average(1, 10i64, 10.0f64) as i64, 20); } // Call with named arguments, variable number of them let (x1, x2, x3, x4) = (10i64, 10.0f64, 20i64, 20.0f64); unsafe { assert_eq!(rust_interesting_average(2, x1, x2, x3, x4) as i64, 30); } // A function that takes a function pointer unsafe fn call(fp: unsafe extern fn(u64, ...) -> f64) { let (x1, x2, x3, x4) = (10i64, 10.0f64, 20i64, 20.0f64); assert_eq!(fp(2, x1, x2, x3, x4) as i64, 30); } unsafe { call(rust_interesting_average); // Make a function pointer, pass indirectly let x: unsafe extern fn(u64, ...) -> f64 = rust_interesting_average; call(x); } }
31.595745
76
0.641077
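The same variadic-FFI machinery works against any C variadic function; a sketch using libc's `printf` instead of the test helper, mirroring the `unsafe extern fn(u64, ...)` declarations above:

use std::os::raw::{c_char, c_int};

extern "C" {
    // Standard C printf: a variadic function every libc provides.
    fn printf(fmt: *const c_char, ...) -> c_int;
}

fn main() {
    unsafe {
        // Integer arguments undergo C default promotions, as in the test above.
        printf(b"%d + %d = %d\n\0".as_ptr() as *const c_char, 1, 2, 3);
    }
}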
b93d37f389cbe0758ef42be07b495dff8a96e6db
1,480
use std::path::PathBuf; use anyhow::Result; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Deserialize, Serialize)] #[serde(untagged)] pub enum ResolverConfig { DiskResolver { root: PathBuf }, GithubResolver { github_area: usize }, } #[derive(Clone, Debug, Deserialize, Serialize)] #[serde(untagged)] pub enum ResolverConstraint { ID(usize), Range((usize, usize)), } impl ResolverConstraint { pub fn matches(&self, id: usize) -> bool { match &self { ResolverConstraint::ID(i) => id == *i, ResolverConstraint::Range((min, max)) => id >= *min && id <= *max, } } } #[derive(Clone, Debug, Deserialize, Serialize)] pub struct Resolver { pub constraint: ResolverConstraint, pub config: ResolverConfig, } #[derive(Clone, Debug, Deserialize, Serialize)] pub struct Config { pub index_path: PathBuf, #[serde(default = "Vec::new")] pub resolvers: Vec<Resolver>, } impl Default for Config { fn default() -> Self { let index_path = dirs::data_dir() .unwrap() .join("dalloriam") .join("jd") .join("index.json"); // yolo let resolvers = Vec::new(); Self { index_path, resolvers, } } } impl Config { pub fn load() -> Result<Self> { Ok(cfgloader::load_or_default( "dalloriam/jd", "config", Self::default(), )?) } }
21.142857
78
0.569595
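Because `ResolverConstraint` is `#[serde(untagged)]`, a bare integer deserializes as `ID` and a two-element array as `Range`. A sketch of that mapping, assuming the types above are in scope and `serde_json` is available as a dependency:

fn demo() -> serde_json::Result<()> {
    let id: ResolverConstraint = serde_json::from_str("3")?;           // -> ID(3)
    let range: ResolverConstraint = serde_json::from_str("[10, 19]")?; // -> Range((10, 19))
    assert!(id.matches(3));
    assert!(range.matches(12) && !range.matches(42));
    Ok(())
}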
f419dbd0b13c3739ca002c76dc41d1672ff486cb
9,162
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT use crate::Buildable; use crate::TreeDragDest; use crate::TreeDragSource; use crate::TreeIter; use crate::TreeModel; use crate::TreeSortable; use glib::object::IsA; use glib::translate::*; use std::fmt; glib::wrapper! { #[doc(alias = "GtkTreeStore")] pub struct TreeStore(Object<ffi::GtkTreeStore, ffi::GtkTreeStoreClass>) @implements Buildable, TreeDragDest, TreeDragSource, TreeModel, TreeSortable; match fn { type_ => || ffi::gtk_tree_store_get_type(), } } impl TreeStore { //#[doc(alias = "gtk_tree_store_new")] //pub fn new(n_columns: i32, : /*Unknown conversion*//*Unimplemented*/Fundamental: VarArgs) -> TreeStore { // unsafe { TODO: call ffi:gtk_tree_store_new() } //} //#[doc(alias = "gtk_tree_store_newv")] //pub fn newv(types: /*Unimplemented*/&CArray TypeId { ns_id: 0, id: 30 }) -> TreeStore { // unsafe { TODO: call ffi:gtk_tree_store_newv() } //} } pub const NONE_TREE_STORE: Option<&TreeStore> = None; pub trait TreeStoreExt: 'static { #[doc(alias = "gtk_tree_store_append")] fn append(&self, parent: Option<&TreeIter>) -> TreeIter; #[doc(alias = "gtk_tree_store_clear")] fn clear(&self); #[doc(alias = "gtk_tree_store_insert")] fn insert(&self, parent: Option<&TreeIter>, position: i32) -> TreeIter; #[doc(alias = "gtk_tree_store_insert_after")] fn insert_after(&self, parent: Option<&TreeIter>, sibling: Option<&TreeIter>) -> TreeIter; #[doc(alias = "gtk_tree_store_insert_before")] fn insert_before(&self, parent: Option<&TreeIter>, sibling: Option<&TreeIter>) -> TreeIter; //#[doc(alias = "gtk_tree_store_insert_with_values")] //fn insert_with_values(&self, parent: Option<&TreeIter>, position: i32, : /*Unknown conversion*//*Unimplemented*/Fundamental: VarArgs) -> TreeIter; //#[doc(alias = "gtk_tree_store_insert_with_valuesv")] //fn insert_with_valuesv(&self, parent: Option<&TreeIter>, position: i32, columns: &[i32], values: &[&glib::Value]) -> TreeIter; #[doc(alias = "gtk_tree_store_is_ancestor")] fn is_ancestor(&self, iter: &TreeIter, descendant: &TreeIter) -> bool; #[doc(alias = "gtk_tree_store_iter_depth")] fn iter_depth(&self, iter: &TreeIter) -> i32; #[doc(alias = "gtk_tree_store_iter_is_valid")] fn iter_is_valid(&self, iter: &TreeIter) -> bool; #[doc(alias = "gtk_tree_store_move_after")] fn move_after(&self, iter: &TreeIter, position: Option<&TreeIter>); #[doc(alias = "gtk_tree_store_move_before")] fn move_before(&self, iter: &TreeIter, position: Option<&TreeIter>); #[doc(alias = "gtk_tree_store_prepend")] fn prepend(&self, parent: Option<&TreeIter>) -> TreeIter; #[doc(alias = "gtk_tree_store_remove")] fn remove(&self, iter: &TreeIter) -> bool; //#[doc(alias = "gtk_tree_store_set")] //fn set(&self, iter: &TreeIter, : /*Unknown conversion*//*Unimplemented*/Fundamental: VarArgs); //#[doc(alias = "gtk_tree_store_set_column_types")] //fn set_column_types(&self, types: /*Unimplemented*/&CArray TypeId { ns_id: 0, id: 30 }); //#[doc(alias = "gtk_tree_store_set_valist")] //fn set_valist(&self, iter: &TreeIter, var_args: /*Unknown conversion*//*Unimplemented*/Unsupported); //#[doc(alias = "gtk_tree_store_set_valuesv")] //fn set_valuesv(&self, iter: &TreeIter, columns: &[i32], values: &[&glib::Value]); #[doc(alias = "gtk_tree_store_swap")] fn swap(&self, a: &TreeIter, b: &TreeIter); } impl<O: IsA<TreeStore>> TreeStoreExt for O { fn append(&self, parent: Option<&TreeIter>) -> TreeIter { unsafe { let mut iter = TreeIter::uninitialized(); ffi::gtk_tree_store_append( 
self.as_ref().to_glib_none().0, iter.to_glib_none_mut().0, mut_override(parent.to_glib_none().0), ); iter } } fn clear(&self) { unsafe { ffi::gtk_tree_store_clear(self.as_ref().to_glib_none().0); } } fn insert(&self, parent: Option<&TreeIter>, position: i32) -> TreeIter { unsafe { let mut iter = TreeIter::uninitialized(); ffi::gtk_tree_store_insert( self.as_ref().to_glib_none().0, iter.to_glib_none_mut().0, mut_override(parent.to_glib_none().0), position, ); iter } } fn insert_after(&self, parent: Option<&TreeIter>, sibling: Option<&TreeIter>) -> TreeIter { unsafe { let mut iter = TreeIter::uninitialized(); ffi::gtk_tree_store_insert_after( self.as_ref().to_glib_none().0, iter.to_glib_none_mut().0, mut_override(parent.to_glib_none().0), mut_override(sibling.to_glib_none().0), ); iter } } fn insert_before(&self, parent: Option<&TreeIter>, sibling: Option<&TreeIter>) -> TreeIter { unsafe { let mut iter = TreeIter::uninitialized(); ffi::gtk_tree_store_insert_before( self.as_ref().to_glib_none().0, iter.to_glib_none_mut().0, mut_override(parent.to_glib_none().0), mut_override(sibling.to_glib_none().0), ); iter } } //fn insert_with_values(&self, parent: Option<&TreeIter>, position: i32, : /*Unknown conversion*//*Unimplemented*/Fundamental: VarArgs) -> TreeIter { // unsafe { TODO: call ffi:gtk_tree_store_insert_with_values() } //} //fn insert_with_valuesv(&self, parent: Option<&TreeIter>, position: i32, columns: &[i32], values: &[&glib::Value]) -> TreeIter { // unsafe { TODO: call ffi:gtk_tree_store_insert_with_valuesv() } //} fn is_ancestor(&self, iter: &TreeIter, descendant: &TreeIter) -> bool { unsafe { from_glib(ffi::gtk_tree_store_is_ancestor( self.as_ref().to_glib_none().0, mut_override(iter.to_glib_none().0), mut_override(descendant.to_glib_none().0), )) } } fn iter_depth(&self, iter: &TreeIter) -> i32 { unsafe { ffi::gtk_tree_store_iter_depth( self.as_ref().to_glib_none().0, mut_override(iter.to_glib_none().0), ) } } fn iter_is_valid(&self, iter: &TreeIter) -> bool { unsafe { from_glib(ffi::gtk_tree_store_iter_is_valid( self.as_ref().to_glib_none().0, mut_override(iter.to_glib_none().0), )) } } fn move_after(&self, iter: &TreeIter, position: Option<&TreeIter>) { unsafe { ffi::gtk_tree_store_move_after( self.as_ref().to_glib_none().0, mut_override(iter.to_glib_none().0), mut_override(position.to_glib_none().0), ); } } fn move_before(&self, iter: &TreeIter, position: Option<&TreeIter>) { unsafe { ffi::gtk_tree_store_move_before( self.as_ref().to_glib_none().0, mut_override(iter.to_glib_none().0), mut_override(position.to_glib_none().0), ); } } fn prepend(&self, parent: Option<&TreeIter>) -> TreeIter { unsafe { let mut iter = TreeIter::uninitialized(); ffi::gtk_tree_store_prepend( self.as_ref().to_glib_none().0, iter.to_glib_none_mut().0, mut_override(parent.to_glib_none().0), ); iter } } fn remove(&self, iter: &TreeIter) -> bool { unsafe { from_glib(ffi::gtk_tree_store_remove( self.as_ref().to_glib_none().0, mut_override(iter.to_glib_none().0), )) } } //fn set(&self, iter: &TreeIter, : /*Unknown conversion*//*Unimplemented*/Fundamental: VarArgs) { // unsafe { TODO: call ffi:gtk_tree_store_set() } //} //fn set_column_types(&self, types: /*Unimplemented*/&CArray TypeId { ns_id: 0, id: 30 }) { // unsafe { TODO: call ffi:gtk_tree_store_set_column_types() } //} //fn set_valist(&self, iter: &TreeIter, var_args: /*Unknown conversion*//*Unimplemented*/Unsupported) { // unsafe { TODO: call ffi:gtk_tree_store_set_valist() } //} //fn set_valuesv(&self, iter: &TreeIter, columns: &[i32], values: 
&[&glib::Value]) { // unsafe { TODO: call ffi:gtk_tree_store_set_valuesv() } //} fn swap(&self, a: &TreeIter, b: &TreeIter) { unsafe { ffi::gtk_tree_store_swap( self.as_ref().to_glib_none().0, mut_override(a.to_glib_none().0), mut_override(b.to_glib_none().0), ); } } } impl fmt::Display for TreeStore { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("TreeStore") } }
34.704545
153
0.589937
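A usage sketch against the methods this generated trait exposes. `TreeStore::new` is assumed to come from the crate's handwritten impls, since the generated constructors above are commented out, and GTK must be initialized first:

use gtk::prelude::*;

fn main() {
    gtk::init().unwrap();
    let store = gtk::TreeStore::new(&[glib::Type::STRING]);
    let root = store.append(None);         // gtk_tree_store_append at the toplevel
    let child = store.append(Some(&root)); // and below an existing iter
    assert!(store.is_ancestor(&root, &child));
    assert_eq!(store.iter_depth(&child), 1);
}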
ff12efccb17222cd1f09c0e0161901bef1113a2c
1,809
use super::BytesCompression; use serde::{Deserialize, Serialize}; use std::io; /// The [LZ4 compression algorithm](https://en.wikipedia.org/wiki/LZ4_(compression_algorithm)). #[derive(Clone, Copy, Debug, Deserialize, Serialize)] pub struct Lz4 { /// The compression level, from 0 to 10. 0 is fastest and least aggressive. 10 is slowest and /// most aggressive. pub level: u32, } impl BytesCompression for Lz4 { fn compress_bytes( &self, mut bytes: impl io::Read, compressed_bytes: impl io::Write, ) -> io::Result<()> { let mut encoder = lz4::EncoderBuilder::new() .level(self.level) .build(compressed_bytes)?; io::copy(&mut bytes, &mut encoder)?; let (_output, result) = encoder.finish(); result } fn decompress_bytes( compressed_bytes: impl io::Read, mut bytes: impl io::Write, ) -> io::Result<()> { let mut decoder = lz4::Decoder::new(compressed_bytes)?; io::copy(&mut decoder, &mut bytes)?; Ok(()) } } // ████████╗███████╗███████╗████████╗ // ╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝ // ██║ █████╗ ███████╗ ██║ // ██║ ██╔══╝ ╚════██║ ██║ // ██║ ███████╗███████║ ██║ // ╚═╝ ╚══════╝╚══════╝ ╚═╝ #[cfg(test)] mod tests { use super::*; #[test] fn compress_and_decompress_serializable_type() { let bytes: Vec<u8> = (0u8..100).collect(); let mut compressed_bytes = Vec::new(); Lz4 { level: 10 } .compress_bytes(bytes.as_slice(), &mut compressed_bytes) .unwrap(); let mut decompressed_bytes = Vec::new(); Lz4::decompress_bytes(compressed_bytes.as_slice(), &mut decompressed_bytes).unwrap(); assert_eq!(bytes, decompressed_bytes); } }
28.265625
97
0.532891
0ef5b27dab87a8e8c09b2c4e5aab16421989ded2
1,320
use super::Include; use crate::entity::alias::Alias; use crate::entity::genre::Genre; use crate::entity::lifespan::LifeSpan; use crate::entity::rating::Rating; use crate::entity::tag::Tag; use crate::entity::BrowseBy; #[derive(Debug, Serialize, Deserialize, PartialEq, Clone)] #[serde(rename_all(deserialize = "kebab-case"))] pub struct Event { pub id: String, pub name: String, #[serde(rename = "type")] pub event_type: Option<String>, pub type_id: Option<String>, pub life_span: Option<LifeSpan>, pub disambiguation: String, pub cancelled: bool, pub time: String, // need some info on that value, current IT test returns "" pub setlist: String, // same here pub tags: Option<Vec<Tag>>, pub rating: Option<Rating>, pub aliases: Option<Vec<Alias>>, pub genres: Option<Vec<Genre>>, pub annotation: Option<String>, } impl_browse! { Event, (by_area, BrowseBy::Area), (by_collection, BrowseBy::Collection), (by_artist, BrowseBy::Artist), (by_place, BrowseBy::Place) } impl_includes!( Event, (with_artist_relations, Include::ArtistRelations), (with_tags, Include::Tags), (with_aliases, Include::Aliases), (with_ratings, Include::Rating), (with_genres, Include::Genres), (with_annotations, Include::Annotations) );
27.5
63
0.680303
019730dc6f1e6236288e010dc8bd97ba1adbb376
20,130
use std::{ fmt, fs::Metadata, io, path::{Path, PathBuf}, time::{SystemTime, UNIX_EPOCH}, }; use actix_service::{Service, ServiceFactory}; use actix_web::{ body::{self, BoxBody, SizedStream}, dev::{AppService, HttpServiceFactory, ResourceDef, ServiceRequest, ServiceResponse}, http::{ header::{ self, Charset, ContentDisposition, ContentEncoding, DispositionParam, DispositionType, ExtendedValue, HeaderValue, }, StatusCode, }, Error, HttpMessage, HttpRequest, HttpResponse, Responder, }; use bitflags::bitflags; use derive_more::{Deref, DerefMut}; use futures_core::future::LocalBoxFuture; use mime_guess::from_path; use crate::{encoding::equiv_utf8_text, range::HttpRange}; bitflags! { pub(crate) struct Flags: u8 { const ETAG = 0b0000_0001; const LAST_MD = 0b0000_0010; const CONTENT_DISPOSITION = 0b0000_0100; const PREFER_UTF8 = 0b0000_1000; } } impl Default for Flags { fn default() -> Self { Flags::from_bits_truncate(0b0000_0111) } } /// A file with an associated name. /// /// `NamedFile` can be registered as a service: /// ``` /// use actix_web::App; /// use actix_files::NamedFile; /// /// # async fn run() -> Result<(), Box<dyn std::error::Error>> { /// let file = NamedFile::open_async("./static/index.html").await?; /// let app = App::new().service(file); /// # Ok(()) /// # } /// ``` /// /// It can also be returned from handlers: /// ``` /// use actix_web::{Responder, get}; /// use actix_files::NamedFile; /// /// #[get("/")] /// async fn index() -> impl Responder { ///     NamedFile::open_async("./static/index.html").await /// } /// ``` #[derive(Deref, DerefMut)] pub struct NamedFile { path: PathBuf, #[deref] #[deref_mut] file: File, modified: Option<SystemTime>, pub(crate) md: Metadata, pub(crate) flags: Flags, pub(crate) status_code: StatusCode, pub(crate) content_type: mime::Mime, pub(crate) content_disposition: header::ContentDisposition, pub(crate) encoding: Option<ContentEncoding>, } impl fmt::Debug for NamedFile { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("NamedFile") .field("path", &self.path) .field( "file", #[cfg(feature = "experimental-io-uring")] { &"tokio_uring::File" }, #[cfg(not(feature = "experimental-io-uring"))] { &self.file }, ) .field("modified", &self.modified) .field("md", &self.md) .field("flags", &self.flags) .field("status_code", &self.status_code) .field("content_type", &self.content_type) .field("content_disposition", &self.content_disposition) .field("encoding", &self.encoding) .finish() } } #[cfg(not(feature = "experimental-io-uring"))] pub(crate) use std::fs::File; #[cfg(feature = "experimental-io-uring")] pub(crate) use tokio_uring::fs::File; use super::chunked; impl NamedFile { /// Creates an instance from a previously opened file. /// /// The given `path` need not exist and is only used to determine the `ContentType` and /// `ContentDisposition` headers. 
/// /// # Examples /// ```ignore /// use actix_files::NamedFile; /// use std::io::{self, Write}; /// use std::env; /// use std::fs::File; /// /// fn main() -> io::Result<()> { /// let mut file = File::create("foo.txt")?; /// file.write_all(b"Hello, world!")?; /// let named_file = NamedFile::from_file(file, "bar.txt")?; /// # std::fs::remove_file("foo.txt"); /// Ok(()) /// } /// ``` pub fn from_file<P: AsRef<Path>>(file: File, path: P) -> io::Result<NamedFile> { let path = path.as_ref().to_path_buf(); // Get the name of the file and use it to construct default Content-Type // and Content-Disposition values let (content_type, content_disposition) = { let filename = match path.file_name() { Some(name) => name.to_string_lossy(), None => { return Err(io::Error::new( io::ErrorKind::InvalidInput, "Provided path has no filename", )); } }; let ct = from_path(&path).first_or_octet_stream(); let disposition = match ct.type_() { mime::IMAGE | mime::TEXT | mime::VIDEO => DispositionType::Inline, mime::APPLICATION => match ct.subtype() { mime::JAVASCRIPT | mime::JSON => DispositionType::Inline, name if name == "wasm" => DispositionType::Inline, _ => DispositionType::Attachment, }, _ => DispositionType::Attachment, }; let mut parameters = vec![DispositionParam::Filename(String::from(filename.as_ref()))]; if !filename.is_ascii() { parameters.push(DispositionParam::FilenameExt(ExtendedValue { charset: Charset::Ext(String::from("UTF-8")), language_tag: None, value: filename.into_owned().into_bytes(), })) } let cd = ContentDisposition { disposition, parameters, }; (ct, cd) }; let md = { #[cfg(not(feature = "experimental-io-uring"))] { file.metadata()? } #[cfg(feature = "experimental-io-uring")] { use std::os::unix::prelude::{AsRawFd, FromRawFd}; let fd = file.as_raw_fd(); // SAFETY: fd is borrowed and lives longer than the unsafe block unsafe { let file = std::fs::File::from_raw_fd(fd); let md = file.metadata(); // SAFETY: forget the fd before exiting block in success or error case but don't // run destructor (that would close file handle) std::mem::forget(file); md? } } }; let modified = md.modified().ok(); let encoding = None; Ok(NamedFile { path, file, content_type, content_disposition, md, modified, encoding, status_code: StatusCode::OK, flags: Flags::default(), }) } /// Attempts to open a file in read-only mode. /// /// # Examples /// ``` /// use actix_files::NamedFile; /// let file = NamedFile::open("foo.txt"); /// ``` #[cfg(not(feature = "experimental-io-uring"))] pub fn open<P: AsRef<Path>>(path: P) -> io::Result<NamedFile> { let file = File::open(&path)?; Self::from_file(file, path) } /// Attempts to open a file asynchronously in read-only mode. /// /// When the `experimental-io-uring` crate feature is enabled, this will be async. /// Otherwise, it will be just like [`open`][Self::open]. /// /// # Examples /// ``` /// use actix_files::NamedFile; /// # async fn open() { /// let file = NamedFile::open_async("foo.txt").await.unwrap(); /// # } /// ``` pub async fn open_async<P: AsRef<Path>>(path: P) -> io::Result<NamedFile> { let file = { #[cfg(not(feature = "experimental-io-uring"))] { File::open(&path)? } #[cfg(feature = "experimental-io-uring")] { File::open(&path).await? } }; Self::from_file(file, path) } /// Returns reference to the underlying `File` object. #[inline] pub fn file(&self) -> &File { &self.file } /// Retrieve the path of this file. 
/// /// # Examples /// ``` /// # use std::io; /// use actix_files::NamedFile; /// /// # async fn path() -> io::Result<()> { /// let file = NamedFile::open_async("foo.txt").await?; /// assert_eq!(file.path().as_os_str(), "foo.txt"); /// # Ok(()) /// # } /// ``` #[inline] pub fn path(&self) -> &Path { self.path.as_path() } /// Set response **Status Code** pub fn set_status_code(mut self, status: StatusCode) -> Self { self.status_code = status; self } /// Set the MIME Content-Type for serving this file. By default the Content-Type is inferred /// from the filename extension. #[inline] pub fn set_content_type(mut self, mime_type: mime::Mime) -> Self { self.content_type = mime_type; self } /// Set the Content-Disposition for serving this file. This allows changing the /// `inline/attachment` disposition as well as the filename sent to the peer. /// /// By default the disposition is `inline` for `text/*`, `image/*`, `video/*` and /// `application/{javascript, json, wasm}` mime types, and `attachment` otherwise, and the /// filename is taken from the path provided in the `open` method after converting it to UTF-8 /// (using `to_string_lossy`). #[inline] pub fn set_content_disposition(mut self, cd: header::ContentDisposition) -> Self { self.content_disposition = cd; self.flags.insert(Flags::CONTENT_DISPOSITION); self } /// Disable `Content-Disposition` header. /// /// By default the `Content-Disposition` header is enabled. #[inline] pub fn disable_content_disposition(mut self) -> Self { self.flags.remove(Flags::CONTENT_DISPOSITION); self } /// Set content encoding for serving this file. /// /// Must be used with [`actix_web::middleware::Compress`] to take effect. #[inline] pub fn set_content_encoding(mut self, enc: ContentEncoding) -> Self { self.encoding = Some(enc); self } /// Specifies whether to return `ETag` header in response. /// /// Default is true. #[inline] pub fn use_etag(mut self, value: bool) -> Self { self.flags.set(Flags::ETAG, value); self } /// Specifies whether to return `Last-Modified` header in response. /// /// Default is true. #[inline] pub fn use_last_modified(mut self, value: bool) -> Self { self.flags.set(Flags::LAST_MD, value); self } /// Specifies whether text responses should signal a UTF-8 encoding. /// /// Default is false (but will default to true in a future version). #[inline] pub fn prefer_utf8(mut self, value: bool) -> Self { self.flags.set(Flags::PREFER_UTF8, value); self } /// Creates an `ETag` in a format similar to Apache's. pub(crate) fn etag(&self) -> Option<header::EntityTag> { self.modified.as_ref().map(|mtime| { let ino = { #[cfg(unix)] { use std::os::unix::fs::MetadataExt as _; self.md.ino() } #[cfg(not(unix))] { 0 } }; let dur = mtime .duration_since(UNIX_EPOCH) .expect("modification time must be after epoch"); header::EntityTag::new_strong(format!( "{:x}:{:x}:{:x}:{:x}", ino, self.md.len(), dur.as_secs(), dur.subsec_nanos() )) }) } pub(crate) fn last_modified(&self) -> Option<header::HttpDate> { self.modified.map(|mtime| mtime.into()) } /// Creates an `HttpResponse` with file as a streaming body. 
pub fn into_response(self, req: &HttpRequest) -> HttpResponse<BoxBody> { if self.status_code != StatusCode::OK { let mut res = HttpResponse::build(self.status_code); let ct = if self.flags.contains(Flags::PREFER_UTF8) { equiv_utf8_text(self.content_type.clone()) } else { self.content_type }; res.insert_header((header::CONTENT_TYPE, ct.to_string())); if self.flags.contains(Flags::CONTENT_DISPOSITION) { res.insert_header(( header::CONTENT_DISPOSITION, self.content_disposition.to_string(), )); } if let Some(current_encoding) = self.encoding { res.insert_header((header::CONTENT_ENCODING, current_encoding.as_str())); } let reader = chunked::new_chunked_read(self.md.len(), 0, self.file); return res.streaming(reader); } let etag = if self.flags.contains(Flags::ETAG) { self.etag() } else { None }; let last_modified = if self.flags.contains(Flags::LAST_MD) { self.last_modified() } else { None }; // check preconditions let precondition_failed = if !any_match(etag.as_ref(), req) { true } else if let (Some(ref m), Some(header::IfUnmodifiedSince(ref since))) = (last_modified, req.get_header()) { let t1: SystemTime = (*m).into(); let t2: SystemTime = (*since).into(); match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) { (Ok(t1), Ok(t2)) => t1.as_secs() > t2.as_secs(), _ => false, } } else { false }; // check last modified let not_modified = if !none_match(etag.as_ref(), req) { true } else if req.headers().contains_key(header::IF_NONE_MATCH) { false } else if let (Some(ref m), Some(header::IfModifiedSince(ref since))) = (last_modified, req.get_header()) { let t1: SystemTime = (*m).into(); let t2: SystemTime = (*since).into(); match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) { (Ok(t1), Ok(t2)) => t1.as_secs() <= t2.as_secs(), _ => false, } } else { false }; let mut res = HttpResponse::build(self.status_code); let ct = if self.flags.contains(Flags::PREFER_UTF8) { equiv_utf8_text(self.content_type.clone()) } else { self.content_type }; res.insert_header((header::CONTENT_TYPE, ct.to_string())); if self.flags.contains(Flags::CONTENT_DISPOSITION) { res.insert_header(( header::CONTENT_DISPOSITION, self.content_disposition.to_string(), )); } if let Some(current_encoding) = self.encoding { res.insert_header((header::CONTENT_ENCODING, current_encoding.as_str())); } if let Some(lm) = last_modified { res.insert_header((header::LAST_MODIFIED, lm.to_string())); } if let Some(etag) = etag { res.insert_header((header::ETAG, etag.to_string())); } res.insert_header((header::ACCEPT_RANGES, "bytes")); let mut length = self.md.len(); let mut offset = 0; // check for range header if let Some(ranges) = req.headers().get(header::RANGE) { if let Ok(ranges_header) = ranges.to_str() { if let Ok(ranges) = HttpRange::parse(ranges_header, length) { length = ranges[0].length; offset = ranges[0].start; // don't allow compression middleware to modify partial content res.insert_header(( header::CONTENT_ENCODING, HeaderValue::from_static("identity"), )); res.insert_header(( header::CONTENT_RANGE, format!("bytes {}-{}/{}", offset, offset + length - 1, self.md.len()), )); } else { res.insert_header((header::CONTENT_RANGE, format!("bytes */{}", length))); return res.status(StatusCode::RANGE_NOT_SATISFIABLE).finish(); }; } else { return res.status(StatusCode::BAD_REQUEST).finish(); }; }; if precondition_failed { return res.status(StatusCode::PRECONDITION_FAILED).finish(); } else if not_modified { return res .status(StatusCode::NOT_MODIFIED) .body(body::None::new()) .map_into_boxed_body(); } let reader = 
chunked::new_chunked_read(length, offset, self.file); if offset != 0 || length != self.md.len() { res.status(StatusCode::PARTIAL_CONTENT); } res.body(SizedStream::new(length, reader)) } } /// Returns true if `req` has no `If-Match` header or one which matches `etag`. fn any_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool { match req.get_header::<header::IfMatch>() { None | Some(header::IfMatch::Any) => true, Some(header::IfMatch::Items(ref items)) => { if let Some(some_etag) = etag { for item in items { if item.strong_eq(some_etag) { return true; } } } false } } } /// Returns true if `req` doesn't have an `If-None-Match` header matching `req`. fn none_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool { match req.get_header::<header::IfNoneMatch>() { Some(header::IfNoneMatch::Any) => false, Some(header::IfNoneMatch::Items(ref items)) => { if let Some(some_etag) = etag { for item in items { if item.weak_eq(some_etag) { return false; } } } true } None => true, } } impl Responder for NamedFile { type Body = BoxBody; fn respond_to(self, req: &HttpRequest) -> HttpResponse<Self::Body> { self.into_response(req) } } impl ServiceFactory<ServiceRequest> for NamedFile { type Response = ServiceResponse; type Error = Error; type Config = (); type Service = NamedFileService; type InitError = (); type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>; fn new_service(&self, _: ()) -> Self::Future { let service = NamedFileService { path: self.path.clone(), }; Box::pin(async move { Ok(service) }) } } #[doc(hidden)] #[derive(Debug)] pub struct NamedFileService { path: PathBuf, } impl Service<ServiceRequest> for NamedFileService { type Response = ServiceResponse; type Error = Error; type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>; actix_service::always_ready!(); fn call(&self, req: ServiceRequest) -> Self::Future { let (req, _) = req.into_parts(); let path = self.path.clone(); Box::pin(async move { let file = NamedFile::open_async(path).await?; let res = file.into_response(&req); Ok(ServiceResponse::new(req, res)) }) } } impl HttpServiceFactory for NamedFile { fn register(self, config: &mut AppService) { config.register_service( ResourceDef::root_prefix(self.path.to_string_lossy().as_ref()), None, self, None, ) } }
30.779817
100
0.528713
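The builder methods above compose directly in a handler; a small sketch that serves one file with the UTF-8 and Content-Disposition knobs exercised (the path is illustrative):

use actix_files::NamedFile;
use actix_web::{get, Responder};

#[get("/readme")]
async fn readme() -> actix_web::Result<impl Responder> {
    // open_async works with or without the experimental-io-uring feature.
    Ok(NamedFile::open_async("README.md")
        .await?
        .prefer_utf8(true)
        .disable_content_disposition())
}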
71924f26bfa0a911ce9279f0d4d4e6f881963b57
2,095
use crate::{ Builder, ButtonResponse, Id, ToId, button_control, ColorStyle, TextStyle, Shape, }; use crate::math::{ Rect, Vector2 }; pub struct Checkbox<'a> { id: Id, is_checked: &'a mut bool, } impl<'a> Checkbox<'a> { pub fn new(id: impl ToId, is_checked: &'a mut bool) -> Self { Self { id: id.to_id(), is_checked, } } } impl<'a> Checkbox<'a> { pub fn build(self, builder: &mut Builder) -> ButtonResponse { let color: ColorStyle = builder.style().get(); let text: TextStyle = builder.style().get(); let size = text.label_height(); let checkbox_size = Vector2::new(size, size); let check_size = (size / 3.0, size / 3.0).into(); let bounds = Rect::from_pos_size(builder.content_bounds(checkbox_size).pos(), checkbox_size); let response = button_control(self.id, bounds, builder); if response.clicked() { *self.is_checked = !*self.is_checked; } let is_focused = builder.is_focused(self.id); let is_hovered = builder.is_hovered(self.id); let (background_color, foreground_color) = { let background_color = if is_focused { color.focused_background } else if is_hovered { color.hovered_background } else { color.unhovered_background }; let foreground_color = if is_focused { color.focused_foreground } else if is_hovered { color.hovered_foreground } else { color.unhovered_foreground }; (background_color, foreground_color) }; builder.painter.push_shape(Shape::solid_rect(bounds, background_color, 0.0)); if *self.is_checked { let check_bounds = Rect::from_pos_size(bounds.pos(), check_size); builder.painter.push_shape(Shape::solid_rect(check_bounds, foreground_color, 100.0)); } response } }
26.858974
101
0.567542
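The call shape the widget expects, inferred from `new` and `build` above. `Builder` and a `ToId` impl for string ids come from the surrounding immediate-mode crate, so this is a shape sketch rather than a runnable program:

fn draw_settings(builder: &mut Builder, dark_mode: &mut bool) {
    // The id seeds focus/hover tracking; the bool is flipped inside build()
    // when the control reports a click.
    let response = Checkbox::new("dark_mode", dark_mode).build(builder);
    if response.clicked() {
        // react to the toggle here if needed; the bool is already updated
    }
}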
3acc773a5472e925716c0c8baa3db8d4b4fbf662
3,059
#[doc = "Register `VLCR` reader"] pub struct R(crate::R<VLCR_SPEC>); impl core::ops::Deref for R { type Target = crate::R<VLCR_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<VLCR_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<VLCR_SPEC>) -> Self { R(reader) } } #[doc = "Register `VLCR` writer"] pub struct W(crate::W<VLCR_SPEC>); impl core::ops::Deref for W { type Target = crate::W<VLCR_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<VLCR_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<VLCR_SPEC>) -> Self { W(writer) } } #[doc = "Field `HLINE` reader - Horizontal Line duration"] pub struct HLINE_R(crate::FieldReader<u16, u16>); impl HLINE_R { #[inline(always)] pub(crate) fn new(bits: u16) -> Self { HLINE_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for HLINE_R { type Target = crate::FieldReader<u16, u16>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `HLINE` writer - Horizontal Line duration"] pub struct HLINE_W<'a> { w: &'a mut W, } impl<'a> HLINE_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u16) -> &'a mut W { self.w.bits = (self.w.bits & !0x7fff) | (value as u32 & 0x7fff); self.w } } impl R { #[doc = "Bits 0:14 - Horizontal Line duration"] #[inline(always)] pub fn hline(&self) -> HLINE_R { HLINE_R::new((self.bits & 0x7fff) as u16) } } impl W { #[doc = "Bits 0:14 - Horizontal Line duration"] #[inline(always)] pub fn hline(&mut self) -> HLINE_W { HLINE_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "DSI Host Video Line Configuration Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [vlcr](index.html) module"] pub struct VLCR_SPEC; impl crate::RegisterSpec for VLCR_SPEC { type Ux = u32; } #[doc = "`read()` method returns [vlcr::R](R) reader structure"] impl crate::Readable for VLCR_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [vlcr::W](W) writer structure"] impl crate::Writable for VLCR_SPEC { type Writer = W; } #[doc = "`reset()` method sets VLCR to value 0"] impl crate::Resettable for VLCR_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
29.413462
427
0.598235
56c2f6c37cd90139a727b4f0355e0accf75f23fc
1,387
pub(crate) use decl::make_module;

#[pymodule(name = "dis")]
mod decl {
    use crate::{
        builtins::{PyCode, PyDictRef, PyStrRef},
        bytecode::CodeFlags,
        compile, ItemProtocol, PyObjectRef, PyRef, PyResult, TryFromObject, VirtualMachine,
    };

    #[pyfunction]
    fn dis(obj: PyObjectRef, vm: &VirtualMachine) -> PyResult<()> {
        let co = if let Ok(co) = vm.get_attribute(obj.clone(), "__code__") {
            // Method or function:
            co
        } else if let Ok(co_str) = PyStrRef::try_from_object(vm, obj.clone()) {
            // String:
            vm.compile(co_str.as_str(), compile::Mode::Exec, "<dis>".to_owned())
                .map_err(|err| vm.new_syntax_error(&err))?
                .into_object()
        } else {
            obj
        };
        disassemble(co, vm)
    }

    #[pyfunction]
    fn disassemble(co: PyObjectRef, vm: &VirtualMachine) -> PyResult<()> {
        let code = PyRef::<PyCode>::try_from_object(vm, co)?;
        print!("{}", &code.code);
        Ok(())
    }

    #[pyattr(name = "COMPILER_FLAG_NAMES")]
    fn compiler_flag_names(vm: &VirtualMachine) -> PyDictRef {
        let dict = vm.ctx.new_dict();
        for (name, flag) in CodeFlags::NAME_MAPPING {
            dict.set_item(vm.ctx.new_int(flag.bits()), vm.ctx.new_utf8_str(name), vm)
                .unwrap();
        }
        dict
    }
}
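// Behaviour sketch of the dispatch in `dis` above (illustrative, Python-level):
//
//     import dis
//     dis.dis(some_function)   # looked up via some_function.__code__
//     dis.dis("x = 1 + 2")     # source string, compiled in Exec mode as "<dis>"
//     dis.dis(code_object)     # anything else is assumed to already be a code object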
31.522727
91
0.550108
d682fd2f47fe824d4b4cac7956b9046b0b4368d0
161
use serde::{Deserialize, Serialize};

#[derive(Eq, PartialEq, Debug, Copy, Clone, Serialize, Deserialize)]
pub enum ScrollDirection {
    ScrollUp,
    ScrollDown,
    ScrollLeft,
    ScrollRight,
}
20.125
68
0.695652
6a144412d0751541c914963d464d2bd25a6330dd
257
#[doc(keyword = "match")] //~ ERROR: `#[doc(keyword)]` is meant for internal use only
/// wonderful
mod foo {}

trait Mine {}

#[doc(tuple_variadic)] //~ ERROR: `#[doc(tuple_variadic)]` is meant for internal use only
impl<T> Mine for (T,) {}

fn main() {}
23.363636
90
0.63035
1406ca8d010f83eada5167f5b798be19e4fd3a91
4,899
use crate::utils::NonZeroSized;
use glam::{Mat4, Vec3};
use wgpu::util::DeviceExt;

#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
pub struct CameraUniform {
    pub view_position: [f32; 4],
    pub proj_view: [[f32; 4]; 4],
    pub inv_proj: [[f32; 4]; 4],
}

impl Default for CameraUniform {
    fn default() -> Self {
        Self {
            view_position: [0.0; 4],
            proj_view: Mat4::IDENTITY.to_cols_array_2d(),
            inv_proj: Mat4::IDENTITY.to_cols_array_2d(),
        }
    }
}

pub struct CameraBinding {
    pub buffer: wgpu::Buffer,
    pub bind_group: wgpu::BindGroup,
}

impl CameraBinding {
    pub const DESC: wgpu::BindGroupLayoutDescriptor<'static> = wgpu::BindGroupLayoutDescriptor {
        label: Some("Camera Bind Group Layout"),
        entries: &[wgpu::BindGroupLayoutEntry {
            binding: 0,
            visibility: wgpu::ShaderStages::VERTEX_FRAGMENT.union(wgpu::ShaderStages::COMPUTE),
            ty: wgpu::BindingType::Buffer {
                ty: wgpu::BufferBindingType::Uniform,
                has_dynamic_offset: false,
                min_binding_size: Some(CameraUniform::SIZE),
            },
            count: None,
        }],
    };

    pub fn new(device: &wgpu::Device) -> Self {
        let buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("Camera Buffer"),
            contents: bytemuck::bytes_of(&CameraUniform::default()),
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
        });
        let layout = device.create_bind_group_layout(&Self::DESC);
        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("Camera Bind Group"),
            layout: &layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: buffer.as_entire_binding(),
            }],
        });
        Self { buffer, bind_group }
    }

    pub fn update(&mut self, queue: &wgpu::Queue, camera: &mut Camera) {
        if camera.updated {
            queue.write_buffer(
                &self.buffer,
                0,
                bytemuck::bytes_of(&camera.get_proj_view_matrix()),
            );
            camera.updated = false;
        }
    }
}

#[derive(Debug, Clone, Copy)]
pub struct Camera {
    pub zoom: f32,
    pub target: Vec3,
    pub eye: Vec3,
    pub pitch: f32,
    pub yaw: f32,
    pub up: Vec3,
    pub aspect: f32,
    updated: bool,
}

impl Camera {
    const ZFAR: f32 = 100.;
    const ZNEAR: f32 = 0.1;
    const FOVY: f32 = std::f32::consts::PI / 2.0;
    const UP: Vec3 = Vec3::Y;

    pub fn new(zoom: f32, pitch: f32, yaw: f32, target: Vec3, aspect: f32) -> Self {
        let mut camera = Self {
            zoom,
            pitch,
            yaw,
            eye: Vec3::ZERO,
            target,
            up: Self::UP,
            aspect,
            updated: false,
        };
        camera.fix_eye();
        camera
    }

    pub fn build_projection_view_matrix(&self) -> Mat4 {
        let view = Mat4::look_at_rh(self.eye, self.target, self.up);
        let proj = Mat4::perspective_rh(Self::FOVY, self.aspect, Self::ZNEAR, Self::ZFAR);
        proj * view
    }

    pub fn set_zoom(&mut self, zoom: f32) {
        self.zoom = zoom.clamp(0.3, Self::ZFAR / 2.);
        self.fix_eye();
        self.updated = true;
    }

    pub fn add_zoom(&mut self, delta: f32) {
        self.set_zoom(self.zoom + delta);
    }

    pub fn set_pitch(&mut self, pitch: f32) {
        self.pitch = pitch.clamp(
            -std::f32::consts::PI / 2.0 + f32::EPSILON,
            std::f32::consts::PI / 2.0 - f32::EPSILON,
        );
        self.fix_eye();
        self.updated = true;
    }

    pub fn add_pitch(&mut self, delta: f32) {
        self.set_pitch(self.pitch + delta);
    }

    pub fn set_yaw(&mut self, yaw: f32) {
        self.yaw = yaw;
        self.fix_eye();
        self.updated = true;
    }

    pub fn add_yaw(&mut self, delta: f32) {
        self.set_yaw(self.yaw + delta);
    }

    fn fix_eye(&mut self) {
        let pitch_cos = self.pitch.cos();
        self.eye = self.target
            - self.zoom
                * Vec3::new(
                    self.yaw.sin() * pitch_cos,
                    self.pitch.sin(),
                    self.yaw.cos() * pitch_cos,
                );
    }

    pub fn set_aspect(&mut self, width: u32, height: u32) {
        self.aspect = width as f32 / height as f32;
        self.updated = true;
    }

    pub fn get_proj_view_matrix(&self) -> CameraUniform {
        let proj_view = self.build_projection_view_matrix();
        CameraUniform {
            view_position: [self.eye.x, self.eye.y, self.eye.z, 1.0],
            proj_view: proj_view.to_cols_array_2d(),
            inv_proj: proj_view.inverse().to_cols_array_2d(),
        }
    }
}
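// A per-frame usage sketch (illustrative; assumes `device`, `queue`, and a
// surface aspect ratio are already available -- none of these come from this file):
//
//     let mut camera = Camera::new(5.0, 0.3, 0.0, Vec3::ZERO, 16.0 / 9.0);
//     let mut binding = CameraBinding::new(&device);
//     camera.add_yaw(0.01);                 // any setter marks the camera dirty
//     binding.update(&queue, &mut camera);  // uploads only when `updated` is set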
28.317919
96
0.544397
23ee7b074663fe01b9414b9f43b5cab331ce6a9e
2,306
/*
Copyright (c) 2018 Todd Stellanova
LICENSE: See LICENSE file
*/

use std::os::unix::io::AsRawFd;
use std::fs::File;

use bindings::*;

/// The path to the VideoControl firmware device driver
const VC_FW_DEVICE_PATH: &'static str = "/dev/vcio";

// want _IOWR(100, 0, char*)
const VC_FW_IOC_MAGIC_IOWR: u32 = 0xc0046400;

//TODO use libc crate's ioctl instead?
extern {
    //int ioctl(int fd, unsigned long request, ...);
    fn ioctl(fd: i32, req: u32, ...) -> i32;
}

/// The length of the request header sent to the firmware, in u32 words
const REQ_HEADER_LEN: usize = 5;

/*
Message contents:
  u32: buffer size in bytes (including the header values, the end tag and padding)
  u32: buffer request/response code
  u8...: sequence of concatenated tags (see below)
  u32: 0x0 (end tag)
  u8...: padding

Tag format:
  u32: tag identifier
  u32: value buffer size in bytes
  u32:
    Request codes:
      b31 clear: request
      b30-b0: reserved
    Response codes:
      b31 set: response
      b30-b0: value length in bytes
  u8...: value buffer
  u8...: padding to align the tag to 32 bits.
*/

/// Read/Write firmware properties by tag ID
pub fn touch_fw_property(tag: RPiFwPropTag, prop_data: &mut [u32]) {
    let f = File::open(VC_FW_DEVICE_PATH).expect("Could not open VC_FW_DEVICE_PATH");

    // the size of the buffer into which the fw can r/w transaction data
    let prop_data_len = prop_data.len();
    let buf_size: u32 = (prop_data_len * 4) as u32;
    // total length of message, including header, data, and terminating tag
    let msg_size: u32 = ((REQ_HEADER_LEN + prop_data_len + 1) * 4) as u32;

    let mut msg = vec![msg_size, RPI_FWPROP_STATUS_REQUEST, tag as u32, buf_size, 0];

    // copy prop_data into msg
    for i in 0..prop_data_len {
        msg.push(prop_data[i]);
    }
    msg.push(RPiFwPropTag::PropertyEnd as u32);

    unsafe {
        let rc = ioctl(f.as_raw_fd(), VC_FW_IOC_MAGIC_IOWR, msg.as_mut_slice());
        if 0 != rc {
            println!("error result: {}", rc);
            //TODO handle this error case separately from below?
        }
    }

    let req_result: u32 = msg[1];
    if RPI_FWPROP_STATUS_SUCCESS == req_result {
        // copy out any data that was written into the writable value buffer
        for i in 0..prop_data_len {
            prop_data[i] = msg[REQ_HEADER_LEN + i];
        }
    } else {
        println!("req_result: {} ", req_result);
    }
}
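// Worked size arithmetic for the mailbox message above (illustrative):
// for a property whose value buffer is 2 u32 words,
//   buf_size = 2 * 4 = 8 bytes
//   msg_size = (5 header words + 2 value words + 1 end-tag word) * 4 = 32 bytes
// A hypothetical call -- the tag variant name below is an assumption, not taken
// from the generated `bindings` module:
//
//     let mut value = [0u32; 2];
//     touch_fw_property(RPiFwPropTag::GetFirmwareRevision, &mut value);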
25.910112
83
0.679532
ed40325fecd4d621dcdf82a2134c764f9c51de56
22,374
use crate::*; use core::sync::atomic::{AtomicU64, Ordering}; /// A trait defining bitfield operations we need for tracking allocated objects within a page. pub(crate) trait Bitfield { fn initialize(&mut self, for_size: usize, capacity: usize); fn first_fit( &self, base_addr: usize, layout: Layout, page_size: usize, metadata_size: usize, ) -> Option<(usize, usize)>; fn is_allocated(&self, idx: usize) -> bool; fn set_bit(&self, idx: usize); fn clear_bit(&self, idx: usize); fn is_full(&self) -> bool; fn all_free(&self, relevant_bits: usize) -> bool; } /// Implementation of bit operations on u64 slices. /// /// We allow deallocations (i.e. clearning a bit in the field) /// from any thread. That's why the bitfield is a bunch of AtomicU64. impl Bitfield for [AtomicU64] { /// Initialize the bitfield /// /// # Arguments /// * `for_size`: Object size we want to allocate /// * `capacity`: Maximum size of the buffer the bitmap maintains. /// /// Ensures that we only have free slots for what we can allocate /// within the page (by marking everything else allocated). fn initialize(&mut self, for_size: usize, capacity: usize) { // Set everything to allocated for bitmap in self.iter_mut() { *bitmap = AtomicU64::new(u64::max_value()); } // Mark actual slots as free let relevant_bits = core::cmp::min(capacity / for_size, self.len() * 64); for idx in 0..relevant_bits { self.clear_bit(idx); } } /// Tries to find a free block of memory that satisfies `alignment` requirement. /// /// # Notes /// * We pass size here to be able to calculate the resulting address within `data`. #[inline(always)] fn first_fit( &self, base_addr: usize, layout: Layout, page_size: usize, metadata_size: usize ) -> Option<(usize, usize)> { for (base_idx, b) in self.iter().enumerate() { let bitval = b.load(Ordering::Relaxed); if bitval == u64::max_value() { continue; } else { let negated = !bitval; let first_free = negated.trailing_zeros() as usize; let idx: usize = base_idx * 64 + first_free; let offset = idx * layout.size(); // TODO(bad): psize needs to be passed as arg let offset_inside_data_area = offset <= (page_size - metadata_size - layout.size()); if !offset_inside_data_area { return None; } let addr: usize = base_addr + offset; let alignment_ok = addr % layout.align() == 0; let block_is_free = bitval & (1 << first_free) == 0; if alignment_ok && block_is_free { return Some((idx, addr)); } } } None } /// Check if the bit `idx` is set. #[inline(always)] fn is_allocated(&self, idx: usize) -> bool { let base_idx = idx / 64; let bit_idx = idx % 64; (self[base_idx].load(Ordering::Relaxed) & (1 << bit_idx)) > 0 } /// Sets the bit number `idx` in the bit-field. #[inline(always)] fn set_bit(&self, idx: usize) { let base_idx = idx / 64; let bit_idx = idx % 64; self[base_idx].fetch_or(1 << bit_idx, Ordering::Relaxed); } /// Clears bit number `idx` in the bit-field. #[inline(always)] fn clear_bit(&self, idx: usize) { let base_idx = idx / 64; let bit_idx = idx % 64; self[base_idx].fetch_and(!(1 << bit_idx), Ordering::Relaxed); } /// Checks if we could allocate more objects of a given `alloc_size` within the /// `capacity` of the memory allocator. /// /// # Note /// The ObjectPage will make sure to mark the top-most bits as allocated /// for large sizes (i.e., a size 512 SCAllocator will only really need 3 bits) /// to track allocated objects). That's why this function can be simpler /// than it would need to be in practice. 
#[inline(always)] fn is_full(&self) -> bool { self.iter() .filter(|&x| x.load(Ordering::Relaxed) != u64::max_value()) .count() == 0 } /// Checks if the page has currently no allocations. /// /// This is called `all_free` rather than `is_emtpy` because /// we already have an is_empty fn as part of the slice. #[inline(always)] fn all_free(&self, relevant_bits: usize) -> bool { for (idx, bitmap) in self.iter().enumerate() { let checking_bit_range = (idx * 64, (idx + 1) * 64); if relevant_bits >= checking_bit_range.0 && relevant_bits < checking_bit_range.1 { // Last relevant bitmap, here we only have to check that a subset of bitmap is marked free // the rest will be marked full let bits_that_should_be_free = relevant_bits - checking_bit_range.0; let free_mask = (1 << bits_that_should_be_free) - 1; return (free_mask & bitmap.load(Ordering::Relaxed)) == 0; } if bitmap.load(Ordering::Relaxed) == 0 { continue; } else { return false; } } true } } /// This trait is used to define a page from which objects are allocated /// in an `SCAllocator`. /// /// The implementor of this trait needs to provide access to the page meta-data, /// which consists of: /// - A bitfield (to track allocations), /// - `prev` and `next` pointers to insert the page in free lists pub trait AllocablePage { /// The total size (in bytes) of the page. /// /// # Note /// We also assume that the address of the page will be aligned to `SIZE`. const SIZE: usize; const METADATA_SIZE: usize; const HEAP_ID_OFFSET: usize; fn new(mp: MappedPages, heap_id: usize) -> Result<Self, &'static str> where Self: core::marker::Sized; fn retrieve_mapped_pages(&mut self) -> MappedPages; fn clear_metadata(&mut self); fn set_heap_id(&mut self, heap_id: usize); fn heap_id(&self) -> usize; fn bitfield(&self) -> &[AtomicU64; 8]; fn bitfield_mut(&mut self) -> &mut [AtomicU64; 8]; fn prev(&mut self) -> &mut Rawlink<Self> where Self: core::marker::Sized; fn next(&mut self) -> &mut Rawlink<Self> where Self: core::marker::Sized; fn buffer_size() -> usize; /// Tries to find a free block within `data` that satisfies `alignment` requirement. fn first_fit(&self, layout: Layout) -> Option<(usize, usize)> { let base_addr = (&*self as *const Self as *const u8) as usize; self.bitfield().first_fit(base_addr, layout, Self::SIZE, Self::METADATA_SIZE) } /// Tries to allocate an object within this page. /// /// In case the slab is full, returns a null ptr. fn allocate(&mut self, layout: Layout) -> *mut u8 { match self.first_fit(layout) { Some((idx, addr)) => { self.bitfield().set_bit(idx); addr as *mut u8 } None => ptr::null_mut(), } } /// Checks if we can still allocate more objects of a given layout within the page. fn is_full(&self) -> bool { self.bitfield().is_full() } /// Checks if the page has currently no allocations. fn is_empty(&self, relevant_bits: usize) -> bool { self.bitfield().all_free(relevant_bits) } /// Deallocates a memory object within this page. fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) -> Result<(), &'static str> { // trace!( // "AllocablePage deallocating ptr = {:p} with {:?}", // ptr, // layout // ); let page_offset = (ptr.as_ptr() as usize) & (Self::SIZE - 1); assert!(page_offset % layout.size() == 0); let idx = page_offset / layout.size(); assert!( self.bitfield().is_allocated(idx), "{:p} not marked allocated?", ptr ); self.bitfield().clear_bit(idx); Ok(()) } } /// Holds allocated data within 2 4-KiB pages. 
/// /// Has a data-section where objects are allocated from /// and a small amount of meta-data in form of a bitmap /// to track allocations at the end of the page. /// /// # Notes /// An object of this type will be exactly 8 KiB. /// It is marked `repr(C)` because we rely on a well defined order of struct /// members (e.g., dealloc does a cast to find the bitfield). #[repr(C)] pub struct ObjectPage8k<'a> { /// Holds memory objects. #[allow(dead_code)] data: [u8; ObjectPage8k::SIZE -ObjectPage8k::METADATA_SIZE], pub mp: MappedPages, pub heap_id: usize, /// Next element in list (used by `PageList`). next: Rawlink<ObjectPage8k<'a>>, /// Previous element in list (used by `PageList`) prev: Rawlink<ObjectPage8k<'a>>, /// A bit-field to track free/allocated memory within `data`. pub(crate) bitfield: [AtomicU64; 8], } // These needs some more work to be really safe... unsafe impl<'a> Send for ObjectPage8k<'a> {} unsafe impl<'a> Sync for ObjectPage8k<'a> {} impl<'a> AllocablePage for ObjectPage8k<'a> { const SIZE: usize = 8192; const METADATA_SIZE: usize = core::mem::size_of::<MappedPages>() + core::mem::size_of::<usize>() + (2*core::mem::size_of::<Rawlink<ObjectPage8k<'a>>>()) + (8*8); const HEAP_ID_OFFSET: usize = Self::SIZE - (core::mem::size_of::<usize>() + (2*core::mem::size_of::<Rawlink<ObjectPage8k<'a>>>()) + (8*8)); /// Creates a new 8KiB allocable page and stores the MappedPages object in the metadata portion. /// This function checks that the given mapped pages is aligned at a 8KiB boundary, writable and has a size of 8KiB. fn new(mp: MappedPages, heap_id: usize) -> Result<ObjectPage8k<'a>, &'static str> { let vaddr = mp.start_address().value(); if vaddr % Self::SIZE != 0 { error!("The mapped pages for the heap are not aligned at 8k bytes"); return Err("The mapped pages for the heap are not aligned at 8k bytes"); } // check that the mapped pages is writable if !mp.flags().is_writable() { error!("Tried to convert to an allocable page but MappedPages weren't writable (flags: {:?})", mp.flags()); return Err("Trying to create an allocable page but MappedPages were not writable"); } // check that the mapped pages size is equal in size to the page if Self::SIZE != mp.size_in_bytes() { error!("MappedPages of size {} cannot be converted to an allocable page", mp.size_in_bytes()); return Err("MappedPages size does not equal allocable page size"); } Ok( ObjectPage8k { data: [0; ObjectPage8k::SIZE -ObjectPage8k::METADATA_SIZE], mp: mp, heap_id: heap_id, next: Rawlink::default(), prev: Rawlink::default(), bitfield: [AtomicU64::new(0), AtomicU64::new(0), AtomicU64::new(0), AtomicU64::new(0), AtomicU64::new(0), AtomicU64::new(0), AtomicU64::new(0),AtomicU64::new(0) ], }) } /// Returns the MappedPages object that was stored in the metadata portion of the page, /// by swapping with an empty MappedPages object. 
/// /// Marked unsafe since it should only be used when the the AllocablePage it applies to is removed from the heap's linked list and isn't used again fn retrieve_mapped_pages(&mut self) -> MappedPages { let mut mp = MappedPages::empty(); core::mem::swap(&mut self.mp, &mut mp); mp } /// clears the metadata section of the page fn clear_metadata(&mut self) { self.heap_id = 0; self.next = Rawlink::default(); self.prev = Rawlink::default(); for bf in &self.bitfield { bf.store(0, Ordering::SeqCst); } } fn set_heap_id(&mut self, heap_id: usize){ self.heap_id = heap_id; } fn heap_id(&self) -> usize { self.heap_id } fn bitfield(&self) -> &[AtomicU64; 8] { &self.bitfield } fn bitfield_mut(&mut self) -> &mut [AtomicU64; 8] { &mut self.bitfield } fn prev(&mut self) -> &mut Rawlink<Self> { &mut self.prev } fn next(&mut self) -> &mut Rawlink<Self> { &mut self.next } fn buffer_size() -> usize { ObjectPage8k::SIZE - ObjectPage8k::METADATA_SIZE } } impl<'a> Default for ObjectPage8k<'a> { fn default() -> ObjectPage8k<'a> { unsafe { mem::MaybeUninit::zeroed().assume_init() } } } impl<'a> fmt::Debug for ObjectPage8k<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "ObjectPage8k") } } /// A list of pages. pub(crate) struct PageList<'a, T: AllocablePage> { /// Points to the head of the list. pub(crate) head: Option<&'a mut T>, /// Number of elements in the list. pub(crate) elements: usize, } impl<'a, T: AllocablePage> PageList<'a, T> { #[cfg(feature = "unstable")] pub(crate) const fn new() -> PageList<'a, T> { PageList { head: None, elements: 0, } } #[cfg(not(feature = "unstable"))] pub(crate) fn new() -> PageList<'a, T> { PageList { head: None, elements: 0, } } pub(crate) fn iter_mut<'b: 'a>(&mut self) -> ObjectPageIterMut<'b, T> { let m = match self.head { None => Rawlink::none(), Some(ref mut m) => Rawlink::some(*m), }; ObjectPageIterMut { head: m, phantom: core::marker::PhantomData, } } /// Inserts `new_head` at the front of the list. pub(crate) fn insert_front<'b>(&'b mut self, mut new_head: &'a mut T) { match self.head { None => { *new_head.prev() = Rawlink::none(); self.head = Some(new_head); } Some(ref mut head) => { *new_head.prev() = Rawlink::none(); *head.prev() = Rawlink::some(new_head); mem::swap(head, &mut new_head); *head.next() = Rawlink::some(new_head); } } self.elements += 1; } /// Removes `slab_page` from the list. pub(crate) fn remove_from_list(&mut self, slab_page: &mut T) { unsafe { match slab_page.prev().resolve_mut() { None => { self.head = slab_page.next().resolve_mut(); } Some(prev) => { *prev.next() = match slab_page.next().resolve_mut() { None => Rawlink::none(), Some(next) => Rawlink::some(next), }; } } match slab_page.next().resolve_mut() { None => (), Some(next) => { *next.prev() = match slab_page.prev().resolve_mut() { None => Rawlink::none(), Some(prev) => Rawlink::some(prev), }; } } } *slab_page.prev() = Rawlink::none(); *slab_page.next() = Rawlink::none(); self.elements -= 1; } /// Removes `slab_page` from the list. pub(crate) fn pop<'b>(&'b mut self) -> Option<&'a mut T> { match self.head { None => None, Some(ref mut head) => { let head_next = head.next(); let mut new_head = unsafe { head_next.resolve_mut() }; mem::swap(&mut self.head, &mut new_head); let _ = self.head.as_mut().map(|n| { *n.prev() = Rawlink::none(); }); self.elements -= 1; new_head.map(|node| { *node.prev() = Rawlink::none(); *node.next() = Rawlink::none(); node }) } } } /// Does the list contain `s`? 
pub(crate) fn contains(&mut self, s: *const T) -> bool { for slab_page in self.iter_mut() { if slab_page as *const T == s as *const T { return true; } } false } pub(crate) fn is_empty(&self) -> bool { self.elements == 0 } } /// Iterate over all the pages inside a slab allocator pub(crate) struct ObjectPageIterMut<'a, P: AllocablePage> { head: Rawlink<P>, phantom: core::marker::PhantomData<&'a P>, } impl<'a, P: AllocablePage + 'a> Iterator for ObjectPageIterMut<'a, P> { type Item = &'a mut P; #[inline] fn next(&mut self) -> Option<&'a mut P> { unsafe { self.head.resolve_mut().map(|next| { self.head = match next.next().resolve_mut() { None => Rawlink::none(), Some(ref mut sp) => Rawlink::some(*sp), }; next }) } } } /// Rawlink is a type like Option<T> but for holding a raw pointer. /// /// We use it to link AllocablePages together. You probably won't need /// to use this type if you're not implementing AllocablePage /// for a custom page-size. pub struct Rawlink<T> { p: *mut T, } impl<T> Default for Rawlink<T> { fn default() -> Self { Rawlink { p: ptr::null_mut() } } } impl<T> Rawlink<T> { /// Like Option::None for Rawlink pub(crate) fn none() -> Rawlink<T> { Rawlink { p: ptr::null_mut() } } /// Like Option::Some for Rawlink pub(crate) fn some(n: &mut T) -> Rawlink<T> { Rawlink { p: n } } /// Convert the `Rawlink` into an Option value /// /// **unsafe** because: /// /// - Dereference of raw pointer. /// - Returns reference of arbitrary lifetime. #[allow(dead_code)] pub(crate) unsafe fn resolve<'a>(&self) -> Option<&'a T> { self.p.as_ref() } /// Convert the `Rawlink` into an Option value /// /// **unsafe** because: /// /// - Dereference of raw pointer. /// - Returns reference of arbitrary lifetime. pub(crate) unsafe fn resolve_mut<'a>(&mut self) -> Option<&'a mut T> { self.p.as_mut() } /// Return the `Rawlink` and replace with `Rawlink::none()` #[allow(dead_code)] pub(crate) fn take(&mut self) -> Rawlink<T> { mem::replace(self, Rawlink::none()) } } // /// Holds allocated data within a 2 MiB page. // /// // /// Has a data-section where objects are allocated from // /// and a small amount of meta-data in form of a bitmap // /// to track allocations at the end of the page. // /// // /// # Notes // /// An object of this type will be exactly 2 MiB. // /// It is marked `repr(C)` because we rely on a well defined order of struct // /// members (e.g., dealloc does a cast to find the bitfield). // #[repr(C)] // pub struct LargeObjectPage<'a> { // /// Holds memory objects. // #[allow(dead_code)] // data: [u8; (2 * 1024 * 1024) - 80], // /// Next element in list (used by `PageList`). // next: Rawlink<LargeObjectPage<'a>>, // prev: Rawlink<LargeObjectPage<'a>>, // /// A bit-field to track free/allocated memory within `data`. // pub(crate) bitfield: [u64; 8], // } // // These needs some more work to be really safe... 
// unsafe impl<'a> Send for LargeObjectPage<'a> {} // unsafe impl<'a> Sync for LargeObjectPage<'a> {} // impl<'a> AllocablePage for LargeObjectPage<'a> { // const SIZE: usize = LARGE_PAGE_SIZE; // fn bitfield(&self) -> &[u64; 8] { // &self.bitfield // } // fn bitfield_mut(&mut self) -> &mut [u64; 8] { // &mut self.bitfield // } // fn prev(&mut self) -> &mut Rawlink<Self> { // &mut self.prev // } // fn next(&mut self) -> &mut Rawlink<Self> { // &mut self.next // } // } // impl<'a> Default for LargeObjectPage<'a> { // fn default() -> LargeObjectPage<'a> { // unsafe { mem::MaybeUninit::zeroed().assume_init() } // } // } // impl<'a> fmt::Debug for LargeObjectPage<'a> { // fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // write!(f, "LargeObjectPage") // } // } // /// Holds allocated data within a 4 KiB page. // /// // /// Has a data-section where objects are allocated from // /// and a small amount of meta-data in form of a bitmap // /// to track allocations at the end of the page. // /// // /// # Notes // /// An object of this type will be exactly 4 KiB. // /// It is marked `repr(C)` because we rely on a well defined order of struct // /// members (e.g., dealloc does a cast to find the bitfield). // #[repr(C)] // pub struct ObjectPage<'a> { // /// Holds memory objects. // #[allow(dead_code)] // data: [u8; 4096 - 88], // pub heap_id: usize, // /// Next element in list (used by `PageList`). // next: Rawlink<ObjectPage<'a>>, // /// Previous element in list (used by `PageList`) // prev: Rawlink<ObjectPage<'a>>, // /// A bit-field to track free/allocated memory within `data`. // pub(crate) bitfield: [u64; 8], // } // // These needs some more work to be really safe... // unsafe impl<'a> Send for ObjectPage<'a> {} // unsafe impl<'a> Sync for ObjectPage<'a> {} // impl<'a> AllocablePage for ObjectPage<'a> { // const SIZE: usize = BASE_PAGE_SIZE; // fn bitfield(&self) -> &[u64; 8] { // &self.bitfield // } // fn bitfield_mut(&mut self) -> &mut [u64; 8] { // &mut self.bitfield // } // fn prev(&mut self) -> &mut Rawlink<Self> { // &mut self.prev // } // fn next(&mut self) -> &mut Rawlink<Self> { // &mut self.next // } // } // impl<'a> Default for ObjectPage<'a> { // fn default() -> ObjectPage<'a> { // unsafe { mem::MaybeUninit::zeroed().assume_init() } // } // } // impl<'a> fmt::Debug for ObjectPage<'a> { // fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { // write!(f, "ObjectPage") // } // }
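// A standalone sketch of the word/bit index math used by `set_bit`, `clear_bit`,
// and `is_allocated` above (self-contained; assumes nothing else from this file):

use core::sync::atomic::{AtomicU64, Ordering};

fn bit_index_demo() {
    let field = [AtomicU64::new(0), AtomicU64::new(0)];
    let idx = 70; // word 70 / 64 == 1, bit 70 % 64 == 6
    field[idx / 64].fetch_or(1 << (idx % 64), Ordering::Relaxed);
    assert!(field[idx / 64].load(Ordering::Relaxed) & (1 << (idx % 64)) != 0);
    field[idx / 64].fetch_and(!(1 << (idx % 64)), Ordering::Relaxed);
    assert_eq!(field[idx / 64].load(Ordering::Relaxed), 0);
}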
32.239193
175
0.554304
29f5164d31066e6785b954141f395fc5eb8f7a3d
13,219
use std::collections::{HashMap, HashSet};
use std::sync::Arc;

use futures::lock::Mutex;
use parking_lot::Mutex as PLMutex;
use tokio::select;

use super::manifest::*;
use super::{DeleteVector, DiskRowset, StorageOptions, StorageResult};

/// The operations sent to the version manager. Compared with manifest entries, operations
/// like `AddRowSet` need to be associated with a `DiskRowset` struct.
pub enum EpochOp {
    CreateTable(CreateTableEntry),
    DropTable(DropTableEntry),
    AddRowSet((AddRowSetEntry, DiskRowset)),
    DeleteRowSet(DeleteRowsetEntry),
    AddDV((AddDVEntry, DeleteVector)),
    DeleteDV(DeleteDVEntry),
}

/// We store the full information of a snapshot in one `Snapshot` object. In the future, we should
/// implement an MVCC structure for this.
#[derive(Clone, Default)]
pub struct Snapshot {
    /// RowSet IDs in this snapshot. We **only store IDs** in the snapshot; the actual
    /// objects are fetched from the version manager later.
    rowsets: HashMap<u32, HashSet<u32>>,

    /// DVs in this snapshot.
    dvs: HashMap<u32, HashMap<u32, HashSet<u64>>>,
}

impl Snapshot {
    pub fn add_rowset(&mut self, table_id: u32, rowset_id: u32) {
        self.rowsets.entry(table_id).or_default().insert(rowset_id);
    }

    pub fn delete_rowset(&mut self, table_id: u32, rowset_id: u32) {
        let table = self.rowsets.get_mut(&table_id).unwrap();
        table.remove(&rowset_id);
        if table.is_empty() {
            self.rowsets.remove(&table_id);
        }
    }

    pub fn add_dv(&mut self, table_id: u32, rowset_id: u32, dv_id: u64) {
        self.dvs
            .entry(table_id)
            .or_default()
            .entry(rowset_id)
            .or_default()
            .insert(dv_id);
    }

    pub fn delete_dv(&mut self, table_id: u32, rowset_id: u32, dv_id: u64) {
        let table = self.dvs.get_mut(&table_id).unwrap();
        let dvs = table.get_mut(&rowset_id).unwrap();
        dvs.remove(&dv_id);
        if dvs.is_empty() {
            table.remove(&rowset_id);
        }
        if table.is_empty() {
            self.dvs.remove(&table_id);
        }
    }

    pub fn get_dvs_of(&self, table_id: u32, rowset_id: u32) -> Option<&HashSet<u64>> {
        if let Some(rowset) = self.dvs.get(&table_id) {
            if let Some(dvs) = rowset.get(&rowset_id) {
                return Some(dvs);
            }
        }
        None
    }

    pub fn get_rowsets_of(&self, table_id: u32) -> Option<&HashSet<u32>> {
        if let Some(rowset) = self.rowsets.get(&table_id) {
            return Some(rowset);
        }
        None
    }
}

#[derive(Default)]
pub struct VersionManagerInner {
    /// To make things easy, we store the full snapshot of each epoch. In the future, we will use an
    /// MVCC structure for this, and only record changes compared with the last epoch.
    status: HashMap<u64, Arc<Snapshot>>,

    /// (TableId, RowSetId) -> Object mapping
    rowsets: HashMap<(u32, u32), Arc<DiskRowset>>,

    /// (TableId, DVId) -> Object mapping
    dvs: HashMap<(u32, u64), Arc<DeleteVector>>,

    /// Reference count of each epoch.
    ref_cnt: HashMap<u64, usize>,

    /// Deletions to apply in each epoch.
    rowset_deletion_to_apply: HashMap<u64, Vec<(u32, u32)>>,

    /// Current epoch number.
    epoch: u64,
}

/// Manages the state history of the storage engine and vacuums the stale files on disk.
///
/// Generally, when a transaction starts, it will take a snapshot and store the state of the
/// merge-tree at the time of starting. As the txn is running, new RowSets are added and old RowSets
/// will no longer be used. So how do we know that we can safely remove a RowSet file?
///
/// [`VersionManager`] manages all RowSets in a multi-version way. Every time there are some
/// changes in the storage engine, [`VersionManager`] should be notified about this change,
/// and hand out an epoch number for that change. For example,
///
/// * (epoch 0) RowSet 1, 2
/// * (engine) add RowSet 3, remove RowSet 1
/// * (epoch 1) RowSet 2, 3
///
/// Each history state will be associated with an epoch number, which will be used by
/// snapshots. When a snapshot is taken, it will "pin" an epoch number. RowSets logically
/// deleted after that epoch won't be deleted physically until the snapshot "unpins" the
/// epoch number.
///
/// Therefore, [`VersionManager`] is the manifest manager of the whole storage system,
/// which reads and writes the manifest, manages all on-disk files, and vacuums them when no
/// snapshot holds the corresponding epoch of the file.
///
/// The design choice of separating [`VersionManager`] out of the storage engine is a
/// preparation for a distributed storage engine. In such a distributed engine, there will
/// generally be some kind of `MetadataManager` which does all of the things that our
/// [`VersionManager`] does.
pub struct VersionManager {
    /// Inner structure of `VersionManager`. This structure is protected by a parking_lot Mutex, so
    /// as to support quick lock and unlock.
    inner: PLMutex<VersionManagerInner>,

    /// Manifest file. We only allow one thread to commit changes, and `commit_changes` will hold
    /// this lock until complete. As the commit procedure involves async waiting, we need to use an
    /// async lock.
    manifest: Mutex<Manifest>,

    /// Notify the vacuum to apply changes from one epoch.
    tx: tokio::sync::mpsc::UnboundedSender<()>,

    /// Receiver of the vacuum.
    rx: PLMutex<Option<tokio::sync::mpsc::UnboundedReceiver<()>>>,

    /// Storage options
    storage_options: Arc<StorageOptions>,
}

impl VersionManager {
    pub fn new(manifest: Manifest, storage_options: Arc<StorageOptions>) -> Self {
        let (tx, rx) = tokio::sync::mpsc::unbounded_channel();
        Self {
            manifest: Mutex::new(manifest),
            inner: PLMutex::new(VersionManagerInner::default()),
            tx,
            rx: PLMutex::new(Some(rx)),
            storage_options,
        }
    }

    /// Commit changes and return a new epoch number
    pub async fn commit_changes(&self, ops: Vec<EpochOp>) -> StorageResult<u64> {
        // Hold the manifest lock so that no one else could commit changes.
        let mut manifest = self.manifest.lock().await;

        let mut snapshot;
        let mut entries;
        let current_epoch;
        let mut rowset_deletion_to_apply = vec![];

        {
            // Hold the inner lock, so as to apply the changes to the current status, and add new
            // RowSets and DVs to the pool. This lock is released before persisting entries to
            // the manifest.
            let mut inner = self.inner.lock();

            // Save the current epoch for a later integrity check.
            current_epoch = inner.epoch;

            // Get a snapshot of the latest version.
            snapshot = inner
                .status
                .get(&current_epoch)
                .map(|x| x.as_ref().clone())
                .unwrap_or_default();

            // Store entries to be committed into the manifest
            entries = Vec::with_capacity(ops.len());

            for op in ops {
                match op {
                    // For catalog operations, just leave them as-is. The version manager currently
                    // doesn't create an MVCC map for catalog operations, and does not provide an
                    // interface to access them.
                    EpochOp::CreateTable(entry) => {
                        entries.push(ManifestOperation::CreateTable(entry))
                    }
                    EpochOp::DropTable(entry) => entries.push(ManifestOperation::DropTable(entry)),
                    // For other operations, maintain the snapshot in the version manager
                    EpochOp::AddRowSet((entry, rowset)) => {
                        // record the rowset into the pool
                        inner
                            .rowsets
                            .insert((entry.table_id.table_id, entry.rowset_id), Arc::new(rowset));
                        // update the snapshot
                        snapshot.add_rowset(entry.table_id.table_id, entry.rowset_id);
                        entries.push(ManifestOperation::AddRowSet(entry));
                    }
                    EpochOp::DeleteRowSet(entry) => {
                        rowset_deletion_to_apply.push((entry.table_id.table_id, entry.rowset_id));
                        snapshot.delete_rowset(entry.table_id.table_id, entry.rowset_id);
                        entries.push(ManifestOperation::DeleteRowSet(entry));
                    }
                    EpochOp::AddDV((entry, dv)) => {
                        // record the DV into the pool
                        inner
                            .dvs
                            .insert((entry.table_id.table_id, entry.dv_id), Arc::new(dv));
                        // update the snapshot
                        snapshot.add_dv(entry.table_id.table_id, entry.rowset_id, entry.dv_id);
                        entries.push(ManifestOperation::AddDV(entry));
                    }
                    EpochOp::DeleteDV(entry) => {
                        // TODO: record delete op and apply it later
                        snapshot.delete_dv(entry.table_id.table_id, entry.rowset_id, entry.dv_id);
                        entries.push(ManifestOperation::DeleteDV(entry));
                    }
                }
            }
        }

        // Persist the change onto the disk.
        manifest.append(&entries).await?;

        // Bump the epoch number and make the modified snapshot available.
        let mut inner = self.inner.lock();
        assert_eq!(inner.epoch, current_epoch);
        inner.epoch += 1;
        let epoch = inner.epoch;
        inner.status.insert(epoch, Arc::new(snapshot));
        inner
            .rowset_deletion_to_apply
            .insert(epoch, rowset_deletion_to_apply);

        Ok(epoch)
    }

    /// Pin a snapshot of one epoch, so that all files at this epoch won't be deleted.
    pub fn pin(&self) -> (u64, Arc<Snapshot>) {
        let mut inner = self.inner.lock();
        let epoch = inner.epoch;
        *inner.ref_cnt.entry(epoch).or_default() += 1;
        (epoch, inner.status.get(&epoch).unwrap().clone())
    }

    /// Unpin a snapshot of one epoch. When the reference counter becomes 0, files might be vacuumed.
    pub fn unpin(&self, epoch: u64) {
        let mut inner = self.inner.lock();
        let ref_cnt = inner
            .ref_cnt
            .get_mut(&epoch)
            .expect("epoch not registered!");
        *ref_cnt -= 1;
        if *ref_cnt == 0 {
            inner.ref_cnt.remove(&epoch).unwrap();
            if epoch != inner.epoch {
                // TODO: precisely pass the epoch number that can be vacuumed.
                self.tx.send(()).unwrap();
            }
        }
    }

    pub fn get_rowset(&self, table_id: u32, rowset_id: u32) -> Arc<DiskRowset> {
        let inner = self.inner.lock();
        inner.rowsets.get(&(table_id, rowset_id)).unwrap().clone()
    }

    pub fn get_dv(&self, table_id: u32, dv_id: u64) -> Arc<DeleteVector> {
        let inner = self.inner.lock();
        inner.dvs.get(&(table_id, dv_id)).unwrap().clone()
    }

    pub async fn find_vacuum(self: &Arc<Self>) -> StorageResult<Vec<(u32, u32)>> {
        let mut inner = self.inner.lock();
        let min_pinned_epoch = inner.ref_cnt.keys().min().cloned();

        // If there is no pinned epoch, all deletions can be applied.
        let vacuum_epoch = min_pinned_epoch.unwrap_or(inner.epoch);

        let can_apply = |epoch, vacuum_epoch| epoch <= vacuum_epoch;

        // Fetch to-be-applied deletions.
        let mut deletions = vec![];
        for (epoch, deletion) in &inner.rowset_deletion_to_apply {
            if can_apply(*epoch, vacuum_epoch) {
                deletions.extend(deletion.iter().cloned());
            }
        }
        inner
            .rowset_deletion_to_apply
            .retain(|k, _| !can_apply(*k, vacuum_epoch));

        for deletion in &deletions {
            let rowset = inner.rowsets.remove(deletion).unwrap();
            match Arc::try_unwrap(rowset) {
                Ok(rowset) => drop(rowset),
                Err(_) => panic!("rowset {:?} is still being used", deletion),
            }
        }

        Ok(deletions)
    }

    pub async fn do_vacuum(self: &Arc<Self>) -> StorageResult<()> {
        let deletions = self.find_vacuum().await?;

        for (table_id, rowset_id) in deletions {
            let path = self
                .storage_options
                .path
                .join(format!("{}_{}", table_id, rowset_id));
            info!("vacuum {}_{}", table_id, rowset_id);
            tokio::fs::remove_dir_all(path).await?;
        }

        Ok(())
    }

    pub async fn run(
        self: &Arc<Self>,
        mut stop: tokio::sync::mpsc::UnboundedReceiver<()>,
    ) -> StorageResult<()> {
        let mut vacuum_notifier = self.rx.lock().take().unwrap();
        loop {
            select! {
                Some(_) = vacuum_notifier.recv() => self.do_vacuum().await?,
                Some(_) = stop.recv() => break
            }
        }
        Ok(())
    }
}
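// A sketch of the intended epoch lifecycle (illustrative; assumes a running
// `vm: Arc<VersionManager>` and a prepared `ops: Vec<EpochOp>` -- neither is
// defined here):
//
//     let (epoch, snapshot) = vm.pin();              // files at `epoch` stay on disk
//     let new_epoch = vm.commit_changes(ops).await?; // persists ops, bumps the epoch
//     // ... read the rowsets / DVs listed in `snapshot` ...
//     vm.unpin(epoch);                               // last unpin may trigger a vacuum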
37.985632
100
0.586126
d7fd2b3441e85ec3eb2aca7bcaf7a71178825b8e
17,293
// Copyright (c) 2018-2022 The MobileCoin Foundation //! Database storage for monitors //! * Provides monitor configuration and status from MonitorId. //! * MonitorId is a hash of the instantiation parameters. use crate::{database_key::DatabaseByteArrayKey, db_crypto::DbCryptoProvider, error::Error}; use lmdb::{Cursor, Database, DatabaseFlags, Environment, RwTransaction, Transaction, WriteFlags}; use mc_account_keys::AccountKey; use mc_common::{ logger::{log, Logger}, HashMap, }; use mc_crypto_digestible::{Digestible, MerlinTranscript}; use mc_crypto_keys::RistrettoPublic; use mc_util_serial::Message; use std::{convert::TryFrom, ops::Range, sync::Arc}; // LMDB Database Names pub const MONITOR_ID_TO_MONITOR_DATA_DB_NAME: &str = "mobilecoind_db:monitor_store:monitor_id_to_monitor_data"; /// Type used as the stored data in the monitor_id_to_monitor_data database. #[derive(Clone, Eq, Hash, PartialEq, Message)] pub struct MonitorData { /// The private key pair for the account this monitor watches. #[prost(message, required, tag = "1")] pub account_key: AccountKey, /// The smallest subaddress index in the range this monitor watches. #[prost(uint64, tag = "2")] pub first_subaddress: u64, /// The number of subaddresses this monitor watches. #[prost(uint64, tag = "3")] pub num_subaddresses: u64, /// The first block this monitor should process. #[prost(uint64, tag = "4")] pub first_block: u64, /// The next block this monitor needs to process. #[prost(uint64, tag = "5")] pub next_block: u64, /// Optional monitor name. #[prost(string, tag = "6")] pub name: String, } impl MonitorData { pub fn new( account_key: AccountKey, first_subaddress: u64, num_subaddresses: u64, first_block: u64, name: &str, ) -> Result<Self, Error> { if num_subaddresses == 0 { return Err(Error::InvalidArgument( "num_subaddresses".to_string(), "must be greater than zero".to_string(), )); } Ok(Self { account_key, first_subaddress, num_subaddresses, first_block, // The next block we need to sync is our first block. next_block: first_block, name: name.to_owned(), }) } pub fn subaddress_indexes(&self) -> Range<u64> { self.first_subaddress..self.first_subaddress + self.num_subaddresses } } /// Type used as the key in the monitor_id_to_monitor_data database pub type MonitorId = DatabaseByteArrayKey; impl From<&MonitorData> for MonitorId { // When constructing a MonitorId from a given MonitorData object we only want to // hash the data that doesn't change over time. // Name isn't included here - two monitors with identical address/subaddress // range/first_block should have the same id even if they have a different // name, fn from(src: &MonitorData) -> MonitorId { // The structure of mc_account_keys::PublicAddress changed when the fog // signature scheme was implemented. This re-implements the original // structure in order to maintain a consistent hash in the database. // // The never_omit attributes are needed because of a change in the digestible // crate that now omits empty strings/vectors by default. // // This should eventually be removed. #[derive(Debug, Digestible)] struct PublicAddress { view_public_key: RistrettoPublic, spend_public_key: RistrettoPublic, #[digestible(never_omit)] fog_report_url: String, #[digestible(never_omit)] fog_report_id: String, #[digestible(never_omit)] fog_authority_fingerprint_sig: Vec<u8>, } #[derive(Debug, Digestible)] struct ConstMonitorData { // We use PublicAddress and not AccountKey so that the monitor_id is not sensitive. 
pub address: PublicAddress, pub first_subaddress: u64, pub num_subaddresses: u64, pub first_block: u64, } let real_subaddress = src.account_key.default_subaddress(); let const_data = ConstMonitorData { address: PublicAddress { view_public_key: *real_subaddress.view_public_key(), spend_public_key: *real_subaddress.spend_public_key(), fog_report_url: real_subaddress .fog_report_url() .unwrap_or_default() .to_owned(), fog_report_id: real_subaddress .fog_report_id() .unwrap_or_default() .to_owned(), fog_authority_fingerprint_sig: real_subaddress .fog_authority_sig() .unwrap_or_default() .to_vec(), }, first_subaddress: src.first_subaddress, num_subaddresses: src.num_subaddresses, first_block: src.first_block, }; let temp: [u8; 32] = const_data.digest32::<MerlinTranscript>(b"monitor_data"); Self::from(temp) } } /// Wrapper for the monitor_id_to_monitor_data database #[derive(Clone)] pub struct MonitorStore { /// Retain a reference to the Environment so the Database handles are valid. _env: Arc<Environment>, /// Crypto provider, used for managing database encryption. crypto_provider: DbCryptoProvider, /// Mapping of MonitorId -> MonitorData monitor_id_to_monitor_data: Database, /// Logger. logger: Logger, } /// A DB mapping account IDs to keys impl MonitorStore { pub fn new( env: Arc<Environment>, crypto_provider: DbCryptoProvider, logger: Logger, ) -> Result<Self, Error> { let monitor_id_to_monitor_data = env.create_db( Some(MONITOR_ID_TO_MONITOR_DATA_DB_NAME), DatabaseFlags::empty(), )?; Ok(Self { _env: env, crypto_provider, monitor_id_to_monitor_data, logger, }) } /// Add a new monitor. pub fn add<'env>( &self, db_txn: &mut RwTransaction<'env>, data: &MonitorData, ) -> Result<MonitorId, Error> { let monitor_id = MonitorId::from(data); let key_bytes = monitor_id.as_bytes(); let value_bytes = self .crypto_provider .encrypt(&mc_util_serial::encode(data))?; log::trace!(self.logger, "adding new monitor {}: {:?}", monitor_id, data); match db_txn.put( self.monitor_id_to_monitor_data, key_bytes, &value_bytes, WriteFlags::NO_OVERWRITE, ) { Ok(_) => Ok(monitor_id), Err(lmdb::Error::KeyExist) => Err(Error::MonitorIdExists), Err(err) => Err(err.into()), } } /// Delete data for a given monitor. pub fn remove<'env>( &self, db_txn: &mut RwTransaction<'env>, monitor_id: &MonitorId, ) -> Result<(), Error> { db_txn.del(self.monitor_id_to_monitor_data, monitor_id, None)?; Ok(()) } /// Get the MonitorData for a given `monitor_id`. pub fn get_data( &self, db_txn: &impl Transaction, monitor_id: &MonitorId, ) -> Result<MonitorData, Error> { match db_txn.get(self.monitor_id_to_monitor_data, monitor_id) { Ok(value_bytes) => { let value_bytes = self.crypto_provider.decrypt(value_bytes)?; let data: MonitorData = mc_util_serial::decode(&value_bytes)?; Ok(data) } Err(lmdb::Error::NotFound) => Err(Error::MonitorIdNotFound), Err(err) => Err(Error::Lmdb(err)), } } /// Get a hashmap of all MonitorId -> MonitorData. pub fn get_map( &self, db_txn: &impl Transaction, ) -> Result<HashMap<MonitorId, MonitorData>, Error> { let mut cursor = db_txn.open_ro_cursor(self.monitor_id_to_monitor_data)?; cursor .iter() .map(|result| { result .map_err(Error::from) .and_then(|(key_bytes, value_bytes)| { let monitor_id = MonitorId::try_from(key_bytes) .map_err(|_| Error::KeyDeserialization)?; let value_bytes = self.crypto_provider.decrypt(value_bytes)?; let data: MonitorData = mc_util_serial::decode(&value_bytes)?; Ok((monitor_id, data)) }) }) .collect::<Result<HashMap<_, _>, Error>>() } /// Get a list of all MonitorIds in database. 
pub fn get_ids(&self, db_txn: &impl Transaction) -> Result<Vec<MonitorId>, Error> { let mut cursor = db_txn.open_ro_cursor(self.monitor_id_to_monitor_data)?; cursor .iter() .map(|result| { result .map_err(Error::from) .and_then(|(key_bytes, _value_bytes)| { MonitorId::try_from(key_bytes).map_err(|_| Error::KeyDeserialization) }) }) .collect::<Result<Vec<_>, Error>>() } /// Set the MonitorData for an existing monitor pub fn set_data<'env>( &self, db_txn: &mut RwTransaction<'env>, monitor_id: &MonitorId, data: &MonitorData, ) -> Result<(), Error> { let key_bytes = monitor_id.to_vec(); match db_txn.get(self.monitor_id_to_monitor_data, &key_bytes) { Ok(_value_bytes) => { let new_value_bytes = self .crypto_provider .encrypt(&mc_util_serial::encode(data))?; db_txn.put( self.monitor_id_to_monitor_data, &key_bytes, &new_value_bytes, WriteFlags::empty(), )?; Ok(()) } Err(lmdb::Error::NotFound) => Err(Error::MonitorIdNotFound), Err(err) => Err(Error::Lmdb(err)), } } /// Re-encrypt the encrypted parts of the database with a new password. /// This will fail if the current password is not set in the crypto_provider /// since part of the re-encryption process relies on being able to /// decrypt the existing data. pub fn re_encrypt<'env>( &self, db_txn: &mut RwTransaction<'env>, new_password: &[u8], ) -> Result<(), Error> { let mut cursor = db_txn.open_rw_cursor(self.monitor_id_to_monitor_data)?; for (key_bytes, value_bytes) in cursor.iter().filter_map(|r| r.ok()) { let decrypted_bytes = self.crypto_provider.decrypt(value_bytes)?; let encrypted_bytes = self .crypto_provider .encrypt_with_password(new_password, &decrypted_bytes)?; cursor.put(&key_bytes, &encrypted_bytes, WriteFlags::CURRENT)?; } Ok(()) } } #[cfg(test)] mod test { use super::*; use crate::{ error::Error, test_utils::{get_test_databases, get_test_monitor_data_and_id, BlockVersion}, }; use mc_account_keys::RootIdentity; use mc_common::logger::{test_with_logger, Logger}; use mc_util_from_random::FromRandom; use rand_chacha::ChaChaRng; use rand_core::SeedableRng; use std::{assert_matches::assert_matches, collections::HashSet}; /// A randomly generated RSA subjectPublicKeyInfo, used as a fog authority. const AUTHORITY_PUBKEY: &str = r"-----BEGIN PUBLIC KEY----- MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAobfcLcLdKL3O4d1XOLE6 lGgcFOKZHsXT2Pbh+NF14EEwMCpvPiaOwfuLvycItdE3P2K+725B2CiAJdurx5yj 8ctc1M0N+Hed0vkO6R9FtYFLTZVPipTLqc03iowZALfqV6M0b3POXMyEMLTC14B0 wYerb58o1uACwmCzt5lXGdL3ZbiMZ+y8GdCIBEeqLHYpyC5nXg0L9U5EsYfUuYkN tDZT6zE7/D+tWYArLtnRMBw4h3sPgKNWbu6wMDnBpiWXTKHsaJS3sfthlyLL0gyX lb3gVdL7kBpUTTLGXE96VjojmPwM34+qNu4B39wLWhUuQ9ugjeDK1mMfYMJvVydm nqH0WdmPFprsiYxMQgioP3mCThKcKGBBbdn3Ii8ZtFQN/NM8WteLgmUVZQ+fwF4G L1OWnw6IEnHa8a0Shh8t8DGUl2dFjp8YCjOgyk0VqPGkD3c1Z6j95BZEDXSCziYj C17bXAtQjU1ra+Uxg/e2vaEn7r8lzvPs/Iyc8Y8zt8eHRWgSr14trvxJRQhvXwwp iX3vQok+sdmBmOS0Ox6nL4LLbnMxNkJ6c1P+LKE5eqz4oiShLDVCgWsdWyQSMuJU pa4ba4HyA6JNtKvb8sk2CYXrBtp3PlBwclBOxSEAZDVq82o6dJ31MklpF0EG1y8C pKZkdp8MQU5TLFOE9qjNeVsCAwEAAQ== -----END PUBLIC KEY-----"; /// Ensure the monitor ID for a test-vector key has not changed. #[test] fn monitor_id_stability() { /// The constant output by mobilecoind when the 1.0.1 release has been /// patched with stability-1.0.1.diff from the root of this tree. const HEXPECTED: &str = r"cd57649f325d525cf96120dd303ab3bba6d15071861425c62fad6949335cc604"; /// The fog output by mobilecoind when the 1.0.1 release has been /// patched with stability-1.0.1.diff from the root of this tree. 
const FOG_HEXPECTED: &str = r"e4bc6cd685d5b272e5a34c6b0aacf820029ad108df0007c46b0df1ba645107e5"; let mut rng = ChaChaRng::seed_from_u64(0); let identity = RootIdentity::from_random(&mut rng); let key = AccountKey::try_from(&identity) .expect("Could not create account key from non-fog identity"); let data = MonitorData::new(key, 1, 10, 1, "test").expect("Could not create monitor data"); let id = MonitorId::from(&data); let expected = hex::decode(HEXPECTED).expect("Could not decode expected data to bytes"); assert_eq!(expected, id.as_bytes().to_vec(), "{}", hex_fmt::HexFmt(id)); let fog_authority_spki = pem::parse(AUTHORITY_PUBKEY) .expect("Could not parse pubkey") .contents; let fog_identity = RootIdentity::random_with_fog( &mut rng, "fog://fog.unittest.mobilecoin.com", "", &fog_authority_spki, ); let fog_key = AccountKey::from(&fog_identity); let fog_data = MonitorData::new(fog_key, 10, 100, 10, "fog test") .expect("Could not create monitor data"); let fog_id = MonitorId::from(&fog_data); let fog_expected = hex::decode(FOG_HEXPECTED).expect("Could not decode expected data to bytes"); assert_eq!( fog_expected, fog_id.as_bytes().to_vec(), "{}/{}", FOG_HEXPECTED, HEXPECTED ); } // MonitorStore basic functionality tests #[test_with_logger] fn test_monitor_store(logger: Logger) { let mut rng = ChaChaRng::from_seed([123u8; 32]); // Set up a db with 3 random recipients and 10 blocks. let (_ledger_db, mobilecoind_db) = get_test_databases(BlockVersion::MAX, 3, &[], 10, logger.clone(), &mut rng); // Check that there are no monitors yet. assert_eq!( mobilecoind_db .get_monitor_map() .expect("failed to get empty map") .keys() .cloned() .collect::<Vec<MonitorId>>(), vec![] ); log::trace!(logger, "confirmed database was created with no monitors"); // Insert the monitors and check that that they appear in the db. let (mut monitor_data0, monitor_id0) = get_test_monitor_data_and_id(&mut rng); let (monitor_data1, monitor_id1) = get_test_monitor_data_and_id(&mut rng); let (_monitor_data, monitor_id2) = get_test_monitor_data_and_id(&mut rng); monitor_data0.name = "test name".to_owned(); let _ = mobilecoind_db .add_monitor(&monitor_data0) .expect("failed inserting monitor 0"); assert_eq!( mobilecoind_db .get_monitor_map() .expect("failed to get map") .keys() .cloned() .collect::<Vec<MonitorId>>(), vec![monitor_id0] ); let _ = mobilecoind_db .add_monitor(&monitor_data1) .expect("failed inserting monitor 1"); assert_eq!( mobilecoind_db .get_monitor_map() .expect("failed to get map") .keys() .cloned() .collect::<HashSet<_>>(), HashSet::from([monitor_id0, monitor_id1]) ); // Check that monitor data is recoverable. assert_eq!( mobilecoind_db .get_monitor_data(&monitor_id1) .expect("failed getting monitor data 1"), monitor_data1 ); assert_eq!( mobilecoind_db .get_monitor_data(&monitor_id0) .expect("failed getting monitor data 0"), monitor_data0 ); // monitor_id2 was never inserted into the database, so getting its data should // fail. assert_matches!( mobilecoind_db.get_monitor_data(&monitor_id2), Err(Error::MonitorIdNotFound) ); } }
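// A minimal construction sketch (illustrative; assumes an `account_key: AccountKey`
// obtained elsewhere):
//
//     let data = MonitorData::new(account_key, 0, 10, 0, "example")?;
//     assert_eq!(data.subaddress_indexes(), 0..10);  // first_subaddress .. first + num
//     let id = MonitorId::from(&data);               // digest of the non-name fields only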
35.877593
100
0.602382
d7519c6d0936c186490cc0ba43f3f81e8c78d590
68,943
use core::borrow::Borrow; use core::cmp::Ordering; use core::fmt::{self, Debug}; use core::hash::{Hash, Hasher}; use core::iter::{FromIterator, FusedIterator}; use core::marker::PhantomData; use core::mem::{self, ManuallyDrop}; use core::ops::{Index, RangeBounds}; use core::ptr; use super::borrow::DormantMutRef; use super::navigate::LeafRange; use super::node::{self, marker, ForceResult::*, Handle, NodeRef, Root}; use super::search::SearchResult::*; mod entry; pub use entry::{Entry, OccupiedEntry, OccupiedError, VacantEntry}; use Entry::*; /// Minimum number of elements in nodes that are not a root. /// We might temporarily have fewer elements during methods. pub(super) const MIN_LEN: usize = node::MIN_LEN_AFTER_SPLIT; // A tree in a `BTreeMap` is a tree in the `node` module with additional invariants: // - Keys must appear in ascending order (according to the key's type). // - If the root node is internal, it must contain at least 1 element. // - Every non-root node contains at least MIN_LEN elements. // // An empty map may be represented both by the absence of a root node or by a // root node that is an empty leaf. /// A map based on a [B-Tree]. /// /// B-Trees represent a fundamental compromise between cache-efficiency and actually minimizing /// the amount of work performed in a search. In theory, a binary search tree (BST) is the optimal /// choice for a sorted map, as a perfectly balanced BST performs the theoretical minimum amount of /// comparisons necessary to find an element (log<sub>2</sub>n). However, in practice the way this /// is done is *very* inefficient for modern computer architectures. In particular, every element /// is stored in its own individually heap-allocated node. This means that every single insertion /// triggers a heap-allocation, and every single comparison should be a cache-miss. Since these /// are both notably expensive things to do in practice, we are forced to at very least reconsider /// the BST strategy. /// /// A B-Tree instead makes each node contain B-1 to 2B-1 elements in a contiguous array. By doing /// this, we reduce the number of allocations by a factor of B, and improve cache efficiency in /// searches. However, this does mean that searches will have to do *more* comparisons on average. /// The precise number of comparisons depends on the node search strategy used. For optimal cache /// efficiency, one could search the nodes linearly. For optimal comparisons, one could search /// the node using binary search. As a compromise, one could also perform a linear search /// that initially only checks every i<sup>th</sup> element for some choice of i. /// /// Currently, our implementation simply performs naive linear search. This provides excellent /// performance on *small* nodes of elements which are cheap to compare. However in the future we /// would like to further explore choosing the optimal search strategy based on the choice of B, /// and possibly other factors. Using linear search, searching for a random element is expected /// to take O(B * log(n)) comparisons, which is generally worse than a BST. In practice, /// however, performance is excellent. /// /// It is a logic error for a key to be modified in such a way that the key's ordering relative to /// any other key, as determined by the [`Ord`] trait, changes while it is in the map. This is /// normally only possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code. 
/// The behavior resulting from such a logic error is not specified, but will not result in /// undefined behavior. This could include panics, incorrect results, aborts, memory leaks, and /// non-termination. /// /// [B-Tree]: https://en.wikipedia.org/wiki/B-tree /// [`Cell`]: core::cell::Cell /// [`RefCell`]: core::cell::RefCell /// /// # Examples /// /// ``` /// use std::collections::BTreeMap; /// /// // type inference lets us omit an explicit type signature (which /// // would be `BTreeMap<&str, &str>` in this example). /// let mut movie_reviews = BTreeMap::new(); /// /// // review some movies. /// movie_reviews.insert("Office Space", "Deals with real issues in the workplace."); /// movie_reviews.insert("Pulp Fiction", "Masterpiece."); /// movie_reviews.insert("The Godfather", "Very enjoyable."); /// movie_reviews.insert("The Blues Brothers", "Eye lyked it a lot."); /// /// // check for a specific one. /// if !movie_reviews.contains_key("Les Misérables") { /// println!("We've got {} reviews, but Les Misérables ain't one.", /// movie_reviews.len()); /// } /// /// // oops, this review has a lot of spelling mistakes, let's delete it. /// movie_reviews.remove("The Blues Brothers"); /// /// // look up the values associated with some keys. /// let to_find = ["Up!", "Office Space"]; /// for movie in &to_find { /// match movie_reviews.get(movie) { /// Some(review) => println!("{}: {}", movie, review), /// None => println!("{} is unreviewed.", movie) /// } /// } /// /// // Look up the value for a key (will panic if the key is not found). /// println!("Movie review: {}", movie_reviews["Office Space"]); /// /// // iterate over everything. /// for (movie, review) in &movie_reviews { /// println!("{}: \"{}\"", movie, review); /// } /// ``` /// /// `BTreeMap` also implements an [`Entry API`], which allows for more complex /// methods of getting, setting, updating and removing keys and their values: /// /// [`Entry API`]: BTreeMap::entry /// /// ``` /// use std::collections::BTreeMap; /// /// // type inference lets us omit an explicit type signature (which /// // would be `BTreeMap<&str, u8>` in this example). 
/// let mut player_stats = BTreeMap::new(); /// /// fn random_stat_buff() -> u8 { /// // could actually return some random value here - let's just return /// // some fixed value for now /// 42 /// } /// /// // insert a key only if it doesn't already exist /// player_stats.entry("health").or_insert(100); /// /// // insert a key using a function that provides a new value only if it /// // doesn't already exist /// player_stats.entry("defence").or_insert_with(random_stat_buff); /// /// // update a key, guarding against the key possibly not being set /// let stat = player_stats.entry("attack").or_insert(100); /// *stat += random_stat_buff(); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[cfg_attr(not(test), rustc_diagnostic_item = "BTreeMap")] pub struct BTreeMap<K, V> { root: Option<Root<K, V>>, length: usize, } #[stable(feature = "btree_drop", since = "1.7.0")] unsafe impl<#[may_dangle] K, #[may_dangle] V> Drop for BTreeMap<K, V> { fn drop(&mut self) { if let Some(root) = self.root.take() { Dropper { front: root.into_dying().first_leaf_edge(), remaining_length: self.length }; } } } #[stable(feature = "rust1", since = "1.0.0")] impl<K: Clone, V: Clone> Clone for BTreeMap<K, V> { fn clone(&self) -> BTreeMap<K, V> { fn clone_subtree<'a, K: Clone, V: Clone>( node: NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal>, ) -> BTreeMap<K, V> where K: 'a, V: 'a, { match node.force() { Leaf(leaf) => { let mut out_tree = BTreeMap { root: Some(Root::new()), length: 0 }; { let root = out_tree.root.as_mut().unwrap(); // unwrap succeeds because we just wrapped let mut out_node = match root.borrow_mut().force() { Leaf(leaf) => leaf, Internal(_) => unreachable!(), }; let mut in_edge = leaf.first_edge(); while let Ok(kv) = in_edge.right_kv() { let (k, v) = kv.into_kv(); in_edge = kv.right_edge(); out_node.push(k.clone(), v.clone()); out_tree.length += 1; } } out_tree } Internal(internal) => { let mut out_tree = clone_subtree(internal.first_edge().descend()); { let out_root = BTreeMap::ensure_is_owned(&mut out_tree.root); let mut out_node = out_root.push_internal_level(); let mut in_edge = internal.first_edge(); while let Ok(kv) = in_edge.right_kv() { let (k, v) = kv.into_kv(); in_edge = kv.right_edge(); let k = (*k).clone(); let v = (*v).clone(); let subtree = clone_subtree(in_edge.descend()); // We can't destructure subtree directly // because BTreeMap implements Drop let (subroot, sublength) = unsafe { let subtree = ManuallyDrop::new(subtree); let root = ptr::read(&subtree.root); let length = subtree.length; (root, length) }; out_node.push(k, v, subroot.unwrap_or_else(Root::new)); out_tree.length += 1 + sublength; } } out_tree } } } if self.is_empty() { // Ideally we'd call `BTreeMap::new` here, but that has the `K: // Ord` constraint, which this method lacks. 
BTreeMap { root: None, length: 0 } } else { clone_subtree(self.root.as_ref().unwrap().reborrow()) // unwrap succeeds because not empty } } } impl<K, Q: ?Sized> super::Recover<Q> for BTreeMap<K, ()> where K: Borrow<Q> + Ord, Q: Ord, { type Key = K; fn get(&self, key: &Q) -> Option<&K> { let root_node = self.root.as_ref()?.reborrow(); match root_node.search_tree(key) { Found(handle) => Some(handle.into_kv().0), GoDown(_) => None, } } fn take(&mut self, key: &Q) -> Option<K> { let (map, dormant_map) = DormantMutRef::new(self); let root_node = map.root.as_mut()?.borrow_mut(); match root_node.search_tree(key) { Found(handle) => { Some(OccupiedEntry { handle, dormant_map, _marker: PhantomData }.remove_kv().0) } GoDown(_) => None, } } fn replace(&mut self, key: K) -> Option<K> { let (map, dormant_map) = DormantMutRef::new(self); let root_node = Self::ensure_is_owned(&mut map.root).borrow_mut(); match root_node.search_tree::<K>(&key) { Found(mut kv) => Some(mem::replace(kv.key_mut(), key)), GoDown(handle) => { VacantEntry { key, handle, dormant_map, _marker: PhantomData }.insert(()); None } } } } /// An iterator over the entries of a `BTreeMap`. /// /// This `struct` is created by the [`iter`] method on [`BTreeMap`]. See its /// documentation for more. /// /// [`iter`]: BTreeMap::iter #[stable(feature = "rust1", since = "1.0.0")] pub struct Iter<'a, K: 'a, V: 'a> { range: Range<'a, K, V>, length: usize, } #[stable(feature = "collection_debug", since = "1.17.0")] impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Iter<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } /// A mutable iterator over the entries of a `BTreeMap`. /// /// This `struct` is created by the [`iter_mut`] method on [`BTreeMap`]. See its /// documentation for more. /// /// [`iter_mut`]: BTreeMap::iter_mut #[stable(feature = "rust1", since = "1.0.0")] #[derive(Debug)] pub struct IterMut<'a, K: 'a, V: 'a> { range: RangeMut<'a, K, V>, length: usize, } /// An owning iterator over the entries of a `BTreeMap`. /// /// This `struct` is created by the [`into_iter`] method on [`BTreeMap`] /// (provided by the `IntoIterator` trait). See its documentation for more. /// /// [`into_iter`]: IntoIterator::into_iter #[stable(feature = "rust1", since = "1.0.0")] pub struct IntoIter<K, V> { range: LeafRange<marker::Dying, K, V>, length: usize, } impl<K, V> IntoIter<K, V> { /// Returns an iterator of references over the remaining items. #[inline] pub(super) fn iter(&self) -> Iter<'_, K, V> { let range = Range { inner: self.range.reborrow() }; Iter { range, length: self.length } } } #[stable(feature = "collection_debug", since = "1.17.0")] impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IntoIter<K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.iter()).finish() } } /// A simplified version of `IntoIter` that is not double-ended and has only one /// purpose: to drop the remainder of an `IntoIter`. Therefore it also serves to /// drop an entire tree without the need to first look up a `back` leaf edge. struct Dropper<K, V> { front: Handle<NodeRef<marker::Dying, K, V, marker::Leaf>, marker::Edge>, remaining_length: usize, } /// An iterator over the keys of a `BTreeMap`. /// /// This `struct` is created by the [`keys`] method on [`BTreeMap`]. See its /// documentation for more.
/// /// [`keys`]: BTreeMap::keys #[stable(feature = "rust1", since = "1.0.0")] pub struct Keys<'a, K: 'a, V: 'a> { inner: Iter<'a, K, V>, } #[stable(feature = "collection_debug", since = "1.17.0")] impl<K: fmt::Debug, V> fmt::Debug for Keys<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } /// An iterator over the values of a `BTreeMap`. /// /// This `struct` is created by the [`values`] method on [`BTreeMap`]. See its /// documentation for more. /// /// [`values`]: BTreeMap::values #[stable(feature = "rust1", since = "1.0.0")] pub struct Values<'a, K: 'a, V: 'a> { inner: Iter<'a, K, V>, } #[stable(feature = "collection_debug", since = "1.17.0")] impl<K, V: fmt::Debug> fmt::Debug for Values<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } /// A mutable iterator over the values of a `BTreeMap`. /// /// This `struct` is created by the [`values_mut`] method on [`BTreeMap`]. See its /// documentation for more. /// /// [`values_mut`]: BTreeMap::values_mut #[stable(feature = "map_values_mut", since = "1.10.0")] pub struct ValuesMut<'a, K: 'a, V: 'a> { inner: IterMut<'a, K, V>, } #[stable(feature = "map_values_mut", since = "1.10.0")] impl<K, V: fmt::Debug> fmt::Debug for ValuesMut<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.inner.iter().map(|(_, val)| val)).finish() } } /// An owning iterator over the keys of a `BTreeMap`. /// /// This `struct` is created by the [`into_keys`] method on [`BTreeMap`]. /// See its documentation for more. /// /// [`into_keys`]: BTreeMap::into_keys #[stable(feature = "map_into_keys_values", since = "1.54.0")] pub struct IntoKeys<K, V> { inner: IntoIter<K, V>, } #[stable(feature = "map_into_keys_values", since = "1.54.0")] impl<K: fmt::Debug, V> fmt::Debug for IntoKeys<K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.inner.iter().map(|(key, _)| key)).finish() } } /// An owning iterator over the values of a `BTreeMap`. /// /// This `struct` is created by the [`into_values`] method on [`BTreeMap`]. /// See its documentation for more. /// /// [`into_values`]: BTreeMap::into_values #[stable(feature = "map_into_keys_values", since = "1.54.0")] pub struct IntoValues<K, V> { inner: IntoIter<K, V>, } #[stable(feature = "map_into_keys_values", since = "1.54.0")] impl<K, V: fmt::Debug> fmt::Debug for IntoValues<K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.inner.iter().map(|(_, val)| val)).finish() } } /// An iterator over a sub-range of entries in a `BTreeMap`. /// /// This `struct` is created by the [`range`] method on [`BTreeMap`]. See its /// documentation for more. /// /// [`range`]: BTreeMap::range #[stable(feature = "btree_range", since = "1.17.0")] pub struct Range<'a, K: 'a, V: 'a> { inner: LeafRange<marker::Immut<'a>, K, V>, } #[stable(feature = "collection_debug", since = "1.17.0")] impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Range<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } /// A mutable iterator over a sub-range of entries in a `BTreeMap`. /// /// This `struct` is created by the [`range_mut`] method on [`BTreeMap`]. See its /// documentation for more. 
/// /// [`range_mut`]: BTreeMap::range_mut #[stable(feature = "btree_range", since = "1.17.0")] pub struct RangeMut<'a, K: 'a, V: 'a> { inner: LeafRange<marker::ValMut<'a>, K, V>, // Be invariant in `K` and `V` _marker: PhantomData<&'a mut (K, V)>, } #[stable(feature = "collection_debug", since = "1.17.0")] impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for RangeMut<'_, K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let range = Range { inner: self.inner.reborrow() }; f.debug_list().entries(range).finish() } } impl<K, V> BTreeMap<K, V> { /// Makes a new, empty `BTreeMap`. /// /// Does not allocate anything on its own. /// /// # Examples /// /// Basic usage: /// /// ``` /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); /// /// // entries can now be inserted into the empty map /// map.insert(1, "a"); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")] pub const fn new() -> BTreeMap<K, V> where K: Ord, { BTreeMap { root: None, length: 0 } } /// Clears the map, removing all elements. /// /// # Examples /// /// Basic usage: /// /// ``` /// use std::collections::BTreeMap; /// /// let mut a = BTreeMap::new(); /// a.insert(1, "a"); /// a.clear(); /// assert!(a.is_empty()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn clear(&mut self) { *self = BTreeMap { root: None, length: 0 }; } /// Returns a reference to the value corresponding to the key. /// /// The key may be any borrowed form of the map's key type, but the ordering /// on the borrowed form *must* match the ordering on the key type. /// /// # Examples /// /// Basic usage: /// /// ``` /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); /// map.insert(1, "a"); /// assert_eq!(map.get(&1), Some(&"a")); /// assert_eq!(map.get(&2), None); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn get<Q: ?Sized>(&self, key: &Q) -> Option<&V> where K: Borrow<Q> + Ord, Q: Ord, { let root_node = self.root.as_ref()?.reborrow(); match root_node.search_tree(key) { Found(handle) => Some(handle.into_kv().1), GoDown(_) => None, } } /// Returns the key-value pair corresponding to the supplied key. /// /// The supplied key may be any borrowed form of the map's key type, but the ordering /// on the borrowed form *must* match the ordering on the key type. /// /// # Examples /// /// ``` /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); /// map.insert(1, "a"); /// assert_eq!(map.get_key_value(&1), Some((&1, &"a"))); /// assert_eq!(map.get_key_value(&2), None); /// ``` #[stable(feature = "map_get_key_value", since = "1.40.0")] pub fn get_key_value<Q: ?Sized>(&self, k: &Q) -> Option<(&K, &V)> where K: Borrow<Q> + Ord, Q: Ord, { let root_node = self.root.as_ref()?.reborrow(); match root_node.search_tree(k) { Found(handle) => Some(handle.into_kv()), GoDown(_) => None, } } /// Returns the first key-value pair in the map. /// The key in this pair is the minimum key in the map. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// #![feature(map_first_last)] /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); /// assert_eq!(map.first_key_value(), None); /// map.insert(1, "b"); /// map.insert(2, "a"); /// assert_eq!(map.first_key_value(), Some((&1, &"b"))); /// ``` #[unstable(feature = "map_first_last", issue = "62924")] pub fn first_key_value(&self) -> Option<(&K, &V)> where K: Ord, { let root_node = self.root.as_ref()?.reborrow(); root_node.first_leaf_edge().right_kv().ok().map(Handle::into_kv) } /// Returns the first entry in the map for in-place manipulation. /// The key of this entry is the minimum key in the map. /// /// # Examples /// /// ``` /// #![feature(map_first_last)] /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); /// map.insert(1, "a"); /// map.insert(2, "b"); /// if let Some(mut entry) = map.first_entry() { /// if *entry.key() > 0 { /// entry.insert("first"); /// } /// } /// assert_eq!(*map.get(&1).unwrap(), "first"); /// assert_eq!(*map.get(&2).unwrap(), "b"); /// ``` #[unstable(feature = "map_first_last", issue = "62924")] pub fn first_entry(&mut self) -> Option<OccupiedEntry<'_, K, V>> where K: Ord, { let (map, dormant_map) = DormantMutRef::new(self); let root_node = map.root.as_mut()?.borrow_mut(); let kv = root_node.first_leaf_edge().right_kv().ok()?; Some(OccupiedEntry { handle: kv.forget_node_type(), dormant_map, _marker: PhantomData }) } /// Removes and returns the first element in the map. /// The key of this element is the minimum key that was in the map. /// /// # Examples /// /// Draining elements in ascending order, while keeping a usable map each iteration. /// /// ``` /// #![feature(map_first_last)] /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); /// map.insert(1, "a"); /// map.insert(2, "b"); /// while let Some((key, _val)) = map.pop_first() { /// assert!(map.iter().all(|(k, _v)| *k > key)); /// } /// assert!(map.is_empty()); /// ``` #[unstable(feature = "map_first_last", issue = "62924")] pub fn pop_first(&mut self) -> Option<(K, V)> where K: Ord, { self.first_entry().map(|entry| entry.remove_entry()) } /// Returns the last key-value pair in the map. /// The key in this pair is the maximum key in the map. /// /// # Examples /// /// Basic usage: /// /// ``` /// #![feature(map_first_last)] /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); /// map.insert(1, "b"); /// map.insert(2, "a"); /// assert_eq!(map.last_key_value(), Some((&2, &"a"))); /// ``` #[unstable(feature = "map_first_last", issue = "62924")] pub fn last_key_value(&self) -> Option<(&K, &V)> where K: Ord, { let root_node = self.root.as_ref()?.reborrow(); root_node.last_leaf_edge().left_kv().ok().map(Handle::into_kv) } /// Returns the last entry in the map for in-place manipulation. /// The key of this entry is the maximum key in the map. 
/// /// # Examples /// /// ``` /// #![feature(map_first_last)] /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); /// map.insert(1, "a"); /// map.insert(2, "b"); /// if let Some(mut entry) = map.last_entry() { /// if *entry.key() > 0 { /// entry.insert("last"); /// } /// } /// assert_eq!(*map.get(&1).unwrap(), "a"); /// assert_eq!(*map.get(&2).unwrap(), "last"); /// ``` #[unstable(feature = "map_first_last", issue = "62924")] pub fn last_entry(&mut self) -> Option<OccupiedEntry<'_, K, V>> where K: Ord, { let (map, dormant_map) = DormantMutRef::new(self); let root_node = map.root.as_mut()?.borrow_mut(); let kv = root_node.last_leaf_edge().left_kv().ok()?; Some(OccupiedEntry { handle: kv.forget_node_type(), dormant_map, _marker: PhantomData }) } /// Removes and returns the last element in the map. /// The key of this element is the maximum key that was in the map. /// /// # Examples /// /// Draining elements in descending order, while keeping a usable map each iteration. /// /// ``` /// #![feature(map_first_last)] /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); /// map.insert(1, "a"); /// map.insert(2, "b"); /// while let Some((key, _val)) = map.pop_last() { /// assert!(map.iter().all(|(k, _v)| *k < key)); /// } /// assert!(map.is_empty()); /// ``` #[unstable(feature = "map_first_last", issue = "62924")] pub fn pop_last(&mut self) -> Option<(K, V)> where K: Ord, { self.last_entry().map(|entry| entry.remove_entry()) } /// Returns `true` if the map contains a value for the specified key. /// /// The key may be any borrowed form of the map's key type, but the ordering /// on the borrowed form *must* match the ordering on the key type. /// /// # Examples /// /// Basic usage: /// /// ``` /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); /// map.insert(1, "a"); /// assert_eq!(map.contains_key(&1), true); /// assert_eq!(map.contains_key(&2), false); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn contains_key<Q: ?Sized>(&self, key: &Q) -> bool where K: Borrow<Q> + Ord, Q: Ord, { self.get(key).is_some() } /// Returns a mutable reference to the value corresponding to the key. /// /// The key may be any borrowed form of the map's key type, but the ordering /// on the borrowed form *must* match the ordering on the key type. /// /// # Examples /// /// Basic usage: /// /// ``` /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); /// map.insert(1, "a"); /// if let Some(x) = map.get_mut(&1) { /// *x = "b"; /// } /// assert_eq!(map[&1], "b"); /// ``` // See `get` for implementation notes, this is basically a copy-paste with mut's added #[stable(feature = "rust1", since = "1.0.0")] pub fn get_mut<Q: ?Sized>(&mut self, key: &Q) -> Option<&mut V> where K: Borrow<Q> + Ord, Q: Ord, { let root_node = self.root.as_mut()?.borrow_mut(); match root_node.search_tree(key) { Found(handle) => Some(handle.into_val_mut()), GoDown(_) => None, } } /// Inserts a key-value pair into the map. /// /// If the map did not have this key present, `None` is returned. /// /// If the map did have this key present, the value is updated, and the old /// value is returned. The key is not updated, though; this matters for /// types that can be `==` without being identical. See the [module-level /// documentation] for more. 
/// /// [module-level documentation]: index.html#insert-and-complex-keys /// /// # Examples /// /// Basic usage: /// /// ``` /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); /// assert_eq!(map.insert(37, "a"), None); /// assert_eq!(map.is_empty(), false); /// /// map.insert(37, "b"); /// assert_eq!(map.insert(37, "c"), Some("b")); /// assert_eq!(map[&37], "c"); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn insert(&mut self, key: K, value: V) -> Option<V> where K: Ord, { match self.entry(key) { Occupied(mut entry) => Some(entry.insert(value)), Vacant(entry) => { entry.insert(value); None } } } /// Tries to insert a key-value pair into the map, and returns /// a mutable reference to the value in the entry. /// /// If the map already had this key present, nothing is updated, and /// an error containing the occupied entry and the value is returned. /// /// # Examples /// /// Basic usage: /// /// ``` /// #![feature(map_try_insert)] /// /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); /// assert_eq!(map.try_insert(37, "a").unwrap(), &"a"); /// /// let err = map.try_insert(37, "b").unwrap_err(); /// assert_eq!(err.entry.key(), &37); /// assert_eq!(err.entry.get(), &"a"); /// assert_eq!(err.value, "b"); /// ``` #[unstable(feature = "map_try_insert", issue = "82766")] pub fn try_insert(&mut self, key: K, value: V) -> Result<&mut V, OccupiedError<'_, K, V>> where K: Ord, { match self.entry(key) { Occupied(entry) => Err(OccupiedError { entry, value }), Vacant(entry) => Ok(entry.insert(value)), } } /// Removes a key from the map, returning the value at the key if the key /// was previously in the map. /// /// The key may be any borrowed form of the map's key type, but the ordering /// on the borrowed form *must* match the ordering on the key type. /// /// # Examples /// /// Basic usage: /// /// ``` /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); /// map.insert(1, "a"); /// assert_eq!(map.remove(&1), Some("a")); /// assert_eq!(map.remove(&1), None); /// ``` #[doc(alias = "delete")] #[stable(feature = "rust1", since = "1.0.0")] pub fn remove<Q: ?Sized>(&mut self, key: &Q) -> Option<V> where K: Borrow<Q> + Ord, Q: Ord, { self.remove_entry(key).map(|(_, v)| v) } /// Removes a key from the map, returning the stored key and value if the key /// was previously in the map. /// /// The key may be any borrowed form of the map's key type, but the ordering /// on the borrowed form *must* match the ordering on the key type. /// /// # Examples /// /// Basic usage: /// /// ``` /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); /// map.insert(1, "a"); /// assert_eq!(map.remove_entry(&1), Some((1, "a"))); /// assert_eq!(map.remove_entry(&1), None); /// ``` #[stable(feature = "btreemap_remove_entry", since = "1.45.0")] pub fn remove_entry<Q: ?Sized>(&mut self, key: &Q) -> Option<(K, V)> where K: Borrow<Q> + Ord, Q: Ord, { let (map, dormant_map) = DormantMutRef::new(self); let root_node = map.root.as_mut()?.borrow_mut(); match root_node.search_tree(key) { Found(handle) => { Some(OccupiedEntry { handle, dormant_map, _marker: PhantomData }.remove_entry()) } GoDown(_) => None, } } /// Retains only the elements specified by the predicate. /// /// In other words, remove all pairs `(k, v)` such that `f(&k, &mut v)` returns `false`. 
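/// /// The elements are visited in ascending key order.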
/// /// # Examples /// /// ``` /// use std::collections::BTreeMap; /// /// let mut map: BTreeMap<i32, i32> = (0..8).map(|x| (x, x*10)).collect(); /// // Keep only the elements with even-numbered keys. /// map.retain(|&k, _| k % 2 == 0); /// assert!(map.into_iter().eq(vec![(0, 0), (2, 20), (4, 40), (6, 60)])); /// ``` #[inline] #[stable(feature = "btree_retain", since = "1.53.0")] pub fn retain<F>(&mut self, mut f: F) where K: Ord, F: FnMut(&K, &mut V) -> bool, { self.drain_filter(|k, v| !f(k, v)); } /// Moves all elements from `other` into `self`, leaving `other` empty. /// /// # Examples /// /// ``` /// use std::collections::BTreeMap; /// /// let mut a = BTreeMap::new(); /// a.insert(1, "a"); /// a.insert(2, "b"); /// a.insert(3, "c"); /// /// let mut b = BTreeMap::new(); /// b.insert(3, "d"); /// b.insert(4, "e"); /// b.insert(5, "f"); /// /// a.append(&mut b); /// /// assert_eq!(a.len(), 5); /// assert_eq!(b.len(), 0); /// /// assert_eq!(a[&1], "a"); /// assert_eq!(a[&2], "b"); /// assert_eq!(a[&3], "d"); /// assert_eq!(a[&4], "e"); /// assert_eq!(a[&5], "f"); /// ``` #[stable(feature = "btree_append", since = "1.11.0")] pub fn append(&mut self, other: &mut Self) where K: Ord, { // Do we have to append anything at all? if other.is_empty() { return; } // We can just swap `self` and `other` if `self` is empty. if self.is_empty() { mem::swap(self, other); return; } let self_iter = mem::take(self).into_iter(); let other_iter = mem::take(other).into_iter(); let root = BTreeMap::ensure_is_owned(&mut self.root); root.append_from_sorted_iters(self_iter, other_iter, &mut self.length) } /// Constructs a double-ended iterator over a sub-range of elements in the map. /// The simplest way is to use the range syntax `min..max`, thus `range(min..max)` will /// yield elements from min (inclusive) to max (exclusive). /// The range may also be entered as `(Bound<T>, Bound<T>)`, so for example /// `range((Excluded(4), Included(10)))` will yield a left-exclusive, right-inclusive /// range from 4 to 10. /// /// # Panics /// /// Panics if range `start > end`. /// Panics if range `start == end` and both bounds are `Excluded`. /// /// # Examples /// /// Basic usage: /// /// ``` /// use std::collections::BTreeMap; /// use std::ops::Bound::Included; /// /// let mut map = BTreeMap::new(); /// map.insert(3, "a"); /// map.insert(5, "b"); /// map.insert(8, "c"); /// for (&key, &value) in map.range((Included(&4), Included(&8))) { /// println!("{}: {}", key, value); /// } /// assert_eq!(Some((&5, &"b")), map.range(4..).next()); /// ``` #[stable(feature = "btree_range", since = "1.17.0")] pub fn range<T: ?Sized, R>(&self, range: R) -> Range<'_, K, V> where T: Ord, K: Borrow<T> + Ord, R: RangeBounds<T>, { if let Some(root) = &self.root { Range { inner: root.reborrow().range_search(range) } } else { Range { inner: LeafRange::none() } } } /// Constructs a mutable double-ended iterator over a sub-range of elements in the map. /// The simplest way is to use the range syntax `min..max`, thus `range(min..max)` will /// yield elements from min (inclusive) to max (exclusive). /// The range may also be entered as `(Bound<T>, Bound<T>)`, so for example /// `range((Excluded(4), Included(10)))` will yield a left-exclusive, right-inclusive /// range from 4 to 10. /// /// # Panics /// /// Panics if range `start > end`. /// Panics if range `start == end` and both bounds are `Excluded`.
/// /// # Examples /// /// Basic usage: /// /// ``` /// use std::collections::BTreeMap; /// /// let mut map: BTreeMap<&str, i32> = ["Alice", "Bob", "Carol", "Cheryl"] /// .iter() /// .map(|&s| (s, 0)) /// .collect(); /// for (_, balance) in map.range_mut("B".."Cheryl") { /// *balance += 100; /// } /// for (name, balance) in &map { /// println!("{} => {}", name, balance); /// } /// ``` #[stable(feature = "btree_range", since = "1.17.0")] pub fn range_mut<T: ?Sized, R>(&mut self, range: R) -> RangeMut<'_, K, V> where T: Ord, K: Borrow<T> + Ord, R: RangeBounds<T>, { if let Some(root) = &mut self.root { RangeMut { inner: root.borrow_valmut().range_search(range), _marker: PhantomData } } else { RangeMut { inner: LeafRange::none(), _marker: PhantomData } } } /// Gets the given key's corresponding entry in the map for in-place manipulation. /// /// # Examples /// /// Basic usage: /// /// ``` /// use std::collections::BTreeMap; /// /// let mut count: BTreeMap<&str, usize> = BTreeMap::new(); /// /// // count the number of occurrences of letters in the vec /// for x in vec!["a", "b", "a", "c", "a", "b"] { /// *count.entry(x).or_insert(0) += 1; /// } /// /// assert_eq!(count["a"], 3); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn entry(&mut self, key: K) -> Entry<'_, K, V> where K: Ord, { // FIXME(@porglezomp) Avoid allocating if we don't insert let (map, dormant_map) = DormantMutRef::new(self); let root_node = Self::ensure_is_owned(&mut map.root).borrow_mut(); match root_node.search_tree(&key) { Found(handle) => Occupied(OccupiedEntry { handle, dormant_map, _marker: PhantomData }), GoDown(handle) => { Vacant(VacantEntry { key, handle, dormant_map, _marker: PhantomData }) } } } /// Splits the collection into two at the given key. Returns everything after the given key, /// including the key. /// /// # Examples /// /// Basic usage: /// /// ``` /// use std::collections::BTreeMap; /// /// let mut a = BTreeMap::new(); /// a.insert(1, "a"); /// a.insert(2, "b"); /// a.insert(3, "c"); /// a.insert(17, "d"); /// a.insert(41, "e"); /// /// let b = a.split_off(&3); /// /// assert_eq!(a.len(), 2); /// assert_eq!(b.len(), 3); /// /// assert_eq!(a[&1], "a"); /// assert_eq!(a[&2], "b"); /// /// assert_eq!(b[&3], "c"); /// assert_eq!(b[&17], "d"); /// assert_eq!(b[&41], "e"); /// ``` #[stable(feature = "btree_split_off", since = "1.11.0")] pub fn split_off<Q: ?Sized + Ord>(&mut self, key: &Q) -> Self where K: Borrow<Q> + Ord, { if self.is_empty() { return Self::new(); } let total_num = self.len(); let left_root = self.root.as_mut().unwrap(); // unwrap succeeds because not empty let right_root = left_root.split_off(key); let (new_left_len, right_len) = Root::calc_split_length(total_num, &left_root, &right_root); self.length = new_left_len; BTreeMap { root: Some(right_root), length: right_len } } /// Creates an iterator that visits all elements (key-value pairs) in /// ascending key order and uses a closure to determine if an element should /// be removed. If the closure returns `true`, the element is removed from /// the map and yielded. If the closure returns `false`, or panics, the /// element remains in the map and will not be yielded. /// /// The iterator also lets you mutate the value of each element in the /// closure, regardless of whether you choose to keep or remove it. 
/// /// If the iterator is only partially consumed or not consumed at all, each /// of the remaining elements is still subjected to the closure, which may /// change its value and, by returning `true`, have the element removed and /// dropped. /// /// It is unspecified how many more elements will be subjected to the /// closure if a panic occurs in the closure, or a panic occurs while /// dropping an element, or if the `DrainFilter` value is leaked. /// /// # Examples /// /// Splitting a map into even and odd keys, reusing the original map: /// /// ``` /// #![feature(btree_drain_filter)] /// use std::collections::BTreeMap; /// /// let mut map: BTreeMap<i32, i32> = (0..8).map(|x| (x, x)).collect(); /// let evens: BTreeMap<_, _> = map.drain_filter(|k, _v| k % 2 == 0).collect(); /// let odds = map; /// assert_eq!(evens.keys().copied().collect::<Vec<_>>(), vec![0, 2, 4, 6]); /// assert_eq!(odds.keys().copied().collect::<Vec<_>>(), vec![1, 3, 5, 7]); /// ``` #[unstable(feature = "btree_drain_filter", issue = "70530")] pub fn drain_filter<F>(&mut self, pred: F) -> DrainFilter<'_, K, V, F> where K: Ord, F: FnMut(&K, &mut V) -> bool, { DrainFilter { pred, inner: self.drain_filter_inner() } } pub(super) fn drain_filter_inner(&mut self) -> DrainFilterInner<'_, K, V> where K: Ord, { if let Some(root) = self.root.as_mut() { let (root, dormant_root) = DormantMutRef::new(root); let front = root.borrow_mut().first_leaf_edge(); DrainFilterInner { length: &mut self.length, dormant_root: Some(dormant_root), cur_leaf_edge: Some(front), } } else { DrainFilterInner { length: &mut self.length, dormant_root: None, cur_leaf_edge: None } } } /// Creates a consuming iterator visiting all the keys, in sorted order. /// The map cannot be used after calling this. /// The iterator element type is `K`. /// /// # Examples /// /// ``` /// use std::collections::BTreeMap; /// /// let mut a = BTreeMap::new(); /// a.insert(2, "b"); /// a.insert(1, "a"); /// /// let keys: Vec<i32> = a.into_keys().collect(); /// assert_eq!(keys, [1, 2]); /// ``` #[inline] #[stable(feature = "map_into_keys_values", since = "1.54.0")] pub fn into_keys(self) -> IntoKeys<K, V> { IntoKeys { inner: self.into_iter() } } /// Creates a consuming iterator visiting all the values, in order by key. /// The map cannot be used after calling this. /// The iterator element type is `V`. 
/// /// # Examples /// /// ``` /// use std::collections::BTreeMap; /// /// let mut a = BTreeMap::new(); /// a.insert(1, "hello"); /// a.insert(2, "goodbye"); /// /// let values: Vec<&str> = a.into_values().collect(); /// assert_eq!(values, ["hello", "goodbye"]); /// ``` #[inline] #[stable(feature = "map_into_keys_values", since = "1.54.0")] pub fn into_values(self) -> IntoValues<K, V> { IntoValues { inner: self.into_iter() } } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, K, V> IntoIterator for &'a BTreeMap<K, V> { type Item = (&'a K, &'a V); type IntoIter = Iter<'a, K, V>; fn into_iter(self) -> Iter<'a, K, V> { self.iter() } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, K: 'a, V: 'a> Iterator for Iter<'a, K, V> { type Item = (&'a K, &'a V); fn next(&mut self) -> Option<(&'a K, &'a V)> { if self.length == 0 { None } else { self.length -= 1; Some(unsafe { self.range.inner.next_unchecked() }) } } fn size_hint(&self) -> (usize, Option<usize>) { (self.length, Some(self.length)) } fn last(mut self) -> Option<(&'a K, &'a V)> { self.next_back() } fn min(mut self) -> Option<(&'a K, &'a V)> { self.next() } fn max(mut self) -> Option<(&'a K, &'a V)> { self.next_back() } } #[stable(feature = "fused", since = "1.26.0")] impl<K, V> FusedIterator for Iter<'_, K, V> {} #[stable(feature = "rust1", since = "1.0.0")] impl<'a, K: 'a, V: 'a> DoubleEndedIterator for Iter<'a, K, V> { fn next_back(&mut self) -> Option<(&'a K, &'a V)> { if self.length == 0 { None } else { self.length -= 1; Some(unsafe { self.range.inner.next_back_unchecked() }) } } } #[stable(feature = "rust1", since = "1.0.0")] impl<K, V> ExactSizeIterator for Iter<'_, K, V> { fn len(&self) -> usize { self.length } } #[stable(feature = "rust1", since = "1.0.0")] impl<K, V> Clone for Iter<'_, K, V> { fn clone(&self) -> Self { Iter { range: self.range.clone(), length: self.length } } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, K, V> IntoIterator for &'a mut BTreeMap<K, V> { type Item = (&'a K, &'a mut V); type IntoIter = IterMut<'a, K, V>; fn into_iter(self) -> IterMut<'a, K, V> { self.iter_mut() } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, K: 'a, V: 'a> Iterator for IterMut<'a, K, V> { type Item = (&'a K, &'a mut V); fn next(&mut self) -> Option<(&'a K, &'a mut V)> { if self.length == 0 { None } else { self.length -= 1; Some(unsafe { self.range.inner.next_unchecked() }) } } fn size_hint(&self) -> (usize, Option<usize>) { (self.length, Some(self.length)) } fn last(mut self) -> Option<(&'a K, &'a mut V)> { self.next_back() } fn min(mut self) -> Option<(&'a K, &'a mut V)> { self.next() } fn max(mut self) -> Option<(&'a K, &'a mut V)> { self.next_back() } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, K: 'a, V: 'a> DoubleEndedIterator for IterMut<'a, K, V> { fn next_back(&mut self) -> Option<(&'a K, &'a mut V)> { if self.length == 0 { None } else { self.length -= 1; Some(unsafe { self.range.inner.next_back_unchecked() }) } } } #[stable(feature = "rust1", since = "1.0.0")] impl<K, V> ExactSizeIterator for IterMut<'_, K, V> { fn len(&self) -> usize { self.length } } #[stable(feature = "fused", since = "1.26.0")] impl<K, V> FusedIterator for IterMut<'_, K, V> {} impl<'a, K, V> IterMut<'a, K, V> { /// Returns an iterator of references over the remaining items. 
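/// This allows, for example, the `Debug` impl of [`ValuesMut`] to display the /// remaining entries without consuming the iterator.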
#[inline] pub(super) fn iter(&self) -> Iter<'_, K, V> { Iter { range: self.range.iter(), length: self.length } } } #[stable(feature = "rust1", since = "1.0.0")] impl<K, V> IntoIterator for BTreeMap<K, V> { type Item = (K, V); type IntoIter = IntoIter<K, V>; fn into_iter(self) -> IntoIter<K, V> { let mut me = ManuallyDrop::new(self); if let Some(root) = me.root.take() { let full_range = root.into_dying().full_range(); IntoIter { range: full_range, length: me.length } } else { IntoIter { range: LeafRange::none(), length: 0 } } } } impl<K, V> Drop for Dropper<K, V> { fn drop(&mut self) { // Similar to advancing a non-fusing iterator. fn next_or_end<K, V>( this: &mut Dropper<K, V>, ) -> Option<Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV>> { if this.remaining_length == 0 { unsafe { ptr::read(&this.front).deallocating_end() } None } else { this.remaining_length -= 1; Some(unsafe { this.front.deallocating_next_unchecked() }) } } struct DropGuard<'a, K, V>(&'a mut Dropper<K, V>); impl<'a, K, V> Drop for DropGuard<'a, K, V> { fn drop(&mut self) { // Continue the same loop we perform below. This only runs when unwinding, so we // don't have to care about panics this time (they'll abort). while let Some(kv) = next_or_end(&mut self.0) { kv.drop_key_val(); } } } while let Some(kv) = next_or_end(self) { let guard = DropGuard(self); kv.drop_key_val(); mem::forget(guard); } } } #[stable(feature = "btree_drop", since = "1.7.0")] impl<K, V> Drop for IntoIter<K, V> { fn drop(&mut self) { if let Some(front) = self.range.take_front() { Dropper { front, remaining_length: self.length }; } } } #[stable(feature = "rust1", since = "1.0.0")] impl<K, V> Iterator for IntoIter<K, V> { type Item = (K, V); fn next(&mut self) -> Option<(K, V)> { if self.length == 0 { None } else { self.length -= 1; let kv = unsafe { self.range.deallocating_next_unchecked() }; Some(kv.into_key_val()) } } fn size_hint(&self) -> (usize, Option<usize>) { (self.length, Some(self.length)) } } #[stable(feature = "rust1", since = "1.0.0")] impl<K, V> DoubleEndedIterator for IntoIter<K, V> { fn next_back(&mut self) -> Option<(K, V)> { if self.length == 0 { None } else { self.length -= 1; let kv = unsafe { self.range.deallocating_next_back_unchecked() }; Some(kv.into_key_val()) } } } #[stable(feature = "rust1", since = "1.0.0")] impl<K, V> ExactSizeIterator for IntoIter<K, V> { fn len(&self) -> usize { self.length } } #[stable(feature = "fused", since = "1.26.0")] impl<K, V> FusedIterator for IntoIter<K, V> {} #[stable(feature = "rust1", since = "1.0.0")] impl<'a, K, V> Iterator for Keys<'a, K, V> { type Item = &'a K; fn next(&mut self) -> Option<&'a K> { self.inner.next().map(|(k, _)| k) } fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() } fn last(mut self) -> Option<&'a K> { self.next_back() } fn min(mut self) -> Option<&'a K> { self.next() } fn max(mut self) -> Option<&'a K> { self.next_back() } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, K, V> DoubleEndedIterator for Keys<'a, K, V> { fn next_back(&mut self) -> Option<&'a K> { self.inner.next_back().map(|(k, _)| k) } } #[stable(feature = "rust1", since = "1.0.0")] impl<K, V> ExactSizeIterator for Keys<'_, K, V> { fn len(&self) -> usize { self.inner.len() } } #[stable(feature = "fused", since = "1.26.0")] impl<K, V> FusedIterator for Keys<'_, K, V> {} #[stable(feature = "rust1", since = "1.0.0")] impl<K, V> Clone for Keys<'_, K, V> { fn clone(&self) -> Self { Keys { inner: self.inner.clone() } } } #[stable(feature = "rust1", since = 
"1.0.0")] impl<'a, K, V> Iterator for Values<'a, K, V> { type Item = &'a V; fn next(&mut self) -> Option<&'a V> { self.inner.next().map(|(_, v)| v) } fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() } fn last(mut self) -> Option<&'a V> { self.next_back() } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a, K, V> DoubleEndedIterator for Values<'a, K, V> { fn next_back(&mut self) -> Option<&'a V> { self.inner.next_back().map(|(_, v)| v) } } #[stable(feature = "rust1", since = "1.0.0")] impl<K, V> ExactSizeIterator for Values<'_, K, V> { fn len(&self) -> usize { self.inner.len() } } #[stable(feature = "fused", since = "1.26.0")] impl<K, V> FusedIterator for Values<'_, K, V> {} #[stable(feature = "rust1", since = "1.0.0")] impl<K, V> Clone for Values<'_, K, V> { fn clone(&self) -> Self { Values { inner: self.inner.clone() } } } /// An iterator produced by calling `drain_filter` on BTreeMap. #[unstable(feature = "btree_drain_filter", issue = "70530")] pub struct DrainFilter<'a, K, V, F> where K: 'a, V: 'a, F: 'a + FnMut(&K, &mut V) -> bool, { pred: F, inner: DrainFilterInner<'a, K, V>, } /// Most of the implementation of DrainFilter are generic over the type /// of the predicate, thus also serving for BTreeSet::DrainFilter. pub(super) struct DrainFilterInner<'a, K: 'a, V: 'a> { /// Reference to the length field in the borrowed map, updated live. length: &'a mut usize, /// Buried reference to the root field in the borrowed map. /// Wrapped in `Option` to allow drop handler to `take` it. dormant_root: Option<DormantMutRef<'a, Root<K, V>>>, /// Contains a leaf edge preceding the next element to be returned, or the last leaf edge. /// Empty if the map has no root, if iteration went beyond the last leaf edge, /// or if a panic occurred in the predicate. cur_leaf_edge: Option<Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>>, } #[unstable(feature = "btree_drain_filter", issue = "70530")] impl<K, V, F> Drop for DrainFilter<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool, { fn drop(&mut self) { self.for_each(drop); } } #[unstable(feature = "btree_drain_filter", issue = "70530")] impl<K, V, F> fmt::Debug for DrainFilter<'_, K, V, F> where K: fmt::Debug, V: fmt::Debug, F: FnMut(&K, &mut V) -> bool, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("DrainFilter").field(&self.inner.peek()).finish() } } #[unstable(feature = "btree_drain_filter", issue = "70530")] impl<K, V, F> Iterator for DrainFilter<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool, { type Item = (K, V); fn next(&mut self) -> Option<(K, V)> { self.inner.next(&mut self.pred) } fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() } } impl<'a, K: 'a, V: 'a> DrainFilterInner<'a, K, V> { /// Allow Debug implementations to predict the next element. pub(super) fn peek(&self) -> Option<(&K, &V)> { let edge = self.cur_leaf_edge.as_ref()?; edge.reborrow().next_kv().ok().map(Handle::into_kv) } /// Implementation of a typical `DrainFilter::next` method, given the predicate. pub(super) fn next<F>(&mut self, pred: &mut F) -> Option<(K, V)> where F: FnMut(&K, &mut V) -> bool, { while let Ok(mut kv) = self.cur_leaf_edge.take()?.next_kv() { let (k, v) = kv.kv_mut(); if pred(k, v) { *self.length -= 1; let (kv, pos) = kv.remove_kv_tracking(|| { // SAFETY: we will touch the root in a way that will not // invalidate the position returned. 
let root = unsafe { self.dormant_root.take().unwrap().awaken() }; root.pop_internal_level(); self.dormant_root = Some(DormantMutRef::new(root).1); }); self.cur_leaf_edge = Some(pos); return Some(kv); } self.cur_leaf_edge = Some(kv.next_leaf_edge()); } None } /// Implementation of a typical `DrainFilter::size_hint` method. pub(super) fn size_hint(&self) -> (usize, Option<usize>) { // In most of the btree iterators, `self.length` is the number of elements // yet to be visited. Here, it includes elements that were visited and that // the predicate decided not to drain. Making this upper bound more accurate // requires maintaining an extra field and is not worth while. (0, Some(*self.length)) } } #[unstable(feature = "btree_drain_filter", issue = "70530")] impl<K, V, F> FusedIterator for DrainFilter<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool {} #[stable(feature = "btree_range", since = "1.17.0")] impl<'a, K, V> Iterator for Range<'a, K, V> { type Item = (&'a K, &'a V); fn next(&mut self) -> Option<(&'a K, &'a V)> { self.inner.next_checked() } fn last(mut self) -> Option<(&'a K, &'a V)> { self.next_back() } fn min(mut self) -> Option<(&'a K, &'a V)> { self.next() } fn max(mut self) -> Option<(&'a K, &'a V)> { self.next_back() } } #[stable(feature = "map_values_mut", since = "1.10.0")] impl<'a, K, V> Iterator for ValuesMut<'a, K, V> { type Item = &'a mut V; fn next(&mut self) -> Option<&'a mut V> { self.inner.next().map(|(_, v)| v) } fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() } fn last(mut self) -> Option<&'a mut V> { self.next_back() } } #[stable(feature = "map_values_mut", since = "1.10.0")] impl<'a, K, V> DoubleEndedIterator for ValuesMut<'a, K, V> { fn next_back(&mut self) -> Option<&'a mut V> { self.inner.next_back().map(|(_, v)| v) } } #[stable(feature = "map_values_mut", since = "1.10.0")] impl<K, V> ExactSizeIterator for ValuesMut<'_, K, V> { fn len(&self) -> usize { self.inner.len() } } #[stable(feature = "fused", since = "1.26.0")] impl<K, V> FusedIterator for ValuesMut<'_, K, V> {} #[stable(feature = "map_into_keys_values", since = "1.54.0")] impl<K, V> Iterator for IntoKeys<K, V> { type Item = K; fn next(&mut self) -> Option<K> { self.inner.next().map(|(k, _)| k) } fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() } fn last(mut self) -> Option<K> { self.next_back() } fn min(mut self) -> Option<K> { self.next() } fn max(mut self) -> Option<K> { self.next_back() } } #[stable(feature = "map_into_keys_values", since = "1.54.0")] impl<K, V> DoubleEndedIterator for IntoKeys<K, V> { fn next_back(&mut self) -> Option<K> { self.inner.next_back().map(|(k, _)| k) } } #[stable(feature = "map_into_keys_values", since = "1.54.0")] impl<K, V> ExactSizeIterator for IntoKeys<K, V> { fn len(&self) -> usize { self.inner.len() } } #[stable(feature = "map_into_keys_values", since = "1.54.0")] impl<K, V> FusedIterator for IntoKeys<K, V> {} #[stable(feature = "map_into_keys_values", since = "1.54.0")] impl<K, V> Iterator for IntoValues<K, V> { type Item = V; fn next(&mut self) -> Option<V> { self.inner.next().map(|(_, v)| v) } fn size_hint(&self) -> (usize, Option<usize>) { self.inner.size_hint() } fn last(mut self) -> Option<V> { self.next_back() } } #[stable(feature = "map_into_keys_values", since = "1.54.0")] impl<K, V> DoubleEndedIterator for IntoValues<K, V> { fn next_back(&mut self) -> Option<V> { self.inner.next_back().map(|(_, v)| v) } } #[stable(feature = "map_into_keys_values", since = "1.54.0")] impl<K, V> ExactSizeIterator for IntoValues<K, 
V> { fn len(&self) -> usize { self.inner.len() } } #[stable(feature = "map_into_keys_values", since = "1.54.0")] impl<K, V> FusedIterator for IntoValues<K, V> {} #[stable(feature = "btree_range", since = "1.17.0")] impl<'a, K, V> DoubleEndedIterator for Range<'a, K, V> { fn next_back(&mut self) -> Option<(&'a K, &'a V)> { self.inner.next_back_checked() } } #[stable(feature = "fused", since = "1.26.0")] impl<K, V> FusedIterator for Range<'_, K, V> {} #[stable(feature = "btree_range", since = "1.17.0")] impl<K, V> Clone for Range<'_, K, V> { fn clone(&self) -> Self { Range { inner: self.inner.clone() } } } #[stable(feature = "btree_range", since = "1.17.0")] impl<'a, K, V> Iterator for RangeMut<'a, K, V> { type Item = (&'a K, &'a mut V); fn next(&mut self) -> Option<(&'a K, &'a mut V)> { self.inner.next_checked() } fn last(mut self) -> Option<(&'a K, &'a mut V)> { self.next_back() } fn min(mut self) -> Option<(&'a K, &'a mut V)> { self.next() } fn max(mut self) -> Option<(&'a K, &'a mut V)> { self.next_back() } } impl<'a, K, V> RangeMut<'a, K, V> { /// Returns an iterator of references over the remaining items. #[inline] pub(super) fn iter(&self) -> Range<'_, K, V> { Range { inner: self.inner.reborrow() } } } #[stable(feature = "btree_range", since = "1.17.0")] impl<'a, K, V> DoubleEndedIterator for RangeMut<'a, K, V> { fn next_back(&mut self) -> Option<(&'a K, &'a mut V)> { self.inner.next_back_checked() } } #[stable(feature = "fused", since = "1.26.0")] impl<K, V> FusedIterator for RangeMut<'_, K, V> {} #[stable(feature = "rust1", since = "1.0.0")] impl<K: Ord, V> FromIterator<(K, V)> for BTreeMap<K, V> { fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> BTreeMap<K, V> { let mut map = BTreeMap::new(); map.extend(iter); map } } #[stable(feature = "rust1", since = "1.0.0")] impl<K: Ord, V> Extend<(K, V)> for BTreeMap<K, V> { #[inline] fn extend<T: IntoIterator<Item = (K, V)>>(&mut self, iter: T) { iter.into_iter().for_each(move |(k, v)| { self.insert(k, v); }); } #[inline] fn extend_one(&mut self, (k, v): (K, V)) { self.insert(k, v); } } #[stable(feature = "extend_ref", since = "1.2.0")] impl<'a, K: Ord + Copy, V: Copy> Extend<(&'a K, &'a V)> for BTreeMap<K, V> { fn extend<I: IntoIterator<Item = (&'a K, &'a V)>>(&mut self, iter: I) { self.extend(iter.into_iter().map(|(&key, &value)| (key, value))); } #[inline] fn extend_one(&mut self, (&k, &v): (&'a K, &'a V)) { self.insert(k, v); } } #[stable(feature = "rust1", since = "1.0.0")] impl<K: Hash, V: Hash> Hash for BTreeMap<K, V> { fn hash<H: Hasher>(&self, state: &mut H) { for elt in self { elt.hash(state); } } } #[stable(feature = "rust1", since = "1.0.0")] impl<K: Ord, V> Default for BTreeMap<K, V> { /// Creates an empty `BTreeMap`. 
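/// /// Since `Default` simply defers to [`BTreeMap::new`], the default value is an /// empty map; a minimal sketch: /// /// ``` /// use std::collections::BTreeMap; /// /// let map: BTreeMap<i32, &str> = BTreeMap::default(); /// assert!(map.is_empty()); /// ```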
fn default() -> BTreeMap<K, V> { BTreeMap::new() } } #[stable(feature = "rust1", since = "1.0.0")] impl<K: PartialEq, V: PartialEq> PartialEq for BTreeMap<K, V> { fn eq(&self, other: &BTreeMap<K, V>) -> bool { self.len() == other.len() && self.iter().zip(other).all(|(a, b)| a == b) } } #[stable(feature = "rust1", since = "1.0.0")] impl<K: Eq, V: Eq> Eq for BTreeMap<K, V> {} #[stable(feature = "rust1", since = "1.0.0")] impl<K: PartialOrd, V: PartialOrd> PartialOrd for BTreeMap<K, V> { #[inline] fn partial_cmp(&self, other: &BTreeMap<K, V>) -> Option<Ordering> { self.iter().partial_cmp(other.iter()) } } #[stable(feature = "rust1", since = "1.0.0")] impl<K: Ord, V: Ord> Ord for BTreeMap<K, V> { #[inline] fn cmp(&self, other: &BTreeMap<K, V>) -> Ordering { self.iter().cmp(other.iter()) } } #[stable(feature = "rust1", since = "1.0.0")] impl<K: Debug, V: Debug> Debug for BTreeMap<K, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_map().entries(self.iter()).finish() } } #[stable(feature = "rust1", since = "1.0.0")] impl<K, Q: ?Sized, V> Index<&Q> for BTreeMap<K, V> where K: Borrow<Q> + Ord, Q: Ord, { type Output = V; /// Returns a reference to the value corresponding to the supplied key. /// /// # Panics /// /// Panics if the key is not present in the `BTreeMap`. #[inline] fn index(&self, key: &Q) -> &V { self.get(key).expect("no entry found for key") } } impl<K, V> BTreeMap<K, V> { /// Gets an iterator over the entries of the map, sorted by key. /// /// # Examples /// /// Basic usage: /// /// ``` /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); /// map.insert(3, "c"); /// map.insert(2, "b"); /// map.insert(1, "a"); /// /// for (key, value) in map.iter() { /// println!("{}: {}", key, value); /// } /// /// let (first_key, first_value) = map.iter().next().unwrap(); /// assert_eq!((*first_key, *first_value), (1, "a")); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn iter(&self) -> Iter<'_, K, V> { if let Some(root) = &self.root { let full_range = root.reborrow().full_range(); Iter { range: Range { inner: full_range }, length: self.length } } else { Iter { range: Range { inner: LeafRange::none() }, length: 0 } } } /// Gets a mutable iterator over the entries of the map, sorted by key. /// /// # Examples /// /// Basic usage: /// /// ``` /// use std::collections::BTreeMap; /// /// let mut map = BTreeMap::new(); /// map.insert("a", 1); /// map.insert("b", 2); /// map.insert("c", 3); /// /// // add 10 to the value if the key isn't "a" /// for (key, value) in map.iter_mut() { /// if key != &"a" { /// *value += 10; /// } /// } /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { if let Some(root) = &mut self.root { let full_range = root.borrow_valmut().full_range(); IterMut { range: RangeMut { inner: full_range, _marker: PhantomData }, length: self.length, } } else { IterMut { range: RangeMut { inner: LeafRange::none(), _marker: PhantomData }, length: 0, } } } /// Gets an iterator over the keys of the map, in sorted order. /// /// # Examples /// /// Basic usage: /// /// ``` /// use std::collections::BTreeMap; /// /// let mut a = BTreeMap::new(); /// a.insert(2, "b"); /// a.insert(1, "a"); /// /// let keys: Vec<_> = a.keys().cloned().collect(); /// assert_eq!(keys, [1, 2]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn keys(&self) -> Keys<'_, K, V> { Keys { inner: self.iter() } } /// Gets an iterator over the values of the map, in order by key. 
/// /// # Examples /// /// Basic usage: /// /// ``` /// use std::collections::BTreeMap; /// /// let mut a = BTreeMap::new(); /// a.insert(1, "hello"); /// a.insert(2, "goodbye"); /// /// let values: Vec<&str> = a.values().cloned().collect(); /// assert_eq!(values, ["hello", "goodbye"]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn values(&self) -> Values<'_, K, V> { Values { inner: self.iter() } } /// Gets a mutable iterator over the values of the map, in order by key. /// /// # Examples /// /// Basic usage: /// /// ``` /// use std::collections::BTreeMap; /// /// let mut a = BTreeMap::new(); /// a.insert(1, String::from("hello")); /// a.insert(2, String::from("goodbye")); /// /// for value in a.values_mut() { /// value.push_str("!"); /// } /// /// let values: Vec<String> = a.values().cloned().collect(); /// assert_eq!(values, [String::from("hello!"), /// String::from("goodbye!")]); /// ``` #[stable(feature = "map_values_mut", since = "1.10.0")] pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> { ValuesMut { inner: self.iter_mut() } } /// Returns the number of elements in the map. /// /// # Examples /// /// Basic usage: /// /// ``` /// use std::collections::BTreeMap; /// /// let mut a = BTreeMap::new(); /// assert_eq!(a.len(), 0); /// a.insert(1, "a"); /// assert_eq!(a.len(), 1); /// ``` #[doc(alias = "length")] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")] pub const fn len(&self) -> usize { self.length } /// Returns `true` if the map contains no elements. /// /// # Examples /// /// Basic usage: /// /// ``` /// use std::collections::BTreeMap; /// /// let mut a = BTreeMap::new(); /// assert!(a.is_empty()); /// a.insert(1, "a"); /// assert!(!a.is_empty()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")] pub const fn is_empty(&self) -> bool { self.len() == 0 } /// If the root node is the empty (non-allocated) root node, allocate our /// own node. Is an associated function to avoid borrowing the entire BTreeMap. fn ensure_is_owned(root: &mut Option<Root<K, V>>) -> &mut Root<K, V> { root.get_or_insert_with(Root::new) } } #[cfg(test)] mod tests;
31.280853
110
0.542202
144a5187aa7ac04ede6e0a7cb31687cc06daf646
40463
//! main structures to interact with Biscuit tokens use super::crypto::{KeyPair, PublicKey}; use super::datalog::{Check, Fact, Rule, SymbolTable, Term}; use super::error; use super::format::SerializedBiscuit; use builder::{BiscuitBuilder, BlockBuilder}; use prost::Message; use rand_core::{CryptoRng, RngCore}; use std::collections::HashSet; use crate::format::{convert::proto_block_to_token_block, schema}; use authorizer::Authorizer; pub mod authorizer; pub mod builder; pub mod unverified; /// maximum supported version of the serialization format pub const MAX_SCHEMA_VERSION: u32 = 2; /// some symbols are predefined and available in every implementation, to avoid /// transmitting them with every token pub fn default_symbol_table() -> SymbolTable { let mut syms = SymbolTable::new(); syms.insert("authority"); syms.insert("ambient"); syms.insert("resource"); syms.insert("operation"); syms.insert("right"); syms.insert("current_time"); syms.insert("revocation_id"); syms } /// This structure represents a valid Biscuit token /// /// It contains multiple `Block` elements, the associated symbol table, /// and a serialized version of this data /// /// ```rust /// extern crate biscuit_auth as biscuit; /// /// use biscuit::{KeyPair, Biscuit, builder::*}; /// /// fn main() { /// let root = KeyPair::new(); /// /// // first we define the authority block for global data, /// // like access rights /// // data from the authority block cannot be created in any other block /// let mut builder = Biscuit::builder(&root); /// builder.add_authority_fact(fact("right", &[string("/a/file1.txt"), s("read")])); /// /// // facts and rules can also be parsed from a string /// builder.add_authority_fact("right(\"/a/file1.txt\", \"read\")").expect("parse error"); /// /// let token1 = builder.build().unwrap(); /// /// // we can create a new block builder from that token /// let mut builder2 = token1.create_block(); /// builder2.check_operation("read"); /// /// let token2 = token1.append(builder2).unwrap(); /// } /// ``` #[derive(Clone, Debug)] pub struct Biscuit { pub(crate) root_key_id: Option<u32>, pub(crate) authority: Block, pub(crate) blocks: Vec<Block>, pub(crate) symbols: SymbolTable, container: Option<SerializedBiscuit>, } impl Biscuit { /// create the first block's builder /// /// call [`builder::BiscuitBuilder::build`] to create the token pub fn builder(root: &KeyPair) -> BiscuitBuilder { Biscuit::builder_with_symbols(root, default_symbol_table()) } /// deserializes a token and validates the signature using the root public key pub fn from<T, F>(slice: T, f: F) -> Result<Self, error::Token> where F: Fn(Option<u32>) -> PublicKey, T: AsRef<[u8]>, { Biscuit::from_with_symbols(slice.as_ref(), f, default_symbol_table()) } /// deserializes a token and validates the signature using the root public key pub fn from_base64<T, F>(slice: T, f: F) -> Result<Self, error::Token> where F: Fn(Option<u32>) -> PublicKey, T: AsRef<[u8]>, { Biscuit::from_base64_with_symbols(slice, f, default_symbol_table()) } /// serializes the token pub fn to_vec(&self) -> Result<Vec<u8>, error::Token> { match self.container.as_ref() { None => Err(error::Token::InternalError), Some(c) => c.to_vec().map_err(error::Token::Format), } } /// serializes the token and encode it to a (URL safe) base64 string pub fn to_base64(&self) -> Result<String, error::Token> { match self.container.as_ref() { None => Err(error::Token::InternalError), Some(c) => c .to_vec() .map_err(error::Token::Format) .map(|v| base64::encode_config(v, base64::URL_SAFE)), } } /// 
returns the size of the serialized token pub fn serialized_size(&self) -> Result<usize, error::Token> { match self.container.as_ref() { None => Err(error::Token::InternalError), Some(c) => Ok(c.serialized_size()), } } /// creates a sealed version of the token /// /// sealed tokens cannot be attenuated pub fn seal(&self) -> Result<Biscuit, error::Token> { match &self.container { None => Err(error::Token::InternalError), Some(c) => { let container = c.seal()?; let mut token = self.clone(); token.container = Some(container); Ok(token) } } } /// creates an authorizer from this token pub fn authorizer(&self) -> Result<Authorizer, error::Token> { Authorizer::from_token(self) } /// creates a new block builder pub fn create_block(&self) -> BlockBuilder { BlockBuilder::new() } /// adds a new block to the token /// /// since the public key is integrated into the token, the keypair can be /// discarded right after calling this function pub fn append(&self, block_builder: BlockBuilder) -> Result<Self, error::Token> { let keypair = KeyPair::new_with_rng(&mut rand::rngs::OsRng); self.append_with_keypair(&keypair, block_builder) } /// returns the list of context elements of each block /// /// the context is a free form text field in which application specific data /// can be stored pub fn context(&self) -> Vec<Option<String>> { let mut res = vec![self.authority.context.clone()]; for b in self.blocks.iter() { res.push(b.context.clone()); } res } /// returns a list of revocation identifiers for each block, in order /// /// if a token is generated with the same keys and the same content, /// those identifiers will stay the same pub fn revocation_identifiers(&self) -> Vec<Vec<u8>> { let mut res = Vec::new(); if let Some(token) = self.container.as_ref() { res.push(token.authority.signature.to_bytes().to_vec()); for block in token.blocks.iter() { res.push(block.signature.to_bytes().to_vec()); } } res } /// pretty printer for this token pub fn print(&self) -> String { let authority = print_block(&self.symbols, &self.authority); let blocks: Vec<_> = self .blocks .iter() .map(|b| print_block(&self.symbols, b)) .collect(); format!( "Biscuit {{\n symbols: {:?}\n authority: {}\n blocks: [\n {}\n ]\n}}", self.symbols.symbols, authority, blocks.join(",\n\t") ) } /// prints the content of a block as Datalog source code pub fn print_block_source(&self, index: usize) -> Option<String> { let block = if index == 0 { &self.authority } else { match self.blocks.get(index - 1) { None => return None, Some(block) => block, } }; Some(block.print_source(&self.symbols)) } /// create the first block's builder, using a provided symbol table pub fn builder_with_symbols(root: &KeyPair, symbols: SymbolTable) -> BiscuitBuilder { BiscuitBuilder::new(root, symbols) } /// creates a new token /// /// the public part of the root keypair must be used for verification pub(crate) fn new( root_key_id: Option<u32>, root: &KeyPair, symbols: SymbolTable, authority: Block, ) -> Result<Biscuit, error::Token> { Self::new_with_rng( &mut rand::rngs::OsRng, root_key_id, root, symbols, authority, ) } /// creates a new token, using a provided CSPRNG /// /// the public part of the root keypair must be used for verification pub(crate) fn new_with_rng<T: RngCore + CryptoRng>( rng: &mut T, root_key_id: Option<u32>, root: &KeyPair, mut symbols: SymbolTable, authority: Block, ) -> Result<Biscuit, error::Token> { let h1 = symbols.symbols.iter().collect::<HashSet<_>>(); let h2 = authority.symbols.symbols.iter().collect::<HashSet<_>>(); if !h1.is_disjoint(&h2) { return
Err(error::Token::SymbolTableOverlap); } symbols .symbols .extend(authority.symbols.symbols.iter().cloned()); let blocks = vec![]; let next_keypair = KeyPair::new_with_rng(rng); let container = SerializedBiscuit::new(root_key_id, root, &next_keypair, &authority)?; Ok(Biscuit { root_key_id, authority, blocks, symbols, container: Some(container), }) } /// deserializes a token and validates the signature using the root public key, with a custom symbol table pub fn from_with_symbols<F>( slice: &[u8], f: F, symbols: SymbolTable, ) -> Result<Self, error::Token> where F: Fn(Option<u32>) -> PublicKey, { let container = SerializedBiscuit::from_slice(slice, f).map_err(error::Token::Format)?; Biscuit::from_serialized_container(container, symbols) } fn from_serialized_container( container: SerializedBiscuit, mut symbols: SymbolTable, ) -> Result<Self, error::Token> { let authority: Block = schema::Block::decode(&container.authority.data[..]) .map_err(|e| { error::Token::Format(error::Format::BlockDeserializationError(format!( "error deserializing authority block: {:?}", e ))) }) .and_then(|b| proto_block_to_token_block(&b).map_err(error::Token::Format))?; let mut blocks = vec![]; for block in container.blocks.iter() { let deser: Block = schema::Block::decode(&block.data[..]) .map_err(|e| { error::Token::Format(error::Format::BlockDeserializationError(format!( "error deserializing block: {:?}", e ))) }) .and_then(|b| proto_block_to_token_block(&b).map_err(error::Token::Format))?; blocks.push(deser); } symbols .symbols .extend(authority.symbols.symbols.iter().cloned()); for block in blocks.iter() { symbols .symbols .extend(block.symbols.symbols.iter().cloned()); } let root_key_id = container.root_key_id; let container = Some(container); Ok(Biscuit { root_key_id, authority, blocks, symbols, container, }) } /// deserializes a token and validates the signature using the root public key, with a custom symbol table pub fn from_base64_with_symbols<T, F>( slice: T, f: F, symbols: SymbolTable, ) -> Result<Self, error::Token> where F: Fn(Option<u32>) -> PublicKey, T: AsRef<[u8]>, { let decoded = base64::decode_config(slice, base64::URL_SAFE)?; Biscuit::from_with_symbols(&decoded, f, symbols) } /// returns the internal representation of the token pub fn container(&self) -> Option<&SerializedBiscuit> { self.container.as_ref() } /// adds a new block to the token, using the provided CSPRNG /// /// since the public key is integrated into the token, the keypair can be /// discarded right after calling this function pub fn append_with_keypair( &self, keypair: &KeyPair, block_builder: BlockBuilder, ) -> Result<Self, error::Token> { if self.container.is_none() { return Err(error::Token::Sealed); } let block = block_builder.build(self.symbols.clone()); let h1 = self.symbols.symbols.iter().collect::<HashSet<_>>(); let h2 = block.symbols.symbols.iter().collect::<HashSet<_>>(); if !h1.is_disjoint(&h2) { return Err(error::Token::SymbolTableOverlap); } let authority = self.authority.clone(); let mut blocks = self.blocks.clone(); let mut symbols = self.symbols.clone(); let container = match self.container.as_ref() { None => return Err(error::Token::Sealed), Some(c) => c.append(keypair, &block)?, }; symbols .symbols .extend(block.symbols.symbols.iter().cloned()); blocks.push(block); Ok(Biscuit { root_key_id: self.root_key_id, authority, blocks, symbols, container: Some(container), }) } /// gets the list of symbols from a block pub fn block_symbols(&self, index: usize) -> Option<Vec<String>> { let block = if index == 0 { 
&self.authority } else { match self.blocks.get(index - 1) { None => return None, Some(block) => block, } }; Some(block.symbols.symbols.clone()) } /// returns the number of blocks (at least 1) pub fn block_count(&self) -> usize { 1 + self.blocks.len() } } fn print_block(symbols: &SymbolTable, block: &Block) -> String { let facts: Vec<_> = block.facts.iter().map(|f| symbols.print_fact(f)).collect(); let rules: Vec<_> = block.rules.iter().map(|r| symbols.print_rule(r)).collect(); let checks: Vec<_> = block .checks .iter() .map(|r| symbols.print_check(r)) .collect(); let facts = if facts.is_empty() { String::new() } else { format!( "\n {}\n ", facts.join(",\n ") ) }; let rules = if rules.is_empty() { String::new() } else { format!( "\n {}\n ", rules.join(",\n ") ) }; let checks = if checks.is_empty() { String::new() } else { format!( "\n {}\n ", checks.join(",\n ") ) }; format!( "Block {{\n symbols: {:?}\n version: {}\n context: \"{}\"\n facts: [{}]\n rules: [{}]\n checks: [{}]\n }}", block.symbols.symbols, block.version, block.context.as_deref().unwrap_or(""), facts, rules, checks, ) } /// a block contained in a token #[derive(Clone, Debug)] pub struct Block { /// list of symbols introduced by this block pub symbols: SymbolTable, /// list of facts provided by this block pub facts: Vec<Fact>, /// list of rules provided by this block pub rules: Vec<Rule>, /// checks that the token and ambient data must validate pub checks: Vec<Check>, /// contextual information that can be looked up before the verification /// (as an example, a user id to query rights into a database) pub context: Option<String>, /// format version used to generate this block pub version: u32, } impl Block { /// creates a new block /// /// blocks should be created through the BlockBuilder interface instead, to avoid mistakes pub fn new(base_symbols: SymbolTable) -> Block { Block { symbols: base_symbols, facts: vec![], rules: vec![], checks: vec![], context: None, version: MAX_SCHEMA_VERSION, } } pub fn symbol_add(&mut self, s: &str) -> Term { self.symbols.add(s) } pub fn symbol_insert(&mut self, s: &str) -> u64 { self.symbols.insert(s) } fn print_source(&self, symbols: &SymbolTable) -> String { let facts: Vec<_> = self.facts.iter().map(|f| symbols.print_fact(f)).collect(); let rules: Vec<_> = self.rules.iter().map(|r| symbols.print_rule(r)).collect(); let checks: Vec<_> = self.checks.iter().map(|r| symbols.print_check(r)).collect(); let mut res = facts.join(";\n"); if !facts.is_empty() { res.push_str(";\n"); } res.push_str(&rules.join(";\n")); if !rules.is_empty() { res.push_str(";\n"); } res.push_str(&checks.join(";\n")); if !checks.is_empty() { res.push_str(";\n"); } res } } #[cfg(test)] mod tests { use super::builder::{check, fact, int, pred, rule, s, var}; use super::*; use crate::crypto::KeyPair; use crate::error::*; use rand::prelude::*; use std::time::{Duration, SystemTime}; #[test] fn basic() { let mut rng: StdRng = SeedableRng::seed_from_u64(0); let root = KeyPair::new_with_rng(&mut rng); let serialized1 = { let mut builder = Biscuit::builder(&root); builder .add_authority_fact("right(\"file1\", \"read\")") .unwrap(); builder .add_authority_fact("right(\"file2\", \"read\")") .unwrap(); builder .add_authority_fact("right(\"file1\", \"write\")") .unwrap(); let biscuit1 = builder.build_with_rng(&mut rng).unwrap(); println!("biscuit1 (authority): {}", biscuit1.print()); biscuit1.to_vec().unwrap() }; //println!("generated biscuit token: {} bytes:\n{}", serialized1.len(), serialized1.to_hex(16)); println!("generated biscuit 
token: {} bytes", serialized1.len()); //panic!(); /* for i in 0..9 { let biscuit1_deser = Biscuit::from(&serialized1, root.public).unwrap(); // new check: can only have read access1 let mut block2 = biscuit1_deser.create_block(); block2.add_check(&rule( "check1", &[var(0)], &[ pred("resource", &[var(0)]), pred("operation", &[s("read")]), pred("right", &[var(0), s("read")]), ], )); let keypair2 = KeyPair::new_with_rng(&mut rng); let biscuit2 = biscuit1_deser.append(&keypair2, block2.to_block()).unwrap(); println!("biscuit2 (1 check): {}", biscuit2.print()); serialized1 = biscuit2.to_vec().unwrap(); } println!("generated biscuit token 2: {} bytes", serialized1.len()); panic!(); */ let serialized2 = { let biscuit1_deser = Biscuit::from(&serialized1, |_| root.public()).unwrap(); // new check: can only have read access1 let mut block2 = biscuit1_deser.create_block(); block2 .add_check(rule( "check1", &[var("resource")], &[ pred("resource", &[var("resource")]), pred("operation", &[s("read")]), pred("right", &[var("resource"), s("read")]), ], )) .unwrap(); let keypair2 = KeyPair::new_with_rng(&mut rng); let biscuit2 = biscuit1_deser .append_with_keypair(&keypair2, block2) .unwrap(); println!("biscuit2 (1 check): {}", biscuit2.print()); biscuit2.to_vec().unwrap() }; //println!("generated biscuit token 2: {} bytes\n{}", serialized2.len(), serialized2.to_hex(16)); println!("generated biscuit token 2: {} bytes", serialized2.len()); let serialized3 = { let biscuit2_deser = Biscuit::from(&serialized2, |_| root.public()).unwrap(); // new check: can only access file1 let mut block3 = biscuit2_deser.create_block(); block3 .add_check(rule( "check2", &[s("file1")], &[pred("resource", &[s("file1")])], )) .unwrap(); let keypair3 = KeyPair::new_with_rng(&mut rng); let biscuit3 = biscuit2_deser .append_with_keypair(&keypair3, block3) .unwrap(); biscuit3.to_vec().unwrap() }; //println!("generated biscuit token 3: {} bytes\n{}", serialized3.len(), serialized3.to_hex(16)); println!("generated biscuit token 3: {} bytes", serialized3.len()); //panic!(); let final_token = Biscuit::from(&serialized3, |_| root.public()).unwrap(); println!("final token:\n{}", final_token.print()); { let mut authorizer = final_token.authorizer().unwrap(); let mut facts = vec![ fact("resource", &[s("file1")]), fact("operation", &[s("read")]), ]; for fact in facts.drain(..) { authorizer.add_fact(fact).unwrap(); } //println!("final token: {:#?}", final_token); authorizer.allow().unwrap(); let res = authorizer.authorize(); println!("res1: {:?}", res); res.unwrap(); } { let mut authorizer = final_token.authorizer().unwrap(); let mut facts = vec![ fact("resource", &[s("file2")]), fact("operation", &[s("write")]), ]; for fact in facts.drain(..) 
{ authorizer.add_fact(fact).unwrap(); } authorizer.allow().unwrap(); let res = authorizer.authorize(); println!("res2: {:#?}", res); assert_eq!(res, Err(Token::FailedLogic(Logic::FailedChecks(vec![ FailedCheck::Block(FailedBlockCheck { block_id: 1, check_id: 0, rule: String::from("check if resource($resource), operation(\"read\"), right($resource, \"read\")") }), FailedCheck::Block(FailedBlockCheck { block_id: 2, check_id: 0, rule: String::from("check if resource(\"file1\")") }) ])))); } } #[test] fn folders() { let mut rng: StdRng = SeedableRng::seed_from_u64(0); let root = KeyPair::new_with_rng(&mut rng); let mut builder = Biscuit::builder(&root); builder.add_right("/folder1/file1", "read"); builder.add_right("/folder1/file1", "write"); builder.add_right("/folder1/file2", "read"); builder.add_right("/folder1/file2", "write"); builder.add_right("/folder2/file3", "read"); let biscuit1 = builder.build_with_rng(&mut rng).unwrap(); println!("biscuit1 (authority): {}", biscuit1.print()); let mut block2 = biscuit1.create_block(); block2.resource_prefix("/folder1/"); block2.check_right("read"); let keypair2 = KeyPair::new_with_rng(&mut rng); let biscuit2 = biscuit1.append_with_keypair(&keypair2, block2).unwrap(); { let mut authorizer = biscuit2.authorizer().unwrap(); authorizer.add_resource("/folder1/file1"); authorizer.add_operation("read"); authorizer.allow().unwrap(); let res = authorizer.authorize(); println!("res1: {:?}", res); println!("authorizer:\n{}", authorizer.print_world()); res.unwrap(); } { let mut authorizer = biscuit2.authorizer().unwrap(); authorizer.add_resource("/folder2/file3"); authorizer.add_operation("read"); authorizer.allow().unwrap(); let res = authorizer.authorize(); println!("res2: {:?}", res); assert_eq!( res, Err(Token::FailedLogic(Logic::FailedChecks(vec![ FailedCheck::Block(FailedBlockCheck { block_id: 1, check_id: 0, rule: String::from( "check if resource($resource), $resource.starts_with(\"/folder1/\")" ) }), ]))) ); } { let mut authorizer = biscuit2.authorizer().unwrap(); authorizer.add_resource("/folder2/file1"); authorizer.add_operation("write"); let res = authorizer.authorize(); println!("res3: {:?}", res); assert_eq!(res, Err(Token::FailedLogic(Logic::FailedChecks(vec![ FailedCheck::Block(FailedBlockCheck { block_id: 1, check_id: 0, rule: String::from("check if resource($resource), $resource.starts_with(\"/folder1/\")") }), FailedCheck::Block(FailedBlockCheck { block_id: 1, check_id: 1, rule: String::from("check if resource($resource_name), operation(\"read\"), right($resource_name, \"read\")") }), ])))); } } #[test] fn constraints() { let mut rng: StdRng = SeedableRng::seed_from_u64(0); let root = KeyPair::new_with_rng(&mut rng); let mut builder = Biscuit::builder(&root); builder.add_right("file1", "read"); builder.add_right("file2", "read"); let biscuit1 = builder.build_with_rng(&mut rng).unwrap(); println!("biscuit1 (authority): {}", biscuit1.print()); let mut block2 = biscuit1.create_block(); block2.expiration_date(SystemTime::now() + Duration::from_secs(30)); block2.add_fact("key(1234)").unwrap(); let keypair2 = KeyPair::new_with_rng(&mut rng); let biscuit2 = biscuit1.append_with_keypair(&keypair2, block2).unwrap(); { let mut authorizer = biscuit2.authorizer().unwrap(); authorizer.add_resource("file1"); authorizer.add_operation("read"); authorizer.set_time(); authorizer.allow().unwrap(); let res = authorizer.authorize(); println!("res1: {:?}", res); res.unwrap(); } { println!("biscuit2: {}", biscuit2.print()); let mut authorizer = 
biscuit2.authorizer().unwrap(); authorizer.add_resource("file1"); authorizer.add_operation("read"); authorizer.set_time(); authorizer.revocation_check(&[0, 1, 2, 5, 1234]); authorizer.allow().unwrap(); let res = authorizer.authorize(); println!("res3: {:?}", res); // error message should be like this: //"authorizer check 0 failed: check if revocation_id($0), $0 not in [2, 1234, 1, 5, 0]" assert!(res.is_err()); } } #[test] fn sealed_token() { let mut rng: StdRng = SeedableRng::seed_from_u64(0); let root = KeyPair::new_with_rng(&mut rng); let mut builder = Biscuit::builder(&root); builder.add_right("/folder1/file1", "read"); builder.add_right("/folder1/file1", "write"); builder.add_right("/folder1/file2", "read"); builder.add_right("/folder1/file2", "write"); builder.add_right("/folder2/file3", "read"); let biscuit1 = builder.build_with_rng(&mut rng).unwrap(); println!("biscuit1 (authority): {}", biscuit1.print()); let mut block2 = biscuit1.create_block(); block2.resource_prefix("/folder1/"); block2.check_right("read"); let keypair2 = KeyPair::new_with_rng(&mut rng); let biscuit2 = biscuit1.append_with_keypair(&keypair2, block2).unwrap(); //println!("biscuit2:\n{:#?}", biscuit2); //panic!(); { let mut authorizer = biscuit2.authorizer().unwrap(); authorizer.add_resource("/folder1/file1"); authorizer.add_operation("read"); authorizer.allow().unwrap(); let res = authorizer.authorize(); println!("res1: {:?}", res); res.unwrap(); } let _serialized = biscuit2.to_vec().unwrap(); //println!("biscuit2 serialized ({} bytes):\n{}", serialized.len(), serialized.to_hex(16)); let sealed = biscuit2.seal().unwrap().to_vec().unwrap(); //println!("biscuit2 sealed ({} bytes):\n{}", sealed.len(), sealed.to_hex(16)); let biscuit3 = Biscuit::from(&sealed, |_| root.public()).unwrap(); { let mut authorizer = biscuit3.authorizer().unwrap(); authorizer.add_resource("/folder1/file1"); authorizer.add_operation("read"); authorizer.allow().unwrap(); let res = authorizer.authorize(); println!("res1: {:?}", res); res.unwrap(); } } #[test] fn verif_no_blocks() { use crate::token::builder::*; let mut rng: StdRng = SeedableRng::seed_from_u64(1234); let root = KeyPair::new_with_rng(&mut rng); let mut builder = Biscuit::builder(&root); builder .add_authority_fact(fact("right", &[string("file1"), s("read")])) .unwrap(); builder .add_authority_fact(fact("right", &[string("file2"), s("read")])) .unwrap(); builder .add_authority_fact(fact("right", &[string("file1"), s("write")])) .unwrap(); let biscuit1 = builder.build_with_rng(&mut rng).unwrap(); println!("{}", biscuit1.print()); let mut v = biscuit1.authorizer().expect("omg authorizer"); v.add_check(rule( "right", &[s("right")], &[pred("right", &[string("file2"), s("write")])], )) .unwrap(); //assert!(v.verify().is_err()); let res = v.authorize(); println!("res: {:?}", res); assert_eq!( res, Err(Token::FailedLogic(Logic::FailedChecks(vec![ FailedCheck::Authorizer(FailedAuthorizerCheck { check_id: 0, rule: String::from("check if right(\"file2\", \"write\")") }), ]))) ); } #[test] fn authorizer_queries() { let mut rng: StdRng = SeedableRng::seed_from_u64(0); let root = KeyPair::new_with_rng(&mut rng); let mut builder = Biscuit::builder(&root); builder.add_right("file1", "read"); builder.add_right("file2", "read"); let biscuit1 = builder.build_with_rng(&mut rng).unwrap(); println!("biscuit1 (authority): {}", biscuit1.print()); let mut block2 = biscuit1.create_block(); block2.expiration_date(SystemTime::now() + Duration::from_secs(30)); block2.add_fact("key(1234)").unwrap(); let 
keypair2 = KeyPair::new_with_rng(&mut rng); let biscuit2 = biscuit1.append_with_keypair(&keypair2, block2).unwrap(); let mut block3 = biscuit2.create_block(); block3.expiration_date(SystemTime::now() + Duration::from_secs(10)); block3.add_fact("key(5678)").unwrap(); let keypair3 = KeyPair::new_with_rng(&mut rng); let biscuit3 = biscuit2.append_with_keypair(&keypair3, block3).unwrap(); { let mut authorizer = biscuit3.authorizer().unwrap(); authorizer.add_resource("file1"); authorizer.add_operation("read"); authorizer.set_time(); let res = authorizer.authorize(); println!("res1: {:?}", res); let res2: Result<Vec<builder::Fact>, crate::error::Token> = authorizer.query(rule( "key_verif", &[builder::Term::Variable("id".to_string())], &[pred("key", &[builder::Term::Variable("id".to_string())])], )); println!("res2: {:?}", res2); let mut res2 = res2 .unwrap() .iter() .map(|f| f.to_string()) .collect::<Vec<_>>(); res2.sort(); assert_eq!( &res2, &[ fact("key_verif", &[int(1234)]), fact("key_verif", &[int(5678)]) ] .iter() .map(|f| f.to_string()) .collect::<Vec<_>>() ); } } #[test] fn check_head_name() { let mut rng: StdRng = SeedableRng::seed_from_u64(0); let root = KeyPair::new_with_rng(&mut rng); let mut builder = Biscuit::builder(&root); builder .add_authority_check(check(&[pred("resource", &[s("hello")])])) .unwrap(); let biscuit1 = builder.build_with_rng(&mut rng).unwrap(); println!("biscuit1 (authority): {}", biscuit1.print()); // new check: can only have read access1 let mut block2 = biscuit1.create_block(); block2.add_fact(fact("check1", &[s("test")])).unwrap(); let keypair2 = KeyPair::new_with_rng(&mut rng); let biscuit2 = biscuit1.append_with_keypair(&keypair2, block2).unwrap(); println!("biscuit2: {}", biscuit2.print()); //println!("generated biscuit token 2: {} bytes\n{}", serialized2.len(), serialized2.to_hex(16)); { let mut authorizer = biscuit2.authorizer().unwrap(); authorizer.add_resource("file1"); authorizer.add_operation("read"); println!("symbols before time: {:?}", authorizer.symbols); authorizer.set_time(); println!("world:\n{}", authorizer.print_world()); println!("symbols: {:?}", authorizer.symbols); let res = authorizer.authorize(); println!("res1: {:?}", res); assert_eq!( res, Err(Token::FailedLogic(Logic::FailedChecks(vec![ FailedCheck::Block(FailedBlockCheck { block_id: 0, check_id: 0, rule: String::from("check if resource(\"hello\")"), }), ]))) ); } } /* #[test] fn check_requires_fact_in_future_block() { let mut rng: StdRng = SeedableRng::seed_from_u64(0); let root = KeyPair::new_with_rng(&mut rng); let mut builder = Biscuit::builder(&root); builder .add_authority_check(check(&[pred("name", &[var("name")])])) .unwrap(); let biscuit1 = builder.build_with_rng(&mut rng).unwrap(); println!("biscuit1 (authority): {}", biscuit1.print()); let mut authorizer1 = biscuit1.verify().unwrap(); authorizer1.allow().unwrap(); let res1 = authorizer1.verify(); println!("res1: {:?}", res1); assert_eq!( res1, Err(Token::FailedLogic(Logic::FailedChecks(vec![ FailedCheck::Block(FailedBlockCheck { block_id: 0, check_id: 0, rule: String::from("check if name($name)"), }), ]))) ); let mut block2 = biscuit1.create_block(); block2.add_fact(fact("name", &[s("test")])).unwrap(); let keypair2 = KeyPair::new_with_rng(&mut rng); let biscuit2 = biscuit1 .append_with_keypair(&keypair2, block2) .unwrap(); println!("biscuit2 (with name fact): {}", biscuit2.print()); let mut authorizer2 = biscuit2.verify().unwrap(); authorizer2.allow().unwrap(); let res2 = authorizer2.verify(); assert_eq!(res2, Ok(0)); }*/ 
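    // A small usage sketch (not part of the original suite) for
    // `revocation_identifiers`: assuming the builder API used throughout these
    // tests, each block, authority included, contributes one identifier, in order.
    #[test]
    fn revocation_identifiers_one_per_block() {
        let mut rng: StdRng = SeedableRng::seed_from_u64(0);
        let root = KeyPair::new_with_rng(&mut rng);

        let mut builder = Biscuit::builder(&root);
        builder
            .add_authority_fact("right(\"file1\", \"read\")")
            .unwrap();
        let biscuit1 = builder.build_with_rng(&mut rng).unwrap();

        // authority block only: one identifier
        assert_eq!(biscuit1.revocation_identifiers().len(), 1);

        let keypair2 = KeyPair::new_with_rng(&mut rng);
        let biscuit2 = biscuit1
            .append_with_keypair(&keypair2, biscuit1.create_block())
            .unwrap();

        // one more identifier per appended block
        assert_eq!(biscuit2.revocation_identifiers().len(), 2);
    }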
#[test] fn bytes_constraints() { let mut rng: StdRng = SeedableRng::seed_from_u64(0); let root = KeyPair::new_with_rng(&mut rng); let mut builder = Biscuit::builder(&root); builder.add_authority_fact("bytes(hex:0102AB)").unwrap(); let biscuit1 = builder.build_with_rng(&mut rng).unwrap(); println!("biscuit1 (authority): {}", biscuit1.print()); let mut block2 = biscuit1.create_block(); block2 .add_rule("has_bytes($0) <- bytes($0), [ hex:00000000, hex:0102AB ].contains($0)") .unwrap(); let keypair2 = KeyPair::new_with_rng(&mut rng); let biscuit2 = biscuit1.append_with_keypair(&keypair2, block2).unwrap(); let mut authorizer = biscuit2.authorizer().unwrap(); authorizer .add_check("check if bytes($0), [ hex:00000000, hex:0102AB ].contains($0)") .unwrap(); authorizer.allow().unwrap(); let res = authorizer.authorize(); println!("res1: {:?}", res); res.unwrap(); let res: Vec<(Vec<u8>,)> = authorizer.query("data($0) <- bytes($0)").unwrap(); println!("query result: {:x?}", res); println!("query result: {:?}", res[0]); } #[test] fn block1_generates_authority_or_ambient() { let mut rng: StdRng = SeedableRng::seed_from_u64(0); let root = KeyPair::new_with_rng(&mut rng); let serialized1 = { let mut builder = Biscuit::builder(&root); builder .add_authority_fact("right(\"/folder1/file1\", \"read\")") .unwrap(); builder .add_authority_fact("right(\"/folder1/file1\", \"write\")") .unwrap(); builder .add_authority_fact("right(\"/folder2/file1\", \"read\")") .unwrap(); builder .add_authority_check("check if operation(\"read\")") .unwrap(); let biscuit1 = builder.build_with_rng(&mut rng).unwrap(); println!("biscuit1 (authority): {}", biscuit1.print()); biscuit1.to_vec().unwrap() }; //println!("generated biscuit token: {} bytes:\n{}", serialized1.len(), serialized1.to_hex(16)); println!("generated biscuit token: {} bytes", serialized1.len()); //panic!(); let serialized2 = { let biscuit1_deser = Biscuit::from(&serialized1, |_| root.public()).unwrap(); // new check: can only have read access1 let mut block2 = biscuit1_deser.create_block(); // Bypass `check if operation("read")` from authority block block2 .add_rule("operation(\"read\") <- operation($any)") .unwrap(); // Bypass `check if resource($file), $file.starts_with("/folder1/")` from block #1 block2 .add_rule("resource(\"/folder1/\") <- resource($any)") .unwrap(); // Add missing rights block2.add_rule("right($file, $right) <- right($any1, $any2), resource($file), operation($right)") .unwrap(); let keypair2 = KeyPair::new_with_rng(&mut rng); let biscuit2 = biscuit1_deser .append_with_keypair(&keypair2, block2) .unwrap(); println!("biscuit2 (1 check): {}", biscuit2.print()); biscuit2.to_vec().unwrap() }; //println!("generated biscuit token 2: {} bytes\n{}", serialized2.len(), serialized2.to_hex(16)); println!("generated biscuit token 2: {} bytes", serialized2.len()); let final_token = Biscuit::from(&serialized2, |_| root.public()).unwrap(); println!("final token:\n{}", final_token.print()); let mut authorizer = final_token.authorizer().unwrap(); authorizer.add_resource("/folder2/file1"); authorizer.add_operation("write"); authorizer .add_policy("allow if resource($file), operation($op), right($file, $op)") .unwrap(); authorizer.deny().unwrap(); let res = authorizer.authorize_with_limits(crate::token::authorizer::AuthorizerLimits { max_time: Duration::from_secs(1), ..Default::default() }); println!("res1: {:?}", res); println!("authorizer:\n{}", authorizer.print_world()); assert!(res.is_err()); } }
33.220854
193
0.537429
89ab8de3a34f39057632201053c9cf2707f33f76
6,261
use std::collections::HashSet;

use amethyst_core::{
    ecs::*,
    math::{convert, Matrix4},
    transform::Transform,
};
use amethyst_rendy::skinning::JointTransforms;
use log::error;
#[cfg(feature = "profiler")]
use thread_profiler::profile_scope;

use super::resources::*;

/// System for performing vertex skinning.
///
/// Needs to run after global transforms have been updated for the current frame.
#[derive(Debug, Default)]
pub struct VertexSkinningSystem;

impl System for VertexSkinningSystem {
    fn build(self) -> Box<dyn ParallelRunnable> {
        let mut updated = HashSet::new();
        let mut updated_skins = HashSet::new();

        Box::new(
            SystemBuilder::new("VertexSkinningSystem")
                .read_component::<Joint>()
                .read_component::<Transform>()
                .write_component::<Skin>()
                .write_component::<JointTransforms>()
                .with_query(
                    <(Entity, Read<Transform>, Read<Joint>)>::query()
                        .filter(maybe_changed::<Transform>()),
                )
                .build(move |_, world, _, global_transforms| {
                    #[cfg(feature = "profiler")]
                    profile_scope!("vertex_skinning_system");

                    updated.clear();
                    updated_skins.clear();

                    global_transforms.for_each(world, |(entity, _, joint)| {
                        updated.insert(*entity);
                        for skin in &joint.skins {
                            updated_skins.insert(*skin);
                        }
                    });

                    let mut q = <(Entity, &Transform, &mut JointTransforms)>::query();
                    let (mut left, mut right) = world.split_for_query(&q);

                    for entity in updated_skins.iter() {
                        if let Ok(mut entry) = right.entry_mut(*entity) {
                            if let Ok(skin) = entry.get_component_mut::<Skin>() {
                                // Compute the joint global_transforms
                                skin.joint_matrices.clear();
                                let bind_shape = skin.bind_shape_matrix;
                                skin.joint_matrices.extend(
                                    skin.joints
                                        .iter()
                                        .zip(skin.inverse_bind_matrices.iter())
                                        .map(|(joint_entity, inverse_bind_matrix)| {
                                            if let Ok(transform) =
                                                global_transforms.get(&left, *joint_entity)
                                            {
                                                Some((transform, inverse_bind_matrix))
                                            } else {
                                                error!(
                                                    "Missing `Transform` Component for joint entity {:?}",
                                                    joint_entity
                                                );
                                                None
                                            }
                                        })
                                        .flatten()
                                        .map(|(global, inverse_bind_matrix)| {
                                            global.1.global_matrix() * inverse_bind_matrix * bind_shape
                                        }),
                                );
                                // update the joint matrices in all referenced mesh entities
                                for (entity, mesh_global, matrix) in q.iter_mut(&mut left) {
                                    if skin.meshes.contains(entity) {
                                        if let Some(global_inverse) =
                                            mesh_global.global_matrix().try_inverse()
                                        {
                                            matrix.matrices.clear();
                                            matrix.matrices.extend(skin.joint_matrices.iter().map(
                                                |joint_matrix| {
                                                    convert::<_, Matrix4<f32>>(
                                                        global_inverse * joint_matrix,
                                                    )
                                                },
                                            ));
                                        }
                                    }
                                }
                            }
                        }
                    }

                    let mut q = <(Entity, &Transform, &mut JointTransforms)>::query();
                    let (mut left, right) = world.split_for_query(&q);

                    for (entity, mesh_global, joint_transform) in q.iter_mut(&mut left) {
                        if updated.contains(entity) {
                            if let Some(global_inverse) = mesh_global.global_matrix().try_inverse() {
                                if let Ok(skin) = <&Skin>::query().get(&right, joint_transform.skin) {
                                    joint_transform.matrices.clear();
                                    joint_transform.matrices.extend(
                                        skin.joint_matrices.iter().map(|joint_matrix| {
                                            convert::<_, Matrix4<f32>>(global_inverse * joint_matrix)
                                        }),
                                    );
                                } else {
                                    error!(
                                        "Missing `Skin` Component for joint transform entity {:?}",
                                        joint_transform.skin
                                    );
                                }
                            }
                        }
                    }
                }),
        )
    }
}
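// A worked sketch of the skinning math implemented above (comment only, not
// part of the original file), assuming the column-vector convention used by
// nalgebra:
//
//     joint_matrices[j] = joint_global[j] * inverse_bind[j] * bind_shape
//     matrices[j]       = inverse(mesh_global) * joint_matrices[j]
//
// i.e. a vertex is taken from bind space into the joint's local space, moved
// to world space by the joint's current global transform, and finally brought
// back into the skinned mesh's local space before being handed to the renderer.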
47.431818
123
0.360326
bf9e4c98cacb9bd61366ce37449b5ac1adca94c7
8,731
// Copyright (c) Facebook, Inc. and its affiliates. // // This source code is licensed under the MIT license found in the // LICENSE file in the "hack" directory of this source tree. #![allow(unused_variables)] use aast_parser::{ rust_aast_parser_types::{Env as AastEnv, Result as AastResult}, AastParser, Error as AastError, }; use anyhow; use bitflags::bitflags; use emit_program_rust::{emit_program, FromAstFlags}; use hhas_program_rust::HhasProgram; use hhbc_hhas_rust::{print_program, Write}; use instruction_sequence_rust::Error; use itertools::{Either, Either::*}; use ocamlrep::rc::RcOc; use options::{LangFlags, Options, PhpismFlags}; use oxidized::{ ast as Tast, namespace_env::Env as NamespaceEnv, parser_options::ParserOptions, pos::Pos, relative_path::RelativePath, }; use parser_core_types::{indexed_source_text::IndexedSourceText, source_text::SourceText}; use stack_limit::StackLimit; /// Common input needed for compilation. Extra care is taken /// so that everything is easily serializable at the FFI boundary /// until the migration from OCaml is fully complete pub struct Env { pub filepath: RelativePath, pub empty_namespace: NamespaceEnv, pub config_jsons: Vec<String>, pub config_list: Vec<String>, pub flags: EnvFlags, } bitflags! { // Note: these flags are intentionally packed into bits to overcome // the limitation of to-OCaml FFI functions having at most 5 parameters pub struct EnvFlags: u8 { const IS_SYSTEMLIB = 1 << 0; const IS_EVALED = 1 << 1; const FOR_DEBUGGER_EVAL = 1 << 2; const DUMP_SYMBOL_REFS = 1 << 3; } } /// Compilation profile. All times are in seconds, /// except when they are ignored and should not be reported, /// such as in the case hhvm.log_extern_compiler_perf is false /// (this avoids the need to read Options from OCaml, as /// they can be simply returned as NaNs to signal that /// they should _not_ be passed back as JSON to HHVM process) #[derive(Debug)] pub struct Profile { pub parsing_t: f64, pub codegen_t: f64, pub printing_t: f64, } // TODO(hrust) switch over to Option<Duration> once FFI is no longer needed; // however, for FFI it's easier to serialize to floating point in seconds // and use NaN for ignored (i.e., when hhvm.log_extern_compiler_perf is false) pub const IGNORED_DURATION: f64 = std::f64::NAN; pub fn is_ignored_duration(dt: &f64) -> bool { dt.is_nan() } pub fn from_text<W>( env: Env, stack_limit: &StackLimit, writer: &mut W, text: &[u8], ) -> anyhow::Result<Profile> where W: Write, W::Error: Send + Sync + 'static, // required by anyhow::Error { let opts = Options::from_configs(&env.config_jsons, &env.config_list).map_err(anyhow::Error::msg)?; let log_extern_compiler_perf = opts.log_extern_compiler_perf(); let mut ret = Profile { parsing_t: IGNORED_DURATION, codegen_t: IGNORED_DURATION, printing_t: IGNORED_DURATION, }; let ast = profile(log_extern_compiler_perf, &mut ret.parsing_t, || { parse_file(&opts, stack_limit, &env.filepath, text) }); let (program, codegen_t) = match ast { Either::Right((ast, is_hh_file)) => emit(&env, opts, is_hh_file, &ast), Either::Left((pos, msg, is_runtime_error)) => emit_fatal(&env, is_runtime_error, pos, msg), }; let program = program?; ret.codegen_t = codegen_t; profile(log_extern_compiler_perf, &mut ret.printing_t, || { print_program( Some(&env.filepath), env.flags.contains(EnvFlags::DUMP_SYMBOL_REFS), writer, &program, ) })?; Ok(ret) } fn emit<'p>( env: &Env, opts: Options, is_hh: bool, ast: &Tast::Program, ) -> (Result<HhasProgram<'p>, Error>, f64) { let mut flags = FromAstFlags::empty(); if is_hh { flags |= 
FromAstFlags::IS_HH_FILE; } if env.flags.contains(EnvFlags::IS_EVALED) { flags |= FromAstFlags::IS_EVALED; } if env.flags.contains(EnvFlags::FOR_DEBUGGER_EVAL) { flags |= FromAstFlags::FOR_DEBUGGER_EVAL; } if env.flags.contains(EnvFlags::IS_SYSTEMLIB) { flags |= FromAstFlags::IS_SYSTEMLIB; } let mut t = 0f64; let r = profile(opts.log_extern_compiler_perf(), &mut t, || { emit_program(opts, flags, &env.empty_namespace, ast) }); (r, t) } fn emit_fatal( env: &Env, is_runtime_error: bool, pos: Pos, msg: String, ) -> (Result<HhasProgram, Error>, f64) { //TODO(hrust): enable emit_program::emit_fatal_program unimplemented!() } fn create_parser_options(opts: &Options) -> ParserOptions { let hack_lang_flags = |flag| opts.hhvm.hack_lang_flags.contains(flag); let phpism_flags = |flag| opts.phpism_flags.contains(flag); let mut popt = ParserOptions::default(); popt.po_auto_namespace_map = opts .hhvm .aliased_namespaces .get() .iter() .map(|(x, y)| (x.to_owned(), y.to_owned())) .collect(); popt.po_codegen = true; popt.po_disallow_silence = false; popt.po_disallow_execution_operator = phpism_flags(PhpismFlags::DISALLOW_EXECUTION_OPERATOR); popt.po_disable_nontoplevel_declarations = phpism_flags(PhpismFlags::DISABLE_NONTOPLEVEL_DECLARATIONS); popt.po_disable_static_closures = phpism_flags(PhpismFlags::DISABLE_STATIC_CLOSURES); popt.po_disable_lval_as_an_expression = hack_lang_flags(LangFlags::DISABLE_LVAL_AS_AN_EXPRESSION); popt.po_enable_class_level_where_clauses = hack_lang_flags(LangFlags::ENABLE_CLASS_LEVEL_WHERE_CLAUSES); popt.po_disable_legacy_soft_typehints = hack_lang_flags(LangFlags::DISABLE_LEGACY_SOFT_TYPEHINTS); popt.po_allow_new_attribute_syntax = hack_lang_flags(LangFlags::ALLOW_NEW_ATTRIBUTE_SYNTAX); popt.po_disable_legacy_attribute_syntax = hack_lang_flags(LangFlags::DISABLE_LEGACY_ATTRIBUTE_SYNTAX); popt.po_const_default_func_args = hack_lang_flags(LangFlags::CONST_DEFAULT_FUNC_ARGS); popt.tco_const_static_props = hack_lang_flags(LangFlags::CONST_STATIC_PROPS); popt.po_abstract_static_props = hack_lang_flags(LangFlags::ABSTRACT_STATIC_PROPS); popt.po_disable_unset_class_const = hack_lang_flags(LangFlags::DISABLE_UNSET_CLASS_CONST); popt.po_disallow_func_ptrs_in_constants = hack_lang_flags(LangFlags::DISALLOW_FUNC_PTRS_IN_CONSTANTS); popt.po_enable_xhp_class_modifier = hack_lang_flags(LangFlags::ENABLE_XHP_CLASS_MODIFIER); popt } /// parse_file returns either error(Left) or ast(Right) /// - Left((Position, message, is_runtime_error)) /// - Right((ast, is_hh_file)) fn parse_file( opts: &Options, stack_limit: &StackLimit, filepath: &RelativePath, text: &[u8], ) -> Either<(Pos, String, bool), (Tast::Program, bool)> { let mut aast_env = AastEnv::default(); aast_env.codegen = true; aast_env.keep_errors = true; aast_env.show_all_errors = true; aast_env.fail_open = true; aast_env.parser_options = create_parser_options(opts); let source_text = SourceText::make(RcOc::new(filepath.clone()), text); let indexed_source_text = IndexedSourceText::new(source_text); let ast_result = AastParser::from_text(&aast_env, &indexed_source_text, Some(stack_limit)); match ast_result { Err(AastError::Other(msg)) => Left((Pos::make_none(), msg, false)), Err(AastError::ParserFatal(syntax_error, pos)) => { Left((pos, syntax_error.message.to_string(), false)) } Ok(ast) => match ast { AastResult { syntax_errors, .. } if !syntax_errors.is_empty() => unimplemented!(), AastResult { mut lowpri_errors, .. 
} if !lowpri_errors.is_empty() => { let (pos, msg) = lowpri_errors.pop().unwrap(); Left((pos, msg, false)) } AastResult { errors, aast, scoured_comments, file_mode, .. } => { if !errors.is_empty() { unimplemented!() } else { match aast { Ok(aast) => Right((aast, file_mode.is_hh_file())), Err(msg) => Left((Pos::make_none(), msg, false)), } } } }, } } fn profile<T, F>(log_extern_compiler_perf: bool, dt: &mut f64, f: F) -> T where F: FnOnce() -> T, { let t0 = std::time::Instant::now(); let ret = f(); *dt = if log_extern_compiler_perf { t0.elapsed().as_secs_f64() } else { IGNORED_DURATION }; ret }
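// A small sketch (not in the original file) of `profile`'s contract: when
// hhvm.log_extern_compiler_perf is off, the duration slot is left as the NaN
// sentinel so it is reported as "ignored" rather than as a real measurement.
#[cfg(test)]
mod profile_sketch {
    use super::{is_ignored_duration, profile};

    #[test]
    fn disabled_perf_logging_yields_ignored_duration() {
        let mut dt = 0.0;
        let v = profile(false, &mut dt, || 42);
        assert_eq!(v, 42);
        assert!(is_ignored_duration(&dt));
    }

    #[test]
    fn enabled_perf_logging_records_a_real_duration() {
        let mut dt = f64::NAN;
        profile(true, &mut dt, || ());
        assert!(!is_ignored_duration(&dt));
    }
}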
35.064257
99
0.66052
f788d23cccb57d8d81413da7f20c70a79ee78c28
1,012
use crate::utils::{deno_bin_path, dvm_root};
use crate::version::current_version;
use anyhow::Result;
use semver_parser::version::parse as semver_parse;
use std::fs;
use std::process::exit;

pub fn exec(version: Option<String>) -> Result<()> {
  let target_version = match version {
    Some(target_version) => match semver_parse(&target_version) {
      Ok(ver) => ver,
      Err(_) => {
        eprintln!("Invalid semver");
        exit(1)
      }
    },
    None => unimplemented!(),
  };

  let target_exe_path = deno_bin_path(&target_version);
  if !target_exe_path.exists() {
    eprintln!("deno v{} is not installed.", target_version);
    exit(1)
  }

  let current_version = current_version().unwrap();
  if current_version == target_version.to_string() {
    println!("Failed: deno v{} is in use.", target_version);
    exit(1);
  }

  let dvm_dir = dvm_root().join(format!("{}", target_version));
  fs::remove_dir_all(&dvm_dir).unwrap();
  println!("deno v{} removed.", target_version);
  Ok(())
}
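// A minimal sketch (not from the original file) of the version-parsing step
// above: `semver_parse` accepts only full MAJOR.MINOR.PATCH versions, which is
// what sends partial inputs down the "Invalid semver" exit path in `exec`.
#[cfg(test)]
mod version_arg_sketch {
  use semver_parser::version::parse as semver_parse;

  #[test]
  fn only_full_semver_is_accepted() {
    assert!(semver_parse("1.14.0").is_ok());
    // a partial version fails to parse and would trigger `exec`'s
    // "Invalid semver" error path
    assert!(semver_parse("1.14").is_err());
  }
}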
25.3
65
0.648221
d50614a99056678f724c1ec0ffdc580e9d0d357a
3,206
// Copyright 2022 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use common_datablocks::DataBlock;
use common_datavalues::DataSchema;
use common_exception::Result;
use common_meta_types::AuthInfo;
use common_meta_types::GrantObject;
use common_meta_types::RoleInfo;
use common_meta_types::UserInfo;
use common_meta_types::UserOptionFlag;
use common_meta_types::UserPrivilegeSet;
use common_tracing::tracing;

use crate::procedures::Procedure;
use crate::procedures::ProcedureFeatures;
use crate::sessions::QueryContext;

pub struct BootstrapTenantProcedure {}

impl BootstrapTenantProcedure {
    pub fn try_create() -> Result<Box<dyn Procedure>> {
        Ok(Box::new(BootstrapTenantProcedure {}))
    }
}

#[async_trait::async_trait]
impl Procedure for BootstrapTenantProcedure {
    fn name(&self) -> &str {
        "BOOTSTRAP_TENANT"
    }

    // args:
    // tenant_id: string
    // user_name: string
    // host_name: string
    // auth_type: string
    // password: string
    fn features(&self) -> ProcedureFeatures {
        ProcedureFeatures::default()
            .num_arguments(5)
            .management_mode_required(true)
            .user_option_flag(UserOptionFlag::TenantSetting)
    }

    async fn inner_eval(&self, ctx: Arc<QueryContext>, args: Vec<String>) -> Result<DataBlock> {
        let tenant = args[0].clone();
        let user_name = args[1].clone();
        let host_name = args[2].clone();
        let auth_type = args[3].clone();
        let password = args[4].clone();
        let user_mgr = ctx.get_user_manager();

        tracing::info!(
            "BootstrapTenant: tenant={}, user_name={}, host_name={}, auth_type={}",
            tenant,
            user_name,
            host_name,
            auth_type
        );

        // Create account admin role.
        let mut account_admin_role = RoleInfo::new("account_admin".to_string());
        account_admin_role.grants.grant_privileges(
            &account_admin_role.name,
            "",
            &GrantObject::Global,
            UserPrivilegeSet::available_privileges_on_global(),
        );
        user_mgr
            .add_role(&tenant, account_admin_role.clone(), true)
            .await?;

        // Create user.
        let auth_info = AuthInfo::create(&Some(auth_type), &Some(password))?;
        let mut user_info = UserInfo::new(user_name.clone(), host_name.clone(), auth_info);
        user_info.grants.grant_role(account_admin_role.identity());
        user_mgr.add_user(&tenant, user_info, true).await?;

        Ok(DataBlock::empty())
    }

    fn schema(&self) -> Arc<DataSchema> {
        Arc::new(DataSchema::empty())
    }
}
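// Invocation sketch (comment only; the SQL surface shown is an assumption,
// but the argument order matches the indices read in `inner_eval` above):
//
//     CALL BOOTSTRAP_TENANT('tenant1', 'user1', '%', 'sha256_password', 'secret')
//
// Effect: creates an `account_admin` role holding every global privilege,
// then creates user1@'%' with that role already granted.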
32.06
96
0.656581
fc690319596c37ec789975223ba37bc1d7868b8a
1,296
use serde::Deserialize;
use serde::Serialize;
use sha2::{Digest, Sha256};
use std::{io::Read, path::Path, path::PathBuf};

use indexmap::IndexMap;
use nu_errors::ShellError;

#[derive(Deserialize, Serialize, Debug, Default)]
pub struct Trusted {
    pub files: IndexMap<String, Vec<u8>>,
}

impl Trusted {
    pub fn new() -> Self {
        Trusted {
            files: IndexMap::new(),
        }
    }
}

pub fn is_file_trusted(nu_env_file: &Path, content: &[u8]) -> Result<bool, ShellError> {
    let contentdigest = Sha256::digest(&content).as_slice().to_vec();
    let nufile = std::fs::canonicalize(nu_env_file)?;

    let trusted = read_trusted()?;
    Ok(trusted.files.get(&nufile.to_string_lossy().to_string()) == Some(&contentdigest))
}

pub fn read_trusted() -> Result<Trusted, ShellError> {
    let config_path = crate::config::default_path_for(&Some(PathBuf::from("nu-env.toml")))?;

    let mut file = std::fs::OpenOptions::new()
        .read(true)
        .create(true)
        .write(true)
        .open(config_path)
        .map_err(|_| ShellError::untagged_runtime_error("Couldn't open nu-env.toml"))?;

    let mut doc = String::new();
    file.read_to_string(&mut doc)?;

    let allowed = toml::de::from_str(doc.as_str()).unwrap_or_else(|_| Trusted::new());
    Ok(allowed)
}
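// A minimal sketch (not in the original file) of the lookup performed by
// `is_file_trusted`: a file is trusted iff the SHA-256 digest of its current
// content equals the digest recorded under its canonical path. The path and
// contents below are hypothetical.
#[cfg(test)]
mod trust_sketch {
    use super::Trusted;
    use sha2::{Digest, Sha256};

    #[test]
    fn digest_lookup_by_canonical_path() {
        let mut trusted = Trusted::new();
        let content = b"startup = [\"echo hi\"]";
        let digest = Sha256::digest(content).as_slice().to_vec();
        trusted
            .files
            .insert("/home/user/project/.nu-env".to_string(), digest.clone());

        // same content: digests match, so the file would be trusted
        assert_eq!(
            trusted.files.get("/home/user/project/.nu-env"),
            Some(&digest)
        );
        // changed content: digest mismatch, so the file would not be trusted
        let tampered = Sha256::digest(b"startup = [\"rm -rf /\"]").as_slice().to_vec();
        assert_ne!(
            Some(&tampered),
            trusted.files.get("/home/user/project/.nu-env")
        );
    }
}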
28.8
92
0.64429
6157a50119dc02ef9a79077588c3f977c3469200
16,654
extern crate num; use bellman::{Circuit, ConstraintSystem, SynthesisError}; use sapling_crypto::jubjub::{JubjubEngine, JubjubParams, JubjubBls12}; use sapling_crypto::circuit::num::{AllocatedNum, Num}; use sapling_crypto::circuit::boolean::{AllocatedBit, Boolean}; use sapling_crypto::circuit::test::TestConstraintSystem; use sapling_crypto::pedersen_hash::{Personalization}; use pairing::bls12_381::{Bls12, Fr, FrRepr}; use pairing::{PrimeField, Field}; use crate::pedersen_hasher; use crate::circuit::merkle_proof; use crate::transactions::{NoteData, pubkey, note_hash}; use crate::circuit::transactions::{transfer, Note, nullifier}; use rand::os::OsRng; use rand::Rng; use num::BigInt; use num::Num as NumTrait; use std::str::FromStr; struct MerkleTreeAccumulator(Vec<Vec<Fr>>); const PROOF_LENGTH:usize = 32; lazy_static! { static ref JUBJUB_PARAMS: JubjubBls12 = JubjubBls12::new(); static ref MERKLE_DEFAULTS: Vec<Fr> = { let mut c = Fr::zero(); let mut res = vec![]; for i in 0 .. PROOF_LENGTH+1 { res.push(c); c = pedersen_hasher::compress::<Bls12>(&c, &c, Personalization::MerkleTree(i), &JUBJUB_PARAMS) } res }; } impl MerkleTreeAccumulator { pub fn new() -> Self { let mut res = MerkleTreeAccumulator(vec![]); for _ in 0 .. PROOF_LENGTH+1 { res.0.push(vec![]); } res } pub fn cell(&self, row: usize, index: usize) -> Fr { assert!(row <= PROOF_LENGTH, "too big row"); if index < self.0[row].len() { self.0[row][index] } else { MERKLE_DEFAULTS[row] } } pub fn size(&self) -> usize { self.0[0].len() } pub fn pushMany(&mut self, elements: &[Fr]) { let index = self.size(); let s = elements.len(); self.0[0].extend_from_slice(elements); for i in 1..PROOF_LENGTH+1 { let rl = self.0[i].len(); self.0[i].extend(vec![Fr::zero(); 1 + (index+s>>i) - rl]); for j in (index >> i) .. (index+s>>i) + 1 { self.0[i][j] = pedersen_hasher::compress::<Bls12>(&self.cell(i-1, j*2), &self.cell(i-1, j*2+1), Personalization::MerkleTree(i-1), &JUBJUB_PARAMS); } } } pub fn root(&self) -> Fr { self.cell(PROOF_LENGTH, 0) } pub fn proof(&self, index: usize) -> Vec<Fr> { (0..PROOF_LENGTH).map(|i| self.cell(i, (index >> i) ^ 1)).collect::<Vec<Fr>>() } } fn gen_rand_fr_limited<R: ::rand::Rng>(n: usize, rng: &mut R) -> Fr { let f :Fr = rng.gen(); if n == 256 { return f; } let mut f = f.into_repr(); let i = n>>6; let j = n&63; let c = f.as_ref()[i]; f.as_mut()[i] = c & ((1<< j)-1); for k in i + 1 .. 
4 { f.as_mut()[k] = 0; } Fr::from_repr(f).unwrap() } fn rand_note<R: ::rand::Rng>(asset_id: Option<Fr>, amount: Option<Fr>, native_amount: Option<Fr>, txid:Option<Fr>, owner: Option<Fr>, rng: &mut R) -> NoteData<Bls12> { NoteData::<Bls12> { asset_id: asset_id.unwrap_or(gen_rand_fr_limited(64, rng)), amount: amount.unwrap_or(gen_rand_fr_limited(32, rng)), native_amount: native_amount.unwrap_or(gen_rand_fr_limited(32, rng)), txid: txid.unwrap_or(rng.gen()), owner: owner.unwrap_or(rng.gen()) } } pub fn alloc_note_data<E: JubjubEngine, CS:ConstraintSystem<E>>( mut cs: CS, data: Option<NoteData<E>>) -> Result<Note<E>, SynthesisError> { Ok(match data { Some(data) => { Note { asset_id: AllocatedNum::alloc(cs.namespace(|| "alloc asset_id"), || Ok(data.asset_id)).unwrap(), amount: AllocatedNum::alloc(cs.namespace(|| "alloc amount"), || Ok(data.amount)).unwrap(), native_amount: AllocatedNum::alloc(cs.namespace(|| "alloc native_amount"), || Ok(data.native_amount)).unwrap(), txid: AllocatedNum::alloc(cs.namespace(|| "alloc txid"), || Ok(data.txid)).unwrap(), owner: AllocatedNum::alloc(cs.namespace(|| "alloc owner"), || Ok(data.owner)).unwrap() } }, None => { Note { asset_id: AllocatedNum::alloc(cs.namespace(|| "alloc asset_id"), || Err(SynthesisError::AssignmentMissing)).unwrap(), amount: AllocatedNum::alloc(cs.namespace(|| "alloc amount"), || Err(SynthesisError::AssignmentMissing)).unwrap(), native_amount: AllocatedNum::alloc(cs.namespace(|| "alloc native_amount"), || Err(SynthesisError::AssignmentMissing)).unwrap(), txid: AllocatedNum::alloc(cs.namespace(|| "alloc txid"), || Err(SynthesisError::AssignmentMissing)).unwrap(), owner: AllocatedNum::alloc(cs.namespace(|| "alloc owner"), || Err(SynthesisError::AssignmentMissing)).unwrap() } } }) } pub fn alloc_proof_data<E: JubjubEngine, CS:ConstraintSystem<E>>( mut cs: CS, data: Option<Vec<(E::Fr, bool)>>) -> Result<Vec<(AllocatedNum<E>, Boolean)>, SynthesisError> { Ok(match data { Some(data) => { data.iter().enumerate().map(|(i, (sibling, path))| ( AllocatedNum::alloc(cs.namespace(|| format!("sibling[{}]", i)), || Ok(sibling.clone())).unwrap(), Boolean::Is(AllocatedBit::alloc(cs.namespace(|| format!("path[{}]", i)), Some(path.clone())).unwrap()) ) ).collect::<Vec<(AllocatedNum<E>, Boolean)>>() }, None => { (0..PROOF_LENGTH).map(|i| ( AllocatedNum::alloc(cs.namespace(|| format!("sibling[{}]", i)), || Err(SynthesisError::AssignmentMissing)).unwrap(), Boolean::Is(AllocatedBit::alloc(cs.namespace(|| format!("path[{}]", i)), None).unwrap()) ) ).collect::<Vec<(AllocatedNum<E>, Boolean)>>() } }) } // Unoptimized, for test cases only fn fr2big(a:Fr) -> BigInt { BigInt::from_str_radix(&format!("{}", a)[5..69], 16).unwrap() } fn big2fr(a:BigInt) -> Fr { Fr::from_str(&format!("{}", a)).unwrap() } #[test] pub fn test_merkle_tree_struct(){ let mut rng = OsRng::new().unwrap(); let n_notes = 64; let note_hashes = (0..n_notes).map(|_| rng.gen()).collect::<Vec<_>>(); let index = rng.gen_range(0, n_notes); let mut mt = MerkleTreeAccumulator::new(); mt.pushMany(&note_hashes); let sibling = mt.proof(index as usize); let leaf_data = note_hashes[index]; let cmp_root = crate::pedersen_hasher::merkle_root::<Bls12>(&sibling, index as u64, &leaf_data, &JUBJUB_PARAMS); assert!(cmp_root == mt.root(), "merkle proof results should be equal"); } #[test] pub fn test_merkle_proof(){ let mut rng = OsRng::new().unwrap(); let n_notes = 64; let note_hashes = (0..n_notes).map(|_| rng.gen()).collect::<Vec<_>>(); let index = rng.gen_range(0, n_notes); let index_bits = 
(0..PROOF_LENGTH).map(|j| (index>>j) & 1 == 1).collect::<Vec<_>>(); let mut mt = MerkleTreeAccumulator::new(); mt.pushMany(&note_hashes); let mut cs = TestConstraintSystem::<Bls12>::new(); let sibling = mt.proof(index as usize); let proof_data = sibling.iter().zip(index_bits.iter()).map(|(&f, &b)| (f, b)).collect::<Vec<_>>(); let proof = alloc_proof_data(cs.namespace(|| "alloc proof {}"), Some(proof_data)).unwrap(); let leaf_data = note_hashes[index]; let leaf = AllocatedNum::alloc(cs.namespace(|| "alloc leaf"), || Ok(leaf_data)).unwrap(); let res = merkle_proof::merkle_proof(cs.namespace(|| "exec merkle proof"), &proof, &leaf, &JUBJUB_PARAMS).unwrap(); if !cs.is_satisfied() { let not_satisfied = cs.which_is_unsatisfied().unwrap_or(""); assert!(false, format!("Constraints not satisfied: {}", not_satisfied)); } assert!(crate::pedersen_hasher::merkle_root::<Bls12>(&sibling, index as u64, &leaf_data, &JUBJUB_PARAMS) == res.get_value().unwrap(), "merkle proof results should be equal"); } #[test] fn test_transaction() { let mut rng = OsRng::new().unwrap(); let n_notes = 64; let sk_data: Fr = rng.gen(); let pk = pubkey::<Bls12>(&sk_data, &JUBJUB_PARAMS); let notes = (0..n_notes).map(|_| rand_note(Some(Fr::zero()), None, None, None, Some(pk), &mut rng)).collect::<Vec<_>>(); let note_hashes = notes.iter().map(|n| note_hash::<Bls12>(n, &JUBJUB_PARAMS)).collect::<Vec<_>>(); let mut mt = MerkleTreeAccumulator::new(); mt.pushMany(&note_hashes); let i0 = rng.gen_range(0, n_notes); let i1 = rng.gen_range(0, n_notes-1); let indexes = [i0, if i1 < i0 {i1} else {i1+1}]; let indexes_bits = indexes.iter().map(|i| (0..PROOF_LENGTH).map(|j| (i>>j) & 1 == 1).collect::<Vec<_>>()); let mut cs = TestConstraintSystem::<Bls12>::new(); let in_note_data = indexes.iter().map(|&i| notes[i].clone()).collect::<Vec<_>>(); let in_note = in_note_data.iter().enumerate().map(|(i, note)| alloc_note_data(cs.namespace(|| format!("alloc in_note {}", i)), Some(note.clone())).unwrap()).collect::<Vec<_>>(); let in_proof = indexes.iter().zip(indexes_bits).map(|(&i, bits)| { let proof = mt.proof(i as usize).iter().zip(bits.iter()).map(|(&f, &b)| (f, b)).collect::<Vec<_>>(); alloc_proof_data(cs.namespace(|| format!("alloc in_proof {}", i)), Some(proof)).unwrap() }).collect::<Vec<_>>(); let all_amount = fr2big(notes[indexes[0]].amount.clone()) + fr2big(notes[indexes[1]].amount.clone()); let all_native_amount = fr2big(notes[indexes[0]].native_amount.clone()) + fr2big(notes[indexes[1]].native_amount.clone()); let all_amount_p1 = &all_amount/BigInt::from(7); let all_native_amount_p1 = &all_amount/BigInt::from(5); let all_amount_p2 = &all_amount/BigInt::from(5); let all_native_amount_p2 = &all_amount/BigInt::from(3); let out_note_data = [ rand_note(Some(Fr::zero()), Some(big2fr(&all_amount - &all_amount_p1 + &all_amount_p2)), Some(big2fr(&all_native_amount - &all_native_amount_p1 + &all_native_amount_p2)), None, None, &mut rng), rand_note(Some(Fr::zero()), Some(big2fr(all_amount_p1.clone())), Some(big2fr(all_native_amount_p1.clone())), None, None, &mut rng) ]; let out_note = out_note_data.iter().enumerate().map(|(i, note)| alloc_note_data(cs.namespace(|| format!("alloc out_note {}", i)), Some(note.clone())).unwrap()).collect::<Vec<_>>(); let sk = AllocatedNum::alloc(cs.namespace(|| "alloc sk"), || Ok(sk_data)).unwrap(); let packed_asset_bn = (all_native_amount_p2<< 128) + (all_amount_p2<< 64); let packed_asset = AllocatedNum::alloc(cs.namespace(|| "alloc packed_asset"), || Ok(big2fr(packed_asset_bn))).unwrap(); let root_hash = 
AllocatedNum::alloc(cs.namespace(|| "alloc root_hash"), || Ok(mt.root())).unwrap(); let (out_hash, nf) = transfer(cs.namespace(||"exec transfer"), &in_note, &in_proof, &out_note, &root_hash, &sk, &packed_asset, &JUBJUB_PARAMS).unwrap(); if !cs.is_satisfied() { let not_satisfied = cs.which_is_unsatisfied().unwrap_or(""); assert!(false, format!("Constraints not satisfied: {}", not_satisfied)); } let nf_computed = in_note_data.iter().map(|note| { let hash = crate::transactions::note_hash(note, &JUBJUB_PARAMS); crate::transactions::nullifier::<Bls12>(&hash, &sk_data, &JUBJUB_PARAMS) }); let out_hash_computed = out_note_data.iter().map(|note| crate::transactions::note_hash(note, &JUBJUB_PARAMS)); assert!(out_hash.iter().zip(out_hash_computed).all(|(a, b)| a.get_value().unwrap() == b), "out hashes should be the same"); assert!(nf.iter().zip(nf_computed).all(|(a, b)| a.get_value().unwrap() == b), "nullifiers should be the same"); } #[test] fn test_transaction_withdraw() { let mut rng = OsRng::new().unwrap(); let n_notes = 64; let sk_data: Fr = rng.gen(); let pk = pubkey::<Bls12>(&sk_data, &JUBJUB_PARAMS); let notes = (0..n_notes).map(|_| rand_note(Some(Fr::zero()), None, None, None, Some(pk), &mut rng)).collect::<Vec<_>>(); let note_hashes = notes.iter().map(|n| note_hash::<Bls12>(n, &JUBJUB_PARAMS)).collect::<Vec<_>>(); let mut mt = MerkleTreeAccumulator::new(); mt.pushMany(&note_hashes); let i0 = rng.gen_range(0, n_notes); let i1 = rng.gen_range(0, n_notes-1); let indexes = [i0, if i1 < i0 {i1} else {i1+1}]; let indexes_bits = indexes.iter().map(|i| (0..PROOF_LENGTH).map(|j| (i>>j) & 1 == 1).collect::<Vec<_>>()); let mut cs = TestConstraintSystem::<Bls12>::new(); let in_note_data = indexes.iter().map(|&i| notes[i].clone()).collect::<Vec<_>>(); let in_note = in_note_data.iter().enumerate().map(|(i, note)| alloc_note_data(cs.namespace(|| format!("alloc in_note {}", i)), Some(note.clone())).unwrap()).collect::<Vec<_>>(); let in_proof = indexes.iter().zip(indexes_bits).map(|(&i, bits)| { let proof = mt.proof(i as usize).iter().zip(bits.iter()).map(|(&f, &b)| (f, b)).collect::<Vec<_>>(); alloc_proof_data(cs.namespace(|| format!("alloc in_proof {}", i)), Some(proof)).unwrap() }).collect::<Vec<_>>(); let all_amount = fr2big(notes[indexes[0]].amount.clone()) + fr2big(notes[indexes[1]].amount.clone()); let all_native_amount = fr2big(notes[indexes[0]].native_amount.clone()) + fr2big(notes[indexes[1]].native_amount.clone()); let all_amount_p1 = &all_amount/BigInt::from(7); let all_native_amount_p1 = &all_amount/BigInt::from(5); let all_amount_p2 = &all_amount/BigInt::from(-5); let all_native_amount_p2 = &all_amount/BigInt::from(-3); let out_note_data = [ rand_note(Some(Fr::zero()), Some(big2fr(&all_amount - &all_amount_p1 + &all_amount_p2)), Some(big2fr(&all_native_amount - &all_native_amount_p1 + &all_native_amount_p2)), None, None, &mut rng), rand_note(Some(Fr::zero()), Some(big2fr(all_amount_p1.clone())), Some(big2fr(all_native_amount_p1.clone())), None, None, &mut rng) ]; let out_note = out_note_data.iter().enumerate().map(|(i, note)| alloc_note_data(cs.namespace(|| format!("alloc out_note {}", i)), Some(note.clone())).unwrap()).collect::<Vec<_>>(); let sk = AllocatedNum::alloc(cs.namespace(|| "alloc sk"), || Ok(sk_data)).unwrap(); let u64num = BigInt::from_str("18446744073709551616").unwrap(); let packed_asset_bn = ((&u64num+&all_native_amount_p2)<< 128) + ((&u64num+&all_amount_p2)<< 64); let packed_asset = AllocatedNum::alloc(cs.namespace(|| "alloc packed_asset"), || 
Ok(big2fr(packed_asset_bn))).unwrap(); let root_hash = AllocatedNum::alloc(cs.namespace(|| "alloc root_hash"), || Ok(mt.root())).unwrap(); let (out_hash, nf) = transfer(cs.namespace(||"exec transfer"), &in_note, &in_proof, &out_note, &root_hash, &sk, &packed_asset, &JUBJUB_PARAMS).unwrap(); if !cs.is_satisfied() { let not_satisfied = cs.which_is_unsatisfied().unwrap_or(""); assert!(false, format!("Constraints not satisfied: {}", not_satisfied)); } let nf_computed = in_note_data.iter().map(|note| { let hash = crate::transactions::note_hash(note, &JUBJUB_PARAMS); crate::transactions::nullifier::<Bls12>(&hash, &sk_data, &JUBJUB_PARAMS) }); let out_hash_computed = out_note_data.iter().map(|note| crate::transactions::note_hash(note, &JUBJUB_PARAMS)); assert!(out_hash.iter().zip(out_hash_computed).all(|(a, b)| a.get_value().unwrap() == b), "out hashes should be the same"); assert!(nf.iter().zip(nf_computed).all(|(a, b)| a.get_value().unwrap() == b), "nullifiers should be the same"); } #[test] fn test_nullifier() -> Result<(), SynthesisError> { let rng = &mut OsRng::new().unwrap(); let params = JubjubBls12::new(); let mut cs = TestConstraintSystem::<Bls12>::new(); let nh = rng.gen::<Fr>(); let sk = rng.gen::<Fr>(); let nf = crate::transactions::nullifier::<Bls12>(&nh, &sk, &params); let nh_a = AllocatedNum::alloc(cs.namespace(|| "var nh_a"), || Ok(nh))?; let sk_a = AllocatedNum::alloc(cs.namespace(|| "var sk_a"), || Ok(sk))?; let sk_bits = sk_a.into_bits_le_strict(cs.namespace(|| "var sk_bits"))?; let nf_a = nullifier(&mut cs, &nh_a, &sk_bits, &params)?; if !cs.is_satisfied() { let not_satisfied = cs.which_is_unsatisfied().unwrap_or(""); assert!(false, format!("Constraints not satisfied: {}", not_satisfied)); } assert!(nf_a.get_value().unwrap() == nf, "Nf value should be the same"); Ok(()) }
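// A small sketch (not in the original suite) of the accumulator's fallback
// behaviour: every cell outside the stored region reads from MERKLE_DEFAULTS,
// so an empty tree's root is simply the precomputed default for the top row.
#[test]
fn empty_accumulator_root_is_default() {
    let mt = MerkleTreeAccumulator::new();
    assert!(
        mt.root() == MERKLE_DEFAULTS[PROOF_LENGTH],
        "empty root should equal the row-{} default",
        PROOF_LENGTH
    );
}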
38.461894
201
0.611325
012c8709bd4a64362a5489f5806816749c4071a3
365
// Note: If you change this test, change 'overlapping_marker_traits.rs' at the same time.

use std::marker::PhantomPinned;
use pin_project::pin_project;

#[pin_project] //~ ERROR E0119
struct Struct<T> {
    #[pin]
    f: T,
}

// unsound Unpin impl
impl<T> Unpin for Struct<T> {}

fn is_unpin<T: Unpin>() {}

fn main() {
    is_unpin::<Struct<PhantomPinned>>()
}
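// Why this is expected to fail (sketch): `#[pin_project]` generates its own
// conditional `Unpin` impl for `Struct<T>` (Unpin only when the pinned field
// is Unpin), so the unconditional `impl<T> Unpin for Struct<T>` above is a
// second, overlapping impl of the same trait, which is exactly the E0119
// the annotation expects.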
17.380952
89
0.665753
f5f6bef9fdc291703f222118b85f01d2ff667b93
6,388
//! Tests auto-converted from "sass-spec/spec/non_conformant/parser/operations/division/strings/pairs.hrx"

#[test]
fn test() {
    assert_eq!(
        crate::rsass(
            "foo {\
            \n  test-1: literal/literal;\
            \n  test-2: literal /literal;\
            \n  test-3: literal/ literal;\
            \n  test-4: literal / literal;\
            \n  test-5: literal/\"quoted\";\
            \n  test-6: literal /\"quoted\";\
            \n  test-7: literal/ \"quoted\";\
            \n  test-8: literal / \"quoted\";\
            \n  test-9: literal/#{interpolant};\
            \n  test-10: literal /#{interpolant};\
            \n  test-11: literal/ #{interpolant};\
            \n  test-12: literal / #{interpolant};\
            \n  test-13: literal/lschema_#{ritlp};\
            \n  test-14: literal /lschema_#{ritlp};\
            \n  test-15: literal/ lschema_#{ritlp};\
            \n  test-16: literal / lschema_#{ritlp};\
            \n  test-17: literal/#{litlp}_rschema;\
            \n  test-18: literal /#{litlp}_rschema;\
            \n  test-19: literal/ #{litlp}_rschema;\
            \n  test-20: literal / #{litlp}_rschema;\
            \n  test-21: \"quoted\"/\"quoted\";\
            \n  test-22: \"quoted\" /\"quoted\";\
            \n  test-23: \"quoted\"/ \"quoted\";\
            \n  test-24: \"quoted\" / \"quoted\";\
            \n  test-25: \"quoted\"/#{interpolant};\
            \n  test-26: \"quoted\" /#{interpolant};\
            \n  test-27: \"quoted\"/ #{interpolant};\
            \n  test-28: \"quoted\" / #{interpolant};\
            \n  test-29: \"quoted\"/lschema_#{ritlp};\
            \n  test-30: \"quoted\" /lschema_#{ritlp};\
            \n  test-31: \"quoted\"/ lschema_#{ritlp};\
            \n  test-32: \"quoted\" / lschema_#{ritlp};\
            \n  test-33: \"quoted\"/#{litlp}_rschema;\
            \n  test-34: \"quoted\" /#{litlp}_rschema;\
            \n  test-35: \"quoted\"/ #{litlp}_rschema;\
            \n  test-36: \"quoted\" / #{litlp}_rschema;\
            \n  test-37: #{interpolant}/#{interpolant};\
            \n  test-38: #{interpolant} /#{interpolant};\
            \n  test-39: #{interpolant}/ #{interpolant};\
            \n  test-40: #{interpolant} / #{interpolant};\
            \n  test-41: #{interpolant}/lschema_#{ritlp};\
            \n  test-42: #{interpolant} /lschema_#{ritlp};\
            \n  test-43: #{interpolant}/ lschema_#{ritlp};\
            \n  test-44: #{interpolant} / lschema_#{ritlp};\
            \n  test-45: #{interpolant}/#{litlp}_rschema;\
            \n  test-46: #{interpolant} /#{litlp}_rschema;\
            \n  test-47: #{interpolant}/ #{litlp}_rschema;\
            \n  test-48: #{interpolant} / #{litlp}_rschema;\
            \n  test-49: lschema_#{ritlp}/lschema_#{ritlp};\
            \n  test-50: lschema_#{ritlp} /lschema_#{ritlp};\
            \n  test-51: lschema_#{ritlp}/ lschema_#{ritlp};\
            \n  test-52: lschema_#{ritlp} / lschema_#{ritlp};\
            \n  test-53: lschema_#{ritlp}/#{litlp}_rschema;\
            \n  test-54: lschema_#{ritlp} /#{litlp}_rschema;\
            \n  test-55: lschema_#{ritlp}/ #{litlp}_rschema;\
            \n  test-56: lschema_#{ritlp} / #{litlp}_rschema;\
            \n  test-57: #{litlp}_rschema/#{litlp}_rschema;\
            \n  test-58: #{litlp}_rschema /#{litlp}_rschema;\
            \n  test-59: #{litlp}_rschema/ #{litlp}_rschema;\
            \n  test-60: #{litlp}_rschema / #{litlp}_rschema;\
            \n}\
            \n"
        )
        .unwrap(),
        "foo {\
        \n  test-1: literal/literal;\
        \n  test-2: literal/literal;\
        \n  test-3: literal/literal;\
        \n  test-4: literal/literal;\
        \n  test-5: literal/\"quoted\";\
        \n  test-6: literal/\"quoted\";\
        \n  test-7: literal/\"quoted\";\
        \n  test-8: literal/\"quoted\";\
        \n  test-9: literal/interpolant;\
        \n  test-10: literal/interpolant;\
        \n  test-11: literal/interpolant;\
        \n  test-12: literal/interpolant;\
        \n  test-13: literal/lschema_ritlp;\
        \n  test-14: literal/lschema_ritlp;\
        \n  test-15: literal/lschema_ritlp;\
        \n  test-16: literal/lschema_ritlp;\
        \n  test-17: literal/litlp_rschema;\
        \n  test-18: literal/litlp_rschema;\
        \n  test-19: literal/litlp_rschema;\
        \n  test-20: literal/litlp_rschema;\
        \n  test-21: \"quoted\"/\"quoted\";\
        \n  test-22: \"quoted\"/\"quoted\";\
        \n  test-23: \"quoted\"/\"quoted\";\
        \n  test-24: \"quoted\"/\"quoted\";\
        \n  test-25: \"quoted\"/interpolant;\
        \n  test-26: \"quoted\"/interpolant;\
        \n  test-27: \"quoted\"/interpolant;\
        \n  test-28: \"quoted\"/interpolant;\
        \n  test-29: \"quoted\"/lschema_ritlp;\
        \n  test-30: \"quoted\"/lschema_ritlp;\
        \n  test-31: \"quoted\"/lschema_ritlp;\
        \n  test-32: \"quoted\"/lschema_ritlp;\
        \n  test-33: \"quoted\"/litlp_rschema;\
        \n  test-34: \"quoted\"/litlp_rschema;\
        \n  test-35: \"quoted\"/litlp_rschema;\
        \n  test-36: \"quoted\"/litlp_rschema;\
        \n  test-37: interpolant/interpolant;\
        \n  test-38: interpolant/interpolant;\
        \n  test-39: interpolant/interpolant;\
        \n  test-40: interpolant/interpolant;\
        \n  test-41: interpolant/lschema_ritlp;\
        \n  test-42: interpolant/lschema_ritlp;\
        \n  test-43: interpolant/lschema_ritlp;\
        \n  test-44: interpolant/lschema_ritlp;\
        \n  test-45: interpolant/litlp_rschema;\
        \n  test-46: interpolant/litlp_rschema;\
        \n  test-47: interpolant/litlp_rschema;\
        \n  test-48: interpolant/litlp_rschema;\
        \n  test-49: lschema_ritlp/lschema_ritlp;\
        \n  test-50: lschema_ritlp/lschema_ritlp;\
        \n  test-51: lschema_ritlp/lschema_ritlp;\
        \n  test-52: lschema_ritlp/lschema_ritlp;\
        \n  test-53: lschema_ritlp/litlp_rschema;\
        \n  test-54: lschema_ritlp/litlp_rschema;\
        \n  test-55: lschema_ritlp/litlp_rschema;\
        \n  test-56: lschema_ritlp/litlp_rschema;\
        \n  test-57: litlp_rschema/litlp_rschema;\
        \n  test-58: litlp_rschema/litlp_rschema;\
        \n  test-59: litlp_rschema/litlp_rschema;\
        \n  test-60: litlp_rschema/litlp_rschema;\
        \n}\
        \n"
    );
}
46.627737
106
0.525047
729765c401afb23c02f5a88a735f339f15c5804f
20,042
/// Only allow processing this many inputs in a domain before we handle timer events, acks, etc. const FORCE_INPUT_YIELD_EVERY: usize = 64; use super::ChannelCoordinator; use crate::coordination::CoordinationPayload; use async_bincode::AsyncDestination; use bincode; use bufstream::BufStream; use dataflow::{ payload::SourceChannelIdentifier, prelude::{DataType, Executor}, Domain, Packet, PollEvent, ProcessResult, }; use failure::{self, ResultExt}; use fnv::{FnvHashMap, FnvHashSet}; use futures::stream::futures_unordered::FuturesUnordered; use futures::{self, Future, Sink, Stream}; use noria::channel::{DualTcpStream, CONNECTION_FROM_BASE}; use noria::internal::DomainIndex; use noria::internal::LocalOrNot; use noria::{Input, Tagged}; use slog; use std::collections::{HashMap, VecDeque}; use std::io; use std::sync::Arc; use stream_cancel::{Valve, Valved}; use streamunordered::{StreamUnordered, StreamYield}; use tokio; use tokio::prelude::*; pub(super) type ReplicaIndex = (DomainIndex, usize); pub(super) struct Replica { domain: Domain, log: slog::Logger, coord: Arc<ChannelCoordinator>, retry: Option<Box<Packet>>, incoming: Valved<tokio::net::tcp::Incoming>, first_byte: FuturesUnordered<tokio::io::ReadExact<tokio::net::tcp::TcpStream, Vec<u8>>>, locals: tokio_sync::mpsc::UnboundedReceiver<Box<Packet>>, inputs: StreamUnordered< DualTcpStream< BufStream<tokio::net::TcpStream>, Box<Packet>, Tagged<LocalOrNot<Input>>, AsyncDestination, >, >, outputs: FnvHashMap< ReplicaIndex, ( Box<dyn Sink<SinkItem = Box<Packet>, SinkError = bincode::Error> + Send>, bool, ), >, outbox: FnvHashMap<ReplicaIndex, VecDeque<Box<Packet>>>, timeout: Option<tokio_os_timer::Delay>, oob: OutOfBand, } impl Replica { pub(super) fn new( valve: &Valve, mut domain: Domain, on: tokio::net::TcpListener, locals: tokio_sync::mpsc::UnboundedReceiver<Box<Packet>>, ctrl_tx: futures::sync::mpsc::UnboundedSender<CoordinationPayload>, log: slog::Logger, cc: Arc<ChannelCoordinator>, ) -> Self { let id = domain.id(); let id = format!("{}.{}", id.0.index(), id.1); domain.booted(on.local_addr().unwrap()); Replica { coord: cc, domain, retry: None, incoming: valve.wrap(on.incoming()), first_byte: FuturesUnordered::new(), locals, log: log.new(o! {"id" => id}), inputs: Default::default(), outputs: Default::default(), outbox: Default::default(), oob: OutOfBand::new(ctrl_tx), timeout: None, } } fn try_oob(&mut self) -> Result<(), failure::Error> { let inputs = &mut self.inputs; let pending = &mut self.oob.pending; // first, queue up any additional writes we have to do let mut err = Vec::new(); self.oob.back.retain(|&streami, tags| { let stream = &mut inputs[streami]; let had = tags.len(); tags.retain(|&tag| { match stream.start_send(Tagged { tag, v: () }) { Ok(AsyncSink::Ready) => false, Ok(AsyncSink::NotReady(_)) => { // TODO: also break? 
true } Err(e) => { // start_send shouldn't generally error err.push(e.into()); true } } }); if had != tags.len() { pending.insert(streami); } !tags.is_empty() }); if !err.is_empty() { return Err(err.swap_remove(0)); } // then, try to send on any streams we may be able to pending.retain(|&streami| { let stream = &mut inputs[streami]; match stream.poll_complete() { Ok(Async::Ready(())) => false, Ok(Async::NotReady) => true, Err(box bincode::ErrorKind::Io(e)) => { match e.kind() { io::ErrorKind::BrokenPipe | io::ErrorKind::NotConnected | io::ErrorKind::UnexpectedEof | io::ErrorKind::ConnectionAborted | io::ErrorKind::ConnectionReset => { // connection went away, no need to try more false } _ => { err.push(e.into()); true } } } Err(e) => { err.push(e.into()); true } } }); if !err.is_empty() { return Err(err.swap_remove(0)); } Ok(()) } fn try_flush(&mut self) -> Result<(), failure::Error> { let cc = &self.coord; let outputs = &mut self.outputs; // just like in try_oob: // first, queue up any additional writes we have to do let mut err = Vec::new(); for (&ri, ms) in &mut self.outbox { if ms.is_empty() { continue; } let &mut (ref mut tx, ref mut pending) = outputs.entry(ri).or_insert_with(|| { while !cc.has(&ri) {} let tx = cc.builder_for(&ri).unwrap().build_async().unwrap(); (tx, true) }); while let Some(m) = ms.pop_front() { match tx.start_send(m) { Ok(AsyncSink::Ready) => { // we queued something, so we'll need to send! *pending = true; } Ok(AsyncSink::NotReady(m)) => { // put back the m we tried to send ms.push_front(m); // there's also no use in trying to enqueue more packets break; } Err(e) => { err.push(e); break; } } } } if !err.is_empty() { return Err(err.swap_remove(0).into()); } // then, try to do any sends that are still pending for &mut (ref mut tx, ref mut pending) in outputs.values_mut() { if !*pending { continue; } match tx.poll_complete() { Ok(Async::Ready(())) => { *pending = false; } Ok(Async::NotReady) => {} Err(e) => err.push(e), } } if !err.is_empty() { return Err(err.swap_remove(0).into()); } Ok(()) } fn try_new(&mut self) -> io::Result<bool> { while let Async::Ready(stream) = self.incoming.poll()? { match stream { Some(stream) => { // we know that any new connection to a domain will first send a one-byte // token to indicate whether the connection is from a base or not. debug!(self.log, "accepted new connection"; "from" => ?stream.peer_addr().unwrap()); self.first_byte .push(tokio::io::read_exact(stream, vec![0; 1])); } None => { return Ok(false); } } } while let Async::Ready(Some((stream, tag))) = self.first_byte.poll()? { let is_base = tag[0] == CONNECTION_FROM_BASE; debug!(self.log, "established new connection"; "base" => ?is_base); let slot = self.inputs.stream_slot(); let token = slot.token(); if let Err(e) = stream.set_nodelay(true) { warn!(self.log, "failed to set TCP_NODELAY for new connection: {:?}", e; "from" => ?stream.peer_addr().unwrap()); } let tcp = if is_base { DualTcpStream::upgrade(BufStream::new(stream), move |Tagged { v: input, tag }| { Box::new(Packet::Input { inner: input, src: Some(SourceChannelIdentifier { token, tag }), senders: Vec::new(), }) }) } else { BufStream::with_capacities(2 * 1024 * 1024, 4 * 1024, stream).into() }; slot.insert(tcp); } Ok(true) } fn try_timeout(&mut self) -> Poll<(), io::Error> { if let Some(mut to) = self.timeout.take() { if let Async::Ready(()) = to.poll()? 
{ crate::block_on(|| { self.domain .on_event(&mut self.oob, PollEvent::Timeout, &mut self.outbox) }); return Ok(Async::Ready(())); } else { self.timeout = Some(to); } } Ok(Async::NotReady) } } struct OutOfBand { // map from inputi to number of (empty) ACKs back: FnvHashMap<usize, Vec<u32>>, pending: FnvHashSet<usize>, // for sending messages to the controller ctrl_tx: futures::sync::mpsc::UnboundedSender<CoordinationPayload>, } impl OutOfBand { fn new(ctrl_tx: futures::sync::mpsc::UnboundedSender<CoordinationPayload>) -> Self { OutOfBand { back: Default::default(), pending: Default::default(), ctrl_tx, } } } impl Executor for OutOfBand { fn ack(&mut self, id: SourceChannelIdentifier) { self.back.entry(id.token).or_default().push(id.tag); } fn create_universe(&mut self, universe: HashMap<String, DataType>) { self.ctrl_tx .unbounded_send(CoordinationPayload::CreateUniverse(universe)) .expect("asked to send to controller, but controller has gone away"); } } impl Future for Replica { type Item = (); type Error = (); fn poll(&mut self) -> Result<Async<Self::Item>, Self::Error> { let r: Result<Async<Self::Item>, failure::Error> = try { loop { // FIXME: check if we should call update_state_sizes (every evict_every) // are there are any new connections? if !self.try_new().context("check for new connections")? { // incoming socket closed -- no more clients will arrive return Ok(Async::Ready(())); } // have any of our timers expired? self.try_timeout().context("check timeout")?; // we have three logical input sources: receives from local domains, receives from // remote domains, and remote mutators. we want to achieve some kind of fairness among // these, but bias the data-flow towards finishing work it has accepted (i.e., domain // operations) to accepting new work. however, this is complicated by two facts: // // - we cannot (currently) differentiate between receives from remote domains and // receives from mutators. they are all remote tcp channels. we *could* // differentiate them using `is_base` in `try_new` to store them separately, but // it's also unclear how that would change the receive heuristic. // - domain operations are not all "completing starting work". in many cases, traffic // from domains will be replay-related, in which case favoring domains would favor // writes over reads. while we do in general want reads to be fast, we don't want // them to fully starve writes. // // the current stategy is therefore that we alternate reading once from the local // channel and once from the set of remote channels. this biases slightly in favor of // local sends, without starving either. we also stop alternating once either source is // depleted. let mut local_done = false; let mut remote_done = false; let mut check_local = true; let readiness = 'ready: loop { let d = &mut self.domain; let oob = &mut self.oob; let ob = &mut self.outbox; macro_rules! process { ($retry:expr, $p:expr, $pp:expr) => {{ $retry = Some($p); let retry = &mut $retry; match tokio_threadpool::blocking(|| $pp(retry.take().unwrap())) { Ok(Async::Ready(ProcessResult::StopPolling)) => { // domain got a message to quit // TODO: should we finish up remaining work? 
return Ok(Async::Ready(())); } Ok(Async::Ready(_)) => {} Ok(Async::NotReady) => { // NOTE: the packet is still left in $retry, so we'll try again break 'ready Async::NotReady; } Err(e) => { unreachable!("trying to block without tokio runtime: {:?}", e) } } }}; } let mut interrupted = false; if let Some(p) = self.retry.take() { // first try the thing we failed to process last time again process!(self.retry, p, |p| d.on_event( oob, PollEvent::Process(p), ob )); } for i in 0..FORCE_INPUT_YIELD_EVERY { if !local_done && (check_local || remote_done) { match self.locals.poll() { Ok(Async::Ready(Some(packet))) => process!( self.retry, packet, |p| d.on_event(oob, PollEvent::Process(p), ob) ), Ok(Async::Ready(None)) => { // local input stream finished? // TODO: should we finish up remaining work? return Ok(Async::Ready(())); } Ok(Async::NotReady) => { local_done = true; } Err(e) => { error!(self.log, "local input stream failed: {:?}", e); local_done = true; break; } } } if !remote_done && (!check_local || local_done) { match self.inputs.poll() { Ok(Async::Ready(Some((StreamYield::Item(packet), _)))) => process!( self.retry, packet, |p| d.on_event(oob, PollEvent::Process(p), ob) ), Ok(Async::Ready(Some(( StreamYield::Finished(_stream), streami, )))) => { oob.back.remove(&streami); oob.pending.remove(&streami); // FIXME: what about if a later flush flushes to this stream? } Ok(Async::Ready(None)) => { // we probably haven't booted yet remote_done = true; } Ok(Async::NotReady) => { remote_done = true; } Err(e) => { error!(self.log, "input stream failed: {:?}", e); remote_done = true; break; } } } // alternate between input sources check_local = !check_local; // nothing more to do -- wait to be polled again if local_done && remote_done { break; } if i == FORCE_INPUT_YIELD_EVERY - 1 { // we could keep processing inputs, but make sure we send some ACKs too! interrupted = true; } } // send to downstream // TODO: send fail == exiting? self.try_flush().context("downstream flush (after)")?; // send acks self.try_oob()?; if interrupted { // resume reading from our non-depleted inputs continue; } break Async::NotReady; }; // check if we now need to set a timeout match self.domain.on_event( &mut self.oob, PollEvent::ResumePolling, &mut self.outbox, ) { ProcessResult::KeepPolling(timeout) => { if let Some(timeout) = timeout { // tokio-timer has a resolution of 1ms, so we can't use it :'( // TODO: how about we don't create a new timer each time? self.timeout = Some(tokio_os_timer::Delay::new(timeout).unwrap()); // we need to poll the timer to ensure we'll get woken up if let Async::Ready(()) = self.try_timeout().context("check timeout after setting")? { // the timer expired and we did some stuff // make sure we don't return while there's more work to do task::current().notify(); } } } pr => { // TODO: just have resume_polling be a method... unreachable!("unexpected ResumePolling result {:?}", pr) } } break readiness; } }; match r { Ok(k) => Ok(k), Err(e) => { crit!(self.log, "replica failure: {:?}", e); Err(()) } } } }
38.765957
104
0.434188
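The most intricate part of the record above is the poll loop's fairness heuristic: it alternates between the local channel and the remote channels, and forces a yield every FORCE_INPUT_YIELD_EVERY inputs so ACKs and timer events still get serviced. Below is a minimal synchronous sketch of that heuristic; the names and the Vec-based sources are illustrative stand-ins, not noria's API.

// A minimal, synchronous sketch (illustrative names, not noria's API) of
// the fairness heuristic in the record above: alternate between a local
// and a remote input source, and force a yield every N inputs so that
// ACKs and timer events are not starved by a busy input stream.
const FORCE_YIELD_EVERY: usize = 4;

fn drain_fairly(local: &mut Vec<u32>, remote: &mut Vec<u32>) -> Vec<u32> {
    let mut processed = Vec::new();
    let mut check_local = true;
    loop {
        for _ in 0..FORCE_YIELD_EVERY {
            let local_done = local.is_empty();
            let remote_done = remote.is_empty();
            if local_done && remote_done {
                return processed; // both sources depleted
            }
            // the bias alternates each turn, falling back to whichever
            // source still has input once the other is depleted
            let src = if (check_local || remote_done) && !local_done {
                &mut *local
            } else {
                &mut *remote
            };
            processed.push(src.remove(0));
            check_local = !check_local;
        }
        // in the real event loop this is where pending ACKs would be
        // flushed before resuming input processing
    }
}

fn main() {
    let mut local = vec![1, 2, 3];
    let mut remote = vec![10, 20, 30, 40];
    assert_eq!(
        drain_fairly(&mut local, &mut remote),
        vec![1, 10, 2, 20, 3, 30, 40]
    );
}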
18cb700ce2a17e9c1e4131b3944e7077f9234774
22,046
use serde::{Deserialize, Serialize}; use url::{Url}; #[cfg(feature = "async")] use async_trait::async_trait; use std::fmt; use std::fmt::Formatter; use crate::schema; use crate::error; /// Contains the information required to register a producer with a Conductor server. #[derive(Debug, Clone, Deserialize, Serialize)] pub struct Registration { name: String, schema: schema::Schema, use_custom_id: Option<String>, // this is to support devices without persistent storage such as an arduino. They can have a custom id } impl Registration { #[must_use] pub const fn new(name: String, schema: schema::Schema, custom_id: Option<String>) -> Self { Self { name, schema, use_custom_id: custom_id, } } /// Create a new instance of Registration with an empty schema. #[must_use] pub fn new_empty(name: String, custom_id: Option<String>) -> Self { Self { name, schema: std::collections::HashMap::default(), use_custom_id: custom_id, } } /// Get the name of the producer #[must_use] pub fn get_name(&self) -> &str { &self.name } /// returns true if a uuid has been set. #[must_use] pub const fn has_custom_id(&self) -> bool { self.use_custom_id.is_some() } #[must_use] pub fn get_custom_id(&self) -> Option<&str> { if let Some(c_id) = &self.use_custom_id { return Some(c_id.as_str()); } None } #[must_use] pub fn contains_column(&self, column_name: &str) -> bool { self.schema.contains_key(column_name) } #[must_use] pub fn schema_len(&self) -> usize { self.schema.len() } #[must_use] pub const fn get_schema(&self) -> &schema::Schema { &self.schema } } ///The response from the Conductor instance after a registration attempt #[derive(Debug, Clone, Deserialize, Serialize)] pub struct RegistrationResult { pub error: error::ConductorError, pub uuid: Option<String>, } /// A new data packet to be sent to the Conductor instance #[derive(Debug, Clone, Deserialize, Serialize)] pub struct Emit<'a, T> { uuid: &'a str, timestamp: Option<u64>, data: T, } impl<'a, T> Emit<'a, T> { #[must_use] pub const fn new(uuid: &'a str, timestamp: Option<u64>, data: T) -> Self { Self { uuid, timestamp, data, } } #[must_use] pub const fn get_uuid(&self) -> &str { self.uuid } #[must_use] pub const fn get_timestamp(&self) -> Option<u64> { self.timestamp } #[must_use] pub const fn get_data(&self) -> &T { &self.data } } #[derive(Debug, Clone, Deserialize, Serialize)] pub struct EmitResult { pub error: error::ConductorError, } /// All the errors that can be produced by a producer #[derive(Debug)] pub enum Error { /// The domain given for the conductor instance is invalid in some way InvalidConductorDomain(String), /// Indicates a failure to serialize a struct to message pack. Contains rmp_serde encoding error MsgPackSerialisationFailure(rmp_serde::encode::Error), /// Indicates a failure to serialize a struct to json. Contains serde_json error type JsonSerialisationFailure(serde_json::Error), /// Indicates a failure to serialize a struct. Contains the error given by the serializer. GenericSerialisationFailure(Box<dyn std::error::Error>), /// Indicates an error which was emitted from the Conductor server (Internal Server Error) ConductorError(error::ConductorError), /// Indicates an issue with the network layer. Contains the reqwest error type NetworkError(reqwest::Error), /// Indicates a failure to deserialize a struct from message pack. Contains rmp_serde decoding error MsgPackDeserializationFailure(rmp_serde::decode::Error), /// Indicates a failure to deserialize a struct from json. 
Contains serde_json error type JsonDeserializationFailure(serde_json::Error), /// Indicates a failure to deserialize a struct. Contains the error given by the serializer. GenericDeserializationFailure(Box<dyn std::error::Error>), } impl std::error::Error for Error {} impl fmt::Display for Error { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { Error::InvalidConductorDomain(message) => write!(f, "InvalidConductorDomain: {}", message), Error::MsgPackSerialisationFailure(encode_error) => write!(f, "MsgPackSerialisationFailure: {}", encode_error), Error::ConductorError(ce) => write!(f, "ConductorError: {}", ce), Error::NetworkError(re) => write!(f, "NetworkError: {}", re), Error::MsgPackDeserializationFailure(decode_error) => write!(f, "MsgPackDeserializationFailure: {}", decode_error), Error::JsonSerialisationFailure(encode_error) => write!(f, "JsonSerialisationFailure: {}", encode_error), Error::GenericSerialisationFailure(encode_error) => write!(f, "GenericSerialisationFailure: {}", encode_error), Error::JsonDeserializationFailure(decode_error) => write!(f, "JsonDeserializationFailure: {}", decode_error), Error::GenericDeserializationFailure(decode_error) => write!(f, "GenericDeserializationFailure: {}", decode_error), } } } /// /// Provides functionality that is shared between both the async and blocking versions of the Producer trait. /// Prepares and processes conductor requests and responses. /// pub trait Base: Serialize + Clone + crate::schema::ConductorSchema { /// /// Prepares a payload for emitting data. This function doesn't send the payload. /// /// # Arguments /// /// * `uuid`: The unique ID of this producer. /// * `conductor_domain`: The url of the conductor instance. /// /// # Errors /// /// * `InvalidConductorDomain`: Produced when the conductor domain is an invalid url. /// * `SerialisationFailure`: Produced when the emit payload cannot be serialised to the message pack format. This is most likely /// due to a difficulty serialising Self using serde. /// # Example /// /// ``` /// use std::collections::HashMap; /// use conductor_common::producer::Base; /// use conductor_common::schema; /// #[derive(Clone, Serialize)] /// struct Measurement { /// data_point:u8 /// } /// impl schema::ConductorSchema for Measurement { /// fn generate_schema() -> HashMap<String, schema::DataTypes> { /// unimplemented!("Not needed for example/test"); /// } /// } /// impl Base for Measurement {} /// let m = Measurement { /// data_point: 10 /// }; /// let expected:Vec<u8> = vec![3,4,5]; /// assert_eq!(m, expected); /// ``` fn generate_emit_data(&self, uuid: &str, conductor_domain: Url) -> Result<(Vec<u8>, Url), Error> { let url = match conductor_domain.join("/v1/producer/emit") { Ok(u) => u, Err(err) => return Err(Error::InvalidConductorDomain(format!("The conductor domain was invalid. {}", err))) }; let emit: Emit<Self> = Emit { uuid, timestamp: None, data: self.clone(), }; let payload = match rmp_serde::to_vec_named(&emit) { Ok(p) => p, Err(err) => { return Err(Error::MsgPackSerialisationFailure(err)); } }; Ok((payload, url)) } /// /// Prepares the payload used for registration. Registration is not done by this function. /// /// # Arguments /// /// * `name`: The name of the producer. /// This doesn't need to be unique in a Conductor network although it may be helpful to you if it is. /// * `uuid`: The unique ID string to identify this producer. If it's none one will be generated by the /// Conductor server and returned to us. Most of the time you'll want to leave this as None. 
/// * `conductor_domain`: The url of the conductor instance. /// ///# Errors /// /// * `InvalidConductorDomain`: Produced when the conductor domain is an invalid url. /// * `MsgPackSerialisationFailure`: Produced when the emit payload cannot be serialised to the message pack format. /// /// # Example /// /// ``` /// use std::collections::HashMap; /// use conductor_common::producer::Base; /// use conductor_common::schema; /// #[derive(Clone, Serialize)] /// struct Measurement { /// data_point:u8 /// } /// impl schema::ConductorSchema for Measurement { /// fn generate_schema() -> HashMap<String, schema::DataTypes> { /// unimplemented!("Not needed for example/test"); /// } /// } /// impl Base for Measurement {} /// let m = Measurement { /// data_point: 10 /// }; /// let expected:Vec<u8> = vec![3,4,5]; /// assert_eq!(m, expected); /// ``` /// fn prepare_registration_data(name: &str, uuid: Option<String>, conductor_domain: Url) -> Result<(Vec<u8>, Url), Error> { let url = match conductor_domain.join("/v1/producer/register") { Ok(u) => u, Err(err) => return Err(Error::InvalidConductorDomain(format!("The conductor domain was invalid. {}", err))) }; let reg = Registration { name: name.to_string(), schema: Self::generate_schema(), use_custom_id: uuid, }; let payload = match rmp_serde::to_vec_named(&reg) { Ok(m) => m, Err(err) => { return Err(Error::MsgPackSerialisationFailure(err)); } }; Ok((payload, url)) } } /// /// Provides functions to add Conductor interactions to a type. Turns the implementing type into /// a Conductor Producer. This version of the trait provides a Asynchronous version of the functions. /// Refer to `conductor::producer::Producer` for the blocking version. /// /// This should not be implemented directly in most cases. /// Instead use the `#[derive(conductor::Producer)]` macro to generate everything for you. /// #[cfg(feature = "async")] #[async_trait] #[allow(clippy::module_name_repetitions)] pub trait AsyncProducer: Base { /// Async send a new data packet to the conductor server. /// Messagepack is used as the format over the wire. /// /// # Arguments /// /// * `uuid`: The unique id of this producer which was registered with conductor. /// * `conductor_domain`: The url of the conductor instance. /// /// # Errors /// * `InvalidConductorDomain`: Produced when the conductor domain is an invalid url. /// * `MsgPackSerialisationFailure`: Produced when the emit payload cannot be serialised to the message pack format. This is most likely /// due to a difficulty serialising Self using serde. /// * `NetworkError`: Produced when the http post fails for any reason. Holds the Reqwest Error Struct. /// * `MsgPackDeserializationFailure`: Produced when the emit response couldn't be deserialized from message pack. Holds the /// rmp_serde Error struct. /// * `ConductorError`: Produced when there was an error on the server. 
/// async fn emit(&self, uuid: &str, conductor_domain: Url) -> Result<(), Error> { let (payload, url) = self.generate_emit_data(uuid, conductor_domain)?; //start async specific let client = reqwest::Client::new(); let request_resp = client.post(url) .body(payload) .header(reqwest::header::CONTENT_TYPE, reqwest::header::HeaderValue::from_static("application/msgpack")) .send().await; let response = match request_resp { Ok(r) => r, Err(err) => return Err(Error::NetworkError(err)) }; let result: EmitResult = match rmp_serde::from_read_ref(response.bytes().await.unwrap().as_ref()) { Ok(r) => r, Err(err) => return Err(Error::MsgPackDeserializationFailure(err)) }; //end async specific code if result.error == error::ConductorError::NoError { return Ok(()); } Err(Error::ConductorError(result.error)) } /// Generates the schema for this struct and register it with conductor asynchronously. /// /// # Arguments /// /// * `name`: A human friendly name for this producer. This isn't important to conductor and doesn't have to be unique. /// It's stored in the DB and can be useful to identify the producer. And empty string is valid but not recommended. /// * `uuid`: An optional unique ID which will be used to identify this producer. If this is set to None one is generated automatically by /// Conductor. It's recommended to leave this as null and let the server generate the ID. /// * `conductor_domain`: The url of the conductor instance. /// /// # Errors /// * `InvalidConductorDomain`: Produced when the conductor domain is an invalid url. /// * `MsgPackSerialisationFailure`: Produced when the emit payload cannot be serialised to the message pack format. This is most likely /// due to a difficulty serialising Self using serde. /// * `NetworkError`: Produced when the http post fails for any reason. Holds the Reqwest Error Struct. /// * `MsgPackDeserializationFailure`: Produced when the emit response couldn't be deserialized from message pack. Holds the /// rmp_serde Error struct. /// * `ConductorError`: Produced when there was an error on the server. /// async fn register(name: &str, uuid: Option<String>, conductor_domain: Url) -> Result<String, Error> { //TODO handle errors correctly let (payload, url) = Self::prepare_registration_data(name, uuid, conductor_domain)?; let client = reqwest::Client::new(); let request = client.post(url) .body(payload) .header(reqwest::header::CONTENT_TYPE, reqwest::header::HeaderValue::from_static("application/msgpack")) .send().await; let response = match request { Ok(r) => r, Err(err) => return Err(Error::NetworkError(err)) }; let result: RegistrationResult = match rmp_serde::from_read_ref(response.bytes().await.unwrap().as_ref()) { Ok(r) => r, Err(err) => return Err(Error::MsgPackDeserializationFailure(err)) }; if result.error != error::ConductorError::NoError { return Err(Error::ConductorError(result.error)); } Ok(result.uuid.unwrap()) } /// /// Asynchronously checks to see if the UUID has been registered with Conductor. /// This does not verify that the schema registered with the server is correct. /// /// # Arguments /// /// * `uuid`: The unique id of this producer which was registered with conductor. /// * `conductor_domain`: The url of the conductor instance. /// /// # Errors /// * `InvalidConductorDomain`: Produced when the conductor domain is an invalid url. /// * `NetworkError`: Produced when the http get fails for any reason. Holds the Reqwest Error Struct. 
/// async fn is_registered(uuid: &str, conductor_domain: Url) -> Result<bool, Error> { let url = match conductor_domain.join("/v1/producer/check") { Ok(u) => u, Err(err) => return Err(Error::InvalidConductorDomain(format!("The conductor domain was invalid. {}", err))) }; let params = [("uuid", uuid)]; let client = reqwest::Client::new(); match client.get(url).query(&params).send().await { Ok(response) => { Ok(response.status().is_success()) } Err(err) => Err(Error::NetworkError(err)) } } } /// /// Provides functions to add Conductor interactions to a type. Turns the implementing type into /// a Conductor Producer. This version of the trait provides a blocking version of the functions. /// Refer to `conductor::producer::AsyncProducer` for the Asynchronous version. /// /// This should not be implemented directly in most cases. /// Instead use the `#[derive(conductor::Producer)]` macro to generate everything for you. /// pub trait Producer: Base { /// Send a new data packet to the conductor server. /// Messagepack is used as the format over the wire. /// This function blocks. /// /// # Arguments /// /// * `uuid`: The unique id of this producer which was registered with conductor. /// * `conductor_domain`: The url of the conductor instance. /// /// # Errors /// * `InvalidConductorDomain`: Produced when the conductor domain is an invalid url. /// * `MsgPackSerialisationFailure`: Produced when the emit payload cannot be serialised to the message pack format. This is most likely /// due to a difficulty serialising Self using serde. /// * `NetworkError`: Produced when the http post fails for any reason. Holds the Reqwest Error Struct. /// * `MsgPackDeserializationFailure`: Produced when the emit response couldn't be deserialized from message pack. Holds the /// rmp_serde Error struct. /// * `ConductorError`: Produced when there was an error on the server. /// fn emit(&self, uuid: &str, conductor_domain: Url) -> Result<(), Error> { let (payload, url) = self.generate_emit_data(uuid, conductor_domain)?; //start blocking specific let client = reqwest::blocking::Client::new(); let request_resp = client.post(url) .body(payload) .header(reqwest::header::CONTENT_TYPE, reqwest::header::HeaderValue::from_static("application/msgpack")) .send(); let response = match request_resp { Ok(r) => r, Err(err) => return Err(Error::NetworkError(err)) }; let result: EmitResult = match rmp_serde::from_read_ref(response.bytes().unwrap().as_ref()) { Ok(r) => r, Err(err) => return Err(Error::MsgPackDeserializationFailure(err)) }; //end blocking specific code match &result.error { error::ConductorError::NoError => Ok(()), _ => Err(Error::ConductorError(result.error)) } } /// Generates the schema for this struct and register it with conductor. /// This function blocks. /// /// # Arguments /// /// * `name`: A human friendly name for this producer. This isn't important to conductor and doesn't have to be unique. /// It's stored in the DB and can be useful to identify the producer. And empty string is valid but not recommended. /// * `uuid`: An optional unique ID which will be used to identify this producer. If this is set to None one is generated automatically by /// Conductor. It's recommended to leave this as null and let the server generate the ID. /// * `conductor_domain`: The url of the conductor instance. /// /// # Errors /// * `InvalidConductorDomain`: Produced when the conductor domain is an invalid url. /// * `MsgPackSerialisationFailure`: Produced when the emit payload cannot be serialised to the message pack format. 
This is most likely /// due to a difficulty serialising Self using serde. /// * `NetworkError`: Produced when the http post fails for any reason. Holds the Reqwest Error Struct. /// * `MsgPackDeserializationFailure`: Produced when the emit response couldn't be deserialized from message pack. Holds the /// rmp_serde Error struct. /// * `ConductorError`: Produced when there was an error on the server. /// fn register(name: &str, uuid: Option<String>, conductor_domain: Url) -> Result<String, Error> { //TODO handle errors correctly let (payload, url) = Self::prepare_registration_data(name, uuid, conductor_domain)?; let client = reqwest::blocking::Client::new(); let request = client.post(url) .body(payload) .header(reqwest::header::CONTENT_TYPE, reqwest::header::HeaderValue::from_static("application/msgpack")) .send(); let response = match request { Ok(r) => r, Err(err) => return Err(Error::NetworkError(err)) }; let result: RegistrationResult = match rmp_serde::from_read_ref(response.bytes().unwrap().as_ref()) { Ok(r) => r, Err(err) => return Err(Error::MsgPackDeserializationFailure(err)) }; if result.error != error::ConductorError::NoError { return Err(Error::ConductorError(result.error)); } Ok(result.uuid.unwrap()) } /// /// Checks to see if the UUID has been registered with Conductor. /// This does not verify that the schema registered with the server is correct. /// This function blocks /// /// # Arguments /// /// * `uuid`: The unique id of this producer which was registered with conductor. /// * `conductor_domain`: The url of the conductor instance. /// /// # Errors /// * `InvalidConductorDomain`: Produced when the conductor domain is an invalid url. /// * `NetworkError`: Produced when the http get fails for any reason. Holds the Reqwest Error Struct. /// fn is_registered(uuid: &str, conductor_domain: Url) -> Result<bool, Error> { let url = match conductor_domain.join("/v1/producer/check") { Ok(u) => u, Err(err) => return Err(Error::InvalidConductorDomain(format!("The conductor domain was invalid. {}", err))) }; let params = [("uuid", uuid)]; let client = reqwest::blocking::Client::new(); match client.get(url).query(&params).send() { Ok(response) => { Ok(response.status().is_success()) } Err(err) => Err(Error::NetworkError(err)) } } }
40.750462
142
0.630318
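The doc-test examples embedded in the record above do not compile as written: they compare a Measurement struct against a Vec<u8> with assert_eq!, and derive Serialize without importing serde. A corrected usage sketch against the record's own Base::generate_emit_data signature follows; it assumes the record's crate (conductor_common, with serde's derive feature) builds as shown, and the uuid and domain values are hypothetical.

// A corrected version of the broken doc-test in the record above,
// exercising the `Base::generate_emit_data` default method as declared
// there. The uuid and domain are hypothetical example values.
use std::collections::HashMap;
use conductor_common::producer::Base;
use conductor_common::schema;
use serde::Serialize;
use url::Url;

#[derive(Clone, Serialize)]
struct Measurement {
    data_point: u8,
}

impl schema::ConductorSchema for Measurement {
    fn generate_schema() -> HashMap<String, schema::DataTypes> {
        unimplemented!("not needed: generate_emit_data never calls this");
    }
}

impl Base for Measurement {}

fn main() {
    let m = Measurement { data_point: 10 };
    let domain = Url::parse("https://conductor.example").unwrap();
    // returns the message-pack payload plus the resolved emit endpoint
    let (payload, url) = m.generate_emit_data("some-uuid", domain).unwrap();
    assert!(!payload.is_empty());
    assert_eq!(url.path(), "/v1/producer/emit");
}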
331f8e3c3bce090e0df6694401445ed8ad83c458
11,222
use proc_macro2::TokenStream; use quote::quote; use crate::lifetimes::anon_lifetime; use crate::module_trait::passed_by_reference; use crate::names::Names; pub fn define_func(names: &Names, func: &witx::InterfaceFunc) -> TokenStream { let funcname = func.name.as_str(); let ident = names.func(&func.name); let ctx_type = names.ctx_type(); let coretype = func.core_type(); let params = coretype.args.iter().map(|arg| { let name = names.func_core_arg(arg); let atom = names.atom_type(arg.repr()); quote!(#name : #atom) }); let abi_args = quote!( ctx: &#ctx_type, memory: &dyn wiggle::GuestMemory, #(#params),* ); let abi_ret = if let Some(ret) = &coretype.ret { match ret.signifies { witx::CoreParamSignifies::Value(atom) => names.atom_type(atom), _ => unreachable!("ret should always be passed by value"), } } else if func.noreturn { // Ideally we would return `quote!(!)` here, but, we'd have to change // the error handling logic in all the marshalling code to never return, // and instead provide some other way to bail to the context... // noreturn func unimplemented!("noreturn funcs not supported yet!") } else { quote!(()) }; let err_type = coretype.ret.map(|ret| ret.param.tref); let ret_err = err_type .clone() .map(|_res| { quote! { #[cfg(feature = "trace_log")] { log::trace!(" | errno={}", e); } return #abi_ret::from(e); } }) .unwrap_or_else(|| quote!(())); let error_handling = |location: &str| -> TokenStream { if let Some(tref) = &err_type { let abi_ret = match tref.type_().passed_by() { witx::TypePassedBy::Value(atom) => names.atom_type(atom), _ => unreachable!("err should always be passed by value"), }; let err_typename = names.type_ref(&tref, anon_lifetime()); quote! { let e = wiggle::GuestError::InFunc { funcname: #funcname, location: #location, err: Box::new(e.into()) }; let err: #err_typename = wiggle::GuestErrorType::from_error(e, ctx); return #abi_ret::from(err); } } else { quote! { panic!("error: {:?}", e) } } }; let marshal_args = func .params .iter() .map(|p| marshal_arg(names, p, error_handling(p.name.as_str()))); let trait_args = func.params.iter().map(|param| { let name = names.func_param(&param.name); if passed_by_reference(&*param.tref.type_()) { quote!(&#name) } else { quote!(#name) } }); let (trait_rets, trait_bindings) = if func.results.len() < 2 { (quote!({}), quote!(_)) } else { let trait_rets: Vec<_> = func .results .iter() .skip(1) .map(|result| names.func_param(&result.name)) .collect(); let bindings = quote!((#(#trait_rets),*)); let names: Vec<_> = func .results .iter() .skip(1) .map(|result| { let name = names.func_param(&result.name); let fmt = match &*result.tref.type_() { witx::Type::Builtin(_) | witx::Type::Enum(_) | witx::Type::Flags(_) | witx::Type::Handle(_) | witx::Type::Int(_) => "{}", _ => "{:?}", }; format!("{}={}", name.to_string(), fmt) }) .collect(); let trace_fmt = format!(" | result=({})", names.join(",")); let rets = quote! { #[cfg(feature = "trace_log")] { log::trace!(#trace_fmt, #(#trait_rets),*); } (#(#trait_rets),*) }; (rets, bindings) }; // Return value pointers need to be validated before the api call, then // assigned to afterwards. marshal_result returns these two statements as a pair. let marshal_rets = func .results .iter() .skip(1) .map(|result| marshal_result(names, result, &error_handling)); let marshal_rets_pre = marshal_rets.clone().map(|(pre, _post)| pre); let marshal_rets_post = marshal_rets.map(|(_pre, post)| post); let success = if let Some(ref err_type) = err_type { let err_typename = names.type_ref(&err_type, anon_lifetime()); quote! 
{ let success:#err_typename = wiggle::GuestErrorType::success(); #[cfg(feature = "trace_log")] { log::trace!(" | errno={}", success); } #abi_ret::from(success) } } else { quote!() }; let (placeholders, args): (Vec<_>, Vec<_>) = func .params .iter() .map(|param| { let name = names.func_param(&param.name); let fmt = if passed_by_reference(&*param.tref.type_()) { "{:?}" } else { "{}" }; (format!("{}={}", name.to_string(), fmt), quote!(#name)) }) .unzip(); let trace_fmt = format!("{}({})", ident.to_string(), placeholders.join(",")); quote!(pub fn #ident(#abi_args) -> #abi_ret { #(#marshal_args)* #(#marshal_rets_pre)* #[cfg(feature = "trace_log")] { log::trace!(#trace_fmt, #(#args),*); } let #trait_bindings = match ctx.#ident(#(#trait_args),*) { Ok(#trait_bindings) => { #trait_rets }, Err(e) => { #ret_err }, }; #(#marshal_rets_post)* #success }) } fn marshal_arg( names: &Names, param: &witx::InterfaceFuncParam, error_handling: TokenStream, ) -> TokenStream { let tref = &param.tref; let interface_typename = names.type_ref(&tref, anon_lifetime()); let try_into_conversion = { let name = names.func_param(&param.name); quote! { let #name: #interface_typename = { use ::std::convert::TryInto; match #name.try_into() { Ok(a) => a, Err(e) => { #error_handling } } }; } }; let read_conversion = { let pointee_type = names.type_ref(tref, anon_lifetime()); let arg_name = names.func_ptr_binding(&param.name); let name = names.func_param(&param.name); quote! { let #name = match wiggle::GuestPtr::<#pointee_type>::new(memory, #arg_name as u32).read() { Ok(r) => r, Err(e) => { #error_handling } }; } }; match &*tref.type_() { witx::Type::Enum(_e) => try_into_conversion, witx::Type::Flags(_f) => try_into_conversion, witx::Type::Int(_i) => try_into_conversion, witx::Type::Builtin(b) => match b { witx::BuiltinType::U8 | witx::BuiltinType::U16 | witx::BuiltinType::Char8 => { try_into_conversion } witx::BuiltinType::S8 | witx::BuiltinType::S16 => { let name = names.func_param(&param.name); quote! { let #name: #interface_typename = match (#name as i32).try_into() { Ok(a) => a, Err(e) => { #error_handling } } } } witx::BuiltinType::U32 | witx::BuiltinType::S32 | witx::BuiltinType::U64 | witx::BuiltinType::S64 | witx::BuiltinType::USize | witx::BuiltinType::F32 | witx::BuiltinType::F64 => { let name = names.func_param(&param.name); quote! { let #name = #name as #interface_typename; } } witx::BuiltinType::String => { let lifetime = anon_lifetime(); let ptr_name = names.func_ptr_binding(&param.name); let len_name = names.func_len_binding(&param.name); let name = names.func_param(&param.name); quote! { let #name = wiggle::GuestPtr::<#lifetime, str>::new(memory, (#ptr_name as u32, #len_name as u32)); } } }, witx::Type::Pointer(pointee) | witx::Type::ConstPointer(pointee) => { let pointee_type = names.type_ref(pointee, anon_lifetime()); let name = names.func_param(&param.name); quote! { let #name = wiggle::GuestPtr::<#pointee_type>::new(memory, #name as u32); } } witx::Type::Struct(_) => read_conversion, witx::Type::Array(arr) => { let pointee_type = names.type_ref(arr, anon_lifetime()); let ptr_name = names.func_ptr_binding(&param.name); let len_name = names.func_len_binding(&param.name); let name = names.func_param(&param.name); quote! 
{ let #name = wiggle::GuestPtr::<[#pointee_type]>::new(memory, (#ptr_name as u32, #len_name as u32)); } } witx::Type::Union(_u) => read_conversion, witx::Type::Handle(_h) => { let name = names.func_param(&param.name); let handle_type = names.type_ref(tref, anon_lifetime()); quote!( let #name = #handle_type::from(#name); ) } } } fn marshal_result<F>( names: &Names, result: &witx::InterfaceFuncParam, error_handling: F, ) -> (TokenStream, TokenStream) where F: Fn(&str) -> TokenStream, { let tref = &result.tref; let write_val_to_ptr = { let pointee_type = names.type_ref(tref, anon_lifetime()); // core type is given func_ptr_binding name. let ptr_name = names.func_ptr_binding(&result.name); let ptr_err_handling = error_handling(&format!("{}:result_ptr_mut", result.name.as_str())); let pre = quote! { let #ptr_name = wiggle::GuestPtr::<#pointee_type>::new(memory, #ptr_name as u32); }; // trait binding returns func_param name. let val_name = names.func_param(&result.name); let post = quote! { if let Err(e) = #ptr_name.write(#val_name) { #ptr_err_handling } }; (pre, post) }; match &*tref.type_() { witx::Type::Builtin(b) => match b { witx::BuiltinType::String => unimplemented!("string result types"), _ => write_val_to_ptr, }, witx::Type::Pointer { .. } | witx::Type::ConstPointer { .. } | witx::Type::Array { .. } => { unimplemented!("pointer/array result types") } _ => write_val_to_ptr, } }
34.959502
121
0.498663
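marshal_arg in the record above emits a different conversion per witx type, with try_into_conversion as the common path. The following self-contained sketch shows the shape of the glue that path expands to; the Clockid enum and the errno value 28 are illustrative stand-ins, not real wiggle output.

// Sketch of the `try_into_conversion` pattern the generator emits:
// a raw ABI integer is converted into a typed enum, and a conversion
// failure is turned into a guest-visible errno instead of a panic.
use std::convert::TryFrom;

#[derive(Debug)]
enum Clockid {
    Realtime,
    Monotonic,
}

impl TryFrom<i32> for Clockid {
    type Error = String;
    fn try_from(v: i32) -> Result<Self, Self::Error> {
        match v {
            0 => Ok(Clockid::Realtime),
            1 => Ok(Clockid::Monotonic),
            other => Err(format!("invalid clockid: {}", other)),
        }
    }
}

// Shape of a generated shim: marshal the raw argument, bail with an
// errno (28 here, an EINVAL-like placeholder) if conversion fails.
fn clock_time_get_shim(raw_clock_id: i32) -> i32 {
    use std::convert::TryInto;
    let clock_id: Clockid = match raw_clock_id.try_into() {
        Ok(a) => a,
        Err(_e) => return 28,
    };
    println!("would call the trait method with {:?}", clock_id);
    0
}

fn main() {
    assert_eq!(clock_time_get_shim(0), 0);
    assert_eq!(clock_time_get_shim(99), 28);
}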
d6185e04e4d6105d3c512ee480c1f19cde3b5bce
5,358
// Generated from definition io.k8s.api.policy.v1beta1.PodDisruptionBudgetSpec

/// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct PodDisruptionBudgetSpec {
    /// An eviction is allowed if at most "maxUnavailable" pods selected by "selector" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with "minAvailable".
    pub max_unavailable: Option<crate::v1_11::apimachinery::pkg::util::intstr::IntOrString>,

    /// An eviction is allowed if at least "minAvailable" pods selected by "selector" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying "100%".
    pub min_available: Option<crate::v1_11::apimachinery::pkg::util::intstr::IntOrString>,

    /// Label query over pods whose evictions are managed by the disruption budget.
    pub selector: Option<crate::v1_11::apimachinery::pkg::apis::meta::v1::LabelSelector>,
}

impl<'de> serde::Deserialize<'de> for PodDisruptionBudgetSpec {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
        #[allow(non_camel_case_types)]
        enum Field {
            Key_max_unavailable,
            Key_min_available,
            Key_selector,
            Other,
        }

        impl<'de> serde::Deserialize<'de> for Field {
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
                struct Visitor;

                impl<'de> serde::de::Visitor<'de> for Visitor {
                    type Value = Field;

                    fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                        write!(f, "field identifier")
                    }

                    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error {
                        Ok(match v {
                            "maxUnavailable" => Field::Key_max_unavailable,
                            "minAvailable" => Field::Key_min_available,
                            "selector" => Field::Key_selector,
                            _ => Field::Other,
                        })
                    }
                }

                deserializer.deserialize_identifier(Visitor)
            }
        }

        struct Visitor;

        impl<'de> serde::de::Visitor<'de> for Visitor {
            type Value = PodDisruptionBudgetSpec;

            fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                write!(f, "struct PodDisruptionBudgetSpec")
            }

            fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> {
                let mut value_max_unavailable: Option<crate::v1_11::apimachinery::pkg::util::intstr::IntOrString> = None;
                let mut value_min_available: Option<crate::v1_11::apimachinery::pkg::util::intstr::IntOrString> = None;
                let mut value_selector: Option<crate::v1_11::apimachinery::pkg::apis::meta::v1::LabelSelector> = None;

                while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? {
                    match key {
                        Field::Key_max_unavailable => value_max_unavailable = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_min_available => value_min_available = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_selector => value_selector = serde::de::MapAccess::next_value(&mut map)?,
                        Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; },
                    }
                }

                Ok(PodDisruptionBudgetSpec {
                    max_unavailable: value_max_unavailable,
                    min_available: value_min_available,
                    selector: value_selector,
                })
            }
        }

        deserializer.deserialize_struct(
            "PodDisruptionBudgetSpec",
            &[
                "maxUnavailable",
                "minAvailable",
                "selector",
            ],
            Visitor,
        )
    }
}

impl serde::Serialize for PodDisruptionBudgetSpec {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
        let mut state = serializer.serialize_struct(
            "PodDisruptionBudgetSpec",
            self.max_unavailable.as_ref().map_or(0, |_| 1) +
            self.min_available.as_ref().map_or(0, |_| 1) +
            self.selector.as_ref().map_or(0, |_| 1),
        )?;
        if let Some(value) = &self.max_unavailable {
            serde::ser::SerializeStruct::serialize_field(&mut state, "maxUnavailable", value)?;
        }
        if let Some(value) = &self.min_available {
            serde::ser::SerializeStruct::serialize_field(&mut state, "minAvailable", value)?;
        }
        if let Some(value) = &self.selector {
            serde::ser::SerializeStruct::serialize_field(&mut state, "selector", value)?;
        }
        serde::ser::SerializeStruct::end(state)
    }
}
47
291
0.578201
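The record above hand-writes both serde impls so that unknown JSON keys fall through to Field::Other and are ignored, which is what lets an older client tolerate fields added by a newer API server. A miniature runnable version of the same pattern on a stand-in struct follows; serde and serde_json are assumed as dependencies, and BudgetSpec is not the k8s-openapi type itself.

// Miniature version of the record's hand-written Deserialize: known
// keys are routed by name, unknown keys are consumed and discarded.
use serde::de::{self, Deserializer, MapAccess, Visitor};
use serde::Deserialize;
use std::fmt;

#[derive(Debug, Default, PartialEq)]
struct BudgetSpec {
    min_available: Option<i64>,
}

impl<'de> Deserialize<'de> for BudgetSpec {
    fn deserialize<D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
        struct V;

        impl<'de> Visitor<'de> for V {
            type Value = BudgetSpec;

            fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                write!(f, "struct BudgetSpec")
            }

            fn visit_map<A: MapAccess<'de>>(self, mut map: A) -> Result<Self::Value, A::Error> {
                let mut min_available = None;
                while let Some(key) = map.next_key::<String>()? {
                    match key.as_str() {
                        "minAvailable" => min_available = map.next_value()?,
                        // swallow values for keys this client doesn't know
                        _ => { let _: de::IgnoredAny = map.next_value()?; }
                    }
                }
                Ok(BudgetSpec { min_available })
            }
        }

        d.deserialize_struct("BudgetSpec", &["minAvailable"], V)
    }
}

fn main() {
    // the unknown field is ignored rather than rejected
    let spec: BudgetSpec =
        serde_json::from_str(r#"{"minAvailable": 2, "unknownField": true}"#).unwrap();
    assert_eq!(spec, BudgetSpec { min_available: Some(2) });
}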
917c0b7cc26c7bd15c3c31cd986e6ba5b4b4108e
3,780
pub fn get_help_docs() -> Vec<Vec<&'static str>> {
    vec![
        vec!["Scroll down to next result page", "<Ctrl+d>", "Pagination"],
        vec!["Scroll up to previous result page", "<Ctrl+u>", "Pagination"],
        vec!["Jump to start of playlist", "<Ctrl+a>", "Pagination"],
        vec!["Jump to end of playlist", "<Ctrl+e>", "Pagination"],
        vec!["Jump to currently playing album", "a", "General"],
        vec!["Jump to currently playing artist's album list", "A", "General"],
        vec!["Jump to current play context", "o", "General"],
        vec!["Increase volume by 10%", "+", "General"],
        vec!["Decrease volume by 10%", "-", "General"],
        vec!["Skip to next track", "n", "General"],
        vec!["Skip to previous track", "p", "General"],
        vec!["Seek backwards 5 seconds", "<", "General"],
        vec!["Seek forwards 5 seconds", ">", "General"],
        vec!["Toggle shuffle", "<Ctrl+s>", "General"],
        vec!["Copy url to currently playing song/episode", "c", "General"],
        vec!["Copy url to currently playing album/show", "C", "General"],
        vec!["Cycle repeat mode", "<Ctrl+r>", "General"],
        vec!["Move selection left", "h | <Left Arrow Key> | <Ctrl+b>", "General"],
        vec!["Move selection down", "j | <Down Arrow Key> | <Ctrl+n>", "General"],
        vec!["Move selection up", "k | <Up Arrow Key> | <Ctrl+p>", "General"],
        vec!["Move selection right", "l | <Right Arrow Key> | <Ctrl+f>", "General"],
        vec!["Move selection to top of list", "H", "General"],
        vec!["Move selection to middle of list", "M", "General"],
        vec!["Move selection to bottom of list", "L", "General"],
        vec!["Enter input for search", "/", "General"],
        vec!["Pause/Resume playback", "<Space>", "General"],
        vec!["Enter active mode", "<Enter>", "General"],
        vec!["Go to audio analysis screen", "v", "General"],
        vec!["Go to playbar only screen (basic view)", "B", "General"],
        vec!["Go back or exit when nowhere left to back to", "q", "General"],
        vec!["Select device to play music on", "d", "General"],
        vec!["Enter hover mode", "<Esc>", "Selected block"],
        vec!["Save track in list or table", "s", "Selected block"],
        vec!["Start playback or enter album/artist/playlist", "<Enter>", "Selected block"],
        vec!["Play recommendations for song/artist", "r", "Selected block"],
        vec!["Play all tracks for artist", "e", "Library -> Artists"],
        vec!["Search with input text", "<Enter>", "Search input"],
        vec!["Move cursor one space left", "<Left Arrow Key>", "Search input"],
        vec!["Move cursor one space right", "<Right Arrow Key>", "Search input"],
        vec!["Delete entire input", "<Ctrl+l>", "Search input"],
        vec!["Delete text from cursor to start of input", "<Ctrl+u>", "Search input"],
        vec!["Delete text from cursor to end of input", "<Ctrl+k>", "Search input"],
        vec!["Delete previous word", "<Ctrl+w>", "Search input"],
        vec!["Jump to start of input", "<Ctrl+a>", "Search input"],
        vec!["Jump to end of input", "<Ctrl+e>", "Search input"],
        vec!["Escape from the input back to hovered block", "<Esc>", "Search input"],
        vec!["Delete saved album", "D", "Library -> Albums"],
        vec!["Delete saved playlist", "D", "Playlist"],
        vec!["Follow an artist/playlist", "w", "Search result"],
        vec!["Save (like) album to library", "w", "Search result"],
        vec!["Play random song in playlist", "S", "Selected Playlist"],
        vec!["Add track to queue", "z", "Hovered over track"],
    ]
}
33.451327
71
0.546032
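Each row returned by get_help_docs() above is a [description, key binding, context] triple. A small hypothetical usage sketch, grouping rows by their context column the way a help screen would render them:

use std::collections::BTreeMap;

// Group [description, binding, context] rows by their context column.
fn group_by_context(
    rows: Vec<Vec<&'static str>>,
) -> BTreeMap<&'static str, Vec<(&'static str, &'static str)>> {
    let mut groups: BTreeMap<&'static str, Vec<(&'static str, &'static str)>> = BTreeMap::new();
    for row in rows {
        // row[0] = description, row[1] = key binding, row[2] = context
        groups.entry(row[2]).or_default().push((row[1], row[0]));
    }
    groups
}

fn main() {
    let rows = vec![
        vec!["Skip to next track", "n", "General"],
        vec!["Delete previous word", "<Ctrl+w>", "Search input"],
    ];
    for (context, bindings) in group_by_context(rows) {
        println!("{}:", context);
        for (key, desc) in bindings {
            println!("  {:<10} {}", key, desc);
        }
    }
}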
e624b1f1948b527afedb86062cf5f9f05f3e591a
42,759
use crate::support::registry::Package; use crate::support::{basic_bin_manifest, basic_lib_manifest, main_file, project}; #[test] fn cargo_metadata_simple() { let p = project() .file("src/foo.rs", "") .file("Cargo.toml", &basic_bin_manifest("foo")) .build(); p.cargo("metadata") .with_json( r#" { "packages": [ { "authors": [ "[email protected]" ], "categories": [], "name": "foo", "version": "0.5.0", "id": "foo[..]", "keywords": [], "source": null, "dependencies": [], "edition": "2015", "license": null, "license_file": null, "description": null, "readme": null, "repository": null, "targets": [ { "kind": [ "bin" ], "crate_types": [ "bin" ], "edition": "2015", "name": "foo", "src_path": "[..]/foo/src/foo.rs" } ], "features": {}, "manifest_path": "[..]Cargo.toml", "metadata": null } ], "workspace_members": ["foo 0.5.0 (path+file:[..]foo)"], "resolve": { "nodes": [ { "dependencies": [], "deps": [], "features": [], "id": "foo 0.5.0 (path+file:[..]foo)" } ], "root": "foo 0.5.0 (path+file:[..]foo)" }, "target_directory": "[..]foo/target", "version": 1, "workspace_root": "[..]/foo" }"#, ).run(); } #[test] fn cargo_metadata_warns_on_implicit_version() { let p = project() .file("src/foo.rs", "") .file("Cargo.toml", &basic_bin_manifest("foo")) .build(); p.cargo("metadata").with_stderr("[WARNING] please specify `--format-version` flag explicitly to avoid compatibility problems").run(); p.cargo("metadata --format-version 1").with_stderr("").run(); } #[test] fn library_with_several_crate_types() { let p = project() .file("src/lib.rs", "") .file( "Cargo.toml", r#" [package] name = "foo" version = "0.5.0" [lib] crate-type = ["lib", "staticlib"] "#, ).build(); p.cargo("metadata") .with_json( r#" { "packages": [ { "authors": [], "categories": [], "name": "foo", "readme": null, "repository": null, "version": "0.5.0", "id": "foo[..]", "keywords": [], "source": null, "dependencies": [], "edition": "2015", "license": null, "license_file": null, "description": null, "targets": [ { "kind": [ "lib", "staticlib" ], "crate_types": [ "lib", "staticlib" ], "edition": "2015", "name": "foo", "src_path": "[..]/foo/src/lib.rs" } ], "features": {}, "manifest_path": "[..]Cargo.toml", "metadata": null } ], "workspace_members": ["foo 0.5.0 (path+file:[..]foo)"], "resolve": { "nodes": [ { "dependencies": [], "deps": [], "features": [], "id": "foo 0.5.0 (path+file:[..]foo)" } ], "root": "foo 0.5.0 (path+file:[..]foo)" }, "target_directory": "[..]foo/target", "version": 1, "workspace_root": "[..]/foo" }"#, ).run(); } #[test] fn library_with_features() { let p = project() .file("src/lib.rs", "") .file( "Cargo.toml", r#" [package] name = "foo" version = "0.5.0" [features] default = ["default_feat"] default_feat = [] optional_feat = [] "#, ).build(); p.cargo("metadata") .with_json( r#" { "packages": [ { "authors": [], "categories": [], "name": "foo", "readme": null, "repository": null, "version": "0.5.0", "id": "foo[..]", "keywords": [], "source": null, "dependencies": [], "edition": "2015", "license": null, "license_file": null, "description": null, "targets": [ { "kind": [ "lib" ], "crate_types": [ "lib" ], "edition": "2015", "name": "foo", "src_path": "[..]/foo/src/lib.rs" } ], "features": { "default": [ "default_feat" ], "default_feat": [], "optional_feat": [] }, "manifest_path": "[..]Cargo.toml", "metadata": null } ], "workspace_members": ["foo 0.5.0 (path+file:[..]foo)"], "resolve": { "nodes": [ { "dependencies": [], "deps": [], "features": [ "default", "default_feat" ], "id": "foo 0.5.0 (path+file:[..]foo)" } ], "root": "foo 
0.5.0 (path+file:[..]foo)" }, "target_directory": "[..]foo/target", "version": 1, "workspace_root": "[..]/foo" }"#, ).run(); } #[test] fn cargo_metadata_with_deps_and_version() { let p = project() .file("src/foo.rs", "") .file( "Cargo.toml", r#" [project] name = "foo" version = "0.5.0" authors = [] license = "MIT" description = "foo" [[bin]] name = "foo" [dependencies] bar = "*" "#, ).build(); Package::new("baz", "0.0.1").publish(); Package::new("bar", "0.0.1").dep("baz", "0.0.1").publish(); p.cargo("metadata -q --format-version 1") .with_json( r#" { "packages": [ { "authors": [], "categories": [], "dependencies": [], "description": null, "features": {}, "id": "baz 0.0.1 (registry+[..])", "keywords": [], "manifest_path": "[..]Cargo.toml", "name": "baz", "readme": null, "repository": null, "source": "registry+[..]", "license": null, "license_file": null, "description": null, "edition": "2015", "targets": [ { "kind": [ "lib" ], "crate_types": [ "lib" ], "edition": "2015", "name": "baz", "src_path": "[..]lib.rs" } ], "version": "0.0.1", "metadata": null }, { "authors": [], "categories": [], "dependencies": [ { "features": [], "kind": null, "name": "baz", "optional": false, "req": "^0.0.1", "source": "registry+[..]", "target": null, "uses_default_features": true, "rename": null } ], "features": {}, "id": "bar 0.0.1 (registry+[..])", "keywords": [], "manifest_path": "[..]Cargo.toml", "name": "bar", "readme": null, "repository": null, "source": "registry+[..]", "license": null, "license_file": null, "description": null, "edition": "2015", "targets": [ { "kind": [ "lib" ], "crate_types": [ "lib" ], "edition": "2015", "name": "bar", "src_path": "[..]lib.rs" } ], "version": "0.0.1", "metadata": null }, { "authors": [], "categories": [], "dependencies": [ { "features": [], "kind": null, "name": "bar", "optional": false, "req": "*", "source": "registry+[..]", "target": null, "uses_default_features": true, "rename": null } ], "features": {}, "id": "foo 0.5.0 (path+file:[..]foo)", "keywords": [], "manifest_path": "[..]Cargo.toml", "name": "foo", "readme": null, "repository": null, "source": null, "license": "MIT", "license_file": null, "description": "foo", "edition": "2015", "targets": [ { "kind": [ "bin" ], "crate_types": [ "bin" ], "edition": "2015", "name": "foo", "src_path": "[..]foo.rs" } ], "version": "0.5.0", "metadata": null } ], "workspace_members": ["foo 0.5.0 (path+file:[..]foo)"], "resolve": { "nodes": [ { "dependencies": [ "bar 0.0.1 (registry+[..])" ], "deps": [ { "name": "bar", "pkg": "bar 0.0.1 (registry+[..])" } ], "features": [], "id": "foo 0.5.0 (path+file:[..]foo)" }, { "dependencies": [ "baz 0.0.1 (registry+[..])" ], "deps": [ { "name": "baz", "pkg": "baz 0.0.1 (registry+[..])" } ], "features": [], "id": "bar 0.0.1 (registry+[..])" }, { "dependencies": [], "deps": [], "features": [], "id": "baz 0.0.1 (registry+[..])" } ], "root": "foo 0.5.0 (path+file:[..]foo)" }, "target_directory": "[..]foo/target", "version": 1, "workspace_root": "[..]/foo" }"#, ).run(); } #[test] fn example() { let p = project() .file("src/lib.rs", "") .file("examples/ex.rs", "") .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [[example]] name = "ex" "#, ).build(); p.cargo("metadata") .with_json( r#" { "packages": [ { "authors": [], "categories": [], "name": "foo", "readme": null, "repository": null, "version": "0.1.0", "id": "foo[..]", "keywords": [], "license": null, "license_file": null, "description": null, "edition": "2015", "source": null, "dependencies": [], "targets": [ { "kind": [ 
"lib" ], "crate_types": [ "lib" ], "edition": "2015", "name": "foo", "src_path": "[..]/foo/src/lib.rs" }, { "kind": [ "example" ], "crate_types": [ "bin" ], "edition": "2015", "name": "ex", "src_path": "[..]/foo/examples/ex.rs" } ], "features": {}, "manifest_path": "[..]Cargo.toml", "metadata": null } ], "workspace_members": [ "foo 0.1.0 (path+file:[..]foo)" ], "resolve": { "root": "foo 0.1.0 (path+file://[..]foo)", "nodes": [ { "id": "foo 0.1.0 (path+file:[..]foo)", "features": [], "dependencies": [], "deps": [] } ] }, "target_directory": "[..]foo/target", "version": 1, "workspace_root": "[..]/foo" }"#, ).run(); } #[test] fn example_lib() { let p = project() .file("src/lib.rs", "") .file("examples/ex.rs", "") .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" [[example]] name = "ex" crate-type = ["rlib", "dylib"] "#, ).build(); p.cargo("metadata") .with_json( r#" { "packages": [ { "authors": [], "categories": [], "name": "foo", "readme": null, "repository": null, "version": "0.1.0", "id": "foo[..]", "keywords": [], "license": null, "license_file": null, "description": null, "edition": "2015", "source": null, "dependencies": [], "targets": [ { "kind": [ "lib" ], "crate_types": [ "lib" ], "edition": "2015", "name": "foo", "src_path": "[..]/foo/src/lib.rs" }, { "kind": [ "example" ], "crate_types": [ "rlib", "dylib" ], "edition": "2015", "name": "ex", "src_path": "[..]/foo/examples/ex.rs" } ], "features": {}, "manifest_path": "[..]Cargo.toml", "metadata": null } ], "workspace_members": [ "foo 0.1.0 (path+file:[..]foo)" ], "resolve": { "root": "foo 0.1.0 (path+file://[..]foo)", "nodes": [ { "id": "foo 0.1.0 (path+file:[..]foo)", "features": [], "dependencies": [], "deps": [] } ] }, "target_directory": "[..]foo/target", "version": 1, "workspace_root": "[..]/foo" }"#, ).run(); } #[test] fn workspace_metadata() { let p = project() .file( "Cargo.toml", r#" [workspace] members = ["bar", "baz"] "#, ).file("bar/Cargo.toml", &basic_lib_manifest("bar")) .file("bar/src/lib.rs", "") .file("baz/Cargo.toml", &basic_lib_manifest("baz")) .file("baz/src/lib.rs", "") .build(); p.cargo("metadata") .with_json( r#" { "packages": [ { "authors": [ "[email protected]" ], "categories": [], "name": "bar", "version": "0.5.0", "id": "bar[..]", "readme": null, "repository": null, "keywords": [], "source": null, "dependencies": [], "license": null, "license_file": null, "description": null, "edition": "2015", "targets": [ { "kind": [ "lib" ], "crate_types": [ "lib" ], "edition": "2015", "name": "bar", "src_path": "[..]bar/src/lib.rs" } ], "features": {}, "manifest_path": "[..]bar/Cargo.toml", "metadata": null }, { "authors": [ "[email protected]" ], "categories": [], "name": "baz", "readme": null, "repository": null, "version": "0.5.0", "id": "baz[..]", "keywords": [], "source": null, "dependencies": [], "license": null, "license_file": null, "description": null, "edition": "2015", "targets": [ { "kind": [ "lib" ], "crate_types": [ "lib" ], "edition": "2015", "name": "baz", "src_path": "[..]baz/src/lib.rs" } ], "features": {}, "manifest_path": "[..]baz/Cargo.toml", "metadata": null } ], "workspace_members": ["baz 0.5.0 (path+file:[..]baz)", "bar 0.5.0 (path+file:[..]bar)"], "resolve": { "nodes": [ { "dependencies": [], "deps": [], "features": [], "id": "baz 0.5.0 (path+file:[..]baz)" }, { "dependencies": [], "deps": [], "features": [], "id": "bar 0.5.0 (path+file:[..]bar)" } ], "root": null }, "target_directory": "[..]foo/target", "version": 1, "workspace_root": "[..]/foo" }"#, ).run(); } #[test] fn 
workspace_metadata_no_deps() { let p = project() .file( "Cargo.toml", r#" [workspace] members = ["bar", "baz"] "#, ).file("bar/Cargo.toml", &basic_lib_manifest("bar")) .file("bar/src/lib.rs", "") .file("baz/Cargo.toml", &basic_lib_manifest("baz")) .file("baz/src/lib.rs", "") .build(); p.cargo("metadata --no-deps") .with_json( r#" { "packages": [ { "authors": [ "[email protected]" ], "categories": [], "name": "bar", "readme": null, "repository": null, "version": "0.5.0", "id": "bar[..]", "keywords": [], "source": null, "dependencies": [], "license": null, "license_file": null, "description": null, "edition": "2015", "targets": [ { "kind": [ "lib" ], "crate_types": [ "lib" ], "edition": "2015", "name": "bar", "src_path": "[..]bar/src/lib.rs" } ], "features": {}, "manifest_path": "[..]bar/Cargo.toml", "metadata": null }, { "authors": [ "[email protected]" ], "categories": [], "name": "baz", "readme": null, "repository": null, "version": "0.5.0", "id": "baz[..]", "keywords": [], "source": null, "dependencies": [], "license": null, "license_file": null, "description": null, "edition": "2015", "targets": [ { "kind": [ "lib" ], "crate_types": ["lib"], "edition": "2015", "name": "baz", "src_path": "[..]baz/src/lib.rs" } ], "features": {}, "manifest_path": "[..]baz/Cargo.toml", "metadata": null } ], "workspace_members": ["baz 0.5.0 (path+file:[..]baz)", "bar 0.5.0 (path+file:[..]bar)"], "resolve": null, "target_directory": "[..]foo/target", "version": 1, "workspace_root": "[..]/foo" }"#, ).run(); } #[test] fn cargo_metadata_with_invalid_manifest() { let p = project().file("Cargo.toml", "").build(); p.cargo("metadata --format-version 1") .with_status(101) .with_stderr( "\ [ERROR] failed to parse manifest at `[..]` Caused by: virtual manifests must be configured with [workspace]", ).run(); } const MANIFEST_OUTPUT: &str = r#" { "packages": [{ "authors": [ "[email protected]" ], "categories": [], "name":"foo", "version":"0.5.0", "id":"foo[..]0.5.0[..](path+file://[..]/foo)", "source":null, "dependencies":[], "keywords": [], "license": null, "license_file": null, "description": null, "edition": "2015", "targets":[{ "kind":["bin"], "crate_types":["bin"], "edition": "2015", "name":"foo", "src_path":"[..]/foo/src/foo.rs" }], "features":{}, "manifest_path":"[..]Cargo.toml", "metadata": null, "readme": null, "repository": null }], "workspace_members": [ "foo 0.5.0 (path+file:[..]foo)" ], "resolve": null, "target_directory": "[..]foo/target", "version": 1, "workspace_root": "[..]/foo" }"#; #[test] fn cargo_metadata_no_deps_path_to_cargo_toml_relative() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) .build(); p.cargo("metadata --no-deps --manifest-path foo/Cargo.toml") .cwd(p.root().parent().unwrap()) .with_json(MANIFEST_OUTPUT) .run(); } #[test] fn cargo_metadata_no_deps_path_to_cargo_toml_absolute() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) .build(); p.cargo("metadata --no-deps --manifest-path") .arg(p.root().join("Cargo.toml")) .cwd(p.root().parent().unwrap()) .with_json(MANIFEST_OUTPUT) .run(); } #[test] fn cargo_metadata_no_deps_path_to_cargo_toml_parent_relative() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) .build(); p.cargo("metadata --no-deps --manifest-path foo") .cwd(p.root().parent().unwrap()) .with_status(101) .with_stderr( "[ERROR] the manifest-path must be \ a path to 
a Cargo.toml file", ).run(); } #[test] fn cargo_metadata_no_deps_path_to_cargo_toml_parent_absolute() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) .build(); p.cargo("metadata --no-deps --manifest-path") .arg(p.root()) .cwd(p.root().parent().unwrap()) .with_status(101) .with_stderr( "[ERROR] the manifest-path must be \ a path to a Cargo.toml file", ).run(); } #[test] fn cargo_metadata_no_deps_cwd() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) .build(); p.cargo("metadata --no-deps") .with_json(MANIFEST_OUTPUT) .run(); } #[test] fn cargo_metadata_bad_version() { let p = project() .file("Cargo.toml", &basic_bin_manifest("foo")) .file("src/foo.rs", &main_file(r#""i am foo""#, &[])) .build(); p.cargo("metadata --no-deps --format-version 2") .with_status(1) .with_stderr_contains( "\ error: '2' isn't a valid value for '--format-version <VERSION>' <tab>[possible values: 1] ", ).run(); } #[test] fn multiple_features() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" authors = [] [features] a = [] b = [] "#, ).file("src/lib.rs", "") .build(); p.cargo("metadata --features").arg("a b").run(); } #[test] fn package_metadata() { let p = project() .file( "Cargo.toml", r#" [package] name = "foo" version = "0.1.0" authors = ["[email protected]"] categories = ["database"] keywords = ["database"] readme = "README.md" repository = "https://github.com/rust-lang/cargo" [package.metadata.bar] baz = "quux" "#, ).file("src/lib.rs", "") .build(); p.cargo("metadata --no-deps") .with_json( r#" { "packages": [ { "authors": ["[email protected]"], "categories": ["database"], "name": "foo", "readme": "README.md", "repository": "https://github.com/rust-lang/cargo", "version": "0.1.0", "id": "foo[..]", "keywords": ["database"], "source": null, "dependencies": [], "edition": "2015", "license": null, "license_file": null, "description": null, "targets": [ { "kind": [ "lib" ], "crate_types": [ "lib" ], "edition": "2015", "name": "foo", "src_path": "[..]foo/src/lib.rs" } ], "features": {}, "manifest_path": "[..]foo/Cargo.toml", "metadata": { "bar": { "baz": "quux" } } } ], "workspace_members": ["foo[..]"], "resolve": null, "target_directory": "[..]foo/target", "version": 1, "workspace_root": "[..]/foo" }"#, ).run(); } #[test] fn cargo_metadata_path_to_cargo_toml_project() { let p = project() .file( "Cargo.toml", r#" [workspace] members = ["bar"] "#, ).file("bar/Cargo.toml", &basic_lib_manifest("bar")) .file("bar/src/lib.rs", "") .build(); p.cargo("package --manifest-path") .arg(p.root().join("bar/Cargo.toml")) .cwd(p.root().parent().unwrap()) .run(); p.cargo("metadata --manifest-path") .arg(p.root().join("target/package/bar-0.5.0/Cargo.toml")) .with_json( r#" { "packages": [ { "authors": [ "[email protected]" ], "categories": [], "dependencies": [], "description": null, "edition": "2015", "features": {}, "id": "bar 0.5.0 ([..])", "keywords": [], "license": null, "license_file": null, "manifest_path": "[..]Cargo.toml", "metadata": null, "name": "bar", "readme": null, "repository": null, "source": null, "targets": [ { "crate_types": [ "lib" ], "edition": "2015", "kind": [ "lib" ], "name": "bar", "src_path": "[..]src/lib.rs" } ], "version": "0.5.0" } ], "resolve": { "nodes": [ { "dependencies": [], "deps": [], "features": [], "id": "bar 0.5.0 ([..])" } ], "root": "bar 0.5.0 (path+file:[..])" }, "target_directory": "[..]", "version": 1, 
"workspace_members": [ "bar 0.5.0 (path+file:[..])" ], "workspace_root": "[..]" } "#, ).run(); } #[test] fn package_edition_2018() { let p = project() .file("src/lib.rs", "") .file( "Cargo.toml", r#" cargo-features = ["edition"] [package] name = "foo" version = "0.1.0" authors = ["[email protected]"] edition = "2018" "#, ).build(); p.cargo("metadata") .masquerade_as_nightly_cargo() .with_json( r#" { "packages": [ { "authors": [ "[email protected]" ], "categories": [], "dependencies": [], "description": null, "edition": "2018", "features": {}, "id": "foo 0.1.0 (path+file:[..])", "keywords": [], "license": null, "license_file": null, "manifest_path": "[..]Cargo.toml", "metadata": null, "name": "foo", "readme": null, "repository": null, "source": null, "targets": [ { "crate_types": [ "lib" ], "edition": "2018", "kind": [ "lib" ], "name": "foo", "src_path": "[..]src/lib.rs" } ], "version": "0.1.0" } ], "resolve": { "nodes": [ { "dependencies": [], "deps": [], "features": [], "id": "foo 0.1.0 (path+file:[..])" } ], "root": "foo 0.1.0 (path+file:[..])" }, "target_directory": "[..]", "version": 1, "workspace_members": [ "foo 0.1.0 (path+file:[..])" ], "workspace_root": "[..]" } "#, ).run(); } #[test] fn target_edition_2018() { let p = project() .file("src/lib.rs", "") .file("src/main.rs", "") .file( "Cargo.toml", r#" cargo-features = ["edition"] [package] name = "foo" version = "0.1.0" authors = ["[email protected]"] edition = "2015" [lib] edition = "2018" "#, ).build(); p.cargo("metadata") .masquerade_as_nightly_cargo() .with_json( r#" { "packages": [ { "authors": [ "[email protected]" ], "categories": [], "dependencies": [], "description": null, "edition": "2015", "features": {}, "id": "foo 0.1.0 (path+file:[..])", "keywords": [], "license": null, "license_file": null, "manifest_path": "[..]Cargo.toml", "metadata": null, "name": "foo", "readme": null, "repository": null, "source": null, "targets": [ { "crate_types": [ "lib" ], "edition": "2018", "kind": [ "lib" ], "name": "foo", "src_path": "[..]src/lib.rs" }, { "crate_types": [ "bin" ], "edition": "2015", "kind": [ "bin" ], "name": "foo", "src_path": "[..]src/main.rs" } ], "version": "0.1.0" } ], "resolve": { "nodes": [ { "dependencies": [], "deps": [], "features": [], "id": "foo 0.1.0 (path+file:[..])" } ], "root": "foo 0.1.0 (path+file:[..])" }, "target_directory": "[..]", "version": 1, "workspace_members": [ "foo 0.1.0 (path+file:[..])" ], "workspace_root": "[..]" } "#, ).run(); } #[test] fn rename_dependency() { Package::new("bar", "0.1.0").publish(); Package::new("bar", "0.2.0").publish(); let p = project() .file( "Cargo.toml", r#" cargo-features = ["rename-dependency"] [project] name = "foo" version = "0.0.1" authors = [] [dependencies] bar = { version = "0.1.0" } baz = { version = "0.2.0", package = "bar" } "#, ).file("src/lib.rs", "extern crate bar; extern crate baz;") .build(); p.cargo("metadata") .masquerade_as_nightly_cargo() .with_json( r#" { "packages": [ { "authors": [], "categories": [], "dependencies": [ { "features": [], "kind": null, "name": "bar", "optional": false, "rename": null, "req": "^0.1.0", "source": "registry+https://github.com/rust-lang/crates.io-index", "target": null, "uses_default_features": true }, { "features": [], "kind": null, "name": "bar", "optional": false, "rename": "baz", "req": "^0.2.0", "source": "registry+https://github.com/rust-lang/crates.io-index", "target": null, "uses_default_features": true } ], "description": null, "edition": "2015", "features": {}, "id": "foo 0.0.1[..]", "keywords": [], 
"license": null, "license_file": null, "manifest_path": "[..]", "metadata": null, "name": "foo", "readme": null, "repository": null, "source": null, "targets": [ { "crate_types": [ "lib" ], "edition": "2015", "kind": [ "lib" ], "name": "foo", "src_path": "[..]" } ], "version": "0.0.1" }, { "authors": [], "categories": [], "dependencies": [], "description": null, "edition": "2015", "features": {}, "id": "bar 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "keywords": [], "license": null, "license_file": null, "manifest_path": "[..]", "metadata": null, "name": "bar", "readme": null, "repository": null, "source": "registry+https://github.com/rust-lang/crates.io-index", "targets": [ { "crate_types": [ "lib" ], "edition": "2015", "kind": [ "lib" ], "name": "bar", "src_path": "[..]" } ], "version": "0.1.0" }, { "authors": [], "categories": [], "dependencies": [], "description": null, "edition": "2015", "features": {}, "id": "bar 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "keywords": [], "license": null, "license_file": null, "manifest_path": "[..]", "metadata": null, "name": "bar", "readme": null, "repository": null, "source": "registry+https://github.com/rust-lang/crates.io-index", "targets": [ { "crate_types": [ "lib" ], "edition": "2015", "kind": [ "lib" ], "name": "bar", "src_path": "[..]" } ], "version": "0.2.0" } ], "resolve": { "nodes": [ { "dependencies": [], "deps": [], "features": [], "id": "bar 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" }, { "dependencies": [], "deps": [], "features": [], "id": "bar 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" }, { "dependencies": [ "bar 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "bar 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" ], "deps": [ { "name": "bar", "pkg": "bar 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" }, { "name": "baz", "pkg": "bar 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" } ], "features": [], "id": "foo 0.0.1[..]" } ], "root": "foo 0.0.1[..]" }, "target_directory": "[..]", "version": 1, "workspace_members": [ "foo 0.0.1[..]" ], "workspace_root": "[..]" }"#, ).run(); }
29.226931
137
0.319184
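A consumer-side sketch of the JSON contract the tests above pin down. This is not part of the test suite; it assumes `serde_json` is available and `cargo` is on `PATH`:

```rust
use std::process::Command;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Ask cargo for the same format version the tests assert on.
    let out = Command::new("cargo")
        .args(&["metadata", "--format-version", "1", "--no-deps"])
        .output()?;
    assert!(out.status.success());
    // Field names match the expected JSON in the tests above.
    let doc: serde_json::Value = serde_json::from_slice(&out.stdout)?;
    println!("workspace_root = {}", doc["workspace_root"]);
    Ok(())
}
```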
91401c8e4fd2c0e3b2d12f5f905059548182a817
12,877
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! A private parser implementation of IPv4, IPv6, and socket addresses. //! //! This module is "publicly exported" through the `FromStr` implementations //! below. use prelude::v1::*; use error::Error; use fmt; #[allow(deprecated)] use net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; use str::FromStr; struct Parser<'a> { // parsing as ASCII, so can use byte array s: &'a [u8], pos: usize, } impl<'a> Parser<'a> { fn new(s: &'a str) -> Parser<'a> { Parser { s: s.as_bytes(), pos: 0, } } fn is_eof(&self) -> bool { self.pos == self.s.len() } // Commit only if parser returns Some fn read_atomically<T, F>(&mut self, cb: F) -> Option<T> where F: FnOnce(&mut Parser) -> Option<T>, { let pos = self.pos; let r = cb(self); if r.is_none() { self.pos = pos; } r } // Commit only if parser read till EOF fn read_till_eof<T, F>(&mut self, cb: F) -> Option<T> where F: FnOnce(&mut Parser) -> Option<T>, { self.read_atomically(move |p| { match cb(p) { Some(x) => if p.is_eof() {Some(x)} else {None}, None => None, } }) } // Return result of first successful parser fn read_or<T>(&mut self, parsers: &mut [Box<FnMut(&mut Parser) -> Option<T> + 'static>]) -> Option<T> { for pf in parsers { match self.read_atomically(|p: &mut Parser| pf(p)) { Some(r) => return Some(r), None => {} } } None } // Apply 3 parsers sequentially fn read_seq_3<A, B, C, PA, PB, PC>(&mut self, pa: PA, pb: PB, pc: PC) -> Option<(A, B, C)> where PA: FnOnce(&mut Parser) -> Option<A>, PB: FnOnce(&mut Parser) -> Option<B>, PC: FnOnce(&mut Parser) -> Option<C>, { self.read_atomically(move |p| { let a = pa(p); let b = if a.is_some() { pb(p) } else { None }; let c = if b.is_some() { pc(p) } else { None }; match (a, b, c) { (Some(a), Some(b), Some(c)) => Some((a, b, c)), _ => None } }) } // Read next char fn read_char(&mut self) -> Option<char> { if self.is_eof() { None } else { let r = self.s[self.pos] as char; self.pos += 1; Some(r) } } // Return char and advance iff next char is equal to requested fn read_given_char(&mut self, c: char) -> Option<char> { self.read_atomically(|p| { match p.read_char() { Some(next) if next == c => Some(next), _ => None, } }) } // Read digit fn read_digit(&mut self, radix: u8) -> Option<u8> { fn parse_digit(c: char, radix: u8) -> Option<u8> { let c = c as u8; // assuming radix is either 10 or 16 if c >= b'0' && c <= b'9' { Some(c - b'0') } else if radix > 10 && c >= b'a' && c < b'a' + (radix - 10) { Some(c - b'a' + 10) } else if radix > 10 && c >= b'A' && c < b'A' + (radix - 10) { Some(c - b'A' + 10) } else { None } } self.read_atomically(|p| { p.read_char().and_then(|c| parse_digit(c, radix)) }) } fn read_number_impl(&mut self, radix: u8, max_digits: u32, upto: u32) -> Option<u32> { let mut r = 0; let mut digit_count = 0; loop { match self.read_digit(radix) { Some(d) => { r = r * (radix as u32) + (d as u32); digit_count += 1; if digit_count > max_digits || r >= upto { return None } } None => { if digit_count == 0 { return None } else { return Some(r) } } }; } } // Read number, failing if max_digits of number value exceeded fn 
read_number(&mut self, radix: u8, max_digits: u32, upto: u32) -> Option<u32> { self.read_atomically(|p| p.read_number_impl(radix, max_digits, upto)) } fn read_ipv4_addr_impl(&mut self) -> Option<Ipv4Addr> { let mut bs = [0; 4]; let mut i = 0; while i < 4 { if i != 0 && self.read_given_char('.').is_none() { return None; } let octet = self.read_number(10, 3, 0x100).map(|n| n as u8); match octet { Some(d) => bs[i] = d, None => return None, }; i += 1; } Some(Ipv4Addr::new(bs[0], bs[1], bs[2], bs[3])) } // Read IPv4 address fn read_ipv4_addr(&mut self) -> Option<Ipv4Addr> { self.read_atomically(|p| p.read_ipv4_addr_impl()) } fn read_ipv6_addr_impl(&mut self) -> Option<Ipv6Addr> { fn ipv6_addr_from_head_tail(head: &[u16], tail: &[u16]) -> Ipv6Addr { assert!(head.len() + tail.len() <= 8); let mut gs = [0; 8]; gs[..head.len()].clone_from_slice(head); gs[(8 - tail.len()) .. 8].clone_from_slice(tail); Ipv6Addr::new(gs[0], gs[1], gs[2], gs[3], gs[4], gs[5], gs[6], gs[7]) } fn read_groups(p: &mut Parser, groups: &mut [u16; 8], limit: usize) -> (usize, bool) { let mut i = 0; while i < limit { if i < limit - 1 { let ipv4 = p.read_atomically(|p| { if i == 0 || p.read_given_char(':').is_some() { p.read_ipv4_addr() } else { None } }); if let Some(v4_addr) = ipv4 { let octets = v4_addr.octets(); groups[i + 0] = ((octets[0] as u16) << 8) | (octets[1] as u16); groups[i + 1] = ((octets[2] as u16) << 8) | (octets[3] as u16); return (i + 2, true); } } let group = p.read_atomically(|p| { if i == 0 || p.read_given_char(':').is_some() { p.read_number(16, 4, 0x10000).map(|n| n as u16) } else { None } }); match group { Some(g) => groups[i] = g, None => return (i, false) } i += 1; } (i, false) } let mut head = [0; 8]; let (head_size, head_ipv4) = read_groups(self, &mut head, 8); if head_size == 8 { return Some(Ipv6Addr::new( head[0], head[1], head[2], head[3], head[4], head[5], head[6], head[7])) } // IPv4 part is not allowed before `::` if head_ipv4 { return None } // read `::` if previous code parsed less than 8 groups if !self.read_given_char(':').is_some() || !self.read_given_char(':').is_some() { return None; } let mut tail = [0; 8]; let (tail_size, _) = read_groups(self, &mut tail, 8 - head_size); Some(ipv6_addr_from_head_tail(&head[..head_size], &tail[..tail_size])) } fn read_ipv6_addr(&mut self) -> Option<Ipv6Addr> { self.read_atomically(|p| p.read_ipv6_addr_impl()) } #[allow(deprecated)] fn read_ip_addr(&mut self) -> Option<IpAddr> { let ipv4_addr = |p: &mut Parser| p.read_ipv4_addr().map(IpAddr::V4); let ipv6_addr = |p: &mut Parser| p.read_ipv6_addr().map(IpAddr::V6); self.read_or(&mut [Box::new(ipv4_addr), Box::new(ipv6_addr)]) } fn read_socket_addr_v4(&mut self) -> Option<SocketAddrV4> { let ip_addr = |p: &mut Parser| p.read_ipv4_addr(); let colon = |p: &mut Parser| p.read_given_char(':'); let port = |p: &mut Parser| { p.read_number(10, 5, 0x10000).map(|n| n as u16) }; self.read_seq_3(ip_addr, colon, port).map(|t| { let (ip, _, port): (Ipv4Addr, char, u16) = t; SocketAddrV4::new(ip, port) }) } fn read_socket_addr_v6(&mut self) -> Option<SocketAddrV6> { let ip_addr = |p: &mut Parser| { let open_br = |p: &mut Parser| p.read_given_char('['); let ip_addr = |p: &mut Parser| p.read_ipv6_addr(); let clos_br = |p: &mut Parser| p.read_given_char(']'); p.read_seq_3(open_br, ip_addr, clos_br).map(|t| t.1) }; let colon = |p: &mut Parser| p.read_given_char(':'); let port = |p: &mut Parser| { p.read_number(10, 5, 0x10000).map(|n| n as u16) }; self.read_seq_3(ip_addr, colon, port).map(|t| { let (ip, _, port): (Ipv6Addr, 
char, u16) = t; SocketAddrV6::new(ip, port, 0, 0) }) } fn read_socket_addr(&mut self) -> Option<SocketAddr> { let v4 = |p: &mut Parser| p.read_socket_addr_v4().map(SocketAddr::V4); let v6 = |p: &mut Parser| p.read_socket_addr_v6().map(SocketAddr::V6); self.read_or(&mut [Box::new(v4), Box::new(v6)]) } } #[stable(feature = "rust1", since = "1.0.0")] #[allow(deprecated)] impl FromStr for IpAddr { type Err = AddrParseError; fn from_str(s: &str) -> Result<IpAddr, AddrParseError> { match Parser::new(s).read_till_eof(|p| p.read_ip_addr()) { Some(s) => Ok(s), None => Err(AddrParseError(())) } } } #[stable(feature = "rust1", since = "1.0.0")] impl FromStr for Ipv4Addr { type Err = AddrParseError; fn from_str(s: &str) -> Result<Ipv4Addr, AddrParseError> { match Parser::new(s).read_till_eof(|p| p.read_ipv4_addr()) { Some(s) => Ok(s), None => Err(AddrParseError(())) } } } #[stable(feature = "rust1", since = "1.0.0")] impl FromStr for Ipv6Addr { type Err = AddrParseError; fn from_str(s: &str) -> Result<Ipv6Addr, AddrParseError> { match Parser::new(s).read_till_eof(|p| p.read_ipv6_addr()) { Some(s) => Ok(s), None => Err(AddrParseError(())) } } } #[stable(feature = "socket_addr_from_str", since = "1.5.0")] impl FromStr for SocketAddrV4 { type Err = AddrParseError; fn from_str(s: &str) -> Result<SocketAddrV4, AddrParseError> { match Parser::new(s).read_till_eof(|p| p.read_socket_addr_v4()) { Some(s) => Ok(s), None => Err(AddrParseError(())), } } } #[stable(feature = "socket_addr_from_str", since = "1.5.0")] impl FromStr for SocketAddrV6 { type Err = AddrParseError; fn from_str(s: &str) -> Result<SocketAddrV6, AddrParseError> { match Parser::new(s).read_till_eof(|p| p.read_socket_addr_v6()) { Some(s) => Ok(s), None => Err(AddrParseError(())), } } } #[stable(feature = "rust1", since = "1.0.0")] impl FromStr for SocketAddr { type Err = AddrParseError; fn from_str(s: &str) -> Result<SocketAddr, AddrParseError> { match Parser::new(s).read_till_eof(|p| p.read_socket_addr()) { Some(s) => Ok(s), None => Err(AddrParseError(())), } } } /// An error returned when parsing an IP address or a socket address. #[stable(feature = "rust1", since = "1.0.0")] #[derive(Debug, Clone, PartialEq)] pub struct AddrParseError(()); #[stable(feature = "addr_parse_error_error", since = "1.4.0")] impl fmt::Display for AddrParseError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.write_str(self.description()) } } #[stable(feature = "addr_parse_error_error", since = "1.4.0")] impl Error for AddrParseError { fn description(&self) -> &str { "invalid IP address syntax" } }
32.6
92
0.488701
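The parser above is only reachable through its `FromStr` impls, so a short usage sketch (standard library only) shows the surface it backs:

```rust
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr};

fn main() {
    // Each parse below bottoms out in Parser::read_till_eof above.
    let v4: Ipv4Addr = "127.0.0.1".parse().unwrap();
    let v6: Ipv6Addr = "::1".parse().unwrap();
    let sa: SocketAddr = "[2001:db8::1]:8080".parse().unwrap();
    assert!(v4.is_loopback() && v6.is_loopback());
    assert_eq!(sa.port(), 8080);
    // Trailing input fails because read_till_eof demands EOF.
    assert!("127.0.0.1x".parse::<Ipv4Addr>().is_err());
}
```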
3a022e6b02fa4d648124f69478ad7d7d09bd79d5
238
use std::path::{Path, PathBuf};

fn main() {
    let path = Path::new("/tmp/foo/bar.txt");
    println!("{}", path);
    //~^ ERROR E0277

    let path = PathBuf::from("/tmp/foo/bar.txt");
    println!("{}", path);
    //~^ ERROR E0277
}
19.833333
49
0.52521
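The test above hits E0277 on purpose: `Path` and `PathBuf` do not implement `Display`. For reference, the usual fix (not part of the test itself) is the `.display()` adapter:

```rust
use std::path::{Path, PathBuf};

fn main() {
    let path = Path::new("/tmp/foo/bar.txt");
    println!("{}", path.display()); // Display wrapper for possibly non-UTF-8 paths

    let path = PathBuf::from("/tmp/foo/bar.txt");
    println!("{}", path.display());
}
```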
f72bbda9d677547ef230fc2547c51477899bd1ce
1,970
pub type Type = usize;

// TODO(#145): Separate Delete Character and Delete Item actions
pub const UP: Type = 0;
pub const DOWN: Type = 1;
pub const LEFT: Type = 2;
pub const RIGHT: Type = 3;
pub const HOME: Type = 4;
pub const INSERT_AFTER_ITEM: Type = 5;
pub const INSERT_BEFORE_ITEM: Type = 6;
pub const DELETE: Type = 7;
pub const BACK_DELETE: Type = 8;
pub const EDIT_ITEM: Type = 9;
pub const DUP_AFTER_ITEM: Type = 10;
pub const DUP_BEFORE_ITEM: Type = 11;
pub const TOGGLE_PROFILE_PANEL: Type = 12;
pub const QUIT: Type = 13;
pub const FOCUS_FORWARD: Type = 14;
pub const FOCUS_BACKWARD: Type = 15;
pub const ACCEPT: Type = 16;
pub const CANCEL: Type = 17;
pub const RUN: Type = 18;
pub const RUN_INTO_ITSELF: Type = 19;
pub const RERUN: Type = 20;
pub const BACK: Type = 21;
pub const NEXT_MATCH: Type = 22;
pub const PREV_MATCH: Type = 23;
pub const EDIT_CMDLINE: Type = 24;
pub const OPEN_KEY_MAP_SETTINGS: Type = 25;
pub const START_SEARCH: Type = 26;
pub const JUMP_TO_START: Type = 27;
pub const JUMP_TO_END: Type = 28;
pub const NEXT_SEARCH_MATCH: Type = 29;
pub const PREV_SEARCH_MATCH: Type = 30;
pub const LEN: usize = 31;

pub const NAMES: [&str; LEN] = [
    "up", "down", "left", "right", "home",
    "insert_after_item", "insert_before_item", "delete", "back_delete",
    "edit_item", "dup_after_item", "dup_before_item", "toggle_profile_panel",
    "quit", "focus_forward", "focus_backward", "accept", "cancel",
    "run", "run_into_itself", "rerun", "back", "next_match", "prev_match",
    "edit_cmdline", "open_key_map_settings", "start_search",
    "jump_to_start", "jump_to_end", "next_search_match", "prev_search_match",
];

pub fn from_str(s: &str) -> Result<Type, String> {
    for (action, name) in NAMES.iter().enumerate() {
        if *name == s {
            return Ok(action);
        }
    }
    Err(format!("Unknown action `{}`", s))
}
24.625
64
0.650254
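Since `from_str` is a linear scan of `NAMES`, every name round-trips to its index. A small sketch, assuming the module above is in scope as `action`:

```rust
fn demo() {
    assert_eq!(action::from_str("up"), Ok(action::UP));
    assert_eq!(action::from_str("prev_search_match"), Ok(action::PREV_SEARCH_MATCH));
    assert!(action::from_str("no_such_action").is_err());
    // The inverse direction is a plain index into NAMES.
    assert_eq!(action::NAMES[action::QUIT], "quit");
}
```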
62c1fb960bbb63e751673cd61fcbfbde085a1252
5,859
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT use ffi; use glib; use glib::translate::*; use glib_ffi; use gobject_ffi; use gst; use std::mem; use std::ptr; glib_wrapper! { pub struct Adapter(Object<ffi::GstAdapter, ffi::GstAdapterClass>); match fn { get_type => || ffi::gst_adapter_get_type(), } } impl Adapter { pub fn new() -> Adapter { assert_initialized_main_thread!(); unsafe { from_glib_full(ffi::gst_adapter_new()) } } pub fn available(&self) -> usize { unsafe { ffi::gst_adapter_available(self.to_glib_none().0) } } pub fn available_fast(&self) -> usize { unsafe { ffi::gst_adapter_available_fast(self.to_glib_none().0) } } pub fn clear(&self) { unsafe { ffi::gst_adapter_clear(self.to_glib_none().0); } } pub fn copy_bytes(&self, offset: usize, size: usize) -> Option<glib::Bytes> { unsafe { from_glib_full(ffi::gst_adapter_copy_bytes(self.to_glib_none().0, offset, size)) } } pub fn distance_from_discont(&self) -> u64 { unsafe { ffi::gst_adapter_distance_from_discont(self.to_glib_none().0) } } #[cfg(any(feature = "v1_10", feature = "dox"))] pub fn dts_at_discont(&self) -> gst::ClockTime { unsafe { from_glib(ffi::gst_adapter_dts_at_discont(self.to_glib_none().0)) } } pub fn flush(&self, flush: usize) { unsafe { ffi::gst_adapter_flush(self.to_glib_none().0, flush); } } pub fn get_buffer(&self, nbytes: usize) -> Option<gst::Buffer> { unsafe { from_glib_full(ffi::gst_adapter_get_buffer(self.to_glib_none().0, nbytes)) } } pub fn get_buffer_fast(&self, nbytes: usize) -> Option<gst::Buffer> { unsafe { from_glib_full(ffi::gst_adapter_get_buffer_fast(self.to_glib_none().0, nbytes)) } } pub fn get_buffer_list(&self, nbytes: usize) -> Option<gst::BufferList> { unsafe { from_glib_full(ffi::gst_adapter_get_buffer_list(self.to_glib_none().0, nbytes)) } } pub fn get_list(&self, nbytes: usize) -> Vec<gst::Buffer> { unsafe { FromGlibPtrContainer::from_glib_full(ffi::gst_adapter_get_list(self.to_glib_none().0, nbytes)) } } pub fn masked_scan_uint32(&self, mask: u32, pattern: u32, offset: usize, size: usize) -> isize { unsafe { ffi::gst_adapter_masked_scan_uint32(self.to_glib_none().0, mask, pattern, offset, size) } } pub fn masked_scan_uint32_peek(&self, mask: u32, pattern: u32, offset: usize, size: usize) -> (isize, u32) { unsafe { let mut value = mem::uninitialized(); let ret = ffi::gst_adapter_masked_scan_uint32_peek(self.to_glib_none().0, mask, pattern, offset, size, &mut value); (ret, value) } } #[cfg(any(feature = "v1_10", feature = "dox"))] pub fn offset_at_discont(&self) -> u64 { unsafe { ffi::gst_adapter_offset_at_discont(self.to_glib_none().0) } } pub fn prev_dts(&self) -> (gst::ClockTime, u64) { unsafe { let mut distance = mem::uninitialized(); let ret = from_glib(ffi::gst_adapter_prev_dts(self.to_glib_none().0, &mut distance)); (ret, distance) } } pub fn prev_dts_at_offset(&self, offset: usize) -> (gst::ClockTime, u64) { unsafe { let mut distance = mem::uninitialized(); let ret = from_glib(ffi::gst_adapter_prev_dts_at_offset(self.to_glib_none().0, offset, &mut distance)); (ret, distance) } } #[cfg(any(feature = "v1_10", feature = "dox"))] pub fn prev_offset(&self) -> (u64, u64) { unsafe { let mut distance = mem::uninitialized(); let ret = ffi::gst_adapter_prev_offset(self.to_glib_none().0, &mut distance); (ret, distance) } } pub fn prev_pts(&self) -> (gst::ClockTime, u64) { unsafe { let mut distance = mem::uninitialized(); let ret = from_glib(ffi::gst_adapter_prev_pts(self.to_glib_none().0, &mut distance)); 
(ret, distance) } } pub fn prev_pts_at_offset(&self, offset: usize) -> (gst::ClockTime, u64) { unsafe { let mut distance = mem::uninitialized(); let ret = from_glib(ffi::gst_adapter_prev_pts_at_offset(self.to_glib_none().0, offset, &mut distance)); (ret, distance) } } #[cfg(any(feature = "v1_10", feature = "dox"))] pub fn pts_at_discont(&self) -> gst::ClockTime { unsafe { from_glib(ffi::gst_adapter_pts_at_discont(self.to_glib_none().0)) } } pub fn take_buffer(&self, nbytes: usize) -> Option<gst::Buffer> { unsafe { from_glib_full(ffi::gst_adapter_take_buffer(self.to_glib_none().0, nbytes)) } } pub fn take_buffer_fast(&self, nbytes: usize) -> Option<gst::Buffer> { unsafe { from_glib_full(ffi::gst_adapter_take_buffer_fast(self.to_glib_none().0, nbytes)) } } pub fn take_buffer_list(&self, nbytes: usize) -> Option<gst::BufferList> { unsafe { from_glib_full(ffi::gst_adapter_take_buffer_list(self.to_glib_none().0, nbytes)) } } pub fn take_list(&self, nbytes: usize) -> Vec<gst::Buffer> { unsafe { FromGlibPtrContainer::from_glib_full(ffi::gst_adapter_take_list(self.to_glib_none().0, nbytes)) } } } impl Default for Adapter { fn default() -> Self { Self::new() } }
29.892857
127
0.584229
2150c44c55920f443242409c97c0e0305ad3ba27
1,717
use crate::{notes_to_busses, Bus, Input}; #[test] fn test_note_to_busses() { let busses = notes_to_busses("7,13,x,x,59,x,31,19"); assert_eq!(busses[0].as_ref().unwrap().id, 7); assert_eq!(busses[1].as_ref().unwrap().id, 13); assert!(busses[2].is_none()); assert!(busses[3].is_none()); assert_eq!(busses[4].as_ref().unwrap().id, 59); } #[test] fn test_time_since_last_departure() { let bus = Bus{id: 7, offset: 0}; assert_eq!(bus.time_since_last_departure(939), 1); let bus = Bus{id: 13, offset: 0}; assert_eq!(bus.time_since_last_departure(939), 3); let bus = Bus{id: 59, offset: 0}; assert_eq!(bus.time_since_last_departure(939), 54); let bus = Bus{id: 31, offset: 0}; assert_eq!(bus.time_since_last_departure(939), 9); let bus = Bus{id: 19, offset: 0}; assert_eq!(bus.time_since_last_departure(939), 8); } #[test] fn test_time_till_next_departure() { let bus = Bus{id: 7, offset: 0}; assert_eq!(bus.time_till_next_departure(939), 6); let bus = Bus{id: 13, offset: 0}; assert_eq!(bus.time_till_next_departure(939), 10); let bus = Bus{id: 59, offset: 0}; assert_eq!(bus.time_till_next_departure(939), 5); let bus = Bus{id: 31, offset: 0}; assert_eq!(bus.time_till_next_departure(939), 22); let bus = Bus{id: 19, offset: 0}; assert_eq!(bus.time_till_next_departure(939), 11); } #[test] fn test_get_earliest_available_bus_and_wait_time() { let input = Input { earliest_arrival_timestamp: 939, busses: notes_to_busses("7,13,x,x,59,x,31,19") }; let (bus, wait_time) = input.get_earliest_available_bus_and_wait_time(); assert_eq!(bus.id, 59); assert_eq!(wait_time, 5); }
29.603448
76
0.655795
29478d2a3af2849833cc7bc299d0e26f3f71190c
3,813
use crate::command_prelude::*;

use cargo::core::Verbosity;
use cargo::ops::{self, CompileFilter};

pub fn cli() -> App {
    subcommand("run")
        // subcommand aliases are handled in aliased_command()
        // .alias("r")
        .setting(AppSettings::TrailingVarArg)
        .about("Run a binary or example of the local package")
        .arg(opt("quiet", "No output printed to stdout").short("q"))
        .arg(Arg::with_name("args").multiple(true))
        .arg_targets_bin_example(
            "Name of the bin target to run",
            "Name of the example target to run",
        )
        .arg_package("Package with the target to run")
        .arg_jobs()
        .arg_release("Build artifacts in release mode, with optimizations")
        .arg_profile("Build artifacts with the specified profile")
        .arg_features()
        .arg_target_triple("Build for the target triple")
        .arg_target_dir()
        .arg_manifest_path()
        .arg_message_format()
        .after_help(
            "\
If neither `--bin` nor `--example` are given, then if the package only has one
bin target it will be run. Otherwise `--bin` specifies the bin target to run,
and `--example` specifies the example target to run. At most one of `--bin` or
`--example` can be provided.

All the arguments following the two dashes (`--`) are passed to the binary to
run. If you're passing arguments to both Cargo and the binary, the ones after
`--` go to the binary, the ones before go to Cargo.
",
        )
}

pub fn exec(config: &mut Config, args: &ArgMatches<'_>) -> CliResult {
    let ws = args.workspace(config)?;

    let mut compile_opts = args.compile_options(
        config,
        CompileMode::Build,
        Some(&ws),
        ProfileChecking::Checked,
    )?;

    if !args.is_present("example") && !args.is_present("bin") {
        let default_runs: Vec<_> = compile_opts
            .spec
            .get_packages(&ws)?
            .iter()
            .filter_map(|pkg| pkg.manifest().default_run())
            .collect();
        if default_runs.len() == 1 {
            compile_opts.filter = CompileFilter::from_raw_arguments(
                false,
                vec![default_runs[0].to_owned()],
                false,
                vec![],
                false,
                vec![],
                false,
                vec![],
                false,
                false,
            );
        } else {
            // ops::run will take care of errors if len pkgs != 1.
            compile_opts.filter = CompileFilter::Default {
                // Force this to false because the code in ops::run is not
                // able to pre-check features before compilation starts to
                // enforce that only 1 binary is built.
                required_features_filterable: false,
            };
        }
    };

    match ops::run(&ws, &compile_opts, &values_os(args, "args"))? {
        None => Ok(()),
        Some(err) => {
            // If we never actually spawned the process then that sounds pretty
            // bad and we always want to forward that up.
            let exit = match err.exit {
                Some(exit) => exit,
                None => return Err(CliError::new(err.into(), 101)),
            };

            // If `-q` was passed then we suppress extra error information about
            // a failed process, we assume the process itself printed out enough
            // information about why it failed so we don't do so as well
            let exit_code = exit.code().unwrap_or(101);
            let is_quiet = config.shell().verbosity() == Verbosity::Quiet;
            Err(if is_quiet {
                CliError::code(exit_code)
            } else {
                CliError::new(err.into(), exit_code)
            })
        }
    }
}
36.663462
80
0.55573
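The `default_run` branch in `exec` above only fires when `--bin`/`--example` are absent and exactly one selected package sets `default-run`. A hedged sketch in this repo's own test style (package and binary names are illustrative; on this vintage of Cargo the manifest key was still feature-gated, hence the nightly masquerade):

```rust
let p = project()
    .file(
        "Cargo.toml",
        r#"
            cargo-features = ["default-run"]

            [package]
            name = "foo"
            version = "0.0.1"
            default-run = "a"

            [[bin]]
            name = "a"

            [[bin]]
            name = "b"
        "#,
    ).file("src/bin/a.rs", r#"fn main() { println!("run a"); }"#)
    .file("src/bin/b.rs", r#"fn main() { println!("run b"); }"#)
    .build();

// Without default-run, two binaries would force an explicit --bin choice.
p.cargo("run").masquerade_as_nightly_cargo().with_stdout("run a").run();
```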
f7d3d3e2b1fc14f44331de0ad0856ca3461fea5f
2,015
use bigint::uint::U256;

// TODO Make this public
use ed25519_fun::curve25519::field_element::FieldElement;
use ed25519_fun::curve25519::field_element::*;
use ed25519_fun::curve25519::group_element::*;
use itertools::Itertools;
use num_bigint::BigUint;
use std::ops::BitXor;

// TODO Make this public
use ed25519_compact::curve25519::*;

// Ed25519 base point.
pub fn base_point_v1() {
    // m = 4
    let mut m = FieldElement([4, 0, 0, 0, 0]);
    // n = 5
    let mut n = FieldElement([5, 0, 0, 0, 0]);

    // n^{-1}
    let invert_n = n.invert();
    println!("Inverted n: {:020}", invert_n.0.iter().format("\n"));

    // m * n^{-1}
    let y = m * invert_n;
    println!("m * n-1 n: {:020}", y.0.iter().format("\n"));

    // Encode FieldElement y
    let encoded_y = FieldElement::encode(&y);
    println!("Encoded y: {}", encoded_y.iter().format("\n"));

    // Decode point with y
    let mut decoded_P3 = P3::decode(encoded_y).unwrap();
    println!("P3 point: {:020}", decoded_P3.X.0.iter().format("-/-"));
    // println!("X: {:020}", FieldElement::reduce(decoded_P3.X.0).0.iter().format("\n"));
    // println!("Y: {:020}", decoded_P3.Y.0.iter().format("\n"));
}

pub fn base_point_v2() {
    // m = 4
    let mut m = Fe([4, 0, 0, 0, 0]);
    // n = 5
    let mut n = Fe([5, 0, 0, 0, 0]);

    // n^{-1}
    let invert_n = n.invert();
    println!("Inverted n: {:020}", invert_n.0.iter().format("\n"));

    // m * n^{-1}
    let y = m * invert_n;
    println!("m * n-1 n: {:020}", y.0.iter().format("\n"));

    // Encode FieldElement y
    let mut out = [0u8; 32];
    fiat_25519_to_bytes(&mut out, &y.0);
    println!("Encoded y: {:020}", out.iter().format("-/-"));

    // Decode point with y
    let mut decoded_P3 = GeP3::from_bytes_negate_vartime(&out).unwrap();
    println!("P3 point: {:020}", decoded_P3.x.0.iter().format("-/-"));
    // println!("X: {:020}", FieldElement::reduce(decoded_P3.X.0).0.iter().format("\n"));
    // println!("Y: {:020}", decoded_P3.Y.0.iter().format("\n"));
}
30.074627
89
0.572208
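Both functions above compute y = 4 * 5^(-1) mod p, the y-coordinate of the Ed25519 base point from RFC 8032, in two different field implementations. As a sanity check one can compare the 32-byte little-endian encoding against the well-known constant; a hedged sketch, meant to sit at the end of `base_point_v1` where `encoded_y` is in scope:

```rust
// y = 4/5 mod p is 0x6666...6658 big-endian, so the little-endian
// byte encoding is 0x58 followed by 31 bytes of 0x66 (sign bit clear).
let mut expected = [0x66u8; 32];
expected[0] = 0x58;
assert_eq!(&encoded_y[..], &expected[..]);
```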
f556b183087b525464ba3137177330f03a43e703
894
use super::impl_prelude::*;

impl Http {
    // onboarding
    pub async fn get_onboarding(&self) -> Result<OnboardingStatus> {
        Ok(self
            .client_user_session_auth_type()
            .get(ep!(self, "/onboard/hello"))
            .send()
            .await?
            .error_for_status()?
            .json()
            .await?)
    }

    pub async fn complete_onboarding(&self, username: &str) -> Result {
        #[derive(serde::Serialize)]
        struct CompleteOnboardingRequest<'a> {
            username: &'a str,
        }

        self.client_user_session_auth_type()
            .post(ep!(self, "/onboard/complete"))
            .json(&CompleteOnboardingRequest { username })
            .send()
            .await?
            .error_for_status()?;

        Ok(())
    }
}

#[derive(serde::Deserialize)]
pub struct OnboardingStatus {
    pub onboarding: bool,
}
24.162162
71
0.534676
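A hedged calling sketch for the two endpoints above. The `Http` constructor and session wiring are not shown in this file, so `http` stands in for an already-configured client:

```rust
// Hypothetical async usage; `Http` construction happens elsewhere in the crate.
async fn onboard_if_needed(http: &Http, username: &str) -> Result {
    let status = http.get_onboarding().await?;
    if status.onboarding {
        // POSTs { "username": ... } to /onboard/complete, per the method above.
        http.complete_onboarding(username).await?;
    }
    Ok(())
}
```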
4ac79dd6498583889929f434263b9dcbbe72780f
5,244
use crate::helpers::{ once_cell::unsync::Lazy, Deref, IntoIterator, }; use proc_macro::TokenStream; use proc_macro2::TokenStream as TokenStream2; use proc_macro_error::*; use quote::{ quote, ToTokens, }; use std::iter; use syn::{ braced, parse::{ Parse, ParseStream, Result, }, punctuated::Punctuated, spanned::Spanned, Attribute, Expr, ExprPath, ExprType, Ident, Token, Type, }; #[allow(clippy::declare_interior_mutable_const)] const GLOBAL_ATTRS: Lazy<Punctuated<ExprType, Token![,]>> = Lazy::new(|| { let def = syn::parse_str::<TokenStream2>( "GlobalAttributes { accesskey: attributes::SpacedSet<String>, class: attributes::SpacedSet<attributes::Class>, id: attributes::Id, lang: attributes::LanguageTag, title: String }", ) .and_then(syn::parse2::<ElementDefinition>) .expect_or_abort("Failed to parse global attributes definitions."); match def { ElementDefinition::Struct { fields, .. } => fields, ElementDefinition::Unit { ident, .. } => { abort!(ident.span(), "Invalid global attributes definitions.") } } }); fn global_attributes<'a>() -> Box<dyn Iterator<Item = ExprType> + 'a> { let fields = GLOBAL_ATTRS; box (*fields).clone().into_iter() } #[derive(Debug)] struct ElementDefinitionField(Vec<Attribute>, Ident, Box<Type>); impl ToTokens for ElementDefinitionField { fn to_tokens(&self, tokens: &mut TokenStream2) { let ElementDefinitionField(ref attrs, ref name, ref ty) = *self; tokens.extend(quote! { #(#attrs)* #name: Option<#ty>, }) } } #[derive(Debug)] pub enum ElementDefinition { Unit { attrs: Vec<Attribute>, ident: Ident, }, Struct { attrs: Vec<Attribute>, ident: Ident, fields: Punctuated<ExprType, Token![,]>, }, } impl ElementDefinition { fn attrs(&self) -> &Vec<Attribute> { match self { Self::Unit { attrs, .. } => attrs, Self::Struct { attrs, .. } => attrs, } } fn ident(&self) -> &Ident { match self { Self::Unit { ident, .. } => ident, Self::Struct { ident, .. } => ident, } } fn own_fields<'a>(&'a self) -> Box<dyn Iterator<Item = ExprType> + 'a> { match self { Self::Struct { fields, .. } => box fields.clone().into_iter(), _ => box iter::empty::<ExprType>(), } } fn fields<'a>(&'a self) -> Box<dyn Iterator<Item = ElementDefinitionField> + 'a> { let iter = global_attributes().chain(self.own_fields()); box iter.map(|field| { let (attrs, name) = match *field.expr { Expr::Path(ExprPath { path, qself, attrs }) if qself.is_none() && path.segments.len() == 1 => { (attrs, path.segments[0].ident.clone()) } e => abort!(e.span(), "Invalid field name."), }; let ty = field.ty; ElementDefinitionField(attrs, name, ty) }) } } impl ToTokens for ElementDefinition { fn to_tokens(&self, tokens: &mut TokenStream2) { let attrs = self.attrs(); let ident = self.ident(); let fields = self.fields(); let doc = format!(" {} element.", ident); tokens.extend(quote! { #[doc = #doc] #[derive(Element, Clone, Default, Debug)] #(#attrs)* pub struct #ident<C> { #(#fields)* _c: std::marker::PhantomData<C> } }) } } impl Parse for ElementDefinition { fn parse(input: ParseStream<'_>) -> Result<Self> { let attrs = input.call(Attribute::parse_outer)?; let ident = input.parse()?; if input.peek(Token![,]) { Ok(Self::Unit { attrs, ident }) } else { let content; let _ = braced!(content in input); let fields = content.parse_terminated(ExprType::parse)?; Ok(Self::Struct { attrs, ident, fields, }) } } } impl From<ElementDefinition> for TokenStream { fn from(definition: ElementDefinition) -> Self { Self::from(quote! 
{ #definition }) } } #[derive(IntoIterator, Deref, Debug)] pub struct ElementDefinitions(Punctuated<ElementDefinition, Token![,]>); impl Parse for ElementDefinitions { fn parse(input: ParseStream<'_>) -> Result<Self> { let definitions = input.parse_terminated(ElementDefinition::parse)?; Ok(Self(definitions)) } } impl ToTokens for ElementDefinitions { fn to_tokens(&self, tokens: &mut TokenStream2) { let definitions = self.iter(); tokens.extend(quote! { #(#definitions)* }) } } impl From<ElementDefinitions> for TokenStream { fn from(definitions: ElementDefinitions) -> Self { Self::from(quote! { #definitions }) } }
26.755102
86
0.53585
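For orientation, this is the macro-input shape the `Parse` impls above accept: a comma-separated list whose elements are either a bare ident (unit form) or an ident followed by a braced `name: Type` list (struct form). Element and attribute names below are illustrative, not from the crate:

```rust
// What ElementDefinitions::parse would consume (illustrative input):
//
//     Br,
//     A {
//         /// Address of the hyperlink.
//         href: String,
//     },
//
// Each struct-form field becomes `name: Option<Type>` via
// ElementDefinitionField::to_tokens, and the global attributes
// (accesskey, class, id, lang, title) are prepended to every element.
```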
ef89f924e2f825739d497d040dc8842197d09474
1,357
extern crate core; #[macro_use] extern crate hamcrest; extern crate tempdir; use std::io; use hamcrest::prelude::*; use log::{debug, info}; use tempdir::TempDir; #[test] fn should_load_plugin() { let temp_dir = TempDir::new("data").unwrap(); let ctx = core::init_context(temp_dir.path().to_str().unwrap()); let mut engine = core::init_engine(ctx); let plugins = engine.list(); assert_that!(plugins.len(), is(equal_to(0))); let file_content = std::fs::read_to_string("tests/files/1.xml").expect("read file fail"); engine.load(&file_content); let plugins = engine.list(); assert_that!(plugins.len(), is(equal_to(1))); let plugin = plugins.get(0).unwrap(); info!("plugin:{:?}", plugin); } #[test] fn should_get_hots() { let temp_dir = TempDir::new("data").unwrap(); let ctx = core::init_context(temp_dir.path().to_str().unwrap()); let mut engine = core::init_engine(ctx); let plugins = engine.list(); let file_content = std::fs::read_to_string("tests/files/1.xml").expect("read file fail"); engine.load(&file_content); let uid = "bc9e3e3b-c6e8-4d84-b976-81cce04e4a61"; let result = engine.find(uid); assert_that!(result.is_ok(), is(true)); let p = result.ok().unwrap(); let books = p.getHots(); assert_that!(books.len(), is(greater_than(1))); }
22.245902
93
0.645542
394f689b7d2c79340aa509e2ff1dbb5dc2a85606
1,593
mod date;

pub(crate) use date::fmt_http_date;
pub(crate) use date::parse_http_date;
pub(crate) use date::HttpDate;

use crate::{Error, Status, StatusCode};
use std::cmp::Ordering;
use std::str::FromStr;

/// Declares unstable items.
#[doc(hidden)]
macro_rules! cfg_unstable {
    ($($item:item)*) => {
        $(
            #[cfg(feature = "unstable")]
            #[cfg_attr(feature = "docs", doc(cfg(unstable)))]
            $item
        )*
    }
}

/// Parse a weight of the form `q=0.123`.
pub(crate) fn parse_weight(s: &str) -> crate::Result<f32> {
    let mut parts = s.split('=');
    if !matches!(parts.next(), Some("q")) {
        let mut err = Error::new_adhoc("invalid weight");
        err.set_status(StatusCode::BadRequest);
        return Err(err);
    }
    match parts.next() {
        Some(s) => {
            let weight = f32::from_str(s).status(400)?;
            Ok(weight)
        }
        None => {
            let mut err = Error::new_adhoc("invalid weight");
            err.set_status(StatusCode::BadRequest);
            Err(err)
        }
    }
}

/// Order proposals by weight. Try ordering by q value first. If equal or undefined,
/// order by index, favoring the latest provided value.
pub(crate) fn sort_by_weight<T: PartialOrd + Copy>(props: &mut Vec<T>) {
    let mut arr: Vec<(usize, T)> = props.iter().copied().enumerate().collect();
    arr.sort_unstable_by(|a, b| match b.1.partial_cmp(&a.1) {
        None | Some(Ordering::Equal) => b.0.cmp(&a.0),
        Some(ord) => ord,
    });
    *props = arr.into_iter().map(|(_, t)| t).collect::<Vec<T>>();
}
28.963636
84
0.569994
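A small sketch exercising the two helpers above; the weight grammar is exactly `q=<float>`, and ties in the sort favor the later entry:

```rust
// Assumes the helpers above are in scope (they are pub(crate) in this module).
fn demo() -> crate::Result<()> {
    assert_eq!(parse_weight("q=0.5")?, 0.5);
    assert!(parse_weight("w=0.5").is_err()); // wrong key maps to 400 Bad Request

    // Equal (or incomparable) weights keep the latest-provided value first.
    let mut props = vec![0.3_f32, 0.9, 0.9, 0.1];
    sort_by_weight(&mut props);
    assert_eq!(props, vec![0.9, 0.9, 0.3, 0.1]);
    Ok(())
}
```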
b9f88d87e573878376f7c5cfc3b6abcec7091583
1,800
use actix::prelude::*;
use std::{env, io, process::Command};

// Message delivered to the cluster actor.
#[derive(Message)]
#[rtype(result = "Result<bool, io::Error>")]
struct ClusterListener;

struct Actor4Cluster;

impl Actor for Actor4Cluster {
    type Context = Context<Self>;
}

impl Handler<ClusterListener> for Actor4Cluster {
    type Result = Result<bool, io::Error>;

    fn handle(&mut self, _msg: ClusterListener, _ctx: &mut Context<Self>) -> Self::Result {
        Ok(true)
    }
}

struct ClusterDockStdin;

impl ClusterDockStdin {
    // Reads the input path from argv[1] and the output path from argv[2],
    // defaulting the output to "<input>.conf".
    fn accept_stdin_argsv(&mut self) -> (String, String) {
        let argsv: Vec<String> = env::args().collect();
        let stdin = argsv.get(1).expect("missing input argument").clone();
        // TODO: default to config or shell file-type.
        let defaulting_filepath = format!("{}.conf", stdin);
        let saves_into = argsv.get(2).cloned().unwrap_or(defaulting_filepath);
        (stdin, saves_into)
    }

    fn stdin_argsv_and_then(&self) {}
}

struct BashscriptArgstdio {
    stdin_argsv: Vec<String>,
}

trait DefmoduleArgsv {
    fn convert(&self);
    fn parse(&self, stdin: &str);
}

impl DefmoduleArgsv for BashscriptArgstdio {
    fn convert(&self) {}

    fn parse(&self, _stdin: &str) {
        self.this();
    }
}

impl BashscriptArgstdio {
    // "How do I invoke a system command and capture its output?".
    // Available at: https://stackoverflow.com/a/25574952
    fn argstdin(&self) {
        // Run the script from /src and capture its output.
        let output = Command::new("sh")
            .arg("./composedockerargsv.sh")
            .current_dir("/src")
            .output()
            .expect("Initiating shellscript \"composedockerargsv.sh\" => [<FAILED>].");
        println!("status: {}", output.status);
        println!("stdout: {}", String::from_utf8_lossy(&output.stdout));
        println!("stderr: {}", String::from_utf8_lossy(&output.stderr));
        assert!(output.status.success());
    }

    fn this(&self) {}
}

#[derive(Debug, Clone, Copy)]
enum ArgsvOperations {
    Left,
    Right,
    Home,
    End,
    Newline,
    CarriageReturn,
    Backslash,
    DoNothing,
}
20.930233
130
0.673889
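A hedged smoke test for the actor above. The `System` bootstrap differs across actix releases, so this assumes an actix 0.13 style runtime:

```rust
// Hypothetical driver, kept outside the library code above.
fn main() {
    let sys = actix::System::new();
    sys.block_on(async {
        let addr = Actor4Cluster.start();
        // send() resolves to Result<Handler::Result, MailboxError>, hence two unwraps.
        let handled = addr.send(ClusterListener).await.unwrap().unwrap();
        assert!(handled);
    });
}
```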
f7fb2df98f12e881295690b903ef6b8e1738ab88
7,783
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::BWTR2 { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = r" Value of the field"] pub struct ACCMODR { bits: u8, } impl ACCMODR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Value of the field"] pub struct DATLATR { bits: u8, } impl DATLATR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Value of the field"] pub struct CLKDIVR { bits: u8, } impl CLKDIVR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Value of the field"] pub struct DATASTR { bits: u8, } impl DATASTR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Value of the field"] pub struct ADDHLDR { bits: u8, } impl ADDHLDR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Value of the field"] pub struct ADDSETR { bits: u8, } impl ADDSETR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Proxy"] pub struct _ACCMODW<'a> { w: &'a mut W, } impl<'a> _ACCMODW<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 3; const OFFSET: u8 = 28; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _DATLATW<'a> { w: &'a mut W, } impl<'a> _DATLATW<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 15; const OFFSET: u8 = 24; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _CLKDIVW<'a> { w: &'a mut W, } impl<'a> _CLKDIVW<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 15; const OFFSET: u8 = 20; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _DATASTW<'a> { w: &'a mut W, } impl<'a> _DATASTW<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 255; const OFFSET: u8 = 8; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _ADDHLDW<'a> { w: &'a mut W, } impl<'a> _ADDHLDW<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 15; const OFFSET: u8 = 4; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) 
<< OFFSET; self.w } } #[doc = r" Proxy"] pub struct _ADDSETW<'a> { w: &'a mut W, } impl<'a> _ADDSETW<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 15; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bits 28:29 - ACCMOD"] #[inline] pub fn accmod(&self) -> ACCMODR { let bits = { const MASK: u8 = 3; const OFFSET: u8 = 28; ((self.bits >> OFFSET) & MASK as u32) as u8 }; ACCMODR { bits } } #[doc = "Bits 24:27 - DATLAT"] #[inline] pub fn datlat(&self) -> DATLATR { let bits = { const MASK: u8 = 15; const OFFSET: u8 = 24; ((self.bits >> OFFSET) & MASK as u32) as u8 }; DATLATR { bits } } #[doc = "Bits 20:23 - CLKDIV"] #[inline] pub fn clkdiv(&self) -> CLKDIVR { let bits = { const MASK: u8 = 15; const OFFSET: u8 = 20; ((self.bits >> OFFSET) & MASK as u32) as u8 }; CLKDIVR { bits } } #[doc = "Bits 8:15 - DATAST"] #[inline] pub fn datast(&self) -> DATASTR { let bits = { const MASK: u8 = 255; const OFFSET: u8 = 8; ((self.bits >> OFFSET) & MASK as u32) as u8 }; DATASTR { bits } } #[doc = "Bits 4:7 - ADDHLD"] #[inline] pub fn addhld(&self) -> ADDHLDR { let bits = { const MASK: u8 = 15; const OFFSET: u8 = 4; ((self.bits >> OFFSET) & MASK as u32) as u8 }; ADDHLDR { bits } } #[doc = "Bits 0:3 - ADDSET"] #[inline] pub fn addset(&self) -> ADDSETR { let bits = { const MASK: u8 = 15; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u8 }; ADDSETR { bits } } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 268435455 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bits 28:29 - ACCMOD"] #[inline] pub fn accmod(&mut self) -> _ACCMODW { _ACCMODW { w: self } } #[doc = "Bits 24:27 - DATLAT"] #[inline] pub fn datlat(&mut self) -> _DATLATW { _DATLATW { w: self } } #[doc = "Bits 20:23 - CLKDIV"] #[inline] pub fn clkdiv(&mut self) -> _CLKDIVW { _CLKDIVW { w: self } } #[doc = "Bits 8:15 - DATAST"] #[inline] pub fn datast(&mut self) -> _DATASTW { _DATASTW { w: self } } #[doc = "Bits 4:7 - ADDHLD"] #[inline] pub fn addhld(&mut self) -> _ADDHLDW { _ADDHLDW { w: self } } #[doc = "Bits 0:3 - ADDSET"] #[inline] pub fn addset(&mut self) -> _ADDSETW { _ADDSETW { w: self } } }
25.025723
60
0.467943
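Typical svd2rust call pattern for the accessors above, hedged because peripheral ownership lives outside this file (`Fmc` is a placeholder for the generated block that owns BWTR2):

```rust
// Hypothetical helper; `Fmc` is not defined in this file.
fn configure_bwtr2(fmc: &Fmc) {
    // Read-modify-write: untouched fields keep their current values.
    fmc.bwtr2.modify(|_, w| unsafe { w.datast().bits(0xFF).addset().bits(0x3) });
    // write() starts from the reset value (0x0FFF_FFFF per W::reset_value above).
    fmc.bwtr2.write(|w| unsafe { w.clkdiv().bits(0b0001) });
    // Reads come back as typed field proxies with .bits().
    let _latency = fmc.bwtr2.read().datlat().bits();
}
```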
8935704535ee5a1a93bfe37aceb25fa5967f7c01
1,282
use super::constants; use std::num::Wrapping; pub const HASH_PRIME: u32 = 0x0100_0193; pub const HASH_INIT: u32 = 0x2802_1967; #[derive(Clone)] pub struct Context { pub h: u32, pub half_h: u32, pub digest: Vec<u8>, pub half_digest: u8, pub d_len: u32, } impl Context { pub fn new() -> Context { Context { h: 0, half_h: 0, digest: vec![0; constants::SPAM_SUM_LENGTH as usize], half_digest: 0, d_len: 0, } } pub(crate) fn hash(&mut self, c: u8) { let h1 = self.h; self.h = self.hash_full(c, h1); let h2 = self.half_h; self.half_h = self.hash_full(c, h2); } pub(crate) fn hash_full(&mut self, c: u8, h: u32) -> u32 { let h_wrapped = Wrapping(h); let hp_wrapped = Wrapping(HASH_PRIME); let c_wrapped = Wrapping(c as u32); ((h_wrapped * hp_wrapped) ^ (c_wrapped)).0 } pub(crate) fn reset(&mut self, init: bool) { if !init { self.d_len += 1; } self.digest[self.d_len as usize] = 0; self.h = HASH_INIT; if self.d_len < constants::SPAM_SUM_LENGTH / 2 { self.half_h = HASH_INIT; self.half_digest = 0; } } }
23.309091
65
0.531981
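The per-byte update in `hash_full` is the 32-bit FNV-1 step (multiply by the FNV prime 0x0100_0193, then XOR in the byte), seeded with ssdeep's own init constant. A standalone sketch of the same arithmetic:

```rust
const HASH_PRIME: u32 = 0x0100_0193; // 32-bit FNV prime, as above
const HASH_INIT: u32 = 0x2802_1967;

// Identical to Context::hash_full, minus the Wrapping plumbing.
fn fnv1_step(h: u32, c: u8) -> u32 {
    h.wrapping_mul(HASH_PRIME) ^ (c as u32)
}

fn main() {
    let h = b"abc".iter().fold(HASH_INIT, |h, &c| fnv1_step(h, c));
    println!("{:#010x}", h);
}
```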
e956726f584b60c7a85a4098c7ea110296788a5e
3,145
use super::{ textinput::TextInputComponent, visibility_blocking, CommandBlocking, CommandInfo, Component, DrawableComponent, }; use crate::{ queue::{InternalEvent, NeedsUpdate, Queue}, strings::{self, commands}, ui::style::SharedTheme, }; use anyhow::Result; use asyncgit::{ sync::{self, CommitId}, CWD, }; use crossterm::event::{Event, KeyCode}; use tui::{backend::Backend, layout::Rect, Frame}; pub struct TagCommitComponent { input: TextInputComponent, commit_id: Option<CommitId>, queue: Queue, } impl DrawableComponent for TagCommitComponent { fn draw<B: Backend>( &self, f: &mut Frame<B>, rect: Rect, ) -> Result<()> { self.input.draw(f, rect)?; Ok(()) } } impl Component for TagCommitComponent { fn commands( &self, out: &mut Vec<CommandInfo>, force_all: bool, ) -> CommandBlocking { if self.is_visible() || force_all { self.input.commands(out, force_all); out.push(CommandInfo::new( commands::TAG_COMMIT_CONFIRM_MSG, true, true, )); } visibility_blocking(self) } fn event(&mut self, ev: Event) -> Result<bool> { if self.is_visible() { if self.input.event(ev)? { return Ok(true); } if let Event::Key(e) = ev { if let KeyCode::Enter = e.code { self.tag() } return Ok(true); } } Ok(false) } fn is_visible(&self) -> bool { self.input.is_visible() } fn hide(&mut self) { self.input.hide() } fn show(&mut self) -> Result<()> { self.input.show()?; Ok(()) } } impl TagCommitComponent { /// pub fn new(queue: Queue, theme: SharedTheme) -> Self { Self { queue, input: TextInputComponent::new( theme, strings::TAG_COMMIT_POPUP_TITLE, strings::TAG_COMMIT_POPUP_MSG, ), commit_id: None, } } /// pub fn open(&mut self, id: CommitId) -> Result<()> { self.commit_id = Some(id); self.show()?; Ok(()) } /// pub fn tag(&mut self) { if let Some(commit_id) = self.commit_id { match sync::tag(CWD, &commit_id, self.input.get_text()) { Ok(_) => { self.input.clear(); self.hide(); self.queue.borrow_mut().push_back( InternalEvent::Update(NeedsUpdate::ALL), ); } Err(e) => { self.hide(); log::error!("e: {}", e,); self.queue.borrow_mut().push_back( InternalEvent::ShowErrorMsg(format!( "tag error:\n{}", e, )), ); } } } } }
23.296296
69
0.460095
ab8c4209bf36b794faedd0d975c14f53ef97c5de
320
//! Assertion failure: _FIELD_0_HAS_THE_SIZE_OF_THE_WHOLE_BIT_FIELD

extern crate alloc;

#[bitfield::bitfield(size)]
struct BitField(#[field(0, 16)] Field); // Uses the whole bit field, so use a plain `Field` instead.

#[derive(Debug, bitfield::Field)]
#[repr(u8)]
enum Field {
    F1 = 1,
    F2,
    F3,
}

fn main() {}
20
99
0.678125
9bd3f555c7a9892287e95d476070d7e0c841ef0c
434
fn main() {
    cxx_build::bridge("src/main.rs")
        .file("src/nix.cc")
        .flag_if_supported("-std=c++17")
        .flag_if_supported("-O3")
        .compile("carinae");

    println!("cargo:rerun-if-changed=include/nix.hh");
    println!("cargo:rerun-if-changed=src/nix.cc");
    println!("cargo:rerun-if-changed=src/main.rs");
    println!("cargo:rustc-link-lib=nixstore");
    println!("cargo:rustc-link-lib=nixutil");
}
33.384615
54
0.617512
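The build script above pairs with a cxx bridge declared in `src/main.rs`. A hedged sketch of what that module could look like; the header path and function signature are placeholders, since only file paths appear in the script:

```rust
// Hypothetical src/main.rs counterpart; the extern items are illustrative.
#[cxx::bridge]
mod ffi {
    unsafe extern "C++" {
        include!("carinae/include/nix.hh");
        // Placeholder signature; the real API lives in src/nix.cc.
        fn store_dir() -> String;
    }
}

fn main() {
    println!("nix store at {}", ffi::store_dir());
}
```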
e98e5e811935f54e96a0ec49d3e939de622b4d24
3,099
use clap::{App, Arg, SubCommand}; pub fn cli_app<'a, 'b>() -> App<'a, 'b> { App::new("simulator") .version(crate_version!()) .author("Sigma Prime <[email protected]>") .about("Options for interacting with simulator") .subcommand( SubCommand::with_name("no-eth1-sim") .about("Runs a simulator that bypasses the eth1 chain. Useful for faster testing of components that don't rely upon eth1") .arg(Arg::with_name("nodes") .short("n") .long("nodes") .takes_value(true) .default_value("4") .help("Number of beacon nodes")) .arg(Arg::with_name("validators_per_node") .short("v") .long("validators_per_node") .takes_value(true) .default_value("20") .help("Number of validators")) .arg(Arg::with_name("speed_up_factor") .short("s") .long("speed_up_factor") .takes_value(true) .default_value("4") .help("Speed up factor")) .arg(Arg::with_name("end_after_checks") .short("e") .long("end_after_checks") .takes_value(false) .help("End after checks (default true)")) ) .subcommand( SubCommand::with_name("syncing-sim") .about("Run the syncing simulation") .arg( Arg::with_name("speedup") .short("s") .long("speedup") .takes_value(true) .default_value("15") .help("Speed up factor for eth1 blocks and slot production"), ) .arg( Arg::with_name("initial_delay") .short("i") .long("initial_delay") .takes_value(true) .default_value("5") .help("Epoch delay for new beacon node to start syncing"), ) .arg( Arg::with_name("sync_timeout") .long("sync_timeout") .takes_value(true) .default_value("10") .help("Number of epochs after which newly added beacon nodes must be synced"), ) .arg( Arg::with_name("strategy") .long("strategy") .takes_value(true) .default_value("all") .possible_values(&["one-node", "two-nodes", "mixed", "all"]) .help("Sync verification strategy to run."), ), ) }
43.041667
102
0.409487
7a5f168e6bf748a1fa68279e9dc40fbc80c77a30
130
use crate::math::Vector3;

pub enum Light {
    Ambient(f32),
    OmniDirectional(f32, Vector3),
    Directional(f32, Vector3),
}
16.25
34
0.669231
e5cf5b5a159d73362a955326c946c7222e0c1f6d
5365
// This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT

use gdk;
use gdk_x11_sys;
#[cfg(any(feature = "v3_16", feature = "dox"))]
use glib::object::IsA;
use glib::translate::*;
use glib::GString;
use std::fmt;
use std::mem;
use std::ptr;

glib_wrapper! {
    pub struct X11Display(Object<gdk_x11_sys::GdkX11Display, gdk_x11_sys::GdkX11DisplayClass>) @extends gdk::Display;

    match fn {
        get_type => || gdk_x11_sys::gdk_x11_display_get_type(),
    }
}

impl X11Display {
    //pub fn broadcast_startup_message(&self, message_type: &str, : /*Unknown conversion*//*Unimplemented*/Fundamental: VarArgs) {
    //    unsafe { TODO: call gdk_x11_sys:gdk_x11_display_broadcast_startup_message() }
    //}

    pub fn error_trap_pop(&self) -> i32 {
        unsafe { gdk_x11_sys::gdk_x11_display_error_trap_pop(self.to_glib_none().0) }
    }

    pub fn error_trap_pop_ignored(&self) {
        unsafe {
            gdk_x11_sys::gdk_x11_display_error_trap_pop_ignored(self.to_glib_none().0);
        }
    }

    pub fn error_trap_push(&self) {
        unsafe {
            gdk_x11_sys::gdk_x11_display_error_trap_push(self.to_glib_none().0);
        }
    }

    pub fn get_startup_notification_id(&self) -> Option<GString> {
        unsafe {
            from_glib_none(gdk_x11_sys::gdk_x11_display_get_startup_notification_id(
                self.to_glib_none().0,
            ))
        }
    }

    pub fn get_user_time(&self) -> u32 {
        unsafe { gdk_x11_sys::gdk_x11_display_get_user_time(self.to_glib_none().0) }
    }

    pub fn grab(&self) {
        unsafe {
            gdk_x11_sys::gdk_x11_display_grab(self.to_glib_none().0);
        }
    }

    pub fn set_cursor_theme(&self, theme: Option<&str>, size: i32) {
        unsafe {
            gdk_x11_sys::gdk_x11_display_set_cursor_theme(
                self.to_glib_none().0,
                theme.to_glib_none().0,
                size,
            );
        }
    }

    pub fn set_startup_notification_id(&self, startup_id: &str) {
        unsafe {
            gdk_x11_sys::gdk_x11_display_set_startup_notification_id(
                self.to_glib_none().0,
                startup_id.to_glib_none().0,
            );
        }
    }

    pub fn set_window_scale(&self, scale: i32) {
        unsafe {
            gdk_x11_sys::gdk_x11_display_set_window_scale(self.to_glib_none().0, scale);
        }
    }

    pub fn string_to_compound_text(&self, str: &str) -> (i32, gdk::Atom, i32, Vec<u8>) {
        unsafe {
            let mut encoding = gdk::Atom::uninitialized();
            let mut format = mem::MaybeUninit::uninit();
            let mut ctext = ptr::null_mut();
            let mut length = mem::MaybeUninit::uninit();
            let ret = gdk_x11_sys::gdk_x11_display_string_to_compound_text(
                self.to_glib_none().0,
                str.to_glib_none().0,
                encoding.to_glib_none_mut().0,
                format.as_mut_ptr(),
                &mut ctext,
                length.as_mut_ptr(),
            );
            let format = format.assume_init();
            (
                ret,
                encoding,
                format,
                FromGlibContainer::from_glib_full_num(ctext, length.assume_init() as usize),
            )
        }
    }

    pub fn ungrab(&self) {
        unsafe {
            gdk_x11_sys::gdk_x11_display_ungrab(self.to_glib_none().0);
        }
    }

    pub fn utf8_to_compound_text(&self, str: &str) -> Option<(gdk::Atom, i32, Vec<u8>)> {
        unsafe {
            let mut encoding = gdk::Atom::uninitialized();
            let mut format = mem::MaybeUninit::uninit();
            let mut ctext = ptr::null_mut();
            let mut length = mem::MaybeUninit::uninit();
            let ret = from_glib(gdk_x11_sys::gdk_x11_display_utf8_to_compound_text(
                self.to_glib_none().0,
                str.to_glib_none().0,
                encoding.to_glib_none_mut().0,
                format.as_mut_ptr(),
                &mut ctext,
                length.as_mut_ptr(),
            ));
            let format = format.assume_init();
            if ret {
                Some((
                    encoding,
                    format,
                    FromGlibContainer::from_glib_full_num(ctext, length.assume_init() as usize),
                ))
            } else {
                None
            }
        }
    }

    #[cfg(any(feature = "v3_16", feature = "dox"))]
    pub fn get_glx_version<P: IsA<gdk::Display>>(display: &P) -> Option<(i32, i32)> {
        assert_initialized_main_thread!();
        unsafe {
            let mut major = mem::MaybeUninit::uninit();
            let mut minor = mem::MaybeUninit::uninit();
            let ret = from_glib(gdk_x11_sys::gdk_x11_display_get_glx_version(
                display.as_ref().to_glib_none().0,
                major.as_mut_ptr(),
                minor.as_mut_ptr(),
            ));
            let major = major.assume_init();
            let minor = minor.assume_init();
            if ret {
                Some((major, minor))
            } else {
                None
            }
        }
    }
}

impl fmt::Display for X11Display {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "X11Display")
    }
}
31.374269
130
0.547623
cc2d85f5f1d92ee6e70b97c2cff2dbec342cbfb3
12039
//! This executable is for outputting metrics on each of the EE host functions. //! //! In order to set up the required global state, the `state-initializer` should have been run //! first. use std::{ collections::BTreeMap, env, fs::{self, File}, io::{self, Write}, iter, path::{Path, PathBuf}, process::Command, str::FromStr, }; use clap::{crate_version, App, Arg}; use log::LevelFilter; use rand::{self, Rng}; use serde_json::Value; use casper_engine_test_support::internal::{ DeployItemBuilder, ExecuteRequestBuilder, LmdbWasmTestBuilder, }; use casper_execution_engine::{ core::engine_state::EngineConfig, shared::logging::{self, Settings}, }; use casper_types::{runtime_args, ApiError, RuntimeArgs}; use casper_engine_tests::profiling; const ABOUT: &str = "Executes a contract which logs metrics for all host functions. Note that the \ 'state-initializer' executable should be run first to set up the required global state."; const EXECUTE_AS_SUBPROCESS_ARG: &str = "execute-as-subprocess"; const ROOT_HASH_ARG_NAME: &str = "root-hash"; const ROOT_HASH_ARG_VALUE_NAME: &str = "HEX-ENCODED HASH"; const ROOT_HASH_ARG_HELP: &str = "Initial root hash; the output of running the 'state-initializer' executable"; const REPETITIONS_ARG_NAME: &str = "repetitions"; const REPETITIONS_ARG_SHORT: &str = "r"; const REPETITIONS_ARG_DEFAULT: &str = "10000"; const REPETITIONS_ARG_VALUE_NAME: &str = "NUM"; const REPETITIONS_ARG_HELP: &str = "Number of repetitions of each host function call"; const OUTPUT_DIR_ARG_NAME: &str = "output-dir"; const OUTPUT_DIR_ARG_SHORT: &str = "o"; const OUTPUT_DIR_ARG_VALUE_NAME: &str = "DIR"; const OUTPUT_DIR_ARG_HELP: &str = "Path to output directory. It will be created if it doesn't exist. If unspecified, the \ current working directory will be used"; const HOST_FUNCTION_METRICS_CONTRACT: &str = "host_function_metrics.wasm"; const PAYMENT_AMOUNT: u64 = profiling::ACCOUNT_1_INITIAL_AMOUNT - 1_000_000_000; const EXPECTED_REVERT_VALUE: u16 = 10; const CSV_HEADER: &str = "args,n_exec,total_elapsed_time"; const ARG_AMOUNT: &str = "amount"; const ARG_SEED: &str = "seed"; const ARG_OTHERS: &str = "others"; fn execute_as_subprocess_arg() -> Arg<'static, 'static> { Arg::with_name(EXECUTE_AS_SUBPROCESS_ARG) .long(EXECUTE_AS_SUBPROCESS_ARG) .hidden(true) } fn root_hash_arg() -> Arg<'static, 'static> { Arg::with_name(ROOT_HASH_ARG_NAME) .value_name(ROOT_HASH_ARG_VALUE_NAME) .help(ROOT_HASH_ARG_HELP) } fn repetitions_arg() -> Arg<'static, 'static> { Arg::with_name(REPETITIONS_ARG_NAME) .long(REPETITIONS_ARG_NAME) .short(REPETITIONS_ARG_SHORT) .default_value(REPETITIONS_ARG_DEFAULT) .value_name(REPETITIONS_ARG_VALUE_NAME) .help(REPETITIONS_ARG_HELP) } fn output_dir_arg() -> Arg<'static, 'static> { Arg::with_name(OUTPUT_DIR_ARG_NAME) .long(OUTPUT_DIR_ARG_NAME) .short(OUTPUT_DIR_ARG_SHORT) .value_name(OUTPUT_DIR_ARG_VALUE_NAME) .help(OUTPUT_DIR_ARG_HELP) } #[derive(Debug)] struct Args { execute_as_subprocess: bool, root_hash: Option<String>, repetitions: usize, output_dir: PathBuf, data_dir: PathBuf, } impl Args { fn new() -> Self { let exe_name = profiling::exe_name(); let data_dir_arg = profiling::data_dir_arg(); let arg_matches = App::new(&exe_name) .version(crate_version!()) .about(ABOUT) .arg(execute_as_subprocess_arg()) .arg(root_hash_arg()) .arg(repetitions_arg()) .arg(output_dir_arg()) .arg(data_dir_arg) .get_matches(); let execute_as_subprocess = arg_matches.is_present(EXECUTE_AS_SUBPROCESS_ARG); let root_hash = arg_matches .value_of(ROOT_HASH_ARG_NAME) .map(ToString::to_string); let 
repetitions = arg_matches .value_of(REPETITIONS_ARG_NAME) .map(profiling::parse_count) .expect("should have repetitions"); let output_dir = match arg_matches.value_of(OUTPUT_DIR_ARG_NAME) { Some(dir) => PathBuf::from_str(dir).expect("Expected a valid unicode path"), None => env::current_dir().expect("Expected to be able to access current working dir"), }; let data_dir = profiling::data_dir(&arg_matches); Args { execute_as_subprocess, root_hash, repetitions, output_dir, data_dir, } } } /// Executes the host-function-metrics contract repeatedly to generate metrics in stdout. fn run_test(root_hash: Vec<u8>, repetitions: usize, data_dir: &Path) { let log_settings = Settings::new(LevelFilter::Warn).with_metrics_enabled(true); let _ = logging::initialize(log_settings); let account_1_account_hash = profiling::account_1_account_hash(); let account_2_account_hash = profiling::account_2_account_hash(); let engine_config = EngineConfig::new().with_use_system_contracts(cfg!(feature = "use-system-contracts")); let mut test_builder = LmdbWasmTestBuilder::open(data_dir, engine_config, root_hash); let mut rng = rand::thread_rng(); for _ in 0..repetitions { let seed: u64 = rng.gen(); let random_bytes_length: usize = rng.gen_range(0, 10_000); let mut random_bytes = vec![0_u8; random_bytes_length]; rng.fill(random_bytes.as_mut_slice()); let deploy = DeployItemBuilder::new() .with_address(account_1_account_hash) .with_deploy_hash(rng.gen()) .with_session_code( HOST_FUNCTION_METRICS_CONTRACT, runtime_args! { ARG_SEED => seed, ARG_OTHERS => (random_bytes, account_1_account_hash, account_2_account_hash), }, ) .with_empty_payment_bytes(runtime_args! { ARG_AMOUNT => PAYMENT_AMOUNT }) .with_authorization_keys(&[account_1_account_hash]) .build(); let exec_request = ExecuteRequestBuilder::new() .push_deploy(deploy.clone()) .build(); test_builder.exec(exec_request); // Should revert with User error 10. 
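        // (Sketch of the intent, inferred from the constants above: the metrics contract
        // always finishes with `ApiError::User(EXPECTED_REVERT_VALUE)`, i.e. user error 10,
        // so every repetition rolls back and leaves global state untouched; the assertion
        // below merely confirms each run reached that deliberate revert.)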
let error_msg = test_builder .exec_error_message(0) .expect("should have error message"); assert!( error_msg.contains(&format!("{:?}", ApiError::User(EXPECTED_REVERT_VALUE))), error_msg ); } } #[derive(Debug)] struct Metrics { duration: String, others: BTreeMap<String, String>, } fn gather_metrics(stdout: String) -> BTreeMap<String, Vec<Metrics>> { const PAYLOAD_KEY: &str = "payload="; const DESCRIPTION_KEY: &str = "description"; const HOST_FUNCTION_PREFIX: &str = "host_function_"; const PROPERTIES_KEY: &str = "properties"; const DURATION_KEY: &str = "duration_in_seconds"; const MESSAGE_KEY: &str = "message"; const MESSAGE_TEMPLATE_KEY: &str = "message_template"; let mut result = BTreeMap::new(); for line in stdout.lines() { if let Some(index) = line.find(PAYLOAD_KEY) { let (_, payload_slice) = line.split_at(index + PAYLOAD_KEY.len()); let mut payload = serde_json::from_str::<Value>(payload_slice).expect("payload should parse as JSON"); let description = payload .get_mut(DESCRIPTION_KEY) .expect("payload should have description field") .take(); let function_id = description .as_str() .expect("description field should parse as string") .split(' ') .next() .expect("description field should consist of function name followed by a space"); if !function_id.starts_with(HOST_FUNCTION_PREFIX) { continue; } let function_name = function_id .split_at(HOST_FUNCTION_PREFIX.len()) .1 .to_string(); let metrics_vec = result.entry(function_name).or_insert_with(Vec::new); let mut properties: BTreeMap<String, String> = serde_json::from_value( payload .get_mut(PROPERTIES_KEY) .expect("payload should have properties field") .take(), ) .expect("properties should parse as pairs of strings"); let duration = properties .remove(DURATION_KEY) .expect("properties should have a duration entry"); let _ = properties.remove(MESSAGE_KEY); let _ = properties.remove(MESSAGE_TEMPLATE_KEY); let metrics = Metrics { duration, others: properties, }; metrics_vec.push(metrics); } } result } fn generate_csv(function_name: String, metrics_vec: Vec<Metrics>, output_dir: &Path) { let file_path = output_dir.join(format!("{}.csv", function_name)); let mut file = File::create(&file_path) .unwrap_or_else(|_| panic!("should create {}", file_path.display())); writeln!(file, "{}", CSV_HEADER) .unwrap_or_else(|_| panic!("should write to {}", file_path.display())); for metrics in metrics_vec { write!(file, "\"(").unwrap_or_else(|_| panic!("should write to {}", file_path.display())); for (_metric_key, metric_value) in metrics.others { write!(file, "{},", metric_value) .unwrap_or_else(|_| panic!("should write to {}", file_path.display())); } writeln!(file, ")\",1,{}", metrics.duration) .unwrap_or_else(|_| panic!("should write to {}", file_path.display())); } } fn main() { let args = Args::new(); // If the required initial root hash wasn't passed as a command line arg, expect to read it in // from stdin to allow for it to be piped from the output of 'state-initializer'. let (root_hash, root_hash_read_from_stdin) = match args.root_hash { Some(root_hash) => (root_hash, false), None => { let mut input = String::new(); let _ = io::stdin().read_line(&mut input); (input.trim_end().to_string(), true) } }; // We're running as a subprocess - execute the test to output the metrics to stdout. if args.execute_as_subprocess { return run_test( profiling::parse_hash(&root_hash), args.repetitions, &args.data_dir, ); } // We're running as the top-level process - invoke the current exe as a subprocess to capture // its stdout. 
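    // (Assumption worth noting: `env::args()` yields this executable's own path first,
    // which `Command::new` below reuses as the subprocess binary; the remaining args are
    // forwarded unchanged, plus the hidden `--execute-as-subprocess` flag appended last.)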
let subprocess_flag = format!("--{}", EXECUTE_AS_SUBPROCESS_ARG); let mut subprocess_args = env::args().chain(iter::once(subprocess_flag)); let mut subprocess = Command::new( subprocess_args .next() .expect("should get current executable's full path"), ); subprocess.args(subprocess_args); if root_hash_read_from_stdin { subprocess.arg(root_hash); } let subprocess_output = subprocess .output() .expect("should run current executable as a subprocess"); let stdout = String::from_utf8(subprocess_output.stdout).expect("should be valid UTF-8"); if !subprocess_output.status.success() { let stderr = String::from_utf8(subprocess_output.stderr).expect("should be valid UTF-8"); panic!( "\nFailed to execute as subprocess:\n{}\n\n{}\n\n", stdout, stderr ); } let all_metrics = gather_metrics(stdout); let output_dir = &args.output_dir; fs::create_dir_all(output_dir) .unwrap_or_else(|_| panic!("should create {}", output_dir.display())); for (function_id, metrics_vec) in all_metrics { generate_csv(function_id, metrics_vec, &args.output_dir); } }
36.04491
100
0.63394
676281309c3fac02e24e52dc74ba710fc33a4802
6311
// Copyright 2019-2020 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

//! Weights for the Democracy Pallet
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc5

use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight};

pub struct WeightInfo;
impl pallet_democracy::WeightInfo for WeightInfo {
    fn propose() -> Weight {
        (49113000 as Weight)
            .saturating_add(DbWeight::get().reads(2 as Weight))
            .saturating_add(DbWeight::get().writes(3 as Weight))
    }
    fn second(s: u32, ) -> Weight {
        (42067000 as Weight)
            .saturating_add((220000 as Weight).saturating_mul(s as Weight))
            .saturating_add(DbWeight::get().reads(1 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn vote_new(r: u32, ) -> Weight {
        (54159000 as Weight)
            .saturating_add((252000 as Weight).saturating_mul(r as Weight))
            .saturating_add(DbWeight::get().reads(3 as Weight))
            .saturating_add(DbWeight::get().writes(3 as Weight))
    }
    fn vote_existing(r: u32, ) -> Weight {
        (54145000 as Weight)
            .saturating_add((262000 as Weight).saturating_mul(r as Weight))
            .saturating_add(DbWeight::get().reads(3 as Weight))
            .saturating_add(DbWeight::get().writes(3 as Weight))
    }
    fn emergency_cancel() -> Weight {
        (31071000 as Weight)
            .saturating_add(DbWeight::get().reads(2 as Weight))
            .saturating_add(DbWeight::get().writes(2 as Weight))
    }
    fn external_propose(v: u32, ) -> Weight {
        (14282000 as Weight)
            .saturating_add((109000 as Weight).saturating_mul(v as Weight))
            .saturating_add(DbWeight::get().reads(2 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn external_propose_majority() -> Weight {
        (3478000 as Weight)
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn external_propose_default() -> Weight {
        (3442000 as Weight)
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn fast_track() -> Weight {
        (30820000 as Weight)
            .saturating_add(DbWeight::get().reads(2 as Weight))
            .saturating_add(DbWeight::get().writes(3 as Weight))
    }
    fn veto_external(v: u32, ) -> Weight {
        (30971000 as Weight)
            .saturating_add((184000 as Weight).saturating_mul(v as Weight))
            .saturating_add(DbWeight::get().reads(2 as Weight))
            .saturating_add(DbWeight::get().writes(2 as Weight))
    }
    fn cancel_referendum() -> Weight {
        (20431000 as Weight)
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn cancel_queued(r: u32, ) -> Weight {
        (42438000 as Weight)
            .saturating_add((3284000 as Weight).saturating_mul(r as Weight))
            .saturating_add(DbWeight::get().reads(2 as Weight))
            .saturating_add(DbWeight::get().writes(2 as Weight))
    }
    fn on_initialize_base(r: u32, ) -> Weight {
        (70826000 as Weight)
            .saturating_add((10716000 as Weight).saturating_mul(r as Weight))
            .saturating_add(DbWeight::get().reads(6 as Weight))
            .saturating_add(DbWeight::get().reads((2 as Weight).saturating_mul(r as Weight)))
            .saturating_add(DbWeight::get().writes(5 as Weight))
    }
    fn delegate(r: u32, ) -> Weight {
        (72046000 as Weight)
            .saturating_add((7837000 as Weight).saturating_mul(r as Weight))
            .saturating_add(DbWeight::get().reads(4 as Weight))
            .saturating_add(DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight)))
            .saturating_add(DbWeight::get().writes(4 as Weight))
            .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight)))
    }
    fn undelegate(r: u32, ) -> Weight {
        (41028000 as Weight)
            .saturating_add((7810000 as Weight).saturating_mul(r as Weight))
            .saturating_add(DbWeight::get().reads(2 as Weight))
            .saturating_add(DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight)))
            .saturating_add(DbWeight::get().writes(2 as Weight))
            .saturating_add(DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight)))
    }
    fn clear_public_proposals() -> Weight {
        (3643000 as Weight)
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn note_preimage(b: u32, ) -> Weight {
        (46629000 as Weight)
            .saturating_add((4000 as Weight).saturating_mul(b as Weight))
            .saturating_add(DbWeight::get().reads(1 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn note_imminent_preimage(b: u32, ) -> Weight {
        (31147000 as Weight)
            .saturating_add((3000 as Weight).saturating_mul(b as Weight))
            .saturating_add(DbWeight::get().reads(1 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn reap_preimage(b: u32, ) -> Weight {
        (42848000 as Weight)
            .saturating_add((3000 as Weight).saturating_mul(b as Weight))
            .saturating_add(DbWeight::get().reads(2 as Weight))
            .saturating_add(DbWeight::get().writes(1 as Weight))
    }
    fn unlock_remove(r: u32, ) -> Weight {
        (45333000 as Weight)
            .saturating_add((171000 as Weight).saturating_mul(r as Weight))
            .saturating_add(DbWeight::get().reads(3 as Weight))
            .saturating_add(DbWeight::get().writes(3 as Weight))
    }
    fn unlock_set(r: u32, ) -> Weight {
        (44424000 as Weight)
            .saturating_add((291000 as Weight).saturating_mul(r as Weight))
            .saturating_add(DbWeight::get().reads(3 as Weight))
            .saturating_add(DbWeight::get().writes(3 as Weight))
    }
    fn remove_vote(r: u32, ) -> Weight {
        (28250000 as Weight)
            .saturating_add((283000 as Weight).saturating_mul(r as Weight))
            .saturating_add(DbWeight::get().reads(2 as Weight))
            .saturating_add(DbWeight::get().writes(2 as Weight))
    }
    fn remove_other_vote(r: u32, ) -> Weight {
        (28250000 as Weight)
            .saturating_add((283000 as Weight).saturating_mul(r as Weight))
            .saturating_add(DbWeight::get().reads(2 as Weight))
            .saturating_add(DbWeight::get().writes(2 as Weight))
    }
}
40.197452
85
0.710505
39180043beb1152ce786b3002f2a9ae6b987be0e
461
// Copyright (C) 2019-2021 Calcu Network Technologies Ltd.
// This file is part of Calcu.

pub use sc_executor::NativeExecutor;
use sc_executor::native_executor_instance;

// Declare an instance of the native executor named `Executor`. Include the wasm binary as the
// equivalent wasm code.
native_executor_instance!(
    pub Executor,
    calcu_runtime::api::dispatch,
    calcu_runtime::native_version,
    frame_benchmarking::benchmarking::HostFunctions,
);
32.928571
94
0.772234
b906bbd39fc88ff233a1ebc3bade8db1178982c6
1655
//! Options which can be passed to various `Docker` commands.

use url::form_urlencoded;

/// Options for `Docker::containers`. This uses a "builder" pattern, so
/// most methods will consume the object and return a new one.
#[derive(Debug, Clone, Default)]
pub struct ContainerListOptions {
    all: bool,
    //before: Option<String>,
    //filter: Filter,
    latest: bool,
    limit: Option<u64>,
    //since: Option<String>,
    size: bool,
}

impl ContainerListOptions {
    /// Return all containers, including stopped ones.
    pub fn all(mut self) -> Self {
        self.all = true;
        self
    }

    /// Return just the most-recently-started container (even if it has
    /// stopped).
    pub fn latest(mut self) -> Self {
        self.latest = true;
        self
    }

    /// Limit the number of containers we return.
    pub fn limit(mut self, n: u64) -> Self {
        self.limit = Some(n);
        self
    }

    /// Calculate the total file sizes for our containers. **WARNING:**
    /// This is very expensive.
    pub fn size(mut self) -> Self {
        self.size = true;
        self
    }

    /// Convert to URL parameters.
    pub fn to_url_params(&self) -> String {
        let mut params = form_urlencoded::Serializer::new(String::new());
        if self.all {
            params.append_pair("all", "1");
        }
        if self.latest {
            params.append_pair("latest", "1");
        }
        if let Some(limit) = self.limit {
            params.append_pair("limit", &limit.to_string());
        }
        if self.size {
            params.append_pair("size", "1");
        }
        params.finish()
    }
}
26.269841
73
0.57281
67d93d2643330bbcb295d280eabe1d65bbbccc3d
1295
#![cfg(feature = "runtime-benchmarks")] use super::*; use frame_benchmarking::{account, benchmarks, whitelisted_caller}; use frame_system::RawOrigin; use sp_std::prelude::*; use frame_support::{traits::{StoredMap}}; use pallet_balances::AccountData; use orml_traits::MultiCurrencyExtended; const SEED: u32 = 1; fn funded_account<T: Config>(name: &'static str, index: u32) -> T::AccountId { let caller: T::AccountId = account(name, index, SEED); <T as StoredMap<T::AccountId, T::AccountData>>::insert(who, AccountData { free, ..Default::default() }).unwrap(); // T::Currency::update_balance(1, &caller, 1_000_000_000_000_000).unwrap(); caller } benchmarks! { insert_feed_account { let origin = T::FeedOrigin::successful_origin(); let BOB = funded_account::<T>("caller", 1); let call = Call::<T>::insert_feed_account(vec![BOB]); }: {call.dispatch_bypass_filter(origin)?} verify { assert_eq!(T::BaseCurrency::free_balance(&BOB), 0); } } #[cfg(test)] mod tests { use super::*; use crate::mock::{new_test_ext, Test}; use frame_support::assert_ok; #[test] fn test_benchmarks() { new_test_ext().execute_with(|| { println!("bench"); assert_ok!(test_benchmark_insert_feed_account::<Test>()); }); } }
25.9
118
0.664093
22ae0fd55bc2045869404aa932735d6b1a5ed30f
6874
#[cfg(feature = "realtime")] pub mod realtime; // mod translated_pairs { MAP: FxHashMap<&str, FxHashMap<&str, &str>> } include!("../target/generated/translated-pairs.rs"); // mod company { MAP: FxHashMap<&str, FxHashMap<CompanyMessageId, &str>>, ... } include!("../target/generated/company.rs"); // mod primer { MAP: FxHashMap<&str, FxHashMap<PrimerId, &str>>, ... } include!("../target/generated/primer.rs"); // mod function_docs { MAP: FxHashMap<&str, FxHashMap<FunctionDocId, &str>>, ... } include!("../target/generated/function_docs.rs"); pub use company::CompanyMessageId; pub use function_docs::FunctionDocId; pub use primer::PrimerId; use once_cell::sync::Lazy; use parking_lot::RwLock; /// Global language, improves translation ergonomics at great cost static LANG: Lazy<RwLock<String>> = Lazy::new(<_>::default); /// Translations section of Robo Instructus credits pub const CREDITS: &str = include_str!("../credits.txt"); /// Sets the global target translation language pub fn set_language_target(lang: &str) { *LANG.write() = lang.to_owned(); } #[inline] pub fn language_target<T, F: FnOnce(&str) -> T>(fun: F) -> T { let lang = &*LANG.read(); if lang.trim().is_empty() { fun("en") } else { fun(lang) } } /// Translates english text into the global target language #[inline] pub fn translate(en: &str) -> &str { translate_to(&*LANG.read(), en) } /// Translates english text into the input target language pub fn translate_to<'a>(lang: &str, en: &'a str) -> &'a str { if lang == "en" || en.trim().is_empty() || lang.trim().is_empty() { en } else if let Some(translated) = translated_pairs::MAP.get(lang).and_then(|l| l.get(en)) { translated } else { #[cfg(feature = "realtime")] realtime::notify_unknown(lang, en); en } } /// Fetches global target language company message with the matching `key`, or falls back on /// lang='en'. #[inline] pub fn company(c: CompanyMessageId) -> &'static str { company_lang(LANG.read().as_str(), c) } /// Fetches `lang` company message with the matching `key`, or falls back on lang='en'. pub fn company_lang(lang: &str, c: CompanyMessageId) -> &'static str { company::MAP .get(lang) .and_then(|company| company.get(&c).copied()) .unwrap_or_else(|| company::MAP["en"][&c]) } /// Fetches global target language primer section with the matching `key`, or falls back on /// lang='en'. #[inline] pub fn primer(c: PrimerId) -> &'static str { primer_lang(LANG.read().as_str(), c) } /// Fetches `lang` primer section with the matching `key`, or falls back on lang='en'. pub fn primer_lang(lang: &str, c: PrimerId) -> &'static str { primer::MAP.get(lang).and_then(|p| p.get(&c).copied()).unwrap_or_else(|| primer::MAP["en"][&c]) } /// Fetches global target language function doc with the matching `key`, /// or falls back on lang='en'. #[inline] pub fn function_docs(id: FunctionDocId) -> &'static str { function_docs_lang(LANG.read().as_str(), id) } /// Fetches `lang` function doc with the matching `key`, or falls back on lang='en'. 
pub fn function_docs_lang(lang: &str, id: FunctionDocId) -> &'static str { function_docs::MAP .get(lang) .and_then(|p| p.get(&id).copied()) .unwrap_or_else(|| function_docs::MAP["en"][&id]) } #[test] fn translate_ru() { assert_eq!(&*translate_to("ru", "Begin"), "Начать"); } #[test] fn translate_pl() { assert_eq!(&*translate_to("pl", "Begin"), "Rozpocznij"); } #[test] fn company_en() { assert_eq!(company_lang("en", CompanyMessageId::Acknowledge), "Acknowledge"); // en should include every CompanyMessageId assert!(!company_lang("en", CompanyMessageId::Receiving).is_empty()); assert!(!company_lang("en", CompanyMessageId::Await).is_empty()); assert!(!company_lang("en", CompanyMessageId::Arrive).is_empty()); assert!(!company_lang("en", CompanyMessageId::Underground).is_empty()); assert!(!company_lang("en", CompanyMessageId::Lower).is_empty()); assert!(!company_lang("en", CompanyMessageId::Final).is_empty()); assert!(!company_lang("en", CompanyMessageId::Promotion).is_empty()); } #[test] fn company_fallback() { assert_eq!(company_lang("nosuch", CompanyMessageId::Receiving), "Receiving Communication"); } #[test] fn company_await_is_3_lines() { for (lang, c) in &*company::MAP { if let Some(text) = c.get(&CompanyMessageId::Await) { let new_lines = text.chars().filter(|c| *c == '\n').count(); assert_eq!(new_lines, 2, "`{}` has invalid CompanyMessageId::Await", lang); } } } #[test] #[rustfmt::skip] fn primer_en() { const EXPECTED_COMMENTS_PRIMER: &str = "# Comments\n\ \n\ Text after a '`#`' symbol will not be used as code, \ these are just comments used to make notes.\n\ \n\ ```no_run\n\ robo_left() # I think I'm starting to get this\n\ ```"; assert_eq!(primer_lang("en", PrimerId::Comments), EXPECTED_COMMENTS_PRIMER); // en should include all use PrimerId::*; for id in [ Loops, Comments, Conditionals, Variables, Conditionals2, Is, Comparison, Conditionals3, ElseIf, Scope, Loops2, Loops3, Fun, FunB, Fun2, Bool, Seq, SeqB, LoopSeq, Fun3, DotCall ].iter() { assert!(!primer_lang("en", *id).trim().is_empty(), "{:?} empty", id); } } #[test] fn primer_fallback() { assert!(primer_lang("nosuch", PrimerId::Comments).starts_with("# Comments")); } #[test] #[rustfmt::skip] fn function_docs_en() { const EXPECTED: &str = "`robo_use()` Operates on the current tile returning tile specific \ data, or `0` otherwise. Runtime $tu{robo_use()} ms\n\ $render{robo_use}"; assert_eq!(function_docs_lang("en", FunctionDocId::Use), EXPECTED); // en should include all use FunctionDocId::*; for id in [ LeftForward, Scan, ScanU1, ScanU2, ScanU3, ScanU4, Use, UseU1, UseU2, UseU3, UseU4, ForwardLocation, Location, DetectAdjacent, Detect3, Detect3L, Probo, ProboScanU1, Transmit, ProboUse, ShortLeft, ShortForward, ShortScan, ShortScanU1, ShortScanU2, ShortScanU3, ShortScanU4, ShortUse, ShortUseU2, ShortUseU3, ShortUseU4, ShortDetectAdjacent, ShortLocation, ShortForwardLocation, ShortDetect3, ShortDetect3L, ShortProboLeft, ShortProboForward, ShortProboScan, ShortProboLocation, ShortProboUse, ShortTransmit, ShortReceive, ].iter() { assert!(!function_docs_lang("en", *id).trim().is_empty(), "{:?} empty", id); } }
35.802083
99
0.626418
01b187c9291c05eadf55ae2ab5d93b81c74ae297
12134
// Copyright Materialize, Inc. and contributors. All rights reserved. // // Use of this software is governed by the Business Source License // included in the LICENSE file. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0. use std::time::Duration; use anyhow::{anyhow, Context}; use rdkafka::admin::{AdminClient, AdminOptions, NewTopic, ResourceSpecifier, TopicReplication}; use mz_dataflow_types::connections::ConnectionContext; use mz_dataflow_types::sinks::{ KafkaSinkConnection, KafkaSinkConnectionBuilder, KafkaSinkConnectionRetention, KafkaSinkConsistencyConnection, PersistSinkConnection, PersistSinkConnectionBuilder, PublishedSchemaInfo, SinkConnection, SinkConnectionBuilder, }; use mz_kafka_util::client::{create_new_client_config, MzClientContext}; use mz_ore::collections::CollectionExt; use mz_repr::GlobalId; use crate::error::CoordError; pub async fn build( builder: SinkConnectionBuilder, id: GlobalId, connector_context: ConnectionContext, ) -> Result<SinkConnection, CoordError> { match builder { SinkConnectionBuilder::Kafka(k) => build_kafka(k, id, connector_context).await, SinkConnectionBuilder::Persist(p) => build_persist_sink(p, id), } } async fn register_kafka_topic( client: &AdminClient<MzClientContext>, topic: &str, mut partition_count: i32, mut replication_factor: i32, succeed_if_exists: bool, retention: KafkaSinkConnectionRetention, ) -> Result<(), CoordError> { // if either partition count or replication factor should be defaulted to the broker's config // (signaled by a value of -1), explicitly poll the broker to discover the defaults. // Newer versions of Kafka can instead send create topic requests with -1 and have this happen // behind the scenes, but this is unsupported and will result in errors on pre-2.4 Kafka if partition_count == -1 || replication_factor == -1 { let metadata = client .inner() .fetch_metadata(None, Duration::from_secs(5)) .with_context(|| { format!( "error fetching metadata when creating new topic {} for sink", topic ) })?; if metadata.brokers().len() == 0 { coord_bail!("zero brokers discovered in metadata request"); } let broker = metadata.brokers()[0].id(); let configs = client .describe_configs( &[ResourceSpecifier::Broker(broker)], &AdminOptions::new().request_timeout(Some(Duration::from_secs(5))), ) .await .with_context(|| { format!( "error fetching configuration from broker {} when creating new topic {} for sink", broker, topic ) })?; if configs.len() != 1 { coord_bail!( "error creating topic {} for sink: broker {} returned {} config results, but one was expected", topic, broker, configs.len() ); } let config = configs.into_element().map_err(|e| { anyhow!( "error reading broker configuration when creating topic {} for sink: {}", topic, e ) })?; for entry in config.entries { if entry.name == "num.partitions" && partition_count == -1 { if let Some(s) = entry.value { partition_count = s.parse::<i32>().with_context(|| { format!( "default partition count {} cannot be parsed into an integer", s ) })?; } } else if entry.name == "default.replication.factor" && replication_factor == -1 { if let Some(s) = entry.value { replication_factor = s.parse::<i32>().with_context(|| { format!( "default replication factor {} cannot be parsed into an integer", s ) })?; } } } if partition_count == -1 { coord_bail!("default was requested for partition_count, but num.partitions was not found in broker config"); } if replication_factor == -1 { 
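            // (Reaching this branch means the broker was polled above but its config had no
            // `default.replication.factor` entry, so the `-1` placeholder cannot be resolved
            // locally — hence bailing here rather than forwarding `-1` to a pre-2.4 broker.)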
coord_bail!("default was requested for replication_factor, but default.replication.factor was not found in broker config"); } } let mut kafka_topic = NewTopic::new( &topic, partition_count, TopicReplication::Fixed(replication_factor), ); let retention_ms_str = match retention.duration { Some(Some(d)) => Some(d.as_millis().to_string()), Some(None) => Some("-1".to_string()), None => None, }; let retention_bytes_str = retention.bytes.map(|s| s.to_string()); if let Some(ref retention_ms) = retention_ms_str { kafka_topic = kafka_topic.set("retention.ms", retention_ms); } if let Some(ref retention_bytes) = retention_bytes_str { kafka_topic = kafka_topic.set("retention.bytes", retention_bytes); } if succeed_if_exists { mz_kafka_util::admin::ensure_topic( client, &AdminOptions::new().request_timeout(Some(Duration::from_secs(5))), &kafka_topic, ) .await } else { mz_kafka_util::admin::create_new_topic( client, &AdminOptions::new().request_timeout(Some(Duration::from_secs(5))), &kafka_topic, ) .await } .with_context(|| format!("Error creating topic {} for sink", topic))?; Ok(()) } /// Publish value and optional key schemas for a given topic. /// /// TODO(benesch): do we need to delete the Kafka topic if publishing the // schema fails? async fn publish_kafka_schemas( ccsr: &mz_ccsr::Client, topic: &str, key_schema: Option<&str>, key_schema_type: Option<mz_ccsr::SchemaType>, value_schema: &str, value_schema_type: mz_ccsr::SchemaType, ) -> Result<(Option<i32>, i32), CoordError> { let value_schema_id = ccsr .publish_schema( &format!("{}-value", topic), value_schema, value_schema_type, &[], ) .await .context("unable to publish value schema to registry in kafka sink")?; let key_schema_id = if let Some(key_schema) = key_schema { let key_schema_type = key_schema_type.ok_or_else(|| { CoordError::Unstructured(anyhow!("expected schema type for key schema")) })?; Some( ccsr.publish_schema(&format!("{}-key", topic), key_schema, key_schema_type, &[]) .await .context("unable to publish key schema to registry in kafka sink")?, ) } else { None }; Ok((key_schema_id, value_schema_id)) } async fn build_kafka( builder: KafkaSinkConnectionBuilder, id: GlobalId, connector_context: ConnectionContext, ) -> Result<SinkConnection, CoordError> { let maybe_append_nonce = { let reuse_topic = builder.reuse_topic; let topic_suffix_nonce = builder.topic_suffix_nonce; move |topic: &str| { if reuse_topic { topic.to_string() } else { format!("{}-{}-{}", topic, id, topic_suffix_nonce) } } }; let topic = maybe_append_nonce(&builder.topic_prefix); // Create Kafka topic let mut config = create_new_client_config(connector_context.librdkafka_log_level); config.set("bootstrap.servers", &builder.broker_addrs.to_string()); for (k, v) in builder.config_options.iter() { // Explicitly reject the statistics interval option here because its not // properly supported for this client. // Explicitly reject isolation.level as it's a consumer-specific // parameter and will generate a benign WARN for admin clients if k != "statistics.interval.ms" && k != "isolation.level" { config.set(k, v); } } let client: AdminClient<_> = config .create_with_context(MzClientContext) .context("creating admin client failed")?; register_kafka_topic( &client, &topic, builder.partition_count, builder.replication_factor, builder.reuse_topic, builder.retention, ) .await .context("error registering kafka topic for sink")?; let published_schema_info = match builder.format { mz_dataflow_types::sinks::KafkaSinkFormat::Avro { key_schema, value_schema, ccsr_config, .. 
} => { let ccsr = ccsr_config.build()?; let (key_schema_id, value_schema_id) = publish_kafka_schemas( &ccsr, &topic, key_schema.as_deref(), Some(mz_ccsr::SchemaType::Avro), &value_schema, mz_ccsr::SchemaType::Avro, ) .await .context("error publishing kafka schemas for sink")?; Some(PublishedSchemaInfo { key_schema_id, value_schema_id, }) } mz_dataflow_types::sinks::KafkaSinkFormat::Json => None, }; let consistency = match builder.consistency_format { Some(mz_dataflow_types::sinks::KafkaSinkFormat::Avro { value_schema, ccsr_config, .. }) => { let consistency_topic = maybe_append_nonce( builder .consistency_topic_prefix .as_ref() .expect("known to exist"), ); // create consistency topic/schema and retrieve schema id register_kafka_topic( &client, &consistency_topic, 1, builder.replication_factor, builder.reuse_topic, KafkaSinkConnectionRetention::default(), ) .await .context("error registering kafka consistency topic for sink")?; let ccsr = ccsr_config.build()?; let (_, consistency_schema_id) = publish_kafka_schemas( &ccsr, &consistency_topic, None, None, &value_schema, mz_ccsr::SchemaType::Avro, ) .await .context("error publishing kafka consistency schemas for sink")?; Some(KafkaSinkConsistencyConnection { topic: consistency_topic, schema_id: consistency_schema_id, }) } Some(other) => unreachable!("non-Avro consistency format for Kafka sink {:#?}", &other), _ => None, }; Ok(SinkConnection::Kafka(KafkaSinkConnection { topic, topic_prefix: builder.topic_prefix, addrs: builder.broker_addrs, relation_key_indices: builder.relation_key_indices, key_desc_and_indices: builder.key_desc_and_indices, value_desc: builder.value_desc, published_schema_info, consistency, exactly_once: builder.reuse_topic, transitive_source_dependencies: builder.transitive_source_dependencies, fuel: builder.fuel, config_options: builder.config_options, })) } fn build_persist_sink( builder: PersistSinkConnectionBuilder, _id: GlobalId, ) -> Result<SinkConnection, CoordError> { Ok(SinkConnection::Persist(PersistSinkConnection { consensus_uri: builder.consensus_uri, blob_uri: builder.blob_uri, shard_id: builder.shard_id, value_desc: builder.value_desc, })) }
34.867816
135
0.582825
23b96ee4268e7e7a406448e1e85c27dfa740a0fc
27065
use std::env; fn main() { println!("cargo:rerun-if-changed=build.rs"); let target = env::var("TARGET").unwrap(); let cwd = env::current_dir().unwrap(); println!("cargo:compiler-rt={}", cwd.join("compiler-rt").display()); // Activate libm's unstable features to make full use of Nightly. println!("cargo:rustc-cfg=feature=\"unstable\""); // Emscripten's runtime includes all the builtins if target.contains("emscripten") { return; } // OpenBSD provides compiler_rt by default, use it instead of rebuilding it from source if target.contains("openbsd") { println!("cargo:rustc-link-search=native=/usr/lib"); println!("cargo:rustc-link-lib=compiler_rt"); return; } // Forcibly enable memory intrinsics on wasm & SGX as we don't have a libc to // provide them. if (target.contains("wasm") && !target.contains("wasi")) || (target.contains("sgx") && target.contains("fortanix")) || target.contains("-none") || target.contains("nvptx") || target.contains("bpf") || target.contains("sbf") { println!("cargo:rustc-cfg=feature=\"mem\""); } // These targets have hardware unaligned access support. if target.contains("x86_64") || target.contains("i686") || target.contains("aarch64") || target.contains("bpf") { println!("cargo:rustc-cfg=feature=\"mem-unaligned\""); } // NOTE we are going to assume that llvm-target, what determines our codegen option, matches the // target triple. This is usually correct for our built-in targets but can break in presence of // custom targets, which can have arbitrary names. let llvm_target = target.split('-').collect::<Vec<_>>(); // Build missing intrinsics from compiler-rt C source code. If we're // mangling names though we assume that we're also in test mode so we don't // build anything and we rely on the upstream implementation of compiler-rt // functions if !cfg!(feature = "mangled-names") && cfg!(feature = "c") { // Don't use a C compiler for these targets: // // * wasm - clang for wasm is somewhat hard to come by and it's // unlikely that the C is really that much better than our own Rust. // * nvptx - everything is bitcode, not compatible with mixed C/Rust // * riscv - the rust-lang/rust distribution container doesn't have a C // compiler nor is cc-rs ready for compilation to riscv (at this // time). This can probably be removed in the future if !target.contains("wasm") && !target.contains("nvptx") && !target.starts_with("riscv") { #[cfg(feature = "c")] c::compile(&llvm_target, &target); } } // To compile intrinsics.rs for thumb targets, where there is no libc if llvm_target[0].starts_with("thumb") { println!("cargo:rustc-cfg=thumb") } // compiler-rt `cfg`s away some intrinsics for thumbv6m and thumbv8m.base because // these targets do not have full Thumb-2 support but only original Thumb-1. // We have to cfg our code accordingly. if llvm_target[0] == "thumbv6m" || llvm_target[0] == "thumbv8m.base" { println!("cargo:rustc-cfg=thumb_1") } // Only emit the ARM Linux atomic emulation on pre-ARMv6 architectures. This // includes the old androideabi. It is deprecated but it is available as a // rustc target (arm-linux-androideabi). 
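    // (Background, stated with some hedging: `kernel_user_helpers` presumably gates a
    // fallback that routes atomic operations through the helper routines the Linux kernel
    // maps into every process's address space on these pre-ARMv6 targets.)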
if llvm_target[0] == "armv4t" || llvm_target[0] == "armv5te" || llvm_target.get(2) == Some(&"androideabi") { println!("cargo:rustc-cfg=kernel_user_helpers") } } #[cfg(feature = "c")] mod c { extern crate cc; use std::collections::{BTreeMap, HashSet}; use std::env; use std::fs::File; use std::io::Write; use std::path::{Path, PathBuf}; struct Sources { // SYMBOL -> PATH TO SOURCE map: BTreeMap<&'static str, &'static str>, } impl Sources { fn new() -> Sources { Sources { map: BTreeMap::new(), } } fn extend(&mut self, sources: &[(&'static str, &'static str)]) { // NOTE Some intrinsics have both a generic implementation (e.g. // `floatdidf.c`) and an arch optimized implementation // (`x86_64/floatdidf.c`). In those cases, we keep the arch optimized // implementation and discard the generic implementation. If we don't // and keep both implementations, the linker will yell at us about // duplicate symbols! for (symbol, src) in sources { if src.contains("/") { // Arch-optimized implementation (preferred) self.map.insert(symbol, src); } else { // Generic implementation if !self.map.contains_key(symbol) { self.map.insert(symbol, src); } } } } fn remove(&mut self, symbols: &[&str]) { for symbol in symbols { self.map.remove(*symbol).unwrap(); } } } /// Compile intrinsics from the compiler-rt C source code pub fn compile(llvm_target: &[&str], target: &String) { let target_arch = env::var("CARGO_CFG_TARGET_ARCH").unwrap(); let target_env = env::var("CARGO_CFG_TARGET_ENV").unwrap(); let target_os = env::var("CARGO_CFG_TARGET_OS").unwrap(); let target_vendor = env::var("CARGO_CFG_TARGET_VENDOR").unwrap(); let mut consider_float_intrinsics = true; let cfg = &mut cc::Build::new(); // AArch64 GCCs exit with an error condition when they encounter any kind of floating point // code if the `nofp` and/or `nosimd` compiler flags have been set. // // Therefore, evaluate if those flags are present and set a boolean that causes any // compiler-rt intrinsics that contain floating point source to be excluded for this target. if target_arch == "aarch64" { let cflags_key = String::from("CFLAGS_") + &(target.to_owned().replace("-", "_")); if let Ok(cflags_value) = env::var(cflags_key) { if cflags_value.contains("+nofp") || cflags_value.contains("+nosimd") { consider_float_intrinsics = false; } } } cfg.warnings(false); if target_env == "msvc" { // Don't pull in extra libraries on MSVC cfg.flag("/Zl"); // Emulate C99 and C++11's __func__ for MSVC prior to 2013 CTP cfg.define("__func__", Some("__FUNCTION__")); } else { // Turn off various features of gcc and such, mostly copying // compiler-rt's build system already cfg.flag("-fno-builtin"); cfg.flag("-fvisibility=hidden"); cfg.flag("-ffreestanding"); // Avoid the following warning appearing once **per file**: // clang: warning: optimization flag '-fomit-frame-pointer' is not supported for target 'armv7' [-Wignored-optimization-argument] // // Note that compiler-rt's build system also checks // // `check_cxx_compiler_flag(-fomit-frame-pointer COMPILER_RT_HAS_FOMIT_FRAME_POINTER_FLAG)` // // in https://github.com/rust-lang/compiler-rt/blob/c8fbcb3/cmake/config-ix.cmake#L19. 
cfg.flag_if_supported("-fomit-frame-pointer"); cfg.define("VISIBILITY_HIDDEN", None); } let mut sources = Sources::new(); sources.extend(&[ ("__absvdi2", "absvdi2.c"), ("__absvsi2", "absvsi2.c"), ("__addvdi3", "addvdi3.c"), ("__addvsi3", "addvsi3.c"), ("apple_versioning", "apple_versioning.c"), ("__clzdi2", "clzdi2.c"), ("__clzsi2", "clzsi2.c"), ("__cmpdi2", "cmpdi2.c"), ("__ctzdi2", "ctzdi2.c"), ("__ctzsi2", "ctzsi2.c"), ("__int_util", "int_util.c"), ("__mulvdi3", "mulvdi3.c"), ("__mulvsi3", "mulvsi3.c"), ("__negdi2", "negdi2.c"), ("__negvdi2", "negvdi2.c"), ("__negvsi2", "negvsi2.c"), ("__paritydi2", "paritydi2.c"), ("__paritysi2", "paritysi2.c"), ("__popcountdi2", "popcountdi2.c"), ("__popcountsi2", "popcountsi2.c"), ("__subvdi3", "subvdi3.c"), ("__subvsi3", "subvsi3.c"), ("__ucmpdi2", "ucmpdi2.c"), ]); if consider_float_intrinsics { sources.extend(&[ ("__divdc3", "divdc3.c"), ("__divsc3", "divsc3.c"), ("__divxc3", "divxc3.c"), ("__extendhfsf2", "extendhfsf2.c"), ("__muldc3", "muldc3.c"), ("__mulsc3", "mulsc3.c"), ("__mulxc3", "mulxc3.c"), ("__negdf2", "negdf2.c"), ("__negsf2", "negsf2.c"), ("__powixf2", "powixf2.c"), ("__truncdfhf2", "truncdfhf2.c"), ("__truncsfhf2", "truncsfhf2.c"), ]); } // When compiling in rustbuild (the rust-lang/rust repo) this library // also needs to satisfy intrinsics that jemalloc or C in general may // need, so include a few more that aren't typically needed by // LLVM/Rust. if cfg!(feature = "rustbuild") { sources.extend(&[("__ffsdi2", "ffsdi2.c")]); } // On iOS and 32-bit OSX these are all just empty intrinsics, no need to // include them. if target_os != "ios" && (target_vendor != "apple" || target_arch != "x86") { sources.extend(&[ ("__absvti2", "absvti2.c"), ("__addvti3", "addvti3.c"), ("__clzti2", "clzti2.c"), ("__cmpti2", "cmpti2.c"), ("__ctzti2", "ctzti2.c"), ("__ffsti2", "ffsti2.c"), ("__mulvti3", "mulvti3.c"), ("__negti2", "negti2.c"), ("__parityti2", "parityti2.c"), ("__popcountti2", "popcountti2.c"), ("__subvti3", "subvti3.c"), ("__ucmpti2", "ucmpti2.c"), ]); if consider_float_intrinsics { sources.extend(&[("__negvti2", "negvti2.c")]); } } if target_vendor == "apple" { sources.extend(&[ ("atomic_flag_clear", "atomic_flag_clear.c"), ("atomic_flag_clear_explicit", "atomic_flag_clear_explicit.c"), ("atomic_flag_test_and_set", "atomic_flag_test_and_set.c"), ( "atomic_flag_test_and_set_explicit", "atomic_flag_test_and_set_explicit.c", ), ("atomic_signal_fence", "atomic_signal_fence.c"), ("atomic_thread_fence", "atomic_thread_fence.c"), ]); } if target_env == "msvc" { if target_arch == "x86_64" { sources.extend(&[ ("__floatdisf", "x86_64/floatdisf.c"), ("__floatdixf", "x86_64/floatdixf.c"), ]); } } else { // None of these seem to be used on x86_64 windows, and they've all // got the wrong ABI anyway, so we want to avoid them. 
if target_os != "windows" { if target_arch == "x86_64" { sources.extend(&[ ("__floatdisf", "x86_64/floatdisf.c"), ("__floatdixf", "x86_64/floatdixf.c"), ("__floatundidf", "x86_64/floatundidf.S"), ("__floatundisf", "x86_64/floatundisf.S"), ("__floatundixf", "x86_64/floatundixf.S"), ]); } } if target_arch == "x86" { sources.extend(&[ ("__ashldi3", "i386/ashldi3.S"), ("__ashrdi3", "i386/ashrdi3.S"), ("__divdi3", "i386/divdi3.S"), ("__floatdidf", "i386/floatdidf.S"), ("__floatdisf", "i386/floatdisf.S"), ("__floatdixf", "i386/floatdixf.S"), ("__floatundidf", "i386/floatundidf.S"), ("__floatundisf", "i386/floatundisf.S"), ("__floatundixf", "i386/floatundixf.S"), ("__lshrdi3", "i386/lshrdi3.S"), ("__moddi3", "i386/moddi3.S"), ("__muldi3", "i386/muldi3.S"), ("__udivdi3", "i386/udivdi3.S"), ("__umoddi3", "i386/umoddi3.S"), ]); } } if target_arch == "arm" && target_os != "ios" && target_env != "msvc" { sources.extend(&[ ("__aeabi_div0", "arm/aeabi_div0.c"), ("__aeabi_drsub", "arm/aeabi_drsub.c"), ("__aeabi_frsub", "arm/aeabi_frsub.c"), ("__bswapdi2", "arm/bswapdi2.S"), ("__bswapsi2", "arm/bswapsi2.S"), ("__clzdi2", "arm/clzdi2.S"), ("__clzsi2", "arm/clzsi2.S"), ("__divmodsi4", "arm/divmodsi4.S"), ("__divsi3", "arm/divsi3.S"), ("__modsi3", "arm/modsi3.S"), ("__switch16", "arm/switch16.S"), ("__switch32", "arm/switch32.S"), ("__switch8", "arm/switch8.S"), ("__switchu8", "arm/switchu8.S"), ("__sync_synchronize", "arm/sync_synchronize.S"), ("__udivmodsi4", "arm/udivmodsi4.S"), ("__udivsi3", "arm/udivsi3.S"), ("__umodsi3", "arm/umodsi3.S"), ]); if target_os == "freebsd" { sources.extend(&[("__clear_cache", "clear_cache.c")]); } // First of all aeabi_cdcmp and aeabi_cfcmp are never called by LLVM. // Second are little-endian only, so build fail on big-endian targets. // Temporally workaround: exclude these files for big-endian targets. 
if !llvm_target[0].starts_with("thumbeb") && !llvm_target[0].starts_with("armeb") { sources.extend(&[ ("__aeabi_cdcmp", "arm/aeabi_cdcmp.S"), ("__aeabi_cdcmpeq_check_nan", "arm/aeabi_cdcmpeq_check_nan.c"), ("__aeabi_cfcmp", "arm/aeabi_cfcmp.S"), ("__aeabi_cfcmpeq_check_nan", "arm/aeabi_cfcmpeq_check_nan.c"), ]); } } if llvm_target[0] == "armv7" { sources.extend(&[ ("__sync_fetch_and_add_4", "arm/sync_fetch_and_add_4.S"), ("__sync_fetch_and_add_8", "arm/sync_fetch_and_add_8.S"), ("__sync_fetch_and_and_4", "arm/sync_fetch_and_and_4.S"), ("__sync_fetch_and_and_8", "arm/sync_fetch_and_and_8.S"), ("__sync_fetch_and_max_4", "arm/sync_fetch_and_max_4.S"), ("__sync_fetch_and_max_8", "arm/sync_fetch_and_max_8.S"), ("__sync_fetch_and_min_4", "arm/sync_fetch_and_min_4.S"), ("__sync_fetch_and_min_8", "arm/sync_fetch_and_min_8.S"), ("__sync_fetch_and_nand_4", "arm/sync_fetch_and_nand_4.S"), ("__sync_fetch_and_nand_8", "arm/sync_fetch_and_nand_8.S"), ("__sync_fetch_and_or_4", "arm/sync_fetch_and_or_4.S"), ("__sync_fetch_and_or_8", "arm/sync_fetch_and_or_8.S"), ("__sync_fetch_and_sub_4", "arm/sync_fetch_and_sub_4.S"), ("__sync_fetch_and_sub_8", "arm/sync_fetch_and_sub_8.S"), ("__sync_fetch_and_umax_4", "arm/sync_fetch_and_umax_4.S"), ("__sync_fetch_and_umax_8", "arm/sync_fetch_and_umax_8.S"), ("__sync_fetch_and_umin_4", "arm/sync_fetch_and_umin_4.S"), ("__sync_fetch_and_umin_8", "arm/sync_fetch_and_umin_8.S"), ("__sync_fetch_and_xor_4", "arm/sync_fetch_and_xor_4.S"), ("__sync_fetch_and_xor_8", "arm/sync_fetch_and_xor_8.S"), ]); } if llvm_target.last().unwrap().ends_with("eabihf") { if !llvm_target[0].starts_with("thumbv7em") && !llvm_target[0].starts_with("thumbv8m.main") { // The FPU option chosen for these architectures in cc-rs, ie: // -mfpu=fpv4-sp-d16 for thumbv7em // -mfpu=fpv5-sp-d16 for thumbv8m.main // do not support double precision floating points conversions so the files // that include such instructions are not included for these targets. 
sources.extend(&[ ("__fixdfsivfp", "arm/fixdfsivfp.S"), ("__fixunsdfsivfp", "arm/fixunsdfsivfp.S"), ("__floatsidfvfp", "arm/floatsidfvfp.S"), ("__floatunssidfvfp", "arm/floatunssidfvfp.S"), ]); } sources.extend(&[ ("__fixsfsivfp", "arm/fixsfsivfp.S"), ("__fixunssfsivfp", "arm/fixunssfsivfp.S"), ("__floatsisfvfp", "arm/floatsisfvfp.S"), ("__floatunssisfvfp", "arm/floatunssisfvfp.S"), ("__floatunssisfvfp", "arm/floatunssisfvfp.S"), ("__restore_vfp_d8_d15_regs", "arm/restore_vfp_d8_d15_regs.S"), ("__save_vfp_d8_d15_regs", "arm/save_vfp_d8_d15_regs.S"), ("__negdf2vfp", "arm/negdf2vfp.S"), ("__negsf2vfp", "arm/negsf2vfp.S"), ]); } if target_arch == "aarch64" && consider_float_intrinsics { sources.extend(&[ ("__comparetf2", "comparetf2.c"), ("__extenddftf2", "extenddftf2.c"), ("__extendsftf2", "extendsftf2.c"), ("__fixtfdi", "fixtfdi.c"), ("__fixtfsi", "fixtfsi.c"), ("__fixtfti", "fixtfti.c"), ("__fixunstfdi", "fixunstfdi.c"), ("__fixunstfsi", "fixunstfsi.c"), ("__fixunstfti", "fixunstfti.c"), ("__floatditf", "floatditf.c"), ("__floatsitf", "floatsitf.c"), ("__floatunditf", "floatunditf.c"), ("__floatunsitf", "floatunsitf.c"), ("__trunctfdf2", "trunctfdf2.c"), ("__trunctfsf2", "trunctfsf2.c"), ("__addtf3", "addtf3.c"), ("__multf3", "multf3.c"), ("__subtf3", "subtf3.c"), ("__divtf3", "divtf3.c"), ("__powitf2", "powitf2.c"), ("__fe_getround", "fp_mode.c"), ("__fe_raise_inexact", "fp_mode.c"), ]); if target_os != "windows" { sources.extend(&[("__multc3", "multc3.c")]); } } if target_arch == "mips" { sources.extend(&[("__bswapsi2", "bswapsi2.c")]); } if target_arch == "mips64" { sources.extend(&[ ("__extenddftf2", "extenddftf2.c"), ("__netf2", "comparetf2.c"), ("__addtf3", "addtf3.c"), ("__multf3", "multf3.c"), ("__subtf3", "subtf3.c"), ("__fixtfsi", "fixtfsi.c"), ("__floatsitf", "floatsitf.c"), ("__fixunstfsi", "fixunstfsi.c"), ("__floatunsitf", "floatunsitf.c"), ("__fe_getround", "fp_mode.c"), ("__divtf3", "divtf3.c"), ("__trunctfdf2", "trunctfdf2.c"), ]); } // Remove the assembly implementations that won't compile for the target if llvm_target[0] == "thumbv6m" || llvm_target[0] == "thumbv8m.base" { let mut to_remove = Vec::new(); for (k, v) in sources.map.iter() { if v.ends_with(".S") { to_remove.push(*k); } } sources.remove(&to_remove); // But use some generic implementations where possible sources.extend(&[("__clzdi2", "clzdi2.c"), ("__clzsi2", "clzsi2.c")]) } if llvm_target[0] == "thumbv7m" || llvm_target[0] == "thumbv7em" { sources.remove(&["__aeabi_cdcmp", "__aeabi_cfcmp"]); } if target_arch == "bpf" || target_arch == "sbf" { cfg.define("__ELF__", None); // Add the 128 bit implementations sources.extend(&[ ("__ashlti3", "ashlti3.c"), ("__ashrti3", "ashrti3.c"), ("__divdi3", "divdi3.c"), ("__divmoddi4", "divmoddi4.c"), ("__divmodsi4", "divmodsi4.c"), ("__divsi3", "divsi3.c"), ("__divti3", "divti3.c"), ("__fixdfti", "fixdfti.c"), ("__fixsfti", "fixsfti.c"), ("__fixunsdfti", "fixunsdfti.c"), ("__fixunssfti", "fixunssfti.c"), ("__floattidf", "floattidf.c"), ("__floattisf", "floattisf.c"), ("__floatuntidf", "floatuntidf.c"), ("__floatuntisf", "floatuntisf.c"), ("__lshrti3", "lshrti3.c"), ("__moddi3", "moddi3.c"), ("__modti3", "modti3.c"), ("__muloti4", "muloti4.c"), ("__multi3", "multi3.c"), ("__udivdi3", "udivdi3.c"), ("__udivmoddi4", "udivmoddi4.c"), ("__udivmodsi4", "udivmodsi4.c"), ("__udivmodti4", "sbf/udivmodti4.c"), ("__udivsi3", "udivsi3.c"), ("__udivti3", "udivti3.c"), ("__umoddi3", "umoddi3.c"), ("__umodti3", "umodti3.c"), ]); // Add any other missing builtins sources.extend(&[ 
("__floatundidf", "floatundidf.c"), ("__floatundisf", "floatundisf.c"), ]); // Remove the implementations that fail to build. // This list should shrink to zero sources.remove(&[ "__int_util", // Unsupported architecture error "__mulvdi3", // Unsupported signed division "__mulvsi3", // Unsupported signed division ]); } // When compiling the C code we require the user to tell us where the // source code is, and this is largely done so when we're compiling as // part of rust-lang/rust we can use the same llvm-project repository as // rust-lang/rust. let root = match env::var_os("RUST_COMPILER_RT_ROOT") { Some(s) => PathBuf::from(s), None => panic!("RUST_COMPILER_RT_ROOT is not set"), }; if !root.exists() { panic!("RUST_COMPILER_RT_ROOT={} does not exist", root.display()); } // Support deterministic builds by remapping the __FILE__ prefix if the // compiler supports it. This fixes the nondeterminism caused by the // use of that macro in lib/builtins/int_util.h in compiler-rt. cfg.flag_if_supported(&format!("-ffile-prefix-map={}=.", root.display())); // Include out-of-line atomics for aarch64, which are all generated by supplying different // sets of flags to the same source file. // Note: Out-of-line aarch64 atomics are not supported by the msvc toolchain (#430). let src_dir = root.join("lib/builtins"); if target_arch == "aarch64" && target_env != "msvc" { // See below for why we're building these as separate libraries. build_aarch64_out_of_line_atomics_libraries(&src_dir, cfg); // Some run-time CPU feature detection is necessary, as well. sources.extend(&[("__aarch64_have_lse_atomics", "cpu_model.c")]); } let mut added_sources = HashSet::new(); for (sym, src) in sources.map.iter() { let src = src_dir.join(src); if added_sources.insert(src.clone()) { cfg.file(&src); println!("cargo:rerun-if-changed={}", src.display()); } println!("cargo:rustc-cfg={}=\"optimized-c\"", sym); } cfg.compile("libcompiler-rt.a"); } fn build_aarch64_out_of_line_atomics_libraries(builtins_dir: &Path, cfg: &mut cc::Build) { let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); let outlined_atomics_file = builtins_dir.join("aarch64/lse.S"); println!("cargo:rerun-if-changed={}", outlined_atomics_file.display()); cfg.include(&builtins_dir); for instruction_type in &["cas", "swp", "ldadd", "ldclr", "ldeor", "ldset"] { for size in &[1, 2, 4, 8, 16] { if *size == 16 && *instruction_type != "cas" { continue; } for (model_number, model_name) in &[(1, "relax"), (2, "acq"), (3, "rel"), (4, "acq_rel")] { // The original compiler-rt build system compiles the same // source file multiple times with different compiler // options. Here we do something slightly different: we // create multiple .S files with the proper #defines and // then include the original file. // // This is needed because the cc crate doesn't allow us to // override the name of object files and libtool requires // all objects in an archive to have unique names. let path = out_dir.join(format!("lse_{}{}_{}.S", instruction_type, size, model_name)); let mut file = File::create(&path).unwrap(); writeln!(file, "#define L_{}", instruction_type).unwrap(); writeln!(file, "#define SIZE {}", size).unwrap(); writeln!(file, "#define MODEL {}", model_number).unwrap(); writeln!( file, "#include \"{}\"", outlined_atomics_file.canonicalize().unwrap().display() ) .unwrap(); drop(file); cfg.file(path); let sym = format!("__aarch64_{}{}_{}", instruction_type, size, model_name); println!("cargo:rustc-cfg={}=\"optimized-c\"", sym); } } } } }
43.234824
141
0.517052
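For reference, one of the wrapper files that build_aarch64_out_of_line_atomics_libraries writes — e.g. lse_cas4_acq.S, the 4-byte acquire compare-and-swap (instruction_type "cas", size 4, model_number 2) — works out, from the writeln! calls above, to the four lines below. The absolute include path is whatever RUST_COMPILER_RT_ROOT points at, so it is shown here as a placeholder:

#define L_cas
#define SIZE 4
#define MODEL 2
#include "<RUST_COMPILER_RT_ROOT>/lib/builtins/aarch64/lse.S"

The matching cargo line printed for this object is cargo:rustc-cfg=__aarch64_cas4_acq="optimized-c".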
db034a1a77853775e671ec76295122378c6f99c4
32
pub mod common; pub mod github;
10.666667
15
0.75
4b52946ac0ea699669b4a61be5f122f149f8f3fe
207
use base64::{CharacterSet, Config};

fn config() -> Config {
    Config::new(CharacterSet::UrlSafe, false)
}

/// Encodes a raw fingerprint as URL-safe base64 with padding disabled.
pub fn encode(fingerprint: &[u8]) -> String {
    base64::encode_config(fingerprint, config())
}
20.7
48
0.676329
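A quick usage sketch of encode; the expected string can be checked by hand (0xDE 0xAD 0xBE 0xEF is "3q2+7w==" in standard base64, so the URL-safe, unpadded form swaps '+' for '-' and drops the "=="):

fn main() {
    assert_eq!(encode(&[0xde, 0xad, 0xbe, 0xef]), "3q2-7w");
}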
bf772bc3416be236835e70a8b4ea136b8ca22128
22,731
#[cfg(test)] mod tests { use crate::helpers::Cw721MarketplaceContract; use crate::msg::{ExecuteMsg, QueryMsg, TokensResponse}; use crate::ContractError; use anyhow::{anyhow, Result}; use derivative::Derivative; use cosmwasm_std::{ to_binary, Addr, Coin, Decimal, Empty, QueryRequest, StdError, Uint128, WasmQuery, }; use cw_multi_test::{App, AppResponse, Contract, ContractWrapper, Executor}; use serde::de::DeserializeOwned; use cw721_base::helpers::Cw721Contract; use cw721_passage::{Extension, Metadata}; use serde::Serialize; use crate::state::Token; pub fn contract_marketplace() -> Box<dyn Contract<Empty>> { let contract = ContractWrapper::new( crate::execute::execute, crate::execute::instantiate, crate::query::query, ); Box::new(contract) } pub fn contract_cw721_passage() -> Box<dyn Contract<Empty>> { let contract = ContractWrapper::new( cw721_passage::entry::execute, cw721_passage::entry::instantiate, cw721_passage::entry::query, ); Box::new(contract) } const BOB: &str = "bob"; const MINTER: &str = "minter"; const ADMIN: &str = "admin"; const RANDOM: &str = "random"; const ALLOWED_NATIVE: &str = "ujuno"; const COLLECTOR: &str = "collector"; const TOKEN_ID1: &str = "token1"; const TOKEN_ID2: &str = "token2"; const TOKEN_ID3: &str = "token3"; fn mock_app() -> App { App::new(|router, _, storage| { router .bank .init_balance( storage, &Addr::unchecked(RANDOM), vec![ Coin { denom: "ujuno".into(), amount: Uint128::new(50000000), }, Coin { denom: "zuhaha".into(), amount: Uint128::new(400), }, ], ) .unwrap(); router .bank .init_balance( storage, &Addr::unchecked(BOB), vec![Coin { denom: "ujuno".into(), amount: Uint128::new(500), }], ) .unwrap(); }) } #[derive(Derivative)] #[derivative(Debug)] pub struct Suite { /// Application mock #[derivative(Debug = "ignore")] pub app: App, /// Special account pub owner: String, nft_code_id: u64, marketplace_code_id: u64, } #[allow(dead_code)] impl Suite { pub fn init() -> Result<Suite> { let mut app = mock_app(); let owner = "owner".to_owned(); let nft_code_id = app.store_code(contract_cw721_passage()); let marketplace_code_id = app.store_code(contract_marketplace()); Ok(Suite { app, owner, nft_code_id, marketplace_code_id, }) } fn instantiate_nft(&mut self, minter: String) -> Cw721Contract { let nft_id = self.app.store_code(contract_cw721_passage()); let msg = cw721_base::InstantiateMsg { name: "Strange Clan".to_string(), symbol: "STR".to_string(), minter: minter.clone(), }; Cw721Contract( self.app .instantiate_contract(nft_id, Addr::unchecked(minter), &msg, &[], "flex", None) .unwrap(), ) } fn instantiate_marketplace( &mut self, nft_addr: String, allowed_native: String, ) -> Cw721MarketplaceContract { let marketplace_id = self.app.store_code(contract_marketplace()); let msg = crate::msg::InstantiateMsg { admin: String::from(ADMIN), nft_addr, allowed_native, fee_percentage: Decimal::from_ratio(3u64, 100u64), collector_addr: String::from(COLLECTOR), }; Cw721MarketplaceContract( self.app .instantiate_contract( marketplace_id, Addr::unchecked(ADMIN), &msg, &[], "flex", None, ) .unwrap(), ) } fn proper_instantiate(&mut self) -> (Cw721Contract, Cw721MarketplaceContract) { // setup nft contract let nft = self.instantiate_nft(String::from(MINTER)); let mint_msg: cw721_base::msg::MintMsg<Extension> = cw721_base::MintMsg { token_id: TOKEN_ID1.to_string(), owner: BOB.to_string(), token_uri: Some("https://starships.example.com/Starship/Enterprise.json".into()), extension: Some(Metadata { description: Some("Spaceship with Warp Drive".into()), name: Some("Starship USS 
Enterprise".to_string()), ..Metadata::default() }), }; let exec_msg = cw721_base::ExecuteMsg::Mint(mint_msg); let cosmos_msg = nft.call(exec_msg).unwrap(); self.app .execute(Addr::unchecked(MINTER), cosmos_msg) .unwrap(); let marketplace = self.instantiate_marketplace(nft.addr().into(), String::from(ALLOWED_NATIVE)); (nft, marketplace) } pub fn execute<M>( &mut self, sender: Addr, contract_addr: Addr, msg: ExecuteMsg, _funds: Vec<Coin>, ) -> Result<AppResponse> where M: Serialize + DeserializeOwned, { self.app .execute_contract(sender, contract_addr, &msg, &[]) .map_err(|err| anyhow!(err)) } pub fn query<M>(&self, target_contract: Addr, msg: M) -> Result<M, StdError> where M: Serialize + DeserializeOwned, { self.app.wrap().query(&QueryRequest::Wasm(WasmQuery::Smart { contract_addr: target_contract.to_string(), msg: to_binary(&msg).unwrap(), })) } } #[test] fn test_register_tokens() { let mut suite = Suite::init().unwrap(); let (_nft_contract, marketplace_contract) = suite.proper_instantiate(); // empty tokens throw error let msg = marketplace_contract .call(ExecuteMsg::ListTokens { tokens: vec![] }, vec![]) .unwrap(); let res = suite.app.execute(Addr::unchecked(ADMIN), msg).unwrap_err(); assert_eq!(ContractError::WrongInput {}, res.downcast().unwrap()); // only admin can register tokens let token = crate::state::Token { id: TOKEN_ID1.into(), price: Default::default(), on_sale: true, }; let msg = marketplace_contract .call( ExecuteMsg::ListTokens { tokens: vec![token], }, vec![], ) .unwrap(); let res = suite .app .execute(Addr::unchecked(RANDOM), msg.clone()) .unwrap_err(); assert_eq!(ContractError::Unauthorized {}, res.downcast().unwrap()); // admin can register token suite.app.execute(Addr::unchecked(ADMIN), msg).unwrap(); } #[test] fn test_list_tokens() { let mut suite = Suite::init().unwrap(); let (nft_contract, marketplace_contract) = suite.proper_instantiate(); // empty tokens throw error let msg = marketplace_contract .call(ExecuteMsg::ListTokens { tokens: vec![] }, vec![]) .unwrap(); let res = suite.app.execute(Addr::unchecked(ADMIN), msg).unwrap_err(); assert_eq!(ContractError::WrongInput {}, res.downcast().unwrap()); let token = crate::state::Token { id: String::from(TOKEN_ID1), price: Uint128::new(100), on_sale: true, }; let msg = marketplace_contract .call( ExecuteMsg::ListTokens { tokens: vec![token.clone()], }, vec![], ) .unwrap(); // register token suite .app .execute(Addr::unchecked(ADMIN), msg.clone()) .unwrap(); // only token owner can list let res = suite .app .execute(Addr::unchecked(RANDOM), msg.clone()) .unwrap_err(); assert_eq!(ContractError::Unauthorized {}, res.downcast().unwrap()); // non approved tokens are not accepted let res = suite.app.execute(Addr::unchecked(BOB), msg).unwrap_err(); assert_eq!(ContractError::NotApproved {}, res.downcast().unwrap()); // marketplace contract is not spender let exec_msg: cw721_base::ExecuteMsg<Extension> = cw721_base::ExecuteMsg::Approve { spender: RANDOM.into(), token_id: TOKEN_ID1.into(), expires: None, }; let msg = nft_contract.call(exec_msg).unwrap(); suite.app.execute(Addr::unchecked(BOB), msg).unwrap(); let msg = marketplace_contract .call( ExecuteMsg::ListTokens { tokens: vec![token.clone()], }, vec![], ) .unwrap(); let res = suite.app.execute(Addr::unchecked(BOB), msg).unwrap_err(); assert_eq!(ContractError::NotApproved {}, res.downcast().unwrap()); // marketplace contract is spender, happy path let exec_msg = cw721_base::ExecuteMsg::<Extension>::Approve { spender: marketplace_contract.addr().into(), token_id: 
token.id.clone(), expires: None, }; let msg = nft_contract.call(exec_msg).unwrap(); suite.app.execute(Addr::unchecked(BOB), msg).unwrap(); let msg = marketplace_contract .call( ExecuteMsg::ListTokens { tokens: vec![token.clone()], }, vec![], ) .unwrap(); suite.app.execute(Addr::unchecked(BOB), msg).unwrap(); let t = marketplace_contract.token(&suite.app, TOKEN_ID1).unwrap(); assert_eq!(t.token, token) } #[test] fn test_delist_token() { let mut suite = Suite::init().unwrap(); let (_nft_contract, marketplace_contract) = suite.proper_instantiate(); // list token let token = crate::state::Token { id: TOKEN_ID1.into(), price: Default::default(), on_sale: true, }; let msg = marketplace_contract .call( ExecuteMsg::ListTokens { tokens: vec![token.clone()], }, vec![], ) .unwrap(); suite.app.execute(Addr::unchecked(ADMIN), msg).unwrap(); let msg = marketplace_contract .call( ExecuteMsg::DelistTokens { tokens: vec![token.id.clone()], }, vec![], ) .unwrap(); // only owner can delist let res = suite .app .execute(Addr::unchecked(RANDOM), msg.clone()) .unwrap_err(); assert_eq!(ContractError::Unauthorized {}, res.downcast().unwrap()); // happy path suite.app.execute(Addr::unchecked(BOB), msg).unwrap(); let t = marketplace_contract.token(&suite.app, TOKEN_ID1).unwrap(); assert_eq!( t.token, crate::state::Token { id: token.id, price: token.price, on_sale: false } ) } #[test] fn test_change_price() { let mut suite = Suite::init().unwrap(); let (nft_contract, marketplace_contract) = suite.proper_instantiate(); let token = crate::state::Token { id: TOKEN_ID1.into(), price: Uint128::new(1), on_sale: true, }; let msg = marketplace_contract .call( ExecuteMsg::ListTokens { tokens: vec![token.clone()], }, vec![], ) .unwrap(); suite.app.execute(Addr::unchecked(ADMIN), msg).unwrap(); let msg = marketplace_contract .call( ExecuteMsg::UpdatePrice { token: TOKEN_ID1.into(), price: Uint128::new(100), }, vec![], ) .unwrap(); // only approved can update price let res = suite .app .execute(Addr::unchecked(RANDOM), msg.clone()) .unwrap_err(); assert_eq!(ContractError::Unauthorized {}, res.downcast().unwrap()); // RANDOM now approved let exec_msg: cw721_base::ExecuteMsg<Extension> = cw721_base::ExecuteMsg::Approve { spender: RANDOM.into(), token_id: TOKEN_ID1.into(), expires: None, }; let cosmos_msg = nft_contract.call(exec_msg).unwrap(); suite.app.execute(Addr::unchecked(BOB), cosmos_msg).unwrap(); // happy path suite.app.execute(Addr::unchecked(RANDOM), msg).unwrap(); let t = marketplace_contract.token(&suite.app, TOKEN_ID1).unwrap(); assert_eq!( t.token, crate::state::Token { id: token.id, price: Uint128::new(100), on_sale: true } ) } #[test] fn test_delist_and_register() { let mut suite = Suite::init().unwrap(); let (nft_contract, marketplace_contract) = suite.proper_instantiate(); // list token let mut token = crate::state::Token { id: TOKEN_ID1.into(), price: Uint128::new(100), on_sale: true, }; // owner approves let exec_msg = cw721_base::ExecuteMsg::<Extension>::Approve { spender: marketplace_contract.addr().into(), token_id: token.id.clone(), expires: None, }; let msg = nft_contract.call(exec_msg).unwrap(); suite.app.execute(Addr::unchecked(BOB), msg).unwrap(); // admin lists let msg = marketplace_contract .call( ExecuteMsg::ListTokens { tokens: vec![token.clone()], }, vec![], ) .unwrap(); suite.app.execute(Addr::unchecked(ADMIN), msg).unwrap(); // owner delists let msg = marketplace_contract .call( ExecuteMsg::DelistTokens { tokens: vec![token.id.clone()], }, vec![], ) .unwrap(); 
suite.app.execute(Addr::unchecked(BOB), msg).unwrap(); let new_price = Uint128::new(14); token.price = new_price; // owner lists let msg = marketplace_contract .call( ExecuteMsg::ListTokens { tokens: vec![token], }, vec![], ) .unwrap(); suite.app.execute(Addr::unchecked(BOB), msg).unwrap(); let t = marketplace_contract.token(&suite.app, TOKEN_ID1).unwrap(); assert_eq!( t.token.price, new_price ) } #[test] fn test_buy() { let mut suite = Suite::init().unwrap(); let (nft_contract, marketplace_contract) = suite.proper_instantiate(); let price = Uint128::new(100); let fee = Uint128::new(3); let token = crate::state::Token { id: TOKEN_ID1.into(), price, on_sale: true, }; let msg = marketplace_contract .call( ExecuteMsg::ListTokens { tokens: vec![token.clone()], }, vec![], ) .unwrap(); suite.app.execute(Addr::unchecked(ADMIN), msg).unwrap(); // approve marketplace let exec_msg = cw721_base::ExecuteMsg::<Extension>::Approve { spender: marketplace_contract.addr().into(), token_id: token.id, expires: None, }; let msg = nft_contract.call(exec_msg).unwrap(); suite.app.execute(Addr::unchecked(BOB), msg).unwrap(); // no tokens let msg = marketplace_contract .call( ExecuteMsg::Buy { recipient: None, token_id: TOKEN_ID1.into(), }, vec![], ) .unwrap(); let res = suite.app.execute(Addr::unchecked(RANDOM), msg).unwrap_err(); assert_eq!( ContractError::SendSingleNativeToken {}, res.downcast().unwrap() ); // multiple tokens let msg = marketplace_contract .call( ExecuteMsg::Buy { recipient: None, token_id: TOKEN_ID1.into(), }, vec![ Coin { denom: "ujuno".into(), amount: Uint128::new(2), }, Coin { denom: "zuhaha".into(), amount: Uint128::new(2), }, ], ) .unwrap(); let res = suite.app.execute(Addr::unchecked(RANDOM), msg).unwrap_err(); assert_eq!( ContractError::SendSingleNativeToken {}, res.downcast().unwrap() ); // disallowed native token let msg = marketplace_contract .call( ExecuteMsg::Buy { recipient: None, token_id: TOKEN_ID1.into(), }, vec![Coin { denom: "zuhaha".into(), amount: Uint128::new(1), }], ) .unwrap(); let res = suite.app.execute(Addr::unchecked(RANDOM), msg).unwrap_err(); assert_eq!( ContractError::NativeDenomNotAllowed { denom: "zuhaha".into() }, res.downcast().unwrap() ); // wrong coin amount let msg = marketplace_contract .call( ExecuteMsg::Buy { recipient: None, token_id: TOKEN_ID1.into(), }, vec![Coin { denom: ALLOWED_NATIVE.into(), amount: Uint128::new(200), }], ) .unwrap(); let res = suite.app.execute(Addr::unchecked(RANDOM), msg).unwrap_err(); assert_eq!( ContractError::InsufficientBalance { need: Uint128::new(103), sent: Uint128::new(200), }, res.downcast().unwrap() ); // wrong coin amount let msg = marketplace_contract .call( ExecuteMsg::Buy { recipient: None, token_id: TOKEN_ID1.into(), }, vec![Coin { denom: ALLOWED_NATIVE.into(), amount: Uint128::new(10000), }], ) .unwrap(); let res = suite.app.execute(Addr::unchecked(RANDOM), msg).unwrap_err(); assert_eq!( ContractError::InsufficientBalance { need: Uint128::new(103), sent: Uint128::new(10000) }, res.downcast().unwrap() ); // happy path let msg = marketplace_contract .call( ExecuteMsg::Buy { recipient: None, token_id: TOKEN_ID1.into(), }, vec![Coin { denom: ALLOWED_NATIVE.into(), amount: Uint128::new(103), }], ) .unwrap(); suite.app.execute(Addr::unchecked(RANDOM), msg).unwrap(); // collector balance updated let collector_balance = suite .app .wrap() .query_balance(COLLECTOR, ALLOWED_NATIVE) .unwrap(); assert_eq!(collector_balance.amount, fee); // nft owner updated let res = nft_contract .owner_of(&suite.app.wrap(), TOKEN_ID1, 
false) .unwrap(); assert_eq!(res.owner, String::from(RANDOM)); } #[test] fn test_query_tokens_on_sale() { let mut suite = Suite::init().unwrap(); let (nft_contract, marketplace_contract) = suite.proper_instantiate(); let token1 = Token { id: String::from(TOKEN_ID1), price: Default::default(), on_sale: true, }; let token2 = Token { id: String::from(TOKEN_ID2), price: Default::default(), on_sale: true, }; let token_false = Token { id: String::from(TOKEN_ID3), price: Default::default(), on_sale: false, }; let msg = marketplace_contract .call( ExecuteMsg::ListTokens { tokens: vec![token1.clone(), token2.clone(), token_false.clone()], }, vec![], ) .unwrap(); // register token suite .app .execute(Addr::unchecked(ADMIN), msg.clone()) .unwrap(); // query tokens on sale let query_msg = QueryMsg::ListTokensOnSale { start_after: None, limit: None }; let res: TokensResponse = suite.app.wrap() .query_wasm_smart(marketplace_contract.addr(), &query_msg).unwrap(); assert_eq!(res.tokens,vec![token1, token2]) } }
31.925562
99
0.480225
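The buy-path assertions above all reduce to one piece of arithmetic: a 3% fee (Decimal::from_ratio(3u64, 100u64)) on top of the 100-token list price, so the buyer must send exactly 103 and the collector receives 3. A minimal sketch of that calculation with cosmwasm-std types — how the contract itself computes and rounds the fee is an assumption here:

use cosmwasm_std::{Decimal, Uint128};

fn main() {
    let price = Uint128::new(100);
    let fee = price * Decimal::from_ratio(3u64, 100u64); // 100 * 3/100 = 3
    assert_eq!(fee, Uint128::new(3));
    // `need` in the InsufficientBalance errors asserted above
    assert_eq!(price + fee, Uint128::new(103));
}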
01d32570f78a2cc28b2c6200ffea4d57adec5c7d
17,057
use super::{InlineAsmArch, InlineAsmType}; use crate::spec::Target; use rustc_data_structures::stable_set::FxHashSet; use rustc_macros::HashStable_Generic; use rustc_span::Symbol; use std::fmt; def_reg_class! { X86 X86InlineAsmRegClass { reg, reg_abcd, reg_byte, xmm_reg, ymm_reg, zmm_reg, kreg, mmx_reg, x87_reg, } } impl X86InlineAsmRegClass { pub fn valid_modifiers(self, arch: super::InlineAsmArch) -> &'static [char] { match self { Self::reg => { if arch == InlineAsmArch::X86_64 { &['l', 'x', 'e', 'r'] } else { &['x', 'e'] } } Self::reg_abcd => { if arch == InlineAsmArch::X86_64 { &['l', 'h', 'x', 'e', 'r'] } else { &['l', 'h', 'x', 'e'] } } Self::reg_byte => &[], Self::xmm_reg | Self::ymm_reg | Self::zmm_reg => &['x', 'y', 'z'], Self::kreg => &[], Self::mmx_reg | Self::x87_reg => &[], } } pub fn suggest_class(self, _arch: InlineAsmArch, ty: InlineAsmType) -> Option<Self> { match self { Self::reg | Self::reg_abcd if ty.size().bits() == 8 => Some(Self::reg_byte), _ => None, } } pub fn suggest_modifier( self, arch: InlineAsmArch, ty: InlineAsmType, ) -> Option<(char, &'static str)> { match self { Self::reg => match ty.size().bits() { 16 => Some(('x', "ax")), 32 if arch == InlineAsmArch::X86_64 => Some(('e', "eax")), _ => None, }, Self::reg_abcd => match ty.size().bits() { 16 => Some(('x', "ax")), 32 if arch == InlineAsmArch::X86_64 => Some(('e', "eax")), _ => None, }, Self::reg_byte => None, Self::xmm_reg => None, Self::ymm_reg => match ty.size().bits() { 256 => None, _ => Some(('x', "xmm0")), }, Self::zmm_reg => match ty.size().bits() { 512 => None, 256 => Some(('y', "ymm0")), _ => Some(('x', "xmm0")), }, Self::kreg => None, Self::mmx_reg | Self::x87_reg => None, } } pub fn default_modifier(self, arch: InlineAsmArch) -> Option<(char, &'static str)> { match self { Self::reg | Self::reg_abcd => { if arch == InlineAsmArch::X86_64 { Some(('r', "rax")) } else { Some(('e', "eax")) } } Self::reg_byte => None, Self::xmm_reg => Some(('x', "xmm0")), Self::ymm_reg => Some(('y', "ymm0")), Self::zmm_reg => Some(('z', "zmm0")), Self::kreg => None, Self::mmx_reg | Self::x87_reg => None, } } pub fn supported_types( self, arch: InlineAsmArch, ) -> &'static [(InlineAsmType, Option<Symbol>)] { match self { Self::reg | Self::reg_abcd => { if arch == InlineAsmArch::X86_64 { types! { _: I16, I32, I64, F32, F64; } } else { types! { _: I16, I32, F32; } } } Self::reg_byte => types! { _: I8; }, Self::xmm_reg => types! { sse: I32, I64, F32, F64, VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF32(4), VecF64(2); }, Self::ymm_reg => types! { avx: I32, I64, F32, F64, VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF32(4), VecF64(2), VecI8(32), VecI16(16), VecI32(8), VecI64(4), VecF32(8), VecF64(4); }, Self::zmm_reg => types! { avx512f: I32, I64, F32, F64, VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF32(4), VecF64(2), VecI8(32), VecI16(16), VecI32(8), VecI64(4), VecF32(8), VecF64(4), VecI8(64), VecI16(32), VecI32(16), VecI64(8), VecF32(16), VecF64(8); }, Self::kreg => types! 
{ avx512f: I8, I16; avx512bw: I32, I64; }, Self::mmx_reg | Self::x87_reg => &[], } } } fn x86_64_only( arch: InlineAsmArch, _target_features: &FxHashSet<Symbol>, _target: &Target, ) -> Result<(), &'static str> { match arch { InlineAsmArch::X86 => Err("register is only available on x86_64"), InlineAsmArch::X86_64 => Ok(()), _ => unreachable!(), } } fn high_byte( arch: InlineAsmArch, _target_features: &FxHashSet<Symbol>, _target: &Target, ) -> Result<(), &'static str> { match arch { InlineAsmArch::X86_64 => Err("high byte registers cannot be used as an operand on x86_64"), _ => Ok(()), } } fn rbx_reserved( arch: InlineAsmArch, _target_features: &FxHashSet<Symbol>, _target: &Target, ) -> Result<(), &'static str> { match arch { InlineAsmArch::X86 => Ok(()), InlineAsmArch::X86_64 => { Err("rbx is used internally by LLVM and cannot be used as an operand for inline asm") } _ => unreachable!(), } } fn esi_reserved( arch: InlineAsmArch, _target_features: &FxHashSet<Symbol>, _target: &Target, ) -> Result<(), &'static str> { match arch { InlineAsmArch::X86 => { Err("esi is used internally by LLVM and cannot be used as an operand for inline asm") } InlineAsmArch::X86_64 => Ok(()), _ => unreachable!(), } } def_regs! { X86 X86InlineAsmReg X86InlineAsmRegClass { ax: reg, reg_abcd = ["ax", "eax", "rax"], bx: reg, reg_abcd = ["bx", "ebx", "rbx"] % rbx_reserved, cx: reg, reg_abcd = ["cx", "ecx", "rcx"], dx: reg, reg_abcd = ["dx", "edx", "rdx"], si: reg = ["si", "esi", "rsi"] % esi_reserved, di: reg = ["di", "edi", "rdi"], r8: reg = ["r8", "r8w", "r8d"] % x86_64_only, r9: reg = ["r9", "r9w", "r9d"] % x86_64_only, r10: reg = ["r10", "r10w", "r10d"] % x86_64_only, r11: reg = ["r11", "r11w", "r11d"] % x86_64_only, r12: reg = ["r12", "r12w", "r12d"] % x86_64_only, r13: reg = ["r13", "r13w", "r13d"] % x86_64_only, r14: reg = ["r14", "r14w", "r14d"] % x86_64_only, r15: reg = ["r15", "r15w", "r15d"] % x86_64_only, al: reg_byte = ["al"], ah: reg_byte = ["ah"] % high_byte, bl: reg_byte = ["bl"], bh: reg_byte = ["bh"] % high_byte, cl: reg_byte = ["cl"], ch: reg_byte = ["ch"] % high_byte, dl: reg_byte = ["dl"], dh: reg_byte = ["dh"] % high_byte, sil: reg_byte = ["sil"] % x86_64_only, dil: reg_byte = ["dil"] % x86_64_only, r8b: reg_byte = ["r8b"] % x86_64_only, r9b: reg_byte = ["r9b"] % x86_64_only, r10b: reg_byte = ["r10b"] % x86_64_only, r11b: reg_byte = ["r11b"] % x86_64_only, r12b: reg_byte = ["r12b"] % x86_64_only, r13b: reg_byte = ["r13b"] % x86_64_only, r14b: reg_byte = ["r14b"] % x86_64_only, r15b: reg_byte = ["r15b"] % x86_64_only, xmm0: xmm_reg = ["xmm0"], xmm1: xmm_reg = ["xmm1"], xmm2: xmm_reg = ["xmm2"], xmm3: xmm_reg = ["xmm3"], xmm4: xmm_reg = ["xmm4"], xmm5: xmm_reg = ["xmm5"], xmm6: xmm_reg = ["xmm6"], xmm7: xmm_reg = ["xmm7"], xmm8: xmm_reg = ["xmm8"] % x86_64_only, xmm9: xmm_reg = ["xmm9"] % x86_64_only, xmm10: xmm_reg = ["xmm10"] % x86_64_only, xmm11: xmm_reg = ["xmm11"] % x86_64_only, xmm12: xmm_reg = ["xmm12"] % x86_64_only, xmm13: xmm_reg = ["xmm13"] % x86_64_only, xmm14: xmm_reg = ["xmm14"] % x86_64_only, xmm15: xmm_reg = ["xmm15"] % x86_64_only, ymm0: ymm_reg = ["ymm0"], ymm1: ymm_reg = ["ymm1"], ymm2: ymm_reg = ["ymm2"], ymm3: ymm_reg = ["ymm3"], ymm4: ymm_reg = ["ymm4"], ymm5: ymm_reg = ["ymm5"], ymm6: ymm_reg = ["ymm6"], ymm7: ymm_reg = ["ymm7"], ymm8: ymm_reg = ["ymm8"] % x86_64_only, ymm9: ymm_reg = ["ymm9"] % x86_64_only, ymm10: ymm_reg = ["ymm10"] % x86_64_only, ymm11: ymm_reg = ["ymm11"] % x86_64_only, ymm12: ymm_reg = ["ymm12"] % x86_64_only, ymm13: ymm_reg = ["ymm13"] % x86_64_only, 
ymm14: ymm_reg = ["ymm14"] % x86_64_only, ymm15: ymm_reg = ["ymm15"] % x86_64_only, zmm0: zmm_reg = ["zmm0"], zmm1: zmm_reg = ["zmm1"], zmm2: zmm_reg = ["zmm2"], zmm3: zmm_reg = ["zmm3"], zmm4: zmm_reg = ["zmm4"], zmm5: zmm_reg = ["zmm5"], zmm6: zmm_reg = ["zmm6"], zmm7: zmm_reg = ["zmm7"], zmm8: zmm_reg = ["zmm8"] % x86_64_only, zmm9: zmm_reg = ["zmm9"] % x86_64_only, zmm10: zmm_reg = ["zmm10"] % x86_64_only, zmm11: zmm_reg = ["zmm11"] % x86_64_only, zmm12: zmm_reg = ["zmm12"] % x86_64_only, zmm13: zmm_reg = ["zmm13"] % x86_64_only, zmm14: zmm_reg = ["zmm14"] % x86_64_only, zmm15: zmm_reg = ["zmm15"] % x86_64_only, zmm16: zmm_reg = ["zmm16", "xmm16", "ymm16"] % x86_64_only, zmm17: zmm_reg = ["zmm17", "xmm17", "ymm17"] % x86_64_only, zmm18: zmm_reg = ["zmm18", "xmm18", "ymm18"] % x86_64_only, zmm19: zmm_reg = ["zmm19", "xmm19", "ymm19"] % x86_64_only, zmm20: zmm_reg = ["zmm20", "xmm20", "ymm20"] % x86_64_only, zmm21: zmm_reg = ["zmm21", "xmm21", "ymm21"] % x86_64_only, zmm22: zmm_reg = ["zmm22", "xmm22", "ymm22"] % x86_64_only, zmm23: zmm_reg = ["zmm23", "xmm23", "ymm23"] % x86_64_only, zmm24: zmm_reg = ["zmm24", "xmm24", "ymm24"] % x86_64_only, zmm25: zmm_reg = ["zmm25", "xmm25", "ymm25"] % x86_64_only, zmm26: zmm_reg = ["zmm26", "xmm26", "ymm26"] % x86_64_only, zmm27: zmm_reg = ["zmm27", "xmm27", "ymm27"] % x86_64_only, zmm28: zmm_reg = ["zmm28", "xmm28", "ymm28"] % x86_64_only, zmm29: zmm_reg = ["zmm29", "xmm29", "ymm29"] % x86_64_only, zmm30: zmm_reg = ["zmm30", "xmm30", "ymm30"] % x86_64_only, zmm31: zmm_reg = ["zmm31", "xmm31", "ymm31"] % x86_64_only, k1: kreg = ["k1"], k2: kreg = ["k2"], k3: kreg = ["k3"], k4: kreg = ["k4"], k5: kreg = ["k5"], k6: kreg = ["k6"], k7: kreg = ["k7"], mm0: mmx_reg = ["mm0"], mm1: mmx_reg = ["mm1"], mm2: mmx_reg = ["mm2"], mm3: mmx_reg = ["mm3"], mm4: mmx_reg = ["mm4"], mm5: mmx_reg = ["mm5"], mm6: mmx_reg = ["mm6"], mm7: mmx_reg = ["mm7"], st0: x87_reg = ["st(0)", "st"], st1: x87_reg = ["st(1)"], st2: x87_reg = ["st(2)"], st3: x87_reg = ["st(3)"], st4: x87_reg = ["st(4)"], st5: x87_reg = ["st(5)"], st6: x87_reg = ["st(6)"], st7: x87_reg = ["st(7)"], #error = ["bp", "bpl", "ebp", "rbp"] => "the frame pointer cannot be used as an operand for inline asm", #error = ["sp", "spl", "esp", "rsp"] => "the stack pointer cannot be used as an operand for inline asm", #error = ["ip", "eip", "rip"] => "the instruction pointer cannot be used as an operand for inline asm", #error = ["k0"] => "the k0 AVX mask register cannot be used as an operand for inline asm", } } impl X86InlineAsmReg { pub fn emit( self, out: &mut dyn fmt::Write, arch: InlineAsmArch, modifier: Option<char>, ) -> fmt::Result { let reg_default_modifier = match arch { InlineAsmArch::X86 => 'e', InlineAsmArch::X86_64 => 'r', _ => unreachable!(), }; if self as u32 <= Self::dx as u32 { let root = ['a', 'b', 'c', 'd'][self as usize - Self::ax as usize]; match modifier.unwrap_or(reg_default_modifier) { 'l' => write!(out, "{}l", root), 'h' => write!(out, "{}h", root), 'x' => write!(out, "{}x", root), 'e' => write!(out, "e{}x", root), 'r' => write!(out, "r{}x", root), _ => unreachable!(), } } else if self as u32 <= Self::di as u32 { let root = self.name(); match modifier.unwrap_or(reg_default_modifier) { 'l' => write!(out, "{}l", root), 'x' => write!(out, "{}", root), 'e' => write!(out, "e{}", root), 'r' => write!(out, "r{}", root), _ => unreachable!(), } } else if self as u32 <= Self::r15 as u32 { let root = self.name(); match modifier.unwrap_or(reg_default_modifier) { 'l' => write!(out, "{}b", root), 'x' => 
write!(out, "{}w", root), 'e' => write!(out, "{}d", root), 'r' => out.write_str(root), _ => unreachable!(), } } else if self as u32 <= Self::r15b as u32 { out.write_str(self.name()) } else if self as u32 <= Self::xmm15 as u32 { let prefix = modifier.unwrap_or('x'); let index = self as u32 - Self::xmm0 as u32; write!(out, "{}{}", prefix, index) } else if self as u32 <= Self::ymm15 as u32 { let prefix = modifier.unwrap_or('y'); let index = self as u32 - Self::ymm0 as u32; write!(out, "{}{}", prefix, index) } else if self as u32 <= Self::zmm31 as u32 { let prefix = modifier.unwrap_or('z'); let index = self as u32 - Self::zmm0 as u32; write!(out, "{}{}", prefix, index) } else { out.write_str(self.name()) } } pub fn overlapping_regs(self, mut cb: impl FnMut(X86InlineAsmReg)) { macro_rules! reg_conflicts { ( $( $w:ident : $l:ident $h:ident ),*; $( $w2:ident : $l2:ident ),*; $( $x:ident : $y:ident : $z:ident ),*; ) => { match self { $( Self::$w => { cb(Self::$w); cb(Self::$l); cb(Self::$h); } Self::$l => { cb(Self::$w); cb(Self::$l); } Self::$h => { cb(Self::$w); cb(Self::$h); } )* $( Self::$w2 | Self::$l2 => { cb(Self::$w2); cb(Self::$l2); } )* $( Self::$x | Self::$y | Self::$z => { cb(Self::$x); cb(Self::$y); cb(Self::$z); } )* r => cb(r), } }; } // XMM*, YMM* and ZMM* are all different views of the same register. // // See section 15.5 of the combined Intel® 64 and IA-32 Architectures // Software Developer’s Manual for more details. // // We don't need to specify conflicts for [x,y,z]mm[16-31] since these // registers are only available with AVX-512, so we just specify them // as aliases directly. reg_conflicts! { ax : al ah, bx : bl bh, cx : cl ch, dx : dl dh; si : sil, di : dil, r8 : r8b, r9 : r9b, r10 : r10b, r11 : r11b, r12 : r12b, r13 : r13b, r14 : r14b, r15 : r15b; xmm0 : ymm0 : zmm0, xmm1 : ymm1 : zmm1, xmm2 : ymm2 : zmm2, xmm3 : ymm3 : zmm3, xmm4 : ymm4 : zmm4, xmm5 : ymm5 : zmm5, xmm6 : ymm6 : zmm6, xmm7 : ymm7 : zmm7, xmm8 : ymm8 : zmm8, xmm9 : ymm9 : zmm9, xmm10 : ymm10 : zmm10, xmm11 : ymm11 : zmm11, xmm12 : ymm12 : zmm12, xmm13 : ymm13 : zmm13, xmm14 : ymm14 : zmm14, xmm15 : ymm15 : zmm15; } } }
36.214437
99
0.439468
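A hedged sketch (x86_64 only, stable Rust) of what the modifier machinery above does in practice: the 'e' modifier makes emit print the 32-bit name of a reg-class register, and writing the 32-bit view of a register zero-extends into the full 64 bits:

use std::arch::asm;

#[cfg(target_arch = "x86_64")]
fn low_32(x: u64) -> u64 {
    let lo: u64;
    unsafe {
        // `{..:e}` selects the 32-bit form (eax, r8d, ...); a 32-bit `mov`
        // clears the upper half of the destination register.
        asm!("mov {lo:e}, {val:e}", val = in(reg) x, lo = out(reg) lo);
    }
    lo
}

#[cfg(target_arch = "x86_64")]
fn main() {
    assert_eq!(low_32(0xffff_ffff_0000_1234), 0x0000_1234);
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}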
8a07a897213f2f828928d084d70fa48eaaf94dfd
2,564
#[derive(Debug, PartialEq, Clone, Copy, FromPrimitive)] pub enum OpCode { End = 0x00, NextFrame = 0x04, PreviousFrame = 0x05, Play = 0x06, Stop = 0x07, ToggleQuality = 0x08, StopSounds = 0x09, Add = 0x0A, Subtract = 0x0B, Multiply = 0x0C, Divide = 0x0D, Equals = 0x0E, Less = 0x0F, And = 0x10, Or = 0x11, Not = 0x12, StringEquals = 0x13, StringLength = 0x14, StringExtract = 0x15, Pop = 0x17, ToInteger = 0x18, GetVariable = 0x1C, SetVariable = 0x1D, SetTarget2 = 0x20, StringAdd = 0x21, GetProperty = 0x22, SetProperty = 0x23, CloneSprite = 0x24, RemoveSprite = 0x25, Trace = 0x26, StartDrag = 0x27, EndDrag = 0x28, StringLess = 0x29, Throw = 0x2A, CastOp = 0x2B, ImplementsOp = 0x2C, RandomNumber = 0x30, MBStringLength = 0x31, CharToAscii = 0x32, AsciiToChar = 0x33, GetTime = 0x34, MBStringExtract = 0x35, MBCharToAscii = 0x36, MBAsciiToChar = 0x37, Delete = 0x3A, Delete2 = 0x3B, DefineLocal = 0x3C, CallFunction = 0x3D, Return = 0x3E, Modulo = 0x3F, NewObject = 0x40, DefineLocal2 = 0x41, InitArray = 0x42, InitObject = 0x43, TypeOf = 0x44, TargetPath = 0x45, Enumerate = 0x46, Add2 = 0x47, Less2 = 0x48, Equals2 = 0x49, ToNumber = 0x4A, ToString = 0x4B, PushDuplicate = 0x4C, StackSwap = 0x4D, GetMember = 0x4E, SetMember = 0x4F, Increment = 0x50, Decrement = 0x51, CallMethod = 0x52, NewMethod = 0x53, InstanceOf = 0x54, Enumerate2 = 0x55, BitAnd = 0x60, BitOr = 0x61, BitXor = 0x62, BitLShift = 0x63, BitRShift = 0x64, BitURShift = 0x65, StrictEquals = 0x66, Greater = 0x67, StringGreater = 0x68, Extends = 0x69, GotoFrame = 0x81, GetUrl = 0x83, StoreRegister = 0x87, ConstantPool = 0x88, WaitForFrame = 0x8A, SetTarget = 0x8B, GotoLabel = 0x8C, WaitForFrame2 = 0x8D, DefineFunction2 = 0x8E, Try = 0x8F, With = 0x94, Push = 0x96, Jump = 0x99, GetUrl2 = 0x9A, DefineFunction = 0x9B, If = 0x9D, Call = 0x9E, GotoFrame2 = 0x9F, } impl OpCode { pub fn from_u8(n: u8) -> Option<Self> { num_traits::FromPrimitive::from_u8(n) } pub fn format(opcode: u8) -> String { if let Some(op) = Self::from_u8(opcode) { format!("{:?}", op) } else { format!("Unknown({})", opcode) } } }
19.424242
55
0.571373
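Round-trip behaviour of the two helpers, grounded in the table above (0x96 is Push, while 0x01 is not an assigned opcode):

fn main() {
    assert_eq!(OpCode::from_u8(0x96), Some(OpCode::Push));
    assert_eq!(OpCode::format(0x96), "Push");
    assert_eq!(OpCode::format(0x01), "Unknown(1)");
}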
fba990dd6bd4da7243b9299e93ad93fdd176a699
3,195
use std::io;

use image::{self, imageops, DynamicImage, GenericImageView, ImageOutputFormat, RgbaImage};
use rusttype::Font;

use crate::{annotation::Annotation, Result, AA_FACTOR};

pub struct Canvas {
    base: DynamicImage,
    overlay: DynamicImage,
    width: u32,
    height: u32,
}

impl Canvas {
    /// Creates a new canvas based on a buffer of bytes.
    ///
    /// A canvas consists of both a base layer and an upscaled annotation layer (at
    /// `AA_FACTOR` times the original resolution). Text is rendered first at this
    /// upscaled size and then downsampled onto the background.
    pub fn read_from_buffer(buf: &[u8]) -> Result<Canvas> {
        let base = image::load_from_memory(buf)?;
        let (width, height) = base.dimensions();

        Ok(Canvas {
            base,
            overlay: DynamicImage::ImageRgba8(RgbaImage::new(
                width * AA_FACTOR,
                height * AA_FACTOR,
            )),
            width,
            height,
        })
    }

    /// Adds an annotation to the canvas.
    ///
    /// This renders the annotation to the upscaled layer of the canvas that will eventually be
    /// overlaid onto the canvas proper. Text is laid out and drawn at this stage, meaning each
    /// annotation is individually rendered.
    pub fn add_annotation<'a>(
        &mut self,
        annotation: &Annotation,
        font: &Font<'a>,
        scale_multiplier: f32,
    ) {
        // Font scale is, in fact, the height in pixels of each glyph. Here we set that to be
        // one tenth the height of the image itself modified by the scale multiplier provided
        // by the user. The multiplier serves to allow us to shrink or expand text to fit images
        // that are either too tall or too small for a given annotation.
        let scale = (self.height as f32 / 10.0) * scale_multiplier;
        annotation.render_text(&mut self.overlay, font, scale, self.width, self.height);
    }

    /// Produces the final rendering of the canvas.
    ///
    /// This rendering step applies the upscaled overlay to the base canvas, thereby adding the
    /// desired text to the image proper. This is done via resizing and then overlaying. It's not
    /// rocket surgery; the whole process is three lines of code.
    ///
    /// I've added this documentation just as a reminder of what's actually going on here.
    pub fn render(&mut self) {
        let downsampled_text = imageops::resize(
            &self.overlay,
            self.width,
            self.height,
            imageops::FilterType::Lanczos3,
        );
        let image = &DynamicImage::ImageRgba8(downsampled_text);
        imageops::overlay(&mut self.base, image, 0, 0);
    }

    pub fn save_jpg(&self, stream: &mut impl io::Write) -> io::Result<()> {
        self.base
            .write_to(stream, ImageOutputFormat::Jpeg(100))
            .map_err(|e| io::Error::new(io::ErrorKind::Other, e))
    }

    pub fn save_png(&self, stream: &mut impl io::Write) -> io::Result<()> {
        self.base
            .write_to(stream, ImageOutputFormat::Png)
            .map_err(|e| io::Error::new(io::ErrorKind::Other, e))
    }
}
37.151163
97
0.621596
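A sketch of the intended call sequence for the Canvas API above — the Annotation value and font loading come from elsewhere in the crate, and it is assumed here that the crate's Result type converts from io::Error:

use rusttype::Font;

fn annotate(image_bytes: &[u8], annotation: &Annotation, font: &Font<'_>) -> Result<Vec<u8>> {
    let mut canvas = Canvas::read_from_buffer(image_bytes)?; // decode + allocate the upscaled overlay
    canvas.add_annotation(annotation, font, 1.0); // 1.0 = no extra text scaling
    canvas.render(); // downsample the overlay onto the base image
    let mut out = Vec::new();
    canvas.save_png(&mut out)?;
    Ok(out)
}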
718ed735123024405f36da8b6778610354a98739
1,342
use criterion::{ black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput, }; use once_cell::sync::Lazy; static TOKIO: Lazy<tokio::runtime::Runtime> = Lazy::new(|| { tokio::runtime::Builder::new_multi_thread() .enable_all() .build() .unwrap() }); fn blake2b(hash: sodoken::BufWrite, data: sodoken::BufWrite) { TOKIO.block_on(async move { sodoken::hash::blake2b::hash(hash, data).await.unwrap(); }); } fn bench(c: &mut Criterion) { static KB: usize = 1024; let mut group = c.benchmark_group("blake2b"); // CURRENTLY we switch over to spawn_blocking above 50 * KB for size in [KB, 50 * KB, 51 * KB, 1024 * KB].iter() { group.throughput(Throughput::Bytes(*size as u64)); group.bench_with_input( BenchmarkId::from_parameter(size), size, move |b, &size| { let hash = sodoken::BufWrite::new_no_lock( sodoken::hash::blake2b::BYTES_MIN, ); let data = sodoken::BufWrite::new_no_lock(size); b.iter(move || { blake2b(black_box(hash.clone()), black_box(data.clone())); }); }, ); } group.finish(); } criterion_group!(benches, bench); criterion_main!(benches);
29.173913
78
0.568554
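The "50 * KB" comment above refers to a size cutoff inside sodoken; a minimal sketch of that pattern (the helper hash and the exact threshold are assumptions here, not sodoken's real code):

async fn hash_adaptive(data: Vec<u8>) -> u64 {
    // Stand-in for the real blake2b call.
    fn cheap_hash(data: &[u8]) -> u64 {
        data.iter()
            .fold(0u64, |acc, &b| acc.wrapping_mul(31).wrapping_add(b as u64))
    }

    if data.len() <= 50 * 1024 {
        // Small inputs: hash inline on the async executor thread.
        cheap_hash(&data)
    } else {
        // Large inputs: move the work to the blocking pool.
        tokio::task::spawn_blocking(move || cheap_hash(&data))
            .await
            .unwrap()
    }
}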
6218211df53beb0e5516087ed5c1d3dec3f3fbbf
2,782
use sp_core::crypto::Pair;
use sp_keyring::AccountKeyring;
use sp_runtime::generic::Era;
use std::{convert::TryFrom, string::String};

use substrate_api_client::{
    compose_call, compose_extrinsic_offline, extrinsic::xt_primitives::UncheckedExtrinsicV4,
    node_metadata::Metadata, Api, XtStatus,
};

fn main() {
    // instantiate an Api that connects to the given address
    let url = "127.0.0.1:9944";
    // If no signer were set anywhere in the program, Api would need an explicit signer type
    // annotation, because the type has to be known at compile time.
    let signer = AccountKeyring::Bob.pair();

    // sets up the api client and retrieves the node metadata
    let api = Api::new(format!("ws://{}", url)).set_signer(signer.clone());

    // gets the current nonce of Bob so we can increment it manually later
    let mut nonce = api.get_nonce().unwrap();

    // data from the node required in the extrinsic
    let meta = Metadata::try_from(api.get_metadata()).unwrap();
    let genesis_hash = api.genesis_hash;
    let spec_version = api.runtime_version.spec_version;
    let transaction_version = api.runtime_version.transaction_version;

    // Example bytes to add
    let bytes_to_add: Vec<u8> = vec![1, 2, 3, 4];

    // Example CID for the example bytes added above (vec![1, 2, 3, 4])
    let cid = String::from("QmRgctVSR8hvGRDLv7c5H7BCji7m1VXRfEE1vW78CFagD7").into_bytes();

    // Example multiaddr to connect IPFS with
    let multiaddr = String::from(
        "/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ",
    )
    .into_bytes();

    // Example Peer Id
    let peer_id = String::from("QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ").into_bytes();

    // Create input for all calls
    let calls = vec![
        ("ipfs_add_bytes", bytes_to_add),
        ("ipfs_cat_bytes", cid.clone()),
        ("ipfs_connect", multiaddr.clone()),
        ("ipfs_insert_pin", cid.clone()),
        ("ipfs_dht_find_peer", peer_id),
        ("ipfs_dht_find_providers", cid.clone()),
        ("ipfs_remove_pin", cid),
        ("ipfs_disconnect", multiaddr),
    ];

    // Create an extrinsic for each call and wait until it is finalized
    for call in calls {
        println!("\n Creating Extrinsic for {}", call.0);
        let _call = compose_call!(meta, "TemplateModule", call.0, call.1);

        let xt: UncheckedExtrinsicV4<_> = compose_extrinsic_offline!(
            signer,
            _call,
            nonce,
            Era::Immortal,
            genesis_hash,
            genesis_hash,
            spec_version,
            transaction_version
        );

        let blockh = api
            .send_extrinsic(xt.hex_encode(), XtStatus::Finalized)
            .unwrap();
        println!("Transaction got finalized in block {:?}", blockh);
        nonce += 1;
    }
}
37.093333
117
0.647735
d917d74cfc37b4bc720cfd72aabb42d88cbbebf4
247
mod auth; mod compress; mod directory; mod index; mod log; mod method; mod proxy; mod rewrite; pub use auth::*; pub use compress::*; pub use directory::*; pub use index::*; pub use log::*; pub use method::*; pub use proxy::*; pub use rewrite::*;
13.722222
21
0.672065
bf67ff78d1b185f2e780aa41212440223f8dba74
2,014
// Silence some warnings so they don't distract from the exercise.
#![allow(unused_mut)]

fn main() {
    // This fancy stuff either gets the first argument as a String, or prints
    // usage and exits if an argument was not supplied to the program.
    let mut arg: String = std::env::args().nth(1).unwrap_or_else(|| {
        println!("Please supply an argument to this program.");
        std::process::exit(-1);
    });

    // 1. Write a function `inspect` that takes a reference to a String, returns nothing, but
    // prints whether the contents of the String are plural or singular. Then uncomment and run this
    // code with `cargo run apple` and `cargo run apples`. Hint: use `.ends_with("s")` on the
    // String reference
    //
    inspect(&arg);

    // 2. Write a function `change` that takes a *mutable* reference to a String and adds an "s" to
    // the String if it doesn't already end with "s". Then uncomment and run the code below with
    // `cargo run apple`. Hint: use `.push_str("s")` on the mutable String reference to add an "s".
    //
    //change(&mut arg);
    //println!("I have many {}", arg);

    // 3. Write a function `eat` that accepts ownership of (consumes) a String and returns a bool
    // indicating whether or not the String both starts with a "b" AND contains an "a".
    // Hint 1: use `.starts_with("b")` and `.contains("a")`
    // Hint 2: `&&` is the boolean "AND" operator
    //
    //if eat(arg) {
    //    println!("Might be bananas");
    //} else {
    //    println!("Not bananas");
    //}

    // Try running this program with "boat", "banana", and "grapes" as the arguments :-)

    // Challenge: Write a function "add" that takes *references* to two integer arguments,
    // dereferences them and adds them together, and returns the result.
    //
    // println!("1 + 2 = {}, even via references", add(&1, &2));
}

fn inspect(r: &String) {
    if r.ends_with("s") {
        println!("Plural")
    } else {
        println!("Singular")
    }
}
38.730769
100
0.621648
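For reference, one possible completion of the remaining exercises in the file above — the file deliberately leaves these unwritten, and these follow the hints in its comments rather than any official solution:

fn change(s: &mut String) {
    // Only pluralize once.
    if !s.ends_with("s") {
        s.push_str("s");
    }
}

fn eat(s: String) -> bool {
    // Takes ownership of the String and consumes it.
    s.starts_with("b") && s.contains("a")
}

fn add(a: &i32, b: &i32) -> i32 {
    // Dereference both references and add the values.
    *a + *b
}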