#![cfg_attr(not(feature = "std"), no_std)]
// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256.
#![recursion_limit = "256"]

// Make the WASM binary available.
#[cfg(feature = "std")]
include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));

use pallet_grandpa::{
    fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList,
};
use sp_api::impl_runtime_apis;
use sp_consensus_aura::sr25519::AuthorityId as AuraId;
use sp_core::{crypto::KeyTypeId, OpaqueMetadata};
use sp_runtime::{
    create_runtime_str, generic, impl_opaque_keys,
    traits::{AccountIdLookup, BlakeTwo256, Block as BlockT, IdentifyAccount, NumberFor, Verify},
    transaction_validity::{TransactionSource, TransactionValidity},
    ApplyExtrinsicResult, MultiSignature,
};
use sp_std::prelude::*;
#[cfg(feature = "std")]
use sp_version::NativeVersion;
use sp_version::RuntimeVersion;

// A few exports that help ease life for downstream crates.
pub use frame_support::{
    construct_runtime, parameter_types,
    traits::{ConstU128, ConstU32, ConstU8, KeyOwnerProofSystem, Randomness, StorageInfo},
    weights::{
        constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND},
        IdentityFee, Weight,
    },
    StorageValue,
};
pub use pallet_balances::Call as BalancesCall;
pub use pallet_timestamp::Call as TimestampCall;
use pallet_transaction_payment::CurrencyAdapter;
#[cfg(any(feature = "std", test))]
pub use sp_runtime::BuildStorage;
pub use sp_runtime::{Perbill, Permill};

/// Import the kitties pallet.
pub use pallet_kitties;

/// An index to a block.
pub type BlockNumber = u32;

/// Alias to 512-bit hash when used in the context of a transaction signature on the chain.
pub type Signature = MultiSignature;

/// Some way of identifying an account on the chain. We intentionally make it equivalent
/// to the public key of our transaction signing scheme.
pub type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId;

/// Balance of an account.
pub type Balance = u128;

/// Index of a transaction in the chain.
pub type Index = u32;

/// A hash of some data used by the chain.
pub type Hash = sp_core::H256;

/// Opaque types. These are used by the CLI to instantiate machinery that doesn't need to know
/// the specifics of the runtime. They can then be made to be agnostic over specific formats
/// of data like extrinsics, allowing for them to continue syncing the network through upgrades
/// to even the core data structures.
pub mod opaque {
    use super::*;

    pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic;

    /// Opaque block header type.
    pub type Header = generic::Header<BlockNumber, BlakeTwo256>;
    /// Opaque block type.
    pub type Block = generic::Block<Header, UncheckedExtrinsic>;
    /// Opaque block identifier type.
    pub type BlockId = generic::BlockId<Block>;

    impl_opaque_keys! {
        pub struct SessionKeys {
            pub aura: Aura,
            pub grandpa: Grandpa,
        }
    }
}

// To learn more about runtime versioning and what each of the following values means:
// https://docs.substrate.io/v3/runtime/upgrades#runtime-versioning
#[sp_version::runtime_version]
pub const VERSION: RuntimeVersion = RuntimeVersion {
    spec_name: create_runtime_str!("node-kitties"),
    impl_name: create_runtime_str!("node-kitties"),
    authoring_version: 1,
    // The version of the runtime specification. A full node will not attempt to use its native
    // runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`,
    // `spec_version`, and `authoring_version` are the same between Wasm and native.
    // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use
    // the compatible custom types.
    spec_version: 100,
    impl_version: 1,
    apis: RUNTIME_API_VERSIONS,
    transaction_version: 1,
    state_version: 1,
};

/// This determines the average expected block time that we are targeting.
/// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`.
/// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked
/// up by `pallet_aura` to implement `fn slot_duration()`.
///
/// Change this to adjust the block time.
pub const MILLISECS_PER_BLOCK: u64 = 6000;

// NOTE: Currently it is not possible to change the slot duration after the chain has started.
// Attempting to do so will brick block production.
pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK;

// Time is measured by number of blocks.
pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber);
pub const HOURS: BlockNumber = MINUTES * 60;
pub const DAYS: BlockNumber = HOURS * 24;

/// The version information used to identify this runtime when compiled natively.
#[cfg(feature = "std")]
pub fn native_version() -> NativeVersion {
    NativeVersion { runtime_version: VERSION, can_author_with: Default::default() }
}

const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75);

parameter_types! {
    pub const Version: RuntimeVersion = VERSION;
    pub const BlockHashCount: BlockNumber = 2400;
    /// We allow for 2 seconds of compute with a 6 second average block time.
    pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights
        ::with_sensible_defaults(2 * WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO);
    pub BlockLength: frame_system::limits::BlockLength = frame_system::limits::BlockLength
        ::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO);
    pub const SS58Prefix: u8 = 42;
}

// Configure FRAME pallets to include in runtime.

impl frame_system::Config for Runtime {
    /// The basic call filter to use in dispatchable.
    type BaseCallFilter = frame_support::traits::Everything;
    /// Block & extrinsics weights: base values and limits.
    type BlockWeights = BlockWeights;
    /// The maximum length of a block (in bytes).
    type BlockLength = BlockLength;
    /// The identifier used to distinguish between accounts.
    type AccountId = AccountId;
    /// The aggregated dispatch type that is available for extrinsics.
    type Call = Call;
    /// The lookup mechanism to get account ID from whatever is passed in dispatchers.
    type Lookup = AccountIdLookup<AccountId, ()>;
    /// The index type for storing how many extrinsics an account has signed.
    type Index = Index;
    /// The index type for blocks.
    type BlockNumber = BlockNumber;
    /// The type for hashing blocks and tries.
    type Hash = Hash;
    /// The hashing algorithm used.
    type Hashing = BlakeTwo256;
    /// The header type.
    type Header = generic::Header<BlockNumber, BlakeTwo256>;
    /// The ubiquitous event type.
    type Event = Event;
    /// The ubiquitous origin type.
    type Origin = Origin;
    /// Maximum number of block number to block hash mappings to keep (oldest pruned first).
    type BlockHashCount = BlockHashCount;
    /// The weight of database operations that the runtime can invoke.
    type DbWeight = RocksDbWeight;
    /// Version of the runtime.
    type Version = Version;
    /// Converts a module to the index of the module in `construct_runtime!`.
    ///
    /// This type is being generated by `construct_runtime!`.
    type PalletInfo = PalletInfo;
    /// What to do if a new account is created.
    type OnNewAccount = ();
    /// What to do if an account is fully reaped from the system.
    type OnKilledAccount = ();
    /// The data to be stored in an account.
    type AccountData = pallet_balances::AccountData<Balance>;
    /// Weight information for the extrinsics of this pallet.
    type SystemWeightInfo = ();
    /// This is used as an identifier of the chain. 42 is the generic substrate prefix.
    type SS58Prefix = SS58Prefix;
    /// The set code logic, just the default since we're not a parachain.
    type OnSetCode = ();
    type MaxConsumers = frame_support::traits::ConstU32<16>;
}

impl pallet_randomness_collective_flip::Config for Runtime {}

impl pallet_aura::Config for Runtime {
    type AuthorityId = AuraId;
    type DisabledValidators = ();
    type MaxAuthorities = ConstU32<32>;
}

impl pallet_grandpa::Config for Runtime {
    type Event = Event;
    type Call = Call;

    type KeyOwnerProofSystem = ();

    type KeyOwnerProof =
        <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, GrandpaId)>>::Proof;

    type KeyOwnerIdentification = <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(
        KeyTypeId,
        GrandpaId,
    )>>::IdentificationTuple;

    type HandleEquivocation = ();

    type WeightInfo = ();

    type MaxAuthorities = ConstU32<32>;
}

parameter_types! {
    pub const MinimumPeriod: u64 = SLOT_DURATION / 2;
}

impl pallet_timestamp::Config for Runtime {
    /// A timestamp: milliseconds since the unix epoch.
    type Moment = u64;
    type OnTimestampSet = Aura;
    type MinimumPeriod = MinimumPeriod;
    type WeightInfo = ();
}

impl pallet_balances::Config for Runtime {
    type MaxLocks = ConstU32<50>;
    type MaxReserves = ();
    type ReserveIdentifier = [u8; 8];
    /// The type for recording an account's balance.
    type Balance = Balance;
    /// The ubiquitous event type.
    type Event = Event;
    type DustRemoval = ();
    type ExistentialDeposit = ConstU128<500>;
    type AccountStore = System;
    type WeightInfo = pallet_balances::weights::SubstrateWeight<Runtime>;
}

parameter_types! {
    pub const TransactionByteFee: Balance = 1;
}

impl pallet_transaction_payment::Config for Runtime {
    type OnChargeTransaction = CurrencyAdapter<Balances, ()>;
    type TransactionByteFee = TransactionByteFee;
    type OperationalFeeMultiplier = ConstU8<5>;
    type WeightToFee = IdentityFee<Balance>;
    type FeeMultiplierUpdate = ();
}

impl pallet_sudo::Config for Runtime {
    type Event = Event;
    type Call = Call;
}

/// Configure the pallet-kitties in pallets/kitties.
parameter_types! {
    pub const MaxKittyOwned: u32 = 50;
}

impl pallet_kitties::Config for Runtime {
    type Event = Event;
    type Currency = Balances;
    type KittyRandomness = RandomnessCollectiveFlip;
    type MaxKittyOwned = MaxKittyOwned;
}

// Create the runtime by composing the FRAME pallets that were previously configured.
construct_runtime!(
    pub enum Runtime where
        Block = Block,
        NodeBlock = opaque::Block,
        UncheckedExtrinsic = UncheckedExtrinsic
    {
        System: frame_system,
        RandomnessCollectiveFlip: pallet_randomness_collective_flip,
        Timestamp: pallet_timestamp,
        Aura: pallet_aura,
        Grandpa: pallet_grandpa,
        Balances: pallet_balances,
        TransactionPayment: pallet_transaction_payment,
        Sudo: pallet_sudo,
        // Include the custom logic from pallet-kitties in the runtime.
        KittiesModule: pallet_kitties,
    }
);

/// The address format for describing accounts.
pub type Address = sp_runtime::MultiAddress<AccountId, ()>;
/// Block header type as expected by this runtime.
pub type Header = generic::Header<BlockNumber, BlakeTwo256>;
/// Block type as expected by this runtime.
pub type Block = generic::Block<Header, UncheckedExtrinsic>;
/// The SignedExtension to the basic transaction logic.
pub type SignedExtra = (
    frame_system::CheckNonZeroSender<Runtime>,
    frame_system::CheckSpecVersion<Runtime>,
    frame_system::CheckTxVersion<Runtime>,
    frame_system::CheckGenesis<Runtime>,
    frame_system::CheckEra<Runtime>,
    frame_system::CheckNonce<Runtime>,
    frame_system::CheckWeight<Runtime>,
    pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
);
/// Unchecked extrinsic type as expected by this runtime.
pub type UncheckedExtrinsic = generic::UncheckedExtrinsic<Address, Call, Signature, SignedExtra>;
/// Executive: handles dispatch to the various modules.
pub type Executive = frame_executive::Executive<
    Runtime,
    Block,
    frame_system::ChainContext<Runtime>,
    Runtime,
    AllPalletsWithSystem,
>;

#[cfg(feature = "runtime-benchmarks")]
#[macro_use]
extern crate frame_benchmarking;

#[cfg(feature = "runtime-benchmarks")]
mod benches {
    define_benchmarks!(
        [frame_benchmarking, BaselineBench::<Runtime>]
        [frame_system, SystemBench::<Runtime>]
        [pallet_balances, Balances]
        [pallet_timestamp, Timestamp]
        [pallet_kitties, KittiesModule]
    );
}

impl_runtime_apis! {
    impl sp_api::Core<Block> for Runtime {
        fn version() -> RuntimeVersion {
            VERSION
        }

        fn execute_block(block: Block) {
            Executive::execute_block(block);
        }

        fn initialize_block(header: &<Block as BlockT>::Header) {
            Executive::initialize_block(header)
        }
    }

    impl sp_api::Metadata<Block> for Runtime {
        fn metadata() -> OpaqueMetadata {
            OpaqueMetadata::new(Runtime::metadata().into())
        }
    }

    impl sp_block_builder::BlockBuilder<Block> for Runtime {
        fn apply_extrinsic(extrinsic: <Block as BlockT>::Extrinsic) -> ApplyExtrinsicResult {
            Executive::apply_extrinsic(extrinsic)
        }

        fn finalize_block() -> <Block as BlockT>::Header {
            Executive::finalize_block()
        }

        fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<<Block as BlockT>::Extrinsic> {
            data.create_extrinsics()
        }

        fn check_inherents(
            block: Block,
            data: sp_inherents::InherentData,
        ) -> sp_inherents::CheckInherentsResult {
            data.check_extrinsics(&block)
        }
    }

    impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block> for Runtime {
        fn validate_transaction(
            source: TransactionSource,
            tx: <Block as BlockT>::Extrinsic,
            block_hash: <Block as BlockT>::Hash,
        ) -> TransactionValidity {
            Executive::validate_transaction(source, tx, block_hash)
        }
    }

    impl sp_offchain::OffchainWorkerApi<Block> for Runtime {
        fn offchain_worker(header: &<Block as BlockT>::Header) {
            Executive::offchain_worker(header)
        }
    }

    impl sp_consensus_aura::AuraApi<Block, AuraId> for Runtime {
        fn slot_duration() -> sp_consensus_aura::SlotDuration {
            sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration())
        }

        fn authorities() -> Vec<AuraId> {
            Aura::authorities().into_inner()
        }
    }

    impl sp_session::SessionKeys<Block> for Runtime {
        fn generate_session_keys(seed: Option<Vec<u8>>) -> Vec<u8> {
            opaque::SessionKeys::generate(seed)
        }

        fn decode_session_keys(
            encoded: Vec<u8>,
        ) -> Option<Vec<(Vec<u8>, KeyTypeId)>> {
            opaque::SessionKeys::decode_into_raw_public_keys(&encoded)
        }
    }

    impl fg_primitives::GrandpaApi<Block> for Runtime {
        fn grandpa_authorities() -> GrandpaAuthorityList {
            Grandpa::grandpa_authorities()
        }

        fn current_set_id() -> fg_primitives::SetId {
            Grandpa::current_set_id()
        }

        fn submit_report_equivocation_unsigned_extrinsic(
            _equivocation_proof: fg_primitives::EquivocationProof<
                <Block as BlockT>::Hash,
                NumberFor<Block>,
            >,
            _key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof,
        ) -> Option<()> {
            None
        }

        fn generate_key_ownership_proof(
            _set_id: fg_primitives::SetId,
            _authority_id: GrandpaId,
        ) ->
        Option<fg_primitives::OpaqueKeyOwnershipProof> {
            // NOTE: this is the only implementation possible since we've
            // defined our key owner proof type as a bottom type (i.e. a type
            // with no values).
            None
        }
    }

    impl frame_system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Index> for Runtime {
        fn account_nonce(account: AccountId) -> Index {
            System::account_nonce(account)
        }
    }

    impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, Balance> for Runtime {
        fn query_info(
            uxt: <Block as BlockT>::Extrinsic,
            len: u32,
        ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo<Balance> {
            TransactionPayment::query_info(uxt, len)
        }
        fn query_fee_details(
            uxt: <Block as BlockT>::Extrinsic,
            len: u32,
        ) -> pallet_transaction_payment::FeeDetails<Balance> {
            TransactionPayment::query_fee_details(uxt, len)
        }
    }

    #[cfg(feature = "runtime-benchmarks")]
    impl frame_benchmarking::Benchmark<Block> for Runtime {
        fn benchmark_metadata(extra: bool) -> (
            Vec<frame_benchmarking::BenchmarkList>,
            Vec<frame_support::traits::StorageInfo>,
        ) {
            use frame_benchmarking::{baseline, Benchmarking, BenchmarkList};
            use frame_support::traits::StorageInfoTrait;
            use frame_system_benchmarking::Pallet as SystemBench;
            use baseline::Pallet as BaselineBench;

            let mut list = Vec::<BenchmarkList>::new();
            list_benchmarks!(list, extra);

            let storage_info = AllPalletsWithSystem::storage_info();

            return (list, storage_info)
        }

        fn dispatch_benchmark(
            config: frame_benchmarking::BenchmarkConfig
        ) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, sp_runtime::RuntimeString> {
            use frame_benchmarking::{baseline, Benchmarking, BenchmarkBatch, TrackedStorageKey};

            use frame_system_benchmarking::Pallet as SystemBench;
            use baseline::Pallet as BaselineBench;

            impl frame_system_benchmarking::Config for Runtime {}
            impl baseline::Config for Runtime {}

            let whitelist: Vec<TrackedStorageKey> = vec![
                // Block Number
                hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
                // Total Issuance
                hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
                // Execution Phase
                hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
                // Event Count
                hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
                // System Events
                hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
            ];

            let mut batches = Vec::<BenchmarkBatch>::new();
            let params = (&config, &whitelist);
            add_benchmarks!(params, batches);

            Ok(batches)
        }
    }
}
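
// A minimal sanity-check sketch (not part of the original file): it only restates the
// arithmetic already encoded in the time constants above, assuming the 6-second block time
// set by `MILLISECS_PER_BLOCK`.
#[cfg(test)]
mod time_constant_tests {
    use super::*;

    #[test]
    fn block_time_constants_are_consistent() {
        // 6000 ms per block => 10 blocks per minute, 600 per hour, 14_400 per day.
        assert_eq!(MINUTES, 10);
        assert_eq!(HOURS, 600);
        assert_eq!(DAYS, 14_400);
        // Aura slots are one block long in this runtime.
        assert_eq!(SLOT_DURATION, MILLISECS_PER_BLOCK);
    }
}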
use crate::config::get_setting;
use crate::templates::plain::render;
use crate::user::User;
use crate::{crypto, mail, template_context, templates};
use anyhow::{Context, Result};
use sqlx::{Pool, Postgres};

pub(crate) async fn send_verification_mail(user: &User, db_pool: &Pool<Postgres>) -> Result<()> {
    assert!(user.id >= 0);
    let hash = crypto::random_hex_string(32);

    let mut transaction = db_pool.begin().await?;
    sqlx::query("insert into user_verifications (user_id, hash, expires) values ($1, $2, now() + interval '1 day')")
        .bind(&user.id)
        .bind(&hash)
        .execute(&mut transaction)
        .await?;

    let domain = get_setting::<String, _>("domain", &mut transaction).await?;
    let url = format!("{}/api/verify/{}", domain, hash);

    let template = &templates::VERIFY_EMAIL;
    let body = &template.0;
    let tags = &template.1;

    let subject = tags.get("subject").context("Template does not contain subject")?;
    let email_body = render(body.to_string(), template_context!([
        ("username".to_owned(), user.username.to_owned()),
        ("link".to_owned(), url)
    ]));

    mail::send_user_mail(user, subject, email_body, db_pool).await?;
    transaction.commit().await?;
    Ok(())
}
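
// A hedged sketch of the consuming side of the flow above, assuming the `user_verifications`
// table shaped as in the insert statement. The table and column names match the query above;
// the function name and the `users.verified` column are hypothetical and only illustrate how
// the `/api/verify/{hash}` endpoint might redeem the token within a single transaction.
pub(crate) async fn consume_verification(hash: &str, db_pool: &Pool<Postgres>) -> Result<bool> {
    let mut transaction = db_pool.begin().await?;

    // Look up a still-valid verification row for this hash.
    let row: Option<(i64,)> = sqlx::query_as(
        "select user_id from user_verifications where hash = $1 and expires > now()",
    )
    .bind(hash)
    .fetch_optional(&mut transaction)
    .await?;

    let verified = match row {
        Some((user_id,)) => {
            // Hypothetical `verified` flag; the real users schema is not shown in this module.
            sqlx::query("update users set verified = true where id = $1")
                .bind(user_id)
                .execute(&mut transaction)
                .await?;
            // Remove the token so it cannot be redeemed twice.
            sqlx::query("delete from user_verifications where user_id = $1")
                .bind(user_id)
                .execute(&mut transaction)
                .await?;
            true
        }
        None => false,
    };

    transaction.commit().await?;
    Ok(verified)
}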
// Copyright 2019 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::convert::TryInto;
use std::ffi::CStr;
use std::io;
use std::mem::{size_of, MaybeUninit};
use std::os::unix::io::AsRawFd;
use std::time::Duration;

use base::{error, pagesize};
use data_model::DataInit;

use crate::filesystem::{
    Context, DirEntry, DirectoryIterator, Entry, FileSystem, GetxattrReply, IoctlReply,
    ListxattrReply, ZeroCopyReader, ZeroCopyWriter,
};
use crate::sys::*;
use crate::{Error, Result};

const DIRENT_PADDING: [u8; 8] = [0; 8];

/// A trait for reading from the underlying FUSE endpoint.
pub trait Reader: io::Read {}

/// A trait for writing to the underlying FUSE endpoint. The FUSE device expects the write
/// operation to happen in one write transaction. Since there are cases where data needs to be
/// generated before the header, the writer implementation must keep an internal buffer. The
/// buffer can then be flushed once both the header and the data are prepared.
pub trait Writer: io::Write {
    /// Allows a closure to generate and write data at the current writer's offset. The current
    /// writer is passed as a mutable reference to the closure. As an example, this provides an
    /// adapter for the read implementation of a filesystem to write directly to the final buffer
    /// without generating the FUSE header first.
    ///
    /// Note: an alternative design would be to return a slightly different writer for the API
    /// client to write to at the given offset. Since that API would need to be called more than
    /// once, it imposes some complexity around borrowing and mutability. The current approach
    /// avoids creating a second writer, and with it the complexity just mentioned.
    fn write_at<F>(&mut self, offset: usize, f: F) -> io::Result<usize>
    where
        F: Fn(&mut Self) -> io::Result<usize>;

    /// Checks if the writer can still accept a certain amount of data.
    fn has_sufficient_buffer(&self, size: u32) -> bool;
}

/// A trait for memory mapping for DAX.
///
/// For some transports (like virtio) it may be possible to share a region of memory with the
/// FUSE kernel driver so that it can access file contents directly without issuing read or
/// write requests. In this case the driver will instead send requests to map a section of a
/// file into the shared memory region.
pub trait Mapper {
    /// Maps `size` bytes starting at `file_offset` bytes from within the given `fd` at
    /// `mem_offset` bytes from the start of the memory region with `prot` protections.
    /// `mem_offset` must be page aligned.
    ///
    /// # Arguments
    /// * `mem_offset` - Page aligned offset into the memory region in bytes.
    /// * `size` - Size of memory region in bytes.
    /// * `fd` - File descriptor to mmap from.
    /// * `file_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
    /// * `prot` - Protection (e.g. `libc::PROT_READ`) of the memory region.
    fn map(
        &self,
        mem_offset: u64,
        size: usize,
        fd: &dyn AsRawFd,
        file_offset: u64,
        prot: u32,
    ) -> io::Result<()>;

    /// Unmaps `size` bytes at `offset` bytes from the start of the memory region. `offset` must be
    /// page aligned.
    ///
    /// # Arguments
    /// * `offset` - Page aligned offset into the arena in bytes.
    /// * `size` - Size of memory region in bytes.
fn unmap(&self, offset: u64, size: u64) -> io::Result<()>; } impl<'a, M: Mapper> Mapper for &'a M { fn map( &self, mem_offset: u64, size: usize, fd: &dyn AsRawFd, file_offset: u64, prot: u32, ) -> io::Result<()> { (**self).map(mem_offset, size, fd, file_offset, prot) } fn unmap(&self, offset: u64, size: u64) -> io::Result<()> { (**self).unmap(offset, size) } } pub struct Server<F: FileSystem + Sync> { fs: F, } impl<F: FileSystem + Sync> Server<F> { pub fn new(fs: F) -> Server<F> { Server { fs } } pub fn handle_message<R: Reader + ZeroCopyReader, W: Writer + ZeroCopyWriter, M: Mapper>( &self, mut r: R, w: W, mapper: M, ) -> Result<usize> { let in_header = InHeader::from_reader(&mut r).map_err(Error::DecodeMessage)?; if in_header.len > self.fs.max_buffer_size() { return reply_error( io::Error::from_raw_os_error(libc::ENOMEM), in_header.unique, w, ); } match Opcode::n(in_header.opcode) { Some(Opcode::Lookup) => self.lookup(in_header, r, w), Some(Opcode::Forget) => self.forget(in_header, r), // No reply. Some(Opcode::Getattr) => self.getattr(in_header, r, w), Some(Opcode::Setattr) => self.setattr(in_header, r, w), Some(Opcode::Readlink) => self.readlink(in_header, w), Some(Opcode::Symlink) => self.symlink(in_header, r, w), Some(Opcode::Mknod) => self.mknod(in_header, r, w), Some(Opcode::Mkdir) => self.mkdir(in_header, r, w), Some(Opcode::Unlink) => self.unlink(in_header, r, w), Some(Opcode::Rmdir) => self.rmdir(in_header, r, w), Some(Opcode::Rename) => self.rename(in_header, r, w), Some(Opcode::Link) => self.link(in_header, r, w), Some(Opcode::Open) => self.open(in_header, r, w), Some(Opcode::Read) => self.read(in_header, r, w), Some(Opcode::Write) => self.write(in_header, r, w), Some(Opcode::Statfs) => self.statfs(in_header, w), Some(Opcode::Release) => self.release(in_header, r, w), Some(Opcode::Fsync) => self.fsync(in_header, r, w), Some(Opcode::Setxattr) => self.setxattr(in_header, r, w), Some(Opcode::Getxattr) => self.getxattr(in_header, r, w), Some(Opcode::Listxattr) => self.listxattr(in_header, r, w), Some(Opcode::Removexattr) => self.removexattr(in_header, r, w), Some(Opcode::Flush) => self.flush(in_header, r, w), Some(Opcode::Init) => self.init(in_header, r, w), Some(Opcode::Opendir) => self.opendir(in_header, r, w), Some(Opcode::Readdir) => self.readdir(in_header, r, w), Some(Opcode::Releasedir) => self.releasedir(in_header, r, w), Some(Opcode::Fsyncdir) => self.fsyncdir(in_header, r, w), Some(Opcode::Getlk) => self.getlk(in_header, r, w), Some(Opcode::Setlk) => self.setlk(in_header, r, w), Some(Opcode::Setlkw) => self.setlkw(in_header, r, w), Some(Opcode::Access) => self.access(in_header, r, w), Some(Opcode::Create) => self.create(in_header, r, w), Some(Opcode::Interrupt) => self.interrupt(in_header), Some(Opcode::Bmap) => self.bmap(in_header, r, w), Some(Opcode::Destroy) => self.destroy(), Some(Opcode::Ioctl) => self.ioctl(in_header, r, w), Some(Opcode::Poll) => self.poll(in_header, r, w), Some(Opcode::NotifyReply) => self.notify_reply(in_header, r, w), Some(Opcode::BatchForget) => self.batch_forget(in_header, r, w), Some(Opcode::Fallocate) => self.fallocate(in_header, r, w), Some(Opcode::Readdirplus) => self.readdirplus(in_header, r, w), Some(Opcode::Rename2) => self.rename2(in_header, r, w), Some(Opcode::Lseek) => self.lseek(in_header, r, w), Some(Opcode::CopyFileRange) => self.copy_file_range(in_header, r, w), Some(Opcode::ChromeOsTmpfile) => self.chromeos_tmpfile(in_header, r, w), Some(Opcode::SetUpMapping) => self.set_up_mapping(in_header, r, w, mapper), 
Some(Opcode::RemoveMapping) => self.remove_mapping(in_header, r, w, mapper), None => reply_error( io::Error::from_raw_os_error(libc::ENOSYS), in_header.unique, w, ), } } fn lookup<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> { let namelen = (in_header.len as usize) .checked_sub(size_of::<InHeader>()) .ok_or(Error::InvalidHeaderLength)?; let mut buf = Vec::with_capacity(namelen); buf.resize(namelen, 0); r.read_exact(&mut buf).map_err(Error::DecodeMessage)?; let name = bytes_to_cstr(&buf)?; match self .fs .lookup(Context::from(in_header), in_header.nodeid.into(), &name) { Ok(entry) => { let out = EntryOut::from(entry); reply_ok(Some(out), None, in_header.unique, w) } Err(e) => reply_error(e, in_header.unique, w), } } fn forget<R: Reader>(&self, in_header: InHeader, mut r: R) -> Result<usize> { let ForgetIn { nlookup } = ForgetIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; self.fs .forget(Context::from(in_header), in_header.nodeid.into(), nlookup); // There is no reply for forget messages. Ok(0) } fn getattr<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> { let GetattrIn { flags, dummy: _, fh, } = GetattrIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; let handle = if (flags & GETATTR_FH) != 0 { Some(fh.into()) } else { None }; match self .fs .getattr(Context::from(in_header), in_header.nodeid.into(), handle) { Ok((st, timeout)) => { let out = AttrOut { attr_valid: timeout.as_secs(), attr_valid_nsec: timeout.subsec_nanos(), dummy: 0, attr: st.into(), }; reply_ok(Some(out), None, in_header.unique, w) } Err(e) => reply_error(e, in_header.unique, w), } } fn setattr<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> { let setattr_in = SetattrIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; let handle = if setattr_in.valid & FATTR_FH != 0 { Some(setattr_in.fh.into()) } else { None }; let valid = SetattrValid::from_bits_truncate(setattr_in.valid); let st: libc::stat64 = setattr_in.into(); match self.fs.setattr( Context::from(in_header), in_header.nodeid.into(), st, handle, valid, ) { Ok((st, timeout)) => { let out = AttrOut { attr_valid: timeout.as_secs(), attr_valid_nsec: timeout.subsec_nanos(), dummy: 0, attr: st.into(), }; reply_ok(Some(out), None, in_header.unique, w) } Err(e) => reply_error(e, in_header.unique, w), } } fn readlink<W: Writer>(&self, in_header: InHeader, w: W) -> Result<usize> { match self .fs .readlink(Context::from(in_header), in_header.nodeid.into()) { Ok(linkname) => { // We need to disambiguate the option type here even though it is `None`. reply_ok(None::<u8>, Some(&linkname), in_header.unique, w) } Err(e) => reply_error(e, in_header.unique, w), } } fn symlink<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> { // Unfortunately the name and linkname are encoded one after another and // separated by a nul character. 
let len = (in_header.len as usize) .checked_sub(size_of::<InHeader>()) .ok_or(Error::InvalidHeaderLength)?; let mut buf = Vec::with_capacity(len); buf.resize(len, 0); r.read_exact(&mut buf).map_err(Error::DecodeMessage)?; let mut iter = split_inclusive(&buf, |&c| c == b'\0'); let name = iter .next() .ok_or(Error::MissingParameter) .and_then(bytes_to_cstr)?; let linkname = iter .next() .ok_or(Error::MissingParameter) .and_then(bytes_to_cstr)?; let security_ctx = iter.next().map(bytes_to_cstr).transpose()?; match self.fs.symlink( Context::from(in_header), linkname, in_header.nodeid.into(), name, security_ctx, ) { Ok(entry) => { let out = EntryOut::from(entry); reply_ok(Some(out), None, in_header.unique, w) } Err(e) => reply_error(e, in_header.unique, w), } } fn mknod<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> { let MknodIn { mode, rdev, umask, .. } = MknodIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; let buflen = (in_header.len as usize) .checked_sub(size_of::<InHeader>()) .and_then(|l| l.checked_sub(size_of::<MknodIn>())) .ok_or(Error::InvalidHeaderLength)?; let mut buf = Vec::with_capacity(buflen); buf.resize(buflen, 0); r.read_exact(&mut buf).map_err(Error::DecodeMessage)?; let mut iter = split_inclusive(&buf, |&c| c == b'\0'); let name = iter .next() .ok_or(Error::MissingParameter) .and_then(bytes_to_cstr)?; let security_ctx = iter.next().map(bytes_to_cstr).transpose()?; match self.fs.mknod( Context::from(in_header), in_header.nodeid.into(), name, mode, rdev, umask, security_ctx, ) { Ok(entry) => { let out = EntryOut::from(entry); reply_ok(Some(out), None, in_header.unique, w) } Err(e) => reply_error(e, in_header.unique, w), } } fn mkdir<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> { let MkdirIn { mode, umask } = MkdirIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; let buflen = (in_header.len as usize) .checked_sub(size_of::<InHeader>()) .and_then(|l| l.checked_sub(size_of::<MkdirIn>())) .ok_or(Error::InvalidHeaderLength)?; let mut buf = Vec::with_capacity(buflen); buf.resize(buflen, 0); r.read_exact(&mut buf).map_err(Error::DecodeMessage)?; let mut iter = split_inclusive(&buf, |&c| c == b'\0'); let name = iter .next() .ok_or(Error::MissingParameter) .and_then(bytes_to_cstr)?; let security_ctx = iter.next().map(bytes_to_cstr).transpose()?; match self.fs.mkdir( Context::from(in_header), in_header.nodeid.into(), name, mode, umask, security_ctx, ) { Ok(entry) => { let out = EntryOut::from(entry); reply_ok(Some(out), None, in_header.unique, w) } Err(e) => reply_error(e, in_header.unique, w), } } fn chromeos_tmpfile<R: Reader, W: Writer>( &self, in_header: InHeader, mut r: R, w: W, ) -> Result<usize> { let ChromeOsTmpfileIn { mode, umask } = ChromeOsTmpfileIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; let buflen = (in_header.len as usize) .checked_sub(size_of::<InHeader>()) .and_then(|l| l.checked_sub(size_of::<MkdirIn>())) .ok_or(Error::InvalidHeaderLength)?; let mut buf = vec![0u8; buflen]; let security_ctx = if buflen > 0 { r.read_exact(&mut buf).map_err(Error::DecodeMessage)?; Some(bytes_to_cstr(&buf)?) 
} else { None }; match self.fs.chromeos_tmpfile( Context::from(in_header), in_header.nodeid.into(), mode, umask, security_ctx, ) { Ok(entry) => { let out = EntryOut::from(entry); reply_ok(Some(out), None, in_header.unique, w) } Err(e) => reply_error(e, in_header.unique, w), } } fn unlink<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> { let namelen = (in_header.len as usize) .checked_sub(size_of::<InHeader>()) .ok_or(Error::InvalidHeaderLength)?; let mut name = Vec::with_capacity(namelen); name.resize(namelen, 0); r.read_exact(&mut name).map_err(Error::DecodeMessage)?; match self.fs.unlink( Context::from(in_header), in_header.nodeid.into(), bytes_to_cstr(&name)?, ) { Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w), Err(e) => reply_error(e, in_header.unique, w), } } fn rmdir<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> { let namelen = (in_header.len as usize) .checked_sub(size_of::<InHeader>()) .ok_or(Error::InvalidHeaderLength)?; let mut name = Vec::with_capacity(namelen); name.resize(namelen, 0); r.read_exact(&mut name).map_err(Error::DecodeMessage)?; match self.fs.rmdir( Context::from(in_header), in_header.nodeid.into(), bytes_to_cstr(&name)?, ) { Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w), Err(e) => reply_error(e, in_header.unique, w), } } fn do_rename<R: Reader, W: Writer>( &self, in_header: InHeader, msg_size: usize, newdir: u64, flags: u32, mut r: R, w: W, ) -> Result<usize> { let buflen = (in_header.len as usize) .checked_sub(size_of::<InHeader>()) .and_then(|l| l.checked_sub(msg_size)) .ok_or(Error::InvalidHeaderLength)?; let mut buf = Vec::with_capacity(buflen); buf.resize(buflen, 0); r.read_exact(&mut buf).map_err(Error::DecodeMessage)?; // We want to include the '\0' byte in the first slice. let split_pos = buf .iter() .position(|c| *c == b'\0') .map(|p| p + 1) .ok_or(Error::MissingParameter)?; let (oldname, newname) = buf.split_at(split_pos); match self.fs.rename( Context::from(in_header), in_header.nodeid.into(), bytes_to_cstr(oldname)?, newdir.into(), bytes_to_cstr(newname)?, flags, ) { Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w), Err(e) => reply_error(e, in_header.unique, w), } } fn rename<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> { let RenameIn { newdir } = RenameIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; self.do_rename(in_header, size_of::<RenameIn>(), newdir, 0, r, w) } fn rename2<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> { let Rename2In { newdir, flags, .. 
} = Rename2In::from_reader(&mut r).map_err(Error::DecodeMessage)?; let flags = flags & (libc::RENAME_EXCHANGE | libc::RENAME_NOREPLACE) as u32; self.do_rename(in_header, size_of::<Rename2In>(), newdir, flags, r, w) } fn link<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> { let LinkIn { oldnodeid } = LinkIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; let namelen = (in_header.len as usize) .checked_sub(size_of::<InHeader>()) .and_then(|l| l.checked_sub(size_of::<LinkIn>())) .ok_or(Error::InvalidHeaderLength)?; let mut name = Vec::with_capacity(namelen); name.resize(namelen, 0); r.read_exact(&mut name).map_err(Error::DecodeMessage)?; match self.fs.link( Context::from(in_header), oldnodeid.into(), in_header.nodeid.into(), bytes_to_cstr(&name)?, ) { Ok(entry) => { let out = EntryOut::from(entry); reply_ok(Some(out), None, in_header.unique, w) } Err(e) => reply_error(e, in_header.unique, w), } } fn open<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> { let OpenIn { flags, .. } = OpenIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; match self .fs .open(Context::from(in_header), in_header.nodeid.into(), flags) { Ok((handle, opts)) => { let out = OpenOut { fh: handle.map(Into::into).unwrap_or(0), open_flags: opts.bits(), ..Default::default() }; reply_ok(Some(out), None, in_header.unique, w) } Err(e) => reply_error(e, in_header.unique, w), } } fn read<R: Reader, W: ZeroCopyWriter + Writer>( &self, in_header: InHeader, mut r: R, mut w: W, ) -> Result<usize> { let ReadIn { fh, offset, size, read_flags, lock_owner, flags, .. } = ReadIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; if size > self.fs.max_buffer_size() { return reply_error( io::Error::from_raw_os_error(libc::ENOMEM), in_header.unique, w, ); } let owner = if read_flags & READ_LOCKOWNER != 0 { Some(lock_owner) } else { None }; // Skip for the header size to write the data first. match w.write_at(size_of::<OutHeader>(), |writer| { self.fs.read( Context::from(in_header), in_header.nodeid.into(), fh.into(), writer, size, offset, owner, flags, ) }) { Ok(count) => { // Don't use `reply_ok` because we need to set a custom size length for the // header. let out = OutHeader { len: (size_of::<OutHeader>() + count) as u32, error: 0, unique: in_header.unique, }; w.write_all(out.as_slice()).map_err(Error::EncodeMessage)?; w.flush().map_err(Error::FlushMessage)?; Ok(out.len as usize) } Err(e) => reply_error(e, in_header.unique, w), } } fn write<R: Reader + ZeroCopyReader, W: Writer>( &self, in_header: InHeader, mut r: R, w: W, ) -> Result<usize> { let WriteIn { fh, offset, size, write_flags, lock_owner, flags, .. 
    } = WriteIn::from_reader(&mut r).map_err(Error::DecodeMessage)?;

        if size > self.fs.max_buffer_size() {
            return reply_error(
                io::Error::from_raw_os_error(libc::ENOMEM),
                in_header.unique,
                w,
            );
        }

        let owner = if write_flags & WRITE_LOCKOWNER != 0 {
            Some(lock_owner)
        } else {
            None
        };

        let delayed_write = write_flags & WRITE_CACHE != 0;

        match self.fs.write(
            Context::from(in_header),
            in_header.nodeid.into(),
            fh.into(),
            r,
            size,
            offset,
            owner,
            delayed_write,
            flags,
        ) {
            Ok(count) => {
                let out = WriteOut {
                    size: count as u32,
                    ..Default::default()
                };

                reply_ok(Some(out), None, in_header.unique, w)
            }
            Err(e) => reply_error(e, in_header.unique, w),
        }
    }

    fn statfs<W: Writer>(&self, in_header: InHeader, w: W) -> Result<usize> {
        match self
            .fs
            .statfs(Context::from(in_header), in_header.nodeid.into())
        {
            Ok(st) => reply_ok(Some(Kstatfs::from(st)), None, in_header.unique, w),
            Err(e) => reply_error(e, in_header.unique, w),
        }
    }

    fn release<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
        let ReleaseIn {
            fh,
            flags,
            release_flags,
            lock_owner,
        } = ReleaseIn::from_reader(&mut r).map_err(Error::DecodeMessage)?;

        let flush = release_flags & RELEASE_FLUSH != 0;
        let flock_release = release_flags & RELEASE_FLOCK_UNLOCK != 0;
        let lock_owner = if flush || flock_release {
            Some(lock_owner)
        } else {
            None
        };

        match self.fs.release(
            Context::from(in_header),
            in_header.nodeid.into(),
            flags,
            fh.into(),
            flush,
            flock_release,
            lock_owner,
        ) {
            Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
            Err(e) => reply_error(e, in_header.unique, w),
        }
    }

    fn fsync<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
        let FsyncIn {
            fh, fsync_flags, ..
        } = FsyncIn::from_reader(&mut r).map_err(Error::DecodeMessage)?;
        let datasync = fsync_flags & 0x1 != 0;

        match self.fs.fsync(
            Context::from(in_header),
            in_header.nodeid.into(),
            datasync,
            fh.into(),
        ) {
            Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
            Err(e) => reply_error(e, in_header.unique, w),
        }
    }

    fn setxattr<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
        let SetxattrIn { size, flags } =
            SetxattrIn::from_reader(&mut r).map_err(Error::DecodeMessage)?;

        // The name and value are encoded one after another and separated by a '\0' character.
        let len = (in_header.len as usize)
            .checked_sub(size_of::<InHeader>())
            .and_then(|l| l.checked_sub(size_of::<SetxattrIn>()))
            .ok_or(Error::InvalidHeaderLength)?;
        let mut buf = Vec::with_capacity(len);
        buf.resize(len, 0);

        r.read_exact(&mut buf).map_err(Error::DecodeMessage)?;

        // We want to include the '\0' byte in the first slice.
        let split_pos = buf
            .iter()
            .position(|c| *c == b'\0')
            .map(|p| p + 1)
            .ok_or(Error::MissingParameter)?;

        let (name, value) = buf.split_at(split_pos);

        if size != value.len() as u32 {
            return Err(Error::InvalidXattrSize(size, value.len()));
        }

        match self.fs.setxattr(
            Context::from(in_header),
            in_header.nodeid.into(),
            bytes_to_cstr(name)?,
            value,
            flags,
        ) {
            Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w),
            Err(e) => reply_error(e, in_header.unique, w),
        }
    }

    fn getxattr<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> {
        let GetxattrIn { size, ..
} = GetxattrIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; let namelen = (in_header.len as usize) .checked_sub(size_of::<InHeader>()) .and_then(|l| l.checked_sub(size_of::<GetxattrIn>())) .ok_or(Error::InvalidHeaderLength)?; let mut name = Vec::with_capacity(namelen); name.resize(namelen, 0); r.read_exact(&mut name).map_err(Error::DecodeMessage)?; if size > self.fs.max_buffer_size() { return reply_error( io::Error::from_raw_os_error(libc::ENOMEM), in_header.unique, w, ); } match self.fs.getxattr( Context::from(in_header), in_header.nodeid.into(), bytes_to_cstr(&name)?, size, ) { Ok(GetxattrReply::Value(val)) => reply_ok(None::<u8>, Some(&val), in_header.unique, w), Ok(GetxattrReply::Count(count)) => { let out = GetxattrOut { size: count, ..Default::default() }; reply_ok(Some(out), None, in_header.unique, w) } Err(e) => reply_error(e, in_header.unique, w), } } fn listxattr<R: Reader, W: Writer>( &self, in_header: InHeader, mut r: R, w: W, ) -> Result<usize> { let GetxattrIn { size, .. } = GetxattrIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; if size > self.fs.max_buffer_size() { return reply_error( io::Error::from_raw_os_error(libc::ENOMEM), in_header.unique, w, ); } match self .fs .listxattr(Context::from(in_header), in_header.nodeid.into(), size) { Ok(ListxattrReply::Names(val)) => reply_ok(None::<u8>, Some(&val), in_header.unique, w), Ok(ListxattrReply::Count(count)) => { let out = GetxattrOut { size: count, ..Default::default() }; reply_ok(Some(out), None, in_header.unique, w) } Err(e) => reply_error(e, in_header.unique, w), } } fn removexattr<R: Reader, W: Writer>( &self, in_header: InHeader, mut r: R, w: W, ) -> Result<usize> { let namelen = (in_header.len as usize) .checked_sub(size_of::<InHeader>()) .ok_or(Error::InvalidHeaderLength)?; let mut buf = Vec::with_capacity(namelen); buf.resize(namelen, 0); r.read_exact(&mut buf).map_err(Error::DecodeMessage)?; let name = bytes_to_cstr(&buf)?; match self .fs .removexattr(Context::from(in_header), in_header.nodeid.into(), name) { Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w), Err(e) => reply_error(e, in_header.unique, w), } } fn flush<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> { let FlushIn { fh, unused: _, padding: _, lock_owner, } = FlushIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; match self.fs.flush( Context::from(in_header), in_header.nodeid.into(), fh.into(), lock_owner, ) { Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w), Err(e) => reply_error(e, in_header.unique, w), } } fn init<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> { let InitIn { major, minor, max_readahead, flags, } = InitIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; if major < KERNEL_VERSION { error!("Unsupported fuse protocol version: {}.{}", major, minor); return reply_error( io::Error::from_raw_os_error(libc::EPROTO), in_header.unique, w, ); } if major > KERNEL_VERSION { // Wait for the kernel to reply back with a 7.X version. let out = InitOut { major: KERNEL_VERSION, minor: KERNEL_MINOR_VERSION, ..Default::default() }; return reply_ok(Some(out), None, in_header.unique, w); } if minor < OLDEST_SUPPORTED_KERNEL_MINOR_VERSION { error!( "Unsupported fuse protocol minor version: {}.{}", major, minor ); return reply_error( io::Error::from_raw_os_error(libc::EPROTO), in_header.unique, w, ); } // These fuse features are supported by this server by default. 
let supported = FsOptions::ASYNC_READ | FsOptions::PARALLEL_DIROPS | FsOptions::BIG_WRITES | FsOptions::AUTO_INVAL_DATA | FsOptions::HANDLE_KILLPRIV | FsOptions::ASYNC_DIO | FsOptions::HAS_IOCTL_DIR | FsOptions::DO_READDIRPLUS | FsOptions::READDIRPLUS_AUTO | FsOptions::ATOMIC_O_TRUNC | FsOptions::MAP_ALIGNMENT; let capable = FsOptions::from_bits_truncate(flags); match self.fs.init(capable) { Ok(want) => { let mut enabled = capable & (want | supported); // HANDLE_KILLPRIV doesn't work correctly when writeback caching is enabled so turn // it off. if enabled.contains(FsOptions::WRITEBACK_CACHE) { enabled.remove(FsOptions::HANDLE_KILLPRIV); } let out = InitOut { major: KERNEL_VERSION, minor: KERNEL_MINOR_VERSION, max_readahead, flags: enabled.bits(), max_background: ::std::u16::MAX, congestion_threshold: (::std::u16::MAX / 4) * 3, max_write: self.fs.max_buffer_size(), time_gran: 1, // nanoseconds map_alignment: pagesize().trailing_zeros() as u16, ..Default::default() }; reply_ok(Some(out), None, in_header.unique, w) } Err(e) => reply_error(e, in_header.unique, w), } } fn opendir<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> { let OpenIn { flags, .. } = OpenIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; match self .fs .opendir(Context::from(in_header), in_header.nodeid.into(), flags) { Ok((handle, opts)) => { let out = OpenOut { fh: handle.map(Into::into).unwrap_or(0), open_flags: opts.bits(), ..Default::default() }; reply_ok(Some(out), None, in_header.unique, w) } Err(e) => reply_error(e, in_header.unique, w), } } fn readdir<R: Reader, W: Writer>( &self, in_header: InHeader, mut r: R, mut w: W, ) -> Result<usize> { let ReadIn { fh, offset, size, .. } = ReadIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; if size > self.fs.max_buffer_size() { return reply_error( io::Error::from_raw_os_error(libc::ENOMEM), in_header.unique, w, ); } if !w.has_sufficient_buffer(size) { return reply_error( io::Error::from_raw_os_error(libc::ENOMEM), in_header.unique, w, ); } // Skip over enough bytes for the header. let unique = in_header.unique; let result = w.write_at(size_of::<OutHeader>(), |cursor| { match self.fs.readdir( Context::from(in_header), in_header.nodeid.into(), fh.into(), size, offset, ) { Ok(mut entries) => { let mut total_written = 0; while let Some(dirent) = entries.next() { let remaining = (size as usize).saturating_sub(total_written); match add_dirent(cursor, remaining, dirent, None) { // No more space left in the buffer. Ok(0) => break, Ok(bytes_written) => { total_written += bytes_written; } Err(e) => return Err(e), } } Ok(total_written) } Err(e) => Err(e), } }); match result { Ok(total_written) => reply_readdir(total_written, unique, w), Err(e) => reply_error(e, unique, w), } } fn handle_dirent<'d>( &self, in_header: &InHeader, dir_entry: DirEntry<'d>, ) -> io::Result<(DirEntry<'d>, Entry)> { let parent = in_header.nodeid.into(); let name = dir_entry.name.to_bytes(); let entry = if name == b"." || name == b".." { // Don't do lookups on the current directory or the parent directory. Safe because // this only contains integer fields and any value is valid. let mut attr = unsafe { MaybeUninit::<libc::stat64>::zeroed().assume_init() }; attr.st_ino = dir_entry.ino; attr.st_mode = dir_entry.type_; // We use 0 for the inode value to indicate a negative entry. 
Entry { inode: 0, generation: 0, attr, attr_timeout: Duration::from_secs(0), entry_timeout: Duration::from_secs(0), } } else { self.fs .lookup(Context::from(*in_header), parent, dir_entry.name)? }; Ok((dir_entry, entry)) } fn readdirplus<R: Reader, W: Writer>( &self, in_header: InHeader, mut r: R, mut w: W, ) -> Result<usize> { let ReadIn { fh, offset, size, .. } = ReadIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; if size > self.fs.max_buffer_size() { return reply_error( io::Error::from_raw_os_error(libc::ENOMEM), in_header.unique, w, ); } if !w.has_sufficient_buffer(size) { return reply_error( io::Error::from_raw_os_error(libc::ENOMEM), in_header.unique, w, ); } // Skip over enough bytes for the header. let unique = in_header.unique; let result = w.write_at(size_of::<OutHeader>(), |cursor| { match self.fs.readdir( Context::from(in_header), in_header.nodeid.into(), fh.into(), size, offset, ) { Ok(mut entries) => { let mut total_written = 0; while let Some(dirent) = entries.next() { let mut entry_inode = None; match self.handle_dirent(&in_header, dirent).and_then(|(d, e)| { entry_inode = Some(e.inode); let remaining = (size as usize).saturating_sub(total_written); add_dirent(cursor, remaining, d, Some(e)) }) { Ok(0) => { // No more space left in the buffer but we need to undo the lookup // that created the Entry or we will end up with mismatched lookup // counts. if let Some(inode) = entry_inode { self.fs.forget(Context::from(in_header), inode.into(), 1); } break; } Ok(bytes_written) => { total_written += bytes_written; } Err(e) => { if let Some(inode) = entry_inode { self.fs.forget(Context::from(in_header), inode.into(), 1); } if total_written == 0 { // We haven't filled any entries yet so we can just propagate // the error. return Err(e); } // We already filled in some entries. Returning an error now will // cause lookup count mismatches for those entries so just return // whatever we already have. break; } } } Ok(total_written) } Err(e) => Err(e), } }); match result { Ok(total_written) => reply_readdir(total_written, unique, w), Err(e) => reply_error(e, unique, w), } } fn releasedir<R: Reader, W: Writer>( &self, in_header: InHeader, mut r: R, w: W, ) -> Result<usize> { let ReleaseIn { fh, flags, .. } = ReleaseIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; match self.fs.releasedir( Context::from(in_header), in_header.nodeid.into(), flags, fh.into(), ) { Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w), Err(e) => reply_error(e, in_header.unique, w), } } fn fsyncdir<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> { let FsyncIn { fh, fsync_flags, .. 
} = FsyncIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; let datasync = fsync_flags & 0x1 != 0; match self.fs.fsyncdir( Context::from(in_header), in_header.nodeid.into(), datasync, fh.into(), ) { Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w), Err(e) => reply_error(e, in_header.unique, w), } } fn getlk<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize> { if let Err(e) = self.fs.getlk() { reply_error(e, in_header.unique, w) } else { Ok(0) } } fn setlk<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize> { if let Err(e) = self.fs.setlk() { reply_error(e, in_header.unique, w) } else { Ok(0) } } fn setlkw<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize> { if let Err(e) = self.fs.setlkw() { reply_error(e, in_header.unique, w) } else { Ok(0) } } fn access<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> { let AccessIn { mask, .. } = AccessIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; match self .fs .access(Context::from(in_header), in_header.nodeid.into(), mask) { Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w), Err(e) => reply_error(e, in_header.unique, w), } } fn create<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> { let CreateIn { flags, mode, umask, .. } = CreateIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; let buflen = (in_header.len as usize) .checked_sub(size_of::<InHeader>()) .and_then(|l| l.checked_sub(size_of::<CreateIn>())) .ok_or(Error::InvalidHeaderLength)?; let mut buf = Vec::with_capacity(buflen); buf.resize(buflen, 0); r.read_exact(&mut buf).map_err(Error::DecodeMessage)?; let mut iter = split_inclusive(&buf, |&c| c == b'\0'); let name = iter .next() .ok_or(Error::MissingParameter) .and_then(bytes_to_cstr)?; let security_ctx = iter.next().map(bytes_to_cstr).transpose()?; match self.fs.create( Context::from(in_header), in_header.nodeid.into(), name, mode, flags, umask, security_ctx, ) { Ok((entry, handle, opts)) => { let entry_out = EntryOut { nodeid: entry.inode, generation: entry.generation, entry_valid: entry.entry_timeout.as_secs(), attr_valid: entry.attr_timeout.as_secs(), entry_valid_nsec: entry.entry_timeout.subsec_nanos(), attr_valid_nsec: entry.attr_timeout.subsec_nanos(), attr: entry.attr.into(), }; let open_out = OpenOut { fh: handle.map(Into::into).unwrap_or(0), open_flags: opts.bits(), ..Default::default() }; // Kind of a hack to write both structs. reply_ok( Some(entry_out), Some(open_out.as_slice()), in_header.unique, w, ) } Err(e) => reply_error(e, in_header.unique, w), } } fn interrupt(&self, _in_header: InHeader) -> Result<usize> { Ok(0) } fn bmap<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize> { if let Err(e) = self.fs.bmap() { reply_error(e, in_header.unique, w) } else { Ok(0) } } fn destroy(&self) -> Result<usize> { // No reply to this function. 
self.fs.destroy(); Ok(0) } fn ioctl<R: Reader, W: Writer>(&self, in_header: InHeader, mut r: R, w: W) -> Result<usize> { let IoctlIn { fh, flags, cmd, arg, in_size, out_size, } = IoctlIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; let res = self.fs.ioctl( in_header.into(), fh.into(), IoctlFlags::from_bits_truncate(flags), cmd, arg, in_size, out_size, r, ); match res { Ok(reply) => match reply { IoctlReply::Retry { input, output } => { retry_ioctl(in_header.unique, input, output, w) } IoctlReply::Done(res) => finish_ioctl(in_header.unique, res, w), }, Err(e) => reply_error(e, in_header.unique, w), } } fn poll<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize> { if let Err(e) = self.fs.poll() { reply_error(e, in_header.unique, w) } else { Ok(0) } } fn notify_reply<R: Reader, W: Writer>( &self, in_header: InHeader, mut _r: R, w: W, ) -> Result<usize> { if let Err(e) = self.fs.notify_reply() { reply_error(e, in_header.unique, w) } else { Ok(0) } } fn batch_forget<R: Reader, W: Writer>( &self, in_header: InHeader, mut r: R, w: W, ) -> Result<usize> { let BatchForgetIn { count, .. } = BatchForgetIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; if let Some(size) = (count as usize).checked_mul(size_of::<ForgetOne>()) { if size > self.fs.max_buffer_size() as usize { return reply_error( io::Error::from_raw_os_error(libc::ENOMEM), in_header.unique, w, ); } } else { return reply_error( io::Error::from_raw_os_error(libc::EOVERFLOW), in_header.unique, w, ); } let mut requests = Vec::with_capacity(count as usize); for _ in 0..count { requests.push( ForgetOne::from_reader(&mut r) .map(|f| (f.nodeid.into(), f.nlookup)) .map_err(Error::DecodeMessage)?, ); } self.fs.batch_forget(Context::from(in_header), requests); // No reply for forget messages. Ok(0) } fn fallocate<R: Reader, W: Writer>( &self, in_header: InHeader, mut r: R, w: W, ) -> Result<usize> { let FallocateIn { fh, offset, length, mode, .. 
} = FallocateIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; match self.fs.fallocate( Context::from(in_header), in_header.nodeid.into(), fh.into(), mode, offset, length, ) { Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w), Err(e) => reply_error(e, in_header.unique, w), } } fn lseek<R: Reader, W: Writer>(&self, in_header: InHeader, mut _r: R, w: W) -> Result<usize> { if let Err(e) = self.fs.lseek() { reply_error(e, in_header.unique, w) } else { Ok(0) } } fn copy_file_range<R: Reader, W: Writer>( &self, in_header: InHeader, mut r: R, w: W, ) -> Result<usize> { let CopyFileRangeIn { fh_src, off_src, nodeid_dst, fh_dst, off_dst, len, flags, } = CopyFileRangeIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; match self.fs.copy_file_range( Context::from(in_header), in_header.nodeid.into(), fh_src.into(), off_src, nodeid_dst.into(), fh_dst.into(), off_dst, len, flags, ) { Ok(count) => { let out = WriteOut { size: count as u32, ..Default::default() }; reply_ok(Some(out), None, in_header.unique, w) } Err(e) => reply_error(e, in_header.unique, w), } } fn set_up_mapping<R, W, M>( &self, in_header: InHeader, mut r: R, w: W, mapper: M, ) -> Result<usize> where R: Reader, W: Writer, M: Mapper, { let SetUpMappingIn { fh, foffset, len, flags, moffset, } = SetUpMappingIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; let flags = SetUpMappingFlags::from_bits_truncate(flags); let mut prot = 0; if flags.contains(SetUpMappingFlags::READ) { prot |= libc::PROT_READ as u32; } if flags.contains(SetUpMappingFlags::WRITE) { prot |= libc::PROT_WRITE as u32; } let size = if let Ok(s) = len.try_into() { s } else { return reply_error( io::Error::from_raw_os_error(libc::EOVERFLOW), in_header.unique, w, ); }; match self.fs.set_up_mapping( Context::from(in_header), in_header.nodeid.into(), fh.into(), foffset, moffset, size, prot, mapper, ) { Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w), Err(e) => { error!("set_up_mapping failed: {}", e); reply_error(e, in_header.unique, w) } } } fn remove_mapping<R, W, M>( &self, in_header: InHeader, mut r: R, w: W, mapper: M, ) -> Result<usize> where R: Reader, W: Writer, M: Mapper, { let RemoveMappingIn { count } = RemoveMappingIn::from_reader(&mut r).map_err(Error::DecodeMessage)?; // `FUSE_REMOVEMAPPING_MAX_ENTRY` is defined as // `PAGE_SIZE / sizeof(struct fuse_removemapping_one)` in /kernel/include/uapi/linux/fuse.h. let max_entry = pagesize() / std::mem::size_of::<RemoveMappingOne>(); if max_entry < count as usize { return reply_error( io::Error::from_raw_os_error(libc::EINVAL), in_header.unique, w, ); } let mut msgs = Vec::with_capacity(count as usize); for _ in 0..(count as usize) { msgs.push(RemoveMappingOne::from_reader(&mut r).map_err(Error::DecodeMessage)?); } match self.fs.remove_mapping(&msgs, mapper) { Ok(()) => reply_ok(None::<u8>, None, in_header.unique, w), Err(e) => reply_error(e, in_header.unique, w), } } } fn retry_ioctl<W: Writer>( unique: u64, input: Vec<IoctlIovec>, output: Vec<IoctlIovec>, mut w: W, ) -> Result<usize> { // We don't need to check for overflow here because if adding these 2 values caused an overflow // we would have run out of memory before reaching this point. 
if input.len() + output.len() > IOCTL_MAX_IOV { return Err(Error::TooManyIovecs( input.len() + output.len(), IOCTL_MAX_IOV, )); } let len = size_of::<OutHeader>() + size_of::<IoctlOut>() + (input.len() * size_of::<IoctlIovec>()) + (output.len() * size_of::<IoctlIovec>()); let header = OutHeader { len: len as u32, error: 0, unique, }; let out = IoctlOut { result: 0, flags: IoctlFlags::RETRY.bits(), in_iovs: input.len() as u32, out_iovs: output.len() as u32, }; let mut total_bytes = size_of::<OutHeader>() + size_of::<IoctlOut>(); w.write_all(header.as_slice()) .map_err(Error::EncodeMessage)?; w.write_all(out.as_slice()).map_err(Error::EncodeMessage)?; for i in input.into_iter().chain(output.into_iter()) { total_bytes += i.as_slice().len(); w.write_all(i.as_slice()).map_err(Error::EncodeMessage)?; } w.flush().map_err(Error::FlushMessage)?; debug_assert_eq!(len, total_bytes); Ok(len) } fn finish_ioctl<W: Writer>(unique: u64, res: io::Result<Vec<u8>>, w: W) -> Result<usize> { let (out, data) = match res { Ok(data) => { let out = IoctlOut { result: 0, ..Default::default() }; (out, Some(data)) } Err(e) => { let out = IoctlOut { result: -e.raw_os_error().unwrap_or(libc::EIO), ..Default::default() }; (out, None) } }; reply_ok(Some(out), data.as_ref().map(|d| &d[..]), unique, w) } fn reply_readdir<W: Writer>(len: usize, unique: u64, mut w: W) -> Result<usize> { let out = OutHeader { len: (size_of::<OutHeader>() + len) as u32, error: 0, unique, }; w.write_all(out.as_slice()).map_err(Error::EncodeMessage)?; w.flush().map_err(Error::FlushMessage)?; Ok(out.len as usize) } fn reply_ok<T: DataInit, W: Writer>( out: Option<T>, data: Option<&[u8]>, unique: u64, mut w: W, ) -> Result<usize> { let mut len = size_of::<OutHeader>(); if out.is_some() { len += size_of::<T>(); } if let Some(ref data) = data { len += data.len(); } let header = OutHeader { len: len as u32, error: 0, unique, }; let mut total_bytes = size_of::<OutHeader>(); w.write_all(header.as_slice()) .map_err(Error::EncodeMessage)?; if let Some(out) = out { total_bytes += out.as_slice().len(); w.write_all(out.as_slice()).map_err(Error::EncodeMessage)?; } if let Some(data) = data { total_bytes += data.len(); w.write_all(data).map_err(Error::EncodeMessage)?; } w.flush().map_err(Error::FlushMessage)?; debug_assert_eq!(len, total_bytes); Ok(len) } fn reply_error<W: Writer>(e: io::Error, unique: u64, mut w: W) -> Result<usize> { let header = OutHeader { len: size_of::<OutHeader>() as u32, error: -e.raw_os_error().unwrap_or(libc::EIO), unique, }; w.write_all(header.as_slice()) .map_err(Error::EncodeMessage)?; w.flush().map_err(Error::FlushMessage)?; Ok(header.len as usize) } fn bytes_to_cstr(buf: &[u8]) -> Result<&CStr> { // Convert to a `CStr` first so that we can drop the '\0' byte at the end // and make sure there are no interior '\0' bytes. CStr::from_bytes_with_nul(buf).map_err(Error::InvalidCString) } fn add_dirent<W: Writer>( cursor: &mut W, max: usize, d: DirEntry, entry: Option<Entry>, ) -> io::Result<usize> { // Strip the trailing '\0'. let name = d.name.to_bytes(); if name.len() > ::std::u32::MAX as usize { return Err(io::Error::from_raw_os_error(libc::EOVERFLOW)); } let dirent_len = size_of::<Dirent>() .checked_add(name.len()) .ok_or_else(|| io::Error::from_raw_os_error(libc::EOVERFLOW))?; // Directory entries must be padded to 8-byte alignment. If adding 7 causes // an overflow then this dirent cannot be properly padded. 
let padded_dirent_len = dirent_len .checked_add(7) .map(|l| l & !7) .ok_or_else(|| io::Error::from_raw_os_error(libc::EOVERFLOW))?; let total_len = if entry.is_some() { padded_dirent_len .checked_add(size_of::<EntryOut>()) .ok_or_else(|| io::Error::from_raw_os_error(libc::EOVERFLOW))? } else { padded_dirent_len }; if max < total_len { Ok(0) } else { if let Some(entry) = entry { cursor.write_all(EntryOut::from(entry).as_slice())?; } let dirent = Dirent { ino: d.ino, off: d.offset, namelen: name.len() as u32, type_: d.type_, }; cursor.write_all(dirent.as_slice())?; cursor.write_all(name)?; // We know that `dirent_len` <= `padded_dirent_len` due to the check above // so there's no need for checked arithmetic. let padding = padded_dirent_len - dirent_len; if padding > 0 { cursor.write_all(&DIRENT_PADDING[..padding])?; } Ok(total_len) } } // TODO: Remove this once std::slice::SplitInclusive is stabilized. struct SplitInclusive<'a, T, F> { buf: &'a [T], pred: F, } impl<'a, T, F> Iterator for SplitInclusive<'a, T, F> where F: FnMut(&T) -> bool, { type Item = &'a [T]; fn next(&mut self) -> Option<Self::Item> { if self.buf.is_empty() { return None; } let split_pos = self .buf .iter() .position(&mut self.pred) .map(|p| p + 1) .unwrap_or(self.buf.len()); let (next, rem) = self.buf.split_at(split_pos); self.buf = rem; Some(next) } fn size_hint(&self) -> (usize, Option<usize>) { if self.buf.is_empty() { (0, Some(0)) } else { (1, Some(self.buf.len())) } } } fn split_inclusive<T, F>(buf: &[T], pred: F) -> SplitInclusive<T, F> where F: FnMut(&T) -> bool, { SplitInclusive { buf, pred } } #[cfg(test)] mod tests { use super::*; #[test] fn split_inclusive_basic() { let slice = [10, 40, 33, 20]; let mut iter = split_inclusive(&slice, |num| num % 3 == 0); assert_eq!(iter.next().unwrap(), &[10, 40, 33]); assert_eq!(iter.next().unwrap(), &[20]); assert!(iter.next().is_none()); } #[test] fn split_inclusive_last() { let slice = [3, 10, 40, 33]; let mut iter = split_inclusive(&slice, |num| num % 3 == 0); assert_eq!(iter.next().unwrap(), &[3]); assert_eq!(iter.next().unwrap(), &[10, 40, 33]); assert!(iter.next().is_none()); } #[test] fn split_inclusive_no_match() { let slice = [3, 10, 40, 33]; let mut iter = split_inclusive(&slice, |num| num % 7 == 0); assert_eq!(iter.next().unwrap(), &slice); assert!(iter.next().is_none()); } }
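// A minimal standalone sketch (hypothetical helper, not part of the server
// above) of the checked 8-byte padding arithmetic that `add_dirent` relies
// on: rounding a length up to the next multiple of 8 with `checked_add(7)`
// followed by `& !7`, and surfacing overflow as `None` instead of wrapping.

fn padded_len(len: usize) -> Option<usize> {
    // (len + 7) & !7 rounds up to the next multiple of 8; checked_add turns
    // the overflow at usize::MAX into a recoverable None, mirroring the
    // EOVERFLOW path in add_dirent.
    len.checked_add(7).map(|l| l & !7)
}

#[cfg(test)]
mod padding_tests {
    use super::padded_len;

    #[test]
    fn rounds_up_to_multiple_of_eight() {
        assert_eq!(padded_len(0), Some(0));
        assert_eq!(padded_len(1), Some(8));
        assert_eq!(padded_len(8), Some(8));
        assert_eq!(padded_len(9), Some(16));
        // Adding 7 to usize::MAX overflows, so the dirent cannot be padded.
        assert_eq!(padded_len(usize::MAX), None);
    }
}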
avg_line_length: 32.757688
max_line_length: 100
alphanum_fraction: 0.50624
hexsha: 648b9c0d3a127da591844e4f38c8664cd7cf3c33
size: 19,570
use std::rc::Rc; use std::cell::RefCell; // use std::drop::Drop; use gameboy::memory::MemoryUnit; use gameboy::interrupts::Interrupt; pub const DISPLAY_RESOLUTION_X: u32 = 160; pub const DISPLAY_RESOLUTION_Y: u32 = 144; #[derive(Debug, Clone, Copy)] enum Color { Lightest, Lighter, Darker, Darkest, // Disabled, // Different color for when LCD is disabled? } impl Color { // TODO: Load colors from a config file for different styles, like BGB fn to_rgb(self) -> (u8, u8, u8) { // GRAY // match self { // Color::Lightest => (232, 232, 232), // Color::Lighter => (160, 160, 160), // Color::Darker => (88, 88, 88), // Color::Darkest => (16, 16, 16), // } // GREEN match self { Color::Lightest => (224, 248, 208), Color::Lighter => (136, 192, 112), Color::Darker => (52, 104, 86), Color::Darkest => (8, 24, 32), } // RED // match self { // Color::Lightest => (255, 192, 192), // Color::Lighter => (255, 96, 96), // Color::Darker => (192, 0, 0), // Color::Darkest => (96, 0 ,0), // } // YELLOW // match self { // Color::Lightest => (248, 240, 120), // Color::Lighter => (176, 168, 72), // Color::Darker => (104, 104, 48), // Color::Darkest => (32, 32, 16), // } } } /// LCD Controller Mode #[derive(Debug, Clone, Copy)] enum Mode { HBlank = 0, // CPU can access OAM and VRAM VBlank = 1, // CPU can access OAM and VRAM OamRead = 2, // CPU can access VRAM LCDTransfer = 3, // CPU can't access either } impl Mode { fn cycles(self) -> i32 { // Assumes 1 MHz clock match self { Mode::HBlank => 194, // about 48.6 us Mode::VBlank => 4320, // about 1.08 ms Mode::OamRead => 76, // about 19 us Mode::LCDTransfer => 164, // about 41 us } } fn next_mode(self, scanline: u8) -> Mode { match self { Mode::OamRead => Mode::LCDTransfer, Mode::LCDTransfer => Mode::HBlank, Mode::VBlank => Mode::OamRead, Mode::HBlank => if scanline < 144 { Mode::OamRead } else { Mode::VBlank } } } } const LY_PORT_NUMBER: u8 = 0x44; const LCDC_PORT_NUMBER: u8 = 0x40; const STAT_PORT_NUMBER: u8 = 0x41; pub struct Gpu<'a> { memory: Rc<RefCell<MemoryUnit<'a>>>, /// Current controller mode mode: Mode, /// Number of cycles left until mode switch mode_switch_timer: i32, scanline: u8, clocks: i64, lcdc: u8, } impl<'a> Gpu<'a> { pub fn new(memory: Rc<RefCell<MemoryUnit<'a>>>) -> Gpu<'a> { Gpu { memory, mode: Mode::OamRead, mode_switch_timer: 0, scanline: 0, clocks: 0, lcdc: 0, } } pub fn reset(&mut self) { self.lcdc = 0; self.set_mode(Mode::OamRead); } pub fn step(&mut self, cycles: i32) -> bool { let mut drawframe = false; // The GPU switches between three modes per scanline: // - Mode 2 reads OAM and lasts 77-83 clocks // - Mode 3 transfers data to the LCD driver and lasts 169-175 clocks // - Mode 0 is the H-Blank and lasts 201-207 clocks. // The complete cycle lasts 456 clocks and occurs 144 times. // There is a 4560 clock V-Blank period during which the scanline // count continues to increase. let OAM_READ_CLOCKS = 80; // about 19 us let LCD_TRANSFER_CLOCKS = 171; // about 41 us let HBLANK_CLOCKS = 205; // about 48.6 us let SCANLINE_PERIOD = OAM_READ_CLOCKS + LCD_TRANSFER_CLOCKS + HBLANK_CLOCKS; // assert_eq!(456, OAM_READ_CLOCKS + LCD_TRANSFER_CLOCKS + HBLANK_CLOCKS); // let VBLANK_CLOCKS = 4560; let VBLANK_CLOCKS = 10 * SCANLINE_PERIOD; // TODO: Find a more elegant way to do this. 
// TODO: OAM and VRAM locking self.clocks += cycles as i64; // if self.clocks <= OAM_READ_CLOCKS { // self.mode = Mode::OamRead; // } // else if self.clocks <= OAM_READ_CLOCKS + LCD_TRANSFER_CLOCKS { // self.mode = Mode::LCDTransfer; // } // else if self.clocks <= OAM_READ_CLOCKS + LCD_TRANSFER_CLOCKS + HBLANK_CLOCKS { // self.mode // } if self.clocks >= SCANLINE_PERIOD { self.scanline += 1; self.clocks = 0; } if self.scanline >= 153 { self.scanline = 0; } self.memory.borrow_mut().set_io_read_value(LY_PORT_NUMBER, self.scanline); // println!(">>> scanline = {}", self.scanline); let mode = if self.scanline >= 144 { if self.scanline == 144 { drawframe = true; Interrupt::VBlank.request(&mut self.memory.borrow_mut()); } Mode::VBlank } else if self.clocks <= OAM_READ_CLOCKS { Mode::OamRead } else if self.clocks <= OAM_READ_CLOCKS + LCD_TRANSFER_CLOCKS { Mode::LCDTransfer } else { Mode::HBlank }; // TODO // let ly_port_write = self.memory.borrow_mut().check_for_io_write(LY_PORT_NUMBER); // if let Some(_) = ly_port_write { // self.scanline = 0; // } // self.mode_switch_timer -= cycles; // if self.mode_switch_timer <= 0 { // self.mode = self.mode.next_mode(self.scanline); // self.mode_switch_timer = self.mode.cycles(); // match self.mode { // // TODO: Scanline will not increment past 144. Is that a problem? // Mode::HBlank => { self.scanline += 1; }, // Mode::VBlank => { // Interrupt::VBlank.request(&mut self.memory.borrow_mut()); // vblank = true; // self.scanline = 0; // TODO: Should this wait until VBlank ends? // }, // _ => (), // } // } // TODO: Extract to function // TODO: Add other bit features // let lcd_status = 0 | (self.mode as u8); let lcd_status = 0 | (mode as u8); self.memory.borrow_mut().set_io_read_value(STAT_PORT_NUMBER, lcd_status); { let mut memory = self.memory.borrow_mut(); if let Some(lcdc) = memory.check_for_io_write(LCDC_PORT_NUMBER) { self.lcdc = lcdc; memory.set_io_read_value(LCDC_PORT_NUMBER, self.lcdc); } } drawframe } // fn cycles(self) -> i32 { // // Assumes 1 MHz clock // match self { // Mode::HBlank => 194, // about 48.6 us // Mode::VBlank => 4320, // about 1.08 ms // Mode::OamRead => 76, // about 19 us // Mode::LCDTransfer => 164, // about 41 us // } // } // fn next_mode(self, scanline: u8) -> Mode { // match self { // Mode::OamRead => Mode::LCDTransfer, // Mode::LCDTransfer => Mode::HBlank, // Mode::VBlank => Mode::OamRead, // Mode::HBlank => if scanline < 144 { // Mode::OamRead // } // else { // Mode::VBlank // } // } // } pub fn is_in_vblank(&self) -> bool { self.scanline >= DISPLAY_RESOLUTION_Y as u8 } fn set_mode(&mut self, mode: Mode) { // self.mode = mode; // self.mode_switch_timer = match mode { // Mode::HBlank => 777, // Mode:: // } } // This function is WAY TOO SLOW!!! Start by replacing the pixel data // Vec with a plain array. If that's still too slow, maybe optimize // getting background tile patterns? // // Or maybe it's not too slow... wtf pub fn get_pixel_data(&self) -> Box<[u8]> { // use std::time::{Instant, Duration}; // let start_time = Instant::now(); // The gameboy framebuffer is 256x256 pixels. Each tile is 8x8 pixels, // so there are 32x32 tiles in the framebuffer, or 1024 total. 
let background_tile_map1 = self.memory.borrow().read_range(0x9800, 1024); let background_tile_map2 = self.memory.borrow().read_range(0x9c00, 1024); fn decode_pattern(pattern_data: &[u8]) -> Box<[Color]> { let mut pixels = Vec::with_capacity(64); for i in 0..8 { let byte1 = pattern_data[2 * i]; let byte2 = pattern_data[2 * i + 1]; for bit in (0..8).rev() { let bit1 = byte1 & (1 << bit) != 0; let bit2 = byte2 & (1 << bit) != 0; let color = match (bit1, bit2) { (false, false) => Color::Lightest, (false, true) => Color::Lighter, (true, false) => Color::Darker, (true, true) => Color::Darkest, }; pixels.push(color); } } pixels.into_boxed_slice() } // Pattern Table 1 (at 0x8000-8fff, pattern numbers from 0 to 255) can be used for sprites, background, and window. // Pattern table 2 (at 0x8800-97ff, pattern numbers from -128 to 127) can be used for background and window only. let pattern_table1: Vec<_> = (0..256).map(|tile_number| { let address = 0x8000 + 16 * tile_number; let tile_pattern_data = self.memory.borrow().read_range(address, 16); decode_pattern(&tile_pattern_data) }).collect(); // TODO: Pattern Table 2 let pattern_table2: Vec<_> = (0..256).map(|tile_number| { let address = 0x8800 + 16 * tile_number; let tile_pattern_data = self.memory.borrow().read_range(address, 16); decode_pattern(&tile_pattern_data) }).collect(); #[derive(Debug)] struct SpriteData { y: u8, x: u8, pattern_number: u8, flags: u8, // TODO: Extract This } // TODO: This is where 160 bytes of OAM comes from. It's 40 4-byte blocks. // TODO: 8x16 sprite mode let oam: Vec<_> = (0..40).map(|sprite_number| { let address = 0xfe00 + 4 * sprite_number; let data = self.memory.borrow().read_range(address, 4); // println!(">>>>> read OAM data {:?}", data); SpriteData { y: data[0], x: data[1], pattern_number: data[2], flags: data[3] } }).collect(); // I think with the sprites, this function is slow again. Must look // for a more performant way to process the data? Should profile it to // be sure. // TODO: Window let mut pixel_data = Vec::new(); // let mut pixel_data = Vec::with_capacity((DISPLAY_RESOLUTION_Y * DISPLAY_RESOLUTION_X * 4) as usize); // This helps, but not enough... use a plain array for display_y in 0..DISPLAY_RESOLUTION_Y { for display_x in 0..DISPLAY_RESOLUTION_X { // TODO: Scrolling let tile_y = display_y % 8; let tile_x = display_x % 8; let tile_i = ((tile_y * 8) + tile_x) as usize; // Pixel index into tile pattern // FLOATING POINT DIVISION IS EXPENSIVE!!! Though I would imagine it optimizes (mod 8) to bitwise AND 0x07 // Note: This is per row of framebuffer, NOT per row // of actual display resolution. The gameboy's internal // frame buffer is 256x256; larger than the screen's // display. That's why you have the scroll registers. let tiles_per_row = 256 / 8; // TODO: Scrolling let tile_index_y = display_y / 8; let tile_index_x = display_x / 8; let tile_index = ((tile_index_y * tiles_per_row) + tile_index_x) as usize; // println!("({}, {}) tile_index y={}, x={}, index={}", display_x, display_y, tile_index_y, tile_index_x, tile_index); // TODO: This strategy re-loads the same tile data for every // pixel in every tile. If the same tile is reused, this waste // is even worse. It would probably be better to collect the // usage of each tile and blit them directly into the pixel // data on a pattern-by-pattern basis, rather than on a // pixel-by-pixel basis. // // Actually, it doesn't. All the patterns are loaded once // before this loop ever starts... 
// TODO: Alternate tile map selector // let tile_pattern_number = background_tile_map1[tile_index] as usize; let tile_pattern_number = if self.lcdc & 0x08 == 0x08 { background_tile_map2[tile_index] } else { background_tile_map1[tile_index] }; // TODO: Select this based on the LCDC control bit // let background_tile_patterns = &pattern_table1; let tile_pattern = if self.lcdc & 0x10 == 0x10 { &pattern_table1[tile_pattern_number as usize] } else { // Tile pattern numbers are stored in a signed format, // so even though the table starts at 0x8800, the data // corresponding to Pattern 0 is at 0x9000. // // Therefore we have to convert the signed 8-bit number // to an unsigned 8-bit number. // // This looks disgusting, but works. // TODO: Clean this up let tile_pattern_index = (((tile_pattern_number as i8) as i16) + 128) as usize; &pattern_table2[tile_pattern_index as usize] }; // let tile_pattern = &background_tile_patterns[tile_pattern_number]; let tile_color = tile_pattern[tile_i]; // This is always true. let sprite_patterns = &pattern_table1; // Now check to see if there is a sprite on the screen here. // If there are multiple sprites then the one with the left-most // X coordinate has priority. If they have the same X coordinate, // then they are ordered according to the table ordering starting // at 0xfe00, 0xfe04, etc. // // TODO: It would probably be better to check for a sprite first, // and only then compute the color of the tile underneath if there // is no sprite or if the sprite's pixel at that spot is transparent. // // Check to see if the BG layer is turned off let mut color = if self.lcdc & 0x01 == 0x01 { tile_color } else { Color::Lightest }; // TODO: Only 10 sprites can be displayed per line. I'm tempted // to add an option to disable this so it will show all the sprites. for sprite_data in &oam { let x_flip = sprite_data.flags & 0x20 == 0x20; let y_flip = sprite_data.flags & 0x40 == 0x40; // TODO: Other flags let sprite_pattern = &sprite_patterns[sprite_data.pattern_number as usize]; // if sprite_data.pattern_number == 0 { // continue; // } // println!("Sprite: {:?}", sprite_data); // println!("sprite pattern number: {}", sprite_data.pattern_number); // // TODO: ??? // let sprite_y = (display_y + sprite_data.y as u32) % 8; // let sprite_x = (display_x + sprite_data.x as u32) % 8; // let sprite_i = ((sprite_y * 8) + sprite_x) as usize; // Pixel index into tile pattern // Check to see if the current pixel is on this sprite // // TODO: What happens if the subtraction goes negative? Does it wrap around the screen? let sprite_position_x = sprite_data.x - 8; let sprite_position_y = sprite_data.y - 16; // I guess the position is on the right/bottom of the sprite? let in_x_window = (display_x as u8) >= sprite_position_x && (display_x as u8) < sprite_position_x + 8; let in_y_window = (display_y as u8) >= sprite_position_y && (display_y as u8) < sprite_position_y + 8; // TODO: 8x16, also this method sucks if in_x_window && in_y_window { let sprite_x = ((display_x as u8) - sprite_position_x) % 8; let sprite_y = ((display_y as u8) - sprite_position_y) % 8; // TODO: 8x16 let sprite_i = ((sprite_y * 8) + sprite_x) as usize; // Pixel index into sprite pattern let sprite_color = sprite_pattern[sprite_i]; // Check to see if the sprite layer is enabled if self.lcdc & 0x02 == 0x02 { // If the priority flag is 0 then the sprite covers // the background and window no matter what. If it's // 1 then it covers them only if the background/window // pixel is the Lightest (00) color. 
// // TODO: Can these be combined somehow? // if sprite_data.flags & 0x80 == 0 { color = sprite_color; } else if let Color::Lightest = tile_color { color = sprite_color; } } } } // Check to see if the LCD is off if self.lcdc & 0x80 != 0x80 { color = Color::Lightest; } let (red, green, blue) = color.to_rgb(); pixel_data.push(blue); pixel_data.push(green); pixel_data.push(red); pixel_data.push(0xff); // Alpha } } let result = pixel_data.into_boxed_slice(); // let end_time = Instant::now(); // let duration = end_time - start_time; // println!("get_pixel_data time: {:?} ms", (duration.subsec_nanos() as f32) / 1_000_000f32); result // pixel_data.into_boxed_slice() } }
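// A minimal standalone sketch (hypothetical helper, not part of the emulator
// above) of the 2-bits-per-pixel tile row decoding that `decode_pattern`
// performs: each 8-pixel row is stored as two bytes, and for each pixel the
// bit from `byte1` supplies the high bit of the shade index while the bit
// from `byte2` supplies the low bit, exactly as in the match on
// `(bit1, bit2)` above.

fn decode_row(byte1: u8, byte2: u8) -> [u8; 8] {
    let mut row = [0u8; 8];
    // MSB first, matching `for bit in (0..8).rev()` in decode_pattern.
    for (i, bit) in (0..8).rev().enumerate() {
        let b1 = (byte1 >> bit) & 1;
        let b2 = (byte2 >> bit) & 1;
        // 0 = Lightest, 1 = Lighter, 2 = Darker, 3 = Darkest
        row[i] = (b1 << 1) | b2;
    }
    row
}

fn main() {
    // Under this mapping the byte pair (0x3C, 0x7E) decodes to a row whose
    // middle pixels are darkest and whose edges fade to the lightest shade.
    assert_eq!(decode_row(0x3C, 0x7E), [0, 1, 3, 3, 3, 3, 1, 0]);
}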
avg_line_length: 31.014263
max_line_length: 164
alphanum_fraction: 0.502402
hexsha: 08d6e6b48f1b02b87739cdfd3bff42d0524716ef
size: 34,399
#![allow(dead_code, mutable_transmutes, non_camel_case_types, non_snake_case, non_upper_case_globals, unused_assignments, unused_mut)] use crate::core_memory::xmalloc; use harfbuzz_sys::{hb_font_funcs_t, hb_destroy_func_t, hb_font_t, hb_codepoint_t, hb_position_t, hb_bool_t, hb_font_destroy, hb_glyph_extents_t, hb_font_funcs_create, hb_font_funcs_set_glyph_h_advance_func, hb_font_funcs_set_glyph_v_advance_func, hb_font_funcs_set_glyph_h_origin_func, hb_font_funcs_set_glyph_v_origin_func, hb_font_funcs_set_glyph_contour_point_func, hb_font_funcs_set_glyph_extents_func, hb_blob_create, HB_MEMORY_MODE_WRITABLE, hb_face_t, hb_tag_t, hb_blob_t, hb_font_funcs_set_glyph_name_func, hb_face_create_for_tables, hb_face_set_index, hb_face_set_upem, hb_font_create, hb_face_destroy, hb_font_set_funcs, hb_font_set_scale, hb_font_set_ppem}; use freetype::freetype_sys::{FT_Byte, FT_UInt, FT_Long, FT_ULong, FT_Int32, FT_Pointer, FT_Error, FT_Fixed, FT_Library, FT_Face, FT_Glyph, FT_String, FT_Parameter, FT_Vector, FT_Sfnt_Tag, FT_BBox}; use freetype::freetype_sys::{ FT_Init_FreeType, FT_New_Memory_Face, FT_Attach_Stream, FT_Get_Char_Index, FT_Get_First_Char, FT_Get_Next_Char, FT_Load_Glyph, FT_Get_Glyph, FT_Get_Glyph_Name, FT_Glyph_Get_CBox, FT_Done_Glyph, FT_Get_Sfnt_Table, FT_Get_Kerning, FT_Get_Name_Index, FT_Done_Face, }; use crate::freetype_sys_patch::{FT_Face_GetCharVariantIndex, FT_Get_Advance, FT_Load_Sfnt_Table}; use crate::{ ttstub_input_close, ttstub_input_get_size, ttstub_input_read, ttstub_input_getc, ttstub_input_open, }; use bridge::TTInputFormat; #[cfg(not(target_os = "macos"))] mod imp {} #[cfg(target_os = "macos")] #[path = "xetex_font_info_coretext.rs"] mod imp; extern crate libc; extern "C" { #[no_mangle] fn malloc(_: libc::c_ulong) -> *mut libc::c_void; #[no_mangle] fn free(__ptr: *mut libc::c_void); #[no_mangle] fn strcpy(_: *mut libc::c_char, _: *const libc::c_char) -> *mut libc::c_char; #[no_mangle] fn strrchr(_: *const libc::c_char, _: libc::c_int) -> *mut libc::c_char; #[no_mangle] fn strlen(_: *const libc::c_char) -> libc::c_ulong; /* The internal, C/C++ interface: */ #[no_mangle] fn _tt_abort(format: *const libc::c_char, _: ...) -> !; /* tectonic/core-memory.h: basic dynamic memory helpers Copyright 2016-2018 the Tectonic Project Licensed under the MIT License. */ #[no_mangle] fn xstrdup(s: *const libc::c_char) -> *mut libc::c_char; #[no_mangle] fn tolower(_: libc::c_int) -> libc::c_int; // TODO: NOTE: this api doesn't included in harfbuzz_sys #[no_mangle] fn hb_font_funcs_set_glyph_h_kerning_func( ffuncs: *mut hb_font_funcs_t, func: hb_font_get_glyph_h_kerning_func_t, user_data: *mut libc::c_void, destroy: hb_destroy_func_t, ); #[no_mangle] fn hb_font_funcs_set_glyph_func( ffuncs: *mut hb_font_funcs_t, func: hb_font_get_glyph_func_t, user_data: *mut libc::c_void, destroy: hb_destroy_func_t, ); #[no_mangle] fn hb_font_funcs_set_glyph_v_kerning_func( ffuncs: *mut hb_font_funcs_t, func: hb_font_get_glyph_v_kerning_func_t, user_data: *mut libc::c_void, destroy: hb_destroy_func_t, ); #[no_mangle] fn Fix2D(f: Fixed) -> libc::c_double; } pub type size_t = usize; pub type int32_t = i32; pub type uint16_t = u16; pub type uint32_t = u32; pub type ssize_t = isize; use bridge::InputHandleWrapper; pub type UChar32 = int32_t; /* quasi-hack to get the primary input */ /* */ /* this #if 0 ... 
#endif clause is for documentation purposes */ pub type hb_font_get_glyph_kerning_func_t = Option< unsafe extern "C" fn( _: *mut hb_font_t, _: *mut libc::c_void, _: hb_codepoint_t, _: hb_codepoint_t, _: *mut libc::c_void, ) -> hb_position_t, >; pub type hb_font_get_glyph_h_kerning_func_t = hb_font_get_glyph_kerning_func_t; pub type hb_font_get_glyph_func_t = Option< unsafe extern "C" fn( _: *mut hb_font_t, _: *mut libc::c_void, _: hb_codepoint_t, _: hb_codepoint_t, _: *mut hb_codepoint_t, _: *mut libc::c_void, ) -> hb_bool_t, >; pub type hb_font_get_glyph_v_kerning_func_t = hb_font_get_glyph_kerning_func_t; pub type OTTag = uint32_t; pub type GlyphID = uint16_t; pub type Fixed = i32; #[derive(Copy, Clone)] #[repr(C)] pub struct GlyphBBox { pub xMin: libc::c_float, pub yMin: libc::c_float, pub xMax: libc::c_float, pub yMax: libc::c_float, } #[derive(Copy, Clone)] #[repr(C)] pub struct XeTeXFontInst { pub m_unitsPerEM: libc::c_ushort, pub m_pointSize: libc::c_float, pub m_ascent: libc::c_float, pub m_descent: libc::c_float, pub m_capHeight: libc::c_float, pub m_xHeight: libc::c_float, pub m_italicAngle: libc::c_float, pub m_vertical: bool, pub m_filename: *mut libc::c_char, pub m_index: uint32_t, pub m_ftFace: FT_Face, pub m_backingData: *mut FT_Byte, pub m_backingData2: *mut FT_Byte, pub m_hbFont: *mut hb_font_t, pub m_subdtor: Option<unsafe extern "C" fn(_: *mut XeTeXFontInst) -> ()>, } /* * file name: XeTeXFontInst.cpp * * created on: 2005-10-22 * created by: Jonathan Kew * * originally based on PortableFontInstance.cpp from ICU */ /* Return NAME with any leading path stripped off. This returns a pointer into NAME. For example, `basename ("/foo/bar.baz")' returns "bar.baz". */ unsafe extern "C" fn xbasename(mut name: *const libc::c_char) -> *const libc::c_char { let mut base: *const libc::c_char = name; let mut p: *const libc::c_char = 0 as *const libc::c_char; p = base; while *p != 0 { if *p as libc::c_int == '/' as i32 { base = p.offset(1) } p = p.offset(1) } return base; } #[no_mangle] pub static mut gFreeTypeLibrary: FT_Library = 0 as FT_Library; static mut hbFontFuncs: *mut hb_font_funcs_t = 0 as *mut hb_font_funcs_t; #[no_mangle] pub unsafe extern "C" fn XeTeXFontInst_base_ctor( mut self_0: *mut XeTeXFontInst, mut pathname: *const libc::c_char, mut index: libc::c_int, mut pointSize: libc::c_float, mut status: *mut libc::c_int, ) { (*self_0).m_unitsPerEM = 0i32 as libc::c_ushort; (*self_0).m_pointSize = pointSize; (*self_0).m_ascent = 0i32 as libc::c_float; (*self_0).m_descent = 0i32 as libc::c_float; (*self_0).m_capHeight = 0i32 as libc::c_float; (*self_0).m_xHeight = 0i32 as libc::c_float; (*self_0).m_italicAngle = 0i32 as libc::c_float; (*self_0).m_vertical = 0i32 != 0; (*self_0).m_filename = 0 as *mut libc::c_char; (*self_0).m_index = 0i32 as uint32_t; (*self_0).m_ftFace = 0 as FT_Face; (*self_0).m_backingData = 0 as *mut FT_Byte; (*self_0).m_backingData2 = 0 as *mut FT_Byte; (*self_0).m_hbFont = 0 as *mut hb_font_t; (*self_0).m_subdtor = None; if !pathname.is_null() { XeTeXFontInst_initialize(self_0, pathname, index, status); }; } #[no_mangle] pub unsafe extern "C" fn XeTeXFontInst_create( mut pathname: *const libc::c_char, mut index: libc::c_int, mut pointSize: libc::c_float, mut status: *mut libc::c_int, ) -> *mut XeTeXFontInst { let mut self_0: *mut XeTeXFontInst = malloc(::std::mem::size_of::<XeTeXFontInst>() as libc::c_ulong) as *mut XeTeXFontInst; XeTeXFontInst_base_ctor(self_0, pathname, index, pointSize, status); return self_0; } #[no_mangle] pub unsafe extern "C" fn 
XeTeXFontInst_delete(mut self_0: *mut XeTeXFontInst) { if self_0.is_null() { return; } if (*self_0).m_subdtor.is_some() { (*self_0).m_subdtor.expect("non-null function pointer")(self_0); } if !(*self_0).m_ftFace.is_null() { FT_Done_Face((*self_0).m_ftFace); (*self_0).m_ftFace = 0 as FT_Face } hb_font_destroy((*self_0).m_hbFont); free((*self_0).m_backingData as *mut libc::c_void); free((*self_0).m_backingData2 as *mut libc::c_void); free((*self_0).m_filename as *mut libc::c_void); free(self_0 as *mut libc::c_void); } /* HarfBuzz font functions */ unsafe extern "C" fn _get_glyph( mut _hbf: *mut hb_font_t, mut font_data: *mut libc::c_void, mut ch: hb_codepoint_t, mut vs: hb_codepoint_t, mut gid: *mut hb_codepoint_t, mut _p: *mut libc::c_void, ) -> hb_bool_t { let mut face: FT_Face = font_data as FT_Face; *gid = 0i32 as hb_codepoint_t; if vs != 0 { *gid = FT_Face_GetCharVariantIndex(face, ch as FT_ULong, vs as FT_ULong) } if *gid == 0i32 as libc::c_uint { *gid = FT_Get_Char_Index(face, ch as FT_ULong) } return (*gid != 0i32 as libc::c_uint) as libc::c_int; } unsafe extern "C" fn _get_glyph_advance( mut face: FT_Face, mut gid: FT_UInt, mut vertical: bool, ) -> FT_Fixed { let mut error: FT_Error = 0; let mut advance: FT_Fixed = 0; let mut flags: libc::c_int = (1i64 << 0i32) as libc::c_int; if vertical { flags = (flags as libc::c_long | 1 << 4i32) as libc::c_int } error = FT_Get_Advance(face, gid, flags, &mut advance); if error != 0 { advance = 0i32 as FT_Fixed } /* FreeType's vertical metrics grows downward */ if vertical { advance = -advance } return advance; } unsafe extern "C" fn _get_glyph_h_advance( mut _hbf: *mut hb_font_t, mut font_data: *mut libc::c_void, mut gid: hb_codepoint_t, mut _p: *mut libc::c_void, ) -> hb_position_t { return _get_glyph_advance(font_data as FT_Face, gid, 0i32 != 0) as hb_position_t; } unsafe extern "C" fn _get_glyph_v_advance( mut _hbf: *mut hb_font_t, mut font_data: *mut libc::c_void, mut gid: hb_codepoint_t, mut _p: *mut libc::c_void, ) -> hb_position_t { return _get_glyph_advance(font_data as FT_Face, gid, 1i32 != 0) as hb_position_t; } unsafe extern "C" fn _get_glyph_h_origin( mut _hbf: *mut hb_font_t, mut font_data: *mut libc::c_void, mut gid: hb_codepoint_t, mut x: *mut hb_position_t, mut y: *mut hb_position_t, mut _p: *mut libc::c_void, ) -> hb_bool_t { // horizontal origin is (0, 0) return 1i32; } unsafe extern "C" fn _get_glyph_v_origin( mut _hbf: *mut hb_font_t, mut font_data: *mut libc::c_void, mut gid: hb_codepoint_t, mut x: *mut hb_position_t, mut y: *mut hb_position_t, mut _p: *mut libc::c_void, ) -> hb_bool_t { // vertical origin is (0, 0) for now return 1i32; } unsafe extern "C" fn _get_glyph_h_kerning( mut _hbf: *mut hb_font_t, mut font_data: *mut libc::c_void, mut gid1: hb_codepoint_t, mut gid2: hb_codepoint_t, mut _p: *mut libc::c_void, ) -> hb_position_t { use freetype::freetype_sys::FT_KERNING_UNSCALED; let mut face: FT_Face = font_data as FT_Face; let mut error: FT_Error = 0; let mut kerning: FT_Vector = FT_Vector { x: 0, y: 0 }; let mut ret: hb_position_t = 0; error = FT_Get_Kerning( face, gid1, gid2, FT_KERNING_UNSCALED as libc::c_int as FT_UInt, &mut kerning, ); if error != 0 { ret = 0i32 } else { ret = kerning.x as hb_position_t } return ret; } unsafe extern "C" fn _get_glyph_v_kerning( mut _hbf: *mut hb_font_t, mut font_data: *mut libc::c_void, mut gid1: hb_codepoint_t, mut gid2: hb_codepoint_t, mut _p: *mut libc::c_void, ) -> hb_position_t { /* FreeType does not support vertical kerning */ return 0i32; } unsafe extern "C" fn 
_get_glyph_extents( mut _hbf: *mut hb_font_t, mut font_data: *mut libc::c_void, mut gid: hb_codepoint_t, mut extents: *mut hb_glyph_extents_t, mut _p: *mut libc::c_void, ) -> hb_bool_t { let mut face: FT_Face = font_data as FT_Face; let mut error: FT_Error = 0; error = FT_Load_Glyph(face, gid, (1i64 << 0i32) as FT_Int32); if error == 0 { (*extents).x_bearing = (*(*face).glyph).metrics.horiBearingX as hb_position_t; (*extents).y_bearing = (*(*face).glyph).metrics.horiBearingY as hb_position_t; (*extents).width = (*(*face).glyph).metrics.width as hb_position_t; (*extents).height = -(*(*face).glyph).metrics.height as hb_position_t } return (error == 0) as libc::c_int; } unsafe extern "C" fn _get_glyph_contour_point( mut _hbf: *mut hb_font_t, mut font_data: *mut libc::c_void, mut gid: hb_codepoint_t, mut point_index: libc::c_uint, mut x: *mut hb_position_t, mut y: *mut hb_position_t, mut _p: *mut libc::c_void, ) -> hb_bool_t { use freetype::freetype_sys::FT_GLYPH_FORMAT_OUTLINE; let mut face: FT_Face = font_data as FT_Face; let mut error: FT_Error = 0; let mut ret: bool = 0i32 != 0; error = FT_Load_Glyph(face, gid, (1i64 << 0i32) as FT_Int32); if error == 0 { if (*(*face).glyph).format as libc::c_uint == FT_GLYPH_FORMAT_OUTLINE as libc::c_int as libc::c_uint { if point_index < (*(*face).glyph).outline.n_points as libc::c_uint { *x = (*(*(*face).glyph).outline.points.offset(point_index as isize)).x as hb_position_t; *y = (*(*(*face).glyph).outline.points.offset(point_index as isize)).y as hb_position_t; ret = 1i32 != 0 } } } return ret as hb_bool_t; } unsafe extern "C" fn _get_glyph_name( mut _hbf: *mut hb_font_t, mut font_data: *mut libc::c_void, mut gid: hb_codepoint_t, mut name: *mut libc::c_char, mut size: libc::c_uint, mut _p: *mut libc::c_void, ) -> hb_bool_t { let mut face: FT_Face = font_data as FT_Face; let mut ret: bool = 0i32 != 0; ret = FT_Get_Glyph_Name(face, gid, name as FT_Pointer, size) == 0; if ret as libc::c_int != 0 && (size != 0 && *name == 0) { ret = 0i32 != 0 } return ret as hb_bool_t; } unsafe extern "C" fn _get_font_funcs() -> *mut hb_font_funcs_t { static mut funcs: *mut hb_font_funcs_t = 0 as *const hb_font_funcs_t as *mut hb_font_funcs_t; if funcs.is_null() { funcs = hb_font_funcs_create() } hb_font_funcs_set_glyph_func( funcs, Some( _get_glyph as unsafe extern "C" fn( _: *mut hb_font_t, _: *mut libc::c_void, _: hb_codepoint_t, _: hb_codepoint_t, _: *mut hb_codepoint_t, _: *mut libc::c_void, ) -> hb_bool_t, ), 0 as *mut libc::c_void, None, ); hb_font_funcs_set_glyph_h_advance_func( funcs, Some(_get_glyph_h_advance), 0 as *mut libc::c_void, None, ); hb_font_funcs_set_glyph_v_advance_func( funcs, Some(_get_glyph_v_advance), 0 as *mut libc::c_void, None, ); hb_font_funcs_set_glyph_h_origin_func( funcs, Some(_get_glyph_h_origin), 0 as *mut libc::c_void, None, ); hb_font_funcs_set_glyph_v_origin_func( funcs, Some(_get_glyph_v_origin), 0 as *mut libc::c_void, None, ); hb_font_funcs_set_glyph_h_kerning_func( funcs, Some( _get_glyph_h_kerning as unsafe extern "C" fn( _: *mut hb_font_t, _: *mut libc::c_void, _: hb_codepoint_t, _: hb_codepoint_t, _: *mut libc::c_void, ) -> hb_position_t, ), 0 as *mut libc::c_void, None, ); hb_font_funcs_set_glyph_v_kerning_func( funcs, Some( _get_glyph_v_kerning as unsafe extern "C" fn( _: *mut hb_font_t, _: *mut libc::c_void, _: hb_codepoint_t, _: hb_codepoint_t, _: *mut libc::c_void, ) -> hb_position_t, ), 0 as *mut libc::c_void, None, ); hb_font_funcs_set_glyph_extents_func( funcs, Some(_get_glyph_extents), 0 as *mut libc::c_void, None, ); 
hb_font_funcs_set_glyph_contour_point_func( funcs, Some(_get_glyph_contour_point), 0 as *mut libc::c_void, None, ); hb_font_funcs_set_glyph_name_func( funcs, Some(_get_glyph_name), 0 as *mut libc::c_void, None, ); return funcs; } unsafe extern "C" fn _get_table( mut _hfc: *mut hb_face_t, mut tag: hb_tag_t, mut user_data: *mut libc::c_void, ) -> *mut hb_blob_t { let mut face: FT_Face = user_data as FT_Face; let mut length: FT_ULong = 0i32 as FT_ULong; let mut table: *mut FT_Byte = 0 as *mut FT_Byte; let mut error: FT_Error = 0; let mut blob: *mut hb_blob_t = 0 as *mut hb_blob_t; error = FT_Load_Sfnt_Table( face, tag as FT_ULong, 0i32 as FT_Long, 0 as *mut FT_Byte, &mut length, ); if error == 0 { table = xmalloc( length.wrapping_mul(::std::mem::size_of::<libc::c_char>() as libc::c_ulong) as _, ) as *mut FT_Byte; if !table.is_null() { error = FT_Load_Sfnt_Table(face, tag as FT_ULong, 0i32 as FT_Long, table, &mut length); if error == 0 { blob = hb_blob_create( table as *const libc::c_char, length as libc::c_uint, HB_MEMORY_MODE_WRITABLE, table as *mut libc::c_void, Some(free as unsafe extern "C" fn(_: *mut libc::c_void) -> ()), ) } else { free(table as *mut libc::c_void); } } } return blob; } #[no_mangle] pub unsafe extern "C" fn XeTeXFontInst_initialize( mut self_0: *mut XeTeXFontInst, mut pathname: *const libc::c_char, mut index: libc::c_int, mut status: *mut libc::c_int, ) { use freetype::freetype_sys::{FT_Open_Args}; use freetype::freetype_sys::{TT_OS2, TT_Postscript}; use crate::freetype_sys_patch::{FT_SFNT_POST, FT_SFNT_OS2}; let mut postTable: *mut TT_Postscript = 0 as *mut TT_Postscript; let mut os2Table: *mut TT_OS2 = 0 as *mut TT_OS2; let mut error: FT_Error = 0; let mut hbFace: *mut hb_face_t = 0 as *mut hb_face_t; if gFreeTypeLibrary.is_null() { error = FT_Init_FreeType(&mut gFreeTypeLibrary); if error != 0 { _tt_abort( b"FreeType initialization failed, error %d\x00" as *const u8 as *const libc::c_char, error, ); } } // Here we emulate some logic that was originally in find_native_font(); let mut handle = ttstub_input_open(pathname, TTInputFormat::OPENTYPE, 0) .or_else(|| ttstub_input_open(pathname, TTInputFormat::TRUETYPE, 0)) .or_else(|| ttstub_input_open(pathname, TTInputFormat::TYPE1, 0)); if handle.is_none() { *status = 1i32; return; } let mut handle = handle.unwrap(); let mut sz = ttstub_input_get_size(&mut handle); (*self_0).m_backingData = xmalloc(sz as _) as *mut FT_Byte; let mut r = ttstub_input_read(handle.0.as_ptr(), (*self_0).m_backingData as *mut libc::c_char, sz); if r < 0 || r != sz as i64 { _tt_abort(b"failed to read font file\x00" as *const u8 as *const libc::c_char); } ttstub_input_close(handle); error = FT_New_Memory_Face( gFreeTypeLibrary, (*self_0).m_backingData, sz as FT_Long, index as FT_Long, &mut (*self_0).m_ftFace, ); if (*(*self_0).m_ftFace).face_flags & 1 << 0i32 == 0 { *status = 1i32; return; } /* for non-sfnt-packaged fonts (presumably Type 1), see if there is an AFM file we can attach */ if index == 0i32 && (*(*self_0).m_ftFace).face_flags & 1 << 3i32 == 0 { // Tectonic: this code used to use kpse_find_file and FT_Attach_File // to try to find metrics for this font. Thanks to the existence of // FT_Attach_Stream we can emulate this behavior while going through // the Rust I/O layer. let mut afm: *mut libc::c_char = xstrdup(xbasename(pathname)); let mut p: *mut libc::c_char = strrchr(afm, '.' 
as i32); if !p.is_null() && strlen(p) == 4i32 as libc::c_ulong && tolower(*p.offset(1) as libc::c_int) == 'p' as i32 && tolower(*p.offset(2) as libc::c_int) == 'f' as i32 { strcpy(p, b".afm\x00" as *const u8 as *const libc::c_char); } let mut afm_handle = ttstub_input_open(afm, TTInputFormat::AFM, 0i32); free(afm as *mut libc::c_void); if let Some(mut afm_handle) = afm_handle { sz = ttstub_input_get_size(&mut afm_handle); (*self_0).m_backingData2 = xmalloc(sz as _) as *mut FT_Byte; r = ttstub_input_read( afm_handle.0.as_ptr(), (*self_0).m_backingData2 as *mut libc::c_char, sz, ); if r < 0 || r != sz as i64 { _tt_abort(b"failed to read AFM file\x00" as *const u8 as *const libc::c_char); } ttstub_input_close(afm_handle); let mut open_args: FT_Open_Args = std::mem::zeroed(); open_args.flags = 0x1i32 as FT_UInt; open_args.memory_base = (*self_0).m_backingData2; open_args.memory_size = sz as FT_Long; FT_Attach_Stream((*self_0).m_ftFace, &mut open_args); } } (*self_0).m_filename = xstrdup(pathname); (*self_0).m_index = index as uint32_t; (*self_0).m_unitsPerEM = (*(*self_0).m_ftFace).units_per_EM; (*self_0).m_ascent = XeTeXFontInst_unitsToPoints(self_0, (*(*self_0).m_ftFace).ascender as libc::c_float); (*self_0).m_descent = XeTeXFontInst_unitsToPoints(self_0, (*(*self_0).m_ftFace).descender as libc::c_float); postTable = XeTeXFontInst_getFontTableFT(self_0, FT_SFNT_POST) as *mut TT_Postscript; if !postTable.is_null() { (*self_0).m_italicAngle = Fix2D((*postTable).italicAngle as Fixed) as libc::c_float } os2Table = XeTeXFontInst_getFontTableFT(self_0, FT_SFNT_OS2) as *mut TT_OS2; if !os2Table.is_null() { (*self_0).m_capHeight = XeTeXFontInst_unitsToPoints(self_0, (*os2Table).sCapHeight as libc::c_float); (*self_0).m_xHeight = XeTeXFontInst_unitsToPoints(self_0, (*os2Table).sxHeight as libc::c_float) } // Set up HarfBuzz font hbFace = hb_face_create_for_tables( Some( _get_table as unsafe extern "C" fn( _: *mut hb_face_t, _: hb_tag_t, _: *mut libc::c_void, ) -> *mut hb_blob_t, ), (*self_0).m_ftFace as *mut libc::c_void, None, ); hb_face_set_index(hbFace, index as libc::c_uint); hb_face_set_upem(hbFace, (*self_0).m_unitsPerEM as libc::c_uint); (*self_0).m_hbFont = hb_font_create(hbFace); hb_face_destroy(hbFace); if hbFontFuncs.is_null() { hbFontFuncs = _get_font_funcs() } hb_font_set_funcs( (*self_0).m_hbFont, hbFontFuncs, (*self_0).m_ftFace as *mut libc::c_void, None, ); hb_font_set_scale( (*self_0).m_hbFont, (*self_0).m_unitsPerEM as libc::c_int, (*self_0).m_unitsPerEM as libc::c_int, ); // We don’t want device tables adjustments hb_font_set_ppem( (*self_0).m_hbFont, 0i32 as libc::c_uint, 0i32 as libc::c_uint, ); } #[no_mangle] pub unsafe extern "C" fn XeTeXFontInst_setLayoutDirVertical( mut self_0: *mut XeTeXFontInst, mut vertical: bool, ) { (*self_0).m_vertical = vertical; } #[no_mangle] pub unsafe extern "C" fn XeTeXFontInst_getFontTable( mut self_0: *const XeTeXFontInst, mut tag: OTTag, ) -> *mut libc::c_void { let mut tmpLength: FT_ULong = 0i32 as FT_ULong; let mut error: FT_Error = FT_Load_Sfnt_Table( (*self_0).m_ftFace, tag as FT_ULong, 0i32 as FT_Long, 0 as *mut FT_Byte, &mut tmpLength, ); if error != 0 { return 0 as *mut libc::c_void; } let mut table: *mut libc::c_void = xmalloc( tmpLength.wrapping_mul(::std::mem::size_of::<libc::c_char>() as libc::c_ulong) as _, ); if !table.is_null() { error = FT_Load_Sfnt_Table( (*self_0).m_ftFace, tag as FT_ULong, 0i32 as FT_Long, table as *mut FT_Byte, &mut tmpLength, ); if error != 0 { free(table); return 0 as *mut libc::c_void; } } return table; } 
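// A FreeType-free sketch of the two-pass idiom that `_get_table` and
// `XeTeXFontInst_getFontTable` use with FT_Load_Sfnt_Table: call once with a
// null buffer to learn the table length, allocate exactly that much, then
// call again to fill the buffer. Both closures here are hypothetical
// stand-ins for the two FT_Load_Sfnt_Table calls.

fn load_table(
    query_len: impl Fn() -> Option<usize>,
    fill: impl Fn(&mut [u8]) -> bool,
) -> Option<Vec<u8>> {
    // Pass 1: length query only (the real call passes a NULL buffer).
    let len = query_len()?;
    // Pass 2: allocate and fill; give up (and drop the buffer) on failure.
    let mut buf = vec![0u8; len];
    if fill(&mut buf) {
        Some(buf)
    } else {
        None
    }
}

fn main() {
    let table = [0xDEu8, 0xAD, 0xBE, 0xEF];
    let loaded = load_table(
        || Some(table.len()),
        |buf| {
            buf.copy_from_slice(&table);
            true
        },
    );
    assert_eq!(loaded.as_deref(), Some(&table[..]));
}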
#[no_mangle] pub unsafe extern "C" fn XeTeXFontInst_getFontTableFT( mut self_0: *const XeTeXFontInst, mut tag: FT_Sfnt_Tag, ) -> *mut libc::c_void { return FT_Get_Sfnt_Table((*self_0).m_ftFace, tag); } #[no_mangle] pub unsafe extern "C" fn XeTeXFontInst_getGlyphBounds( mut self_0: *mut XeTeXFontInst, mut gid: GlyphID, mut bbox: *mut GlyphBBox, ) { use freetype::freetype_sys::FT_GLYPH_BBOX_UNSCALED; (*bbox).yMax = 0.0f64 as libc::c_float; (*bbox).xMax = (*bbox).yMax; (*bbox).yMin = (*bbox).xMax; (*bbox).xMin = (*bbox).yMin; let mut error: FT_Error = FT_Load_Glyph( (*self_0).m_ftFace, gid as FT_UInt, (1i64 << 0i32) as FT_Int32, ); if error != 0 { return; } let mut glyph: FT_Glyph = 0 as FT_Glyph; error = FT_Get_Glyph((*(*self_0).m_ftFace).glyph, &mut glyph); if error == 0i32 { let mut ft_bbox: FT_BBox = FT_BBox { xMin: 0, yMin: 0, xMax: 0, yMax: 0, }; FT_Glyph_Get_CBox( glyph, FT_GLYPH_BBOX_UNSCALED as libc::c_int as FT_UInt, &mut ft_bbox, ); (*bbox).xMin = XeTeXFontInst_unitsToPoints(self_0, ft_bbox.xMin as libc::c_float); (*bbox).yMin = XeTeXFontInst_unitsToPoints(self_0, ft_bbox.yMin as libc::c_float); (*bbox).xMax = XeTeXFontInst_unitsToPoints(self_0, ft_bbox.xMax as libc::c_float); (*bbox).yMax = XeTeXFontInst_unitsToPoints(self_0, ft_bbox.yMax as libc::c_float); FT_Done_Glyph(glyph); }; } #[no_mangle] pub unsafe extern "C" fn XeTeXFontInst_mapCharToGlyph( mut self_0: *const XeTeXFontInst, mut ch: UChar32, ) -> GlyphID { return FT_Get_Char_Index((*self_0).m_ftFace, ch as FT_ULong) as GlyphID; } #[no_mangle] pub unsafe extern "C" fn XeTeXFontInst_getNumGlyphs(mut self_0: *const XeTeXFontInst) -> uint16_t { return (*(*self_0).m_ftFace).num_glyphs as uint16_t; } #[no_mangle] pub unsafe extern "C" fn XeTeXFontInst_getGlyphWidth( mut self_0: *mut XeTeXFontInst, mut gid: GlyphID, ) -> libc::c_float { return XeTeXFontInst_unitsToPoints( self_0, _get_glyph_advance((*self_0).m_ftFace, gid as FT_UInt, 0i32 != 0) as libc::c_float, ); } #[no_mangle] pub unsafe extern "C" fn XeTeXFontInst_getGlyphHeightDepth( mut self_0: *mut XeTeXFontInst, mut gid: GlyphID, mut ht: *mut libc::c_float, mut dp: *mut libc::c_float, ) { let mut bbox: GlyphBBox = GlyphBBox { xMin: 0., yMin: 0., xMax: 0., yMax: 0., }; XeTeXFontInst_getGlyphBounds(self_0, gid, &mut bbox); if !ht.is_null() { *ht = bbox.yMax } if !dp.is_null() { *dp = -bbox.yMin }; } /* ***************************************************************************\ Part of the XeTeX typesetting system Copyright (c) 1994-2008 by SIL International Copyright (c) 2009, 2011 by Jonathan Kew SIL Author(s): Jonathan Kew Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Except as contained in this notice, the name of the copyright holders shall not be used in advertising or otherwise to promote the sale, use or other dealings in this Software without prior written authorization from the copyright holders. \****************************************************************************/ /* * file name: XeTeXFontInst.h * * created on: 2005-10-22 * created by: Jonathan Kew * * originally based on PortableFontInstance.h from ICU */ // create specific subclasses for each supported platform // false = horizontal, true = vertical // font filename // face index /* class XeTeXFontInst { protected: public: XeTeXFontInst(float pointSize, int &status); XeTeXFontInst(const char* filename, int index, float pointSize, int &status); virtual ~XeTeXFontInst(); void initialize(const char* pathname, int index, int &status); void *getFontTable(OTTag tableTag) const; void *getFontTable(FT_Sfnt_Tag tableTag) const; hb_font_t *getHbFont() const { return m_hbFont; } void setLayoutDirVertical(bool vertical); bool getLayoutDirVertical() const { return m_vertical; } GlyphID mapCharToGlyph(UChar32 ch) const; GlyphID mapGlyphToIndex(const char* glyphName) const; uint16_t getNumGlyphs() const; void getGlyphBounds(GlyphID glyph, GlyphBBox* bbox); float getGlyphWidth(GlyphID glyph); void getGlyphHeightDepth(GlyphID glyph, float *ht, float* dp); void getGlyphSidebearings(GlyphID glyph, float* lsb, float* rsb); float getGlyphItalCorr(GlyphID glyph); const char* getGlyphName(GlyphID gid, int& nameLen); UChar32 getFirstCharCode(); UChar32 getLastCharCode(); float unitsToPoints(float units) const { return (units * m_pointSize) / (float) m_unitsPerEM; } float pointsToUnits(float points) const { return (points * (float) m_unitsPerEM) / m_pointSize; } }; */ #[no_mangle] pub unsafe extern "C" fn XeTeXFontInst_getGlyphSidebearings( mut self_0: *mut XeTeXFontInst, mut gid: GlyphID, mut lsb: *mut libc::c_float, mut rsb: *mut libc::c_float, ) { let mut width: libc::c_float = XeTeXFontInst_getGlyphWidth(self_0, gid); let mut bbox: GlyphBBox = GlyphBBox { xMin: 0., yMin: 0., xMax: 0., yMax: 0., }; XeTeXFontInst_getGlyphBounds(self_0, gid, &mut bbox); if !lsb.is_null() { *lsb = bbox.xMin } if !rsb.is_null() { *rsb = width - bbox.xMax }; } #[no_mangle] pub unsafe extern "C" fn XeTeXFontInst_getGlyphItalCorr( mut self_0: *mut XeTeXFontInst, mut gid: GlyphID, ) -> libc::c_float { let mut rval: libc::c_float = 0.0f64 as libc::c_float; let mut width: libc::c_float = XeTeXFontInst_getGlyphWidth(self_0, gid); let mut bbox: GlyphBBox = GlyphBBox { xMin: 0., yMin: 0., xMax: 0., yMax: 0., }; XeTeXFontInst_getGlyphBounds(self_0, gid, &mut bbox); if bbox.xMax > width { rval = bbox.xMax - width } return rval; } #[no_mangle] pub unsafe extern "C" fn XeTeXFontInst_mapGlyphToIndex( mut self_0: *const XeTeXFontInst, mut glyphName: *const libc::c_char, ) -> GlyphID { return FT_Get_Name_Index((*self_0).m_ftFace, glyphName as *mut libc::c_char) as GlyphID; } #[no_mangle] pub unsafe extern "C" fn XeTeXFontInst_getGlyphName( mut self_0: *mut XeTeXFontInst, mut gid: GlyphID, mut nameLen: *mut libc::c_int, ) -> *const libc::c_char { if (*(*self_0).m_ftFace).face_flags & 1 << 9i32 != 0 { static mut buffer: [libc::c_char; 256] = [0; 256]; FT_Get_Glyph_Name( (*self_0).m_ftFace, gid as 
FT_UInt, buffer.as_mut_ptr() as FT_Pointer, 256i32 as FT_UInt, ); *nameLen = strlen(buffer.as_mut_ptr()) as libc::c_int; return &mut *buffer.as_mut_ptr().offset(0) as *mut libc::c_char; } else { *nameLen = 0i32; return 0 as *const libc::c_char; }; } #[no_mangle] pub unsafe extern "C" fn XeTeXFontInst_getFirstCharCode(mut self_0: *mut XeTeXFontInst) -> UChar32 { let mut gindex: FT_UInt = 0; return FT_Get_First_Char((*self_0).m_ftFace, &mut gindex) as UChar32; } #[no_mangle] pub unsafe extern "C" fn XeTeXFontInst_getLastCharCode(mut self_0: *mut XeTeXFontInst) -> UChar32 { let mut gindex: FT_UInt = 0; let mut ch: UChar32 = FT_Get_First_Char((*self_0).m_ftFace, &mut gindex) as UChar32; let mut prev: UChar32 = ch; while gindex != 0i32 as libc::c_uint { prev = ch; ch = FT_Get_Next_Char((*self_0).m_ftFace, ch as FT_ULong, &mut gindex) as UChar32 } return prev; } #[no_mangle] //#[inline] pub unsafe extern "C" fn XeTeXFontInst_getHbFont(self_0: *const XeTeXFontInst) -> *mut hb_font_t { (*self_0).m_hbFont } #[no_mangle] //#[inline] pub unsafe extern "C" fn XeTeXFontInst_unitsToPoints( self_0: *const XeTeXFontInst, units: libc::c_float, ) -> libc::c_float { (units * (*self_0).m_pointSize) / ((*self_0).m_unitsPerEM as libc::c_float) } #[no_mangle] //#[inline] pub unsafe extern "C" fn XeTeXFontInst_pointsToUnits( self_0: *const XeTeXFontInst, points: libc::c_float, ) -> libc::c_float { (points * ((*self_0).m_unitsPerEM as libc::c_float)) / (*self_0).m_pointSize }
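// A small standalone check (hypothetical free functions mirroring the two
// conversions defined just above): XeTeXFontInst_unitsToPoints scales by
// pointSize / unitsPerEM, XeTeXFontInst_pointsToUnits is its inverse, and a
// value survives the round trip up to floating-point error.

fn units_to_points(units: f32, point_size: f32, units_per_em: f32) -> f32 {
    (units * point_size) / units_per_em
}

fn points_to_units(points: f32, point_size: f32, units_per_em: f32) -> f32 {
    (points * units_per_em) / point_size
}

fn main() {
    // A 1000-unit advance in a 2048-upem face rendered at 12pt is 5.859375pt
    // (exact in f32, since 12000 / 2048 only divides by a power of two).
    let (upem, pt) = (2048.0_f32, 12.0_f32);
    let pts = units_to_points(1000.0, pt, upem);
    assert_eq!(pts, 5.859375);
    let back = points_to_units(pts, pt, upem);
    assert!((back - 1000.0).abs() < 1e-3);
}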
avg_line_length: 33.494645
max_line_length: 108
alphanum_fraction: 0.628826
hexsha: 7a8b582ebe25b23aaa53983d928033f027d1e109
size: 5,680
use crate::handlers::{ContentHandler, RedirectHandler}; use crate::settings::UrlObject; use rocket::http::ContentType; use rocket::Route; use serde::{Deserialize, Serialize}; macro_rules! static_file { ($name: literal, $type: ident) => { ContentHandler::bytes( ContentType::$type, include_bytes!(concat!("../swagger-ui/", $name)), ) .into_route(concat!("/", $name)) }; } /// Used to control the way models are displayed by default. #[derive(Debug, Clone, Deserialize, Serialize)] #[serde(rename_all = "camelCase")] pub enum DefaultModelRendering { /// Expand the `example` section. Example, /// Expand the `model` section. Model, } /// Used to control the default expansion setting for the operations and tags. #[derive(Debug, Clone, Deserialize, Serialize)] #[serde(rename_all = "camelCase")] pub enum DocExpansion { /// Expands only the tags. List, /// Expands the tags and operations Full, /// Expands nothing None, } /// Used to enable, disable and preconfigure filtering #[derive(Debug, Clone, Deserialize, Serialize)] #[serde(untagged)] pub enum Filter { /// Use this variant to enable or disable filtering. Bool(bool), /// Use this variant to enable filtering, and preconfigure a filter. Str(String), } fn is_zero(num: &u32) -> bool { *num == 0 } /// A struct containing information about where and how the `openapi.json` files are served. #[derive(Debug, Clone, Deserialize, Serialize)] #[serde(rename_all = "camelCase")] pub struct SwaggerUIConfig { /// The url to a single `openapi.json` file that is showed when the web ui is first opened. #[serde(default, skip_serializing_if = "String::is_empty")] pub url: String, /// A list of named urls that contain all the `openapi.json` files that you want to display in /// your web ui. If this field is populated, the `url` field is not used. #[serde(default, skip_serializing_if = "Vec::is_empty")] pub urls: Vec<UrlObject>, // display options: /// If set to true, enables deep linking for tags and operations. See the /// [Deep Linking documentation](https://github.com/swagger-api/swagger-ui/blob/master/docs/usage/deep-linking.md) /// for more information. /// Default: `false`. pub deep_linking: bool, /// Controls the display of operationId in operations list. /// Default: `false`. pub display_operation_id: bool, /// The default expansion depth for models (set to -1 completely hide the models). /// Default: `1`. pub default_models_expand_depth: i32, /// The default expansion depth for the model on the model-example section. /// Default: `1`. pub default_model_expand_depth: i32, /// Controls how the model is shown when the API is first rendered. (The user can always switch /// the rendering for a given model by clicking the 'Model' and 'Example Value' links.) /// Default: `DefaultModelRendering::Example`. pub default_model_rendering: DefaultModelRendering, /// Controls the display of the request duration (in milliseconds) for "Try it out" requests. /// Default: `false`. pub display_request_duration: bool, /// Controls the default expansion setting for the operations and tags. /// Default: `DocExpansion::List`. pub doc_expansion: DocExpansion, /// If set, enables filtering. The top bar will show an edit box that you can use to filter the /// tagged operations that are shown. Filtering is case sensitive matching the filter expression /// anywhere inside the tag. /// Default: `Filter(false)`. pub filter: Filter, /// If set, limits the number of tagged operations displayed to at most this many. The default /// is to show all operations. 
/// Default: `None` (displays all tagged operations). #[serde(default, skip_serializing_if = "is_zero")] pub max_displayed_tags: u32, /// Controls the display of vendor extension (`x-`) fields and values for Operations, /// Parameters, and Schema. /// Default: `false`. pub show_extensions: bool, /// Controls the display of extensions (`pattern`, `maxLength`, `minLength`, `maximum`, /// `minimum`) fields and values for Parameters. /// Default: `false`. pub show_common_extensions: bool, } impl Default for SwaggerUIConfig { fn default() -> Self { Self { url: String::new(), urls: vec![], deep_linking: false, display_operation_id: false, default_model_expand_depth: 1, default_model_rendering: DefaultModelRendering::Example, default_models_expand_depth: 1, display_request_duration: false, doc_expansion: DocExpansion::List, filter: Filter::Bool(false), max_displayed_tags: 0, show_extensions: false, show_common_extensions: false, } } } /// Transform the provided `SwaggerUIConfig` into a list of `Route`s that serve the swagger web ui. #[must_use] pub fn make_swagger_ui(config: &SwaggerUIConfig) -> impl Into<Vec<Route>> { let config_handler = ContentHandler::json(config); vec![ RedirectHandler::to("index.html").into_route("/"), // Add custom config file config_handler.into_route("/swagger-ui-config.json"), // Add other static files static_file!("index.html", HTML), static_file!("oauth2-redirect.html", HTML), static_file!("swagger-ui-standalone-preset.js", JavaScript), static_file!("swagger-ui-bundle.js", JavaScript), static_file!("swagger-ui.css", CSS), ] }
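// A minimal standalone sketch (assuming `serde` with the derive feature and
// `serde_json` are available) of how the untagged `Filter` enum above hits
// the wire: serde writes just the variant's contents with no discriminant,
// so `Bool(false)` serializes as a bare JSON boolean and `Str(..)` as a bare
// JSON string, matching the boolean-or-string shape documented on the
// `filter` field above.

use serde::Serialize;

#[derive(Serialize)]
#[serde(untagged)]
enum Filter {
    /// Enable or disable filtering.
    Bool(bool),
    /// Enable filtering and preconfigure the filter expression.
    Str(String),
}

fn main() {
    assert_eq!(serde_json::to_string(&Filter::Bool(false)).unwrap(), "false");
    assert_eq!(
        serde_json::to_string(&Filter::Str("pet".into())).unwrap(),
        "\"pet\""
    );
}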
avg_line_length: 39.172414
max_line_length: 118
alphanum_fraction: 0.666197
hexsha: 8a53e5399413478272076211d824a7b320e57185
size: 21,555
pub type c_char = u8;
pub type wchar_t = u32;

pub type greg_t = i32;
pub type mcontext_t = sigcontext;

s! {
    pub struct sigcontext {
        pub trap_no: ::c_ulong,
        pub error_code: ::c_ulong,
        pub oldmask: ::c_ulong,
        pub arm_r0: ::c_ulong,
        pub arm_r1: ::c_ulong,
        pub arm_r2: ::c_ulong,
        pub arm_r3: ::c_ulong,
        pub arm_r4: ::c_ulong,
        pub arm_r5: ::c_ulong,
        pub arm_r6: ::c_ulong,
        pub arm_r7: ::c_ulong,
        pub arm_r8: ::c_ulong,
        pub arm_r9: ::c_ulong,
        pub arm_r10: ::c_ulong,
        pub arm_fp: ::c_ulong,
        pub arm_ip: ::c_ulong,
        pub arm_sp: ::c_ulong,
        pub arm_lr: ::c_ulong,
        pub arm_pc: ::c_ulong,
        pub arm_cpsr: ::c_ulong,
        pub fault_address: ::c_ulong,
    }
}

cfg_if! {
    if #[cfg(libc_union)] {
        s_no_extra_traits! {
            pub struct __c_anonymous_uc_sigmask_with_padding {
                pub uc_sigmask: ::sigset_t,
                /* Android has a wrong (smaller) sigset_t on x86. */
                __padding_rt_sigset: u32,
            }

            pub union __c_anonymous_uc_sigmask {
                uc_sigmask: __c_anonymous_uc_sigmask_with_padding,
                uc_sigmask64: ::sigset64_t,
            }

            pub struct ucontext_t {
                pub uc_flags: ::c_ulong,
                pub uc_link: *mut ucontext_t,
                pub uc_stack: ::stack_t,
                pub uc_mcontext: mcontext_t,
                pub uc_sigmask__c_anonymous_union: __c_anonymous_uc_sigmask,
                /* The kernel adds extra padding after uc_sigmask to match
                 * glibc sigset_t on ARM. */
                __padding: [c_char; 120],
                __align: [::c_longlong; 0],
                uc_regspace: [::c_ulong; 128],
            }
        }

        cfg_if! {
            if #[cfg(feature = "extra_traits")] {
                impl PartialEq for __c_anonymous_uc_sigmask_with_padding {
                    fn eq(
                        &self,
                        other: &__c_anonymous_uc_sigmask_with_padding
                    ) -> bool {
                        self.uc_sigmask == other.uc_sigmask
                        // Ignore padding
                    }
                }
                impl Eq for __c_anonymous_uc_sigmask_with_padding {}
                impl ::fmt::Debug for __c_anonymous_uc_sigmask_with_padding {
                    fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result {
                        f.debug_struct("uc_sigmask_with_padding")
                            .field("uc_sigmask_with_padding", &self.uc_sigmask)
                            // Ignore padding
                            .finish()
                    }
                }
                impl ::hash::Hash for __c_anonymous_uc_sigmask_with_padding {
                    fn hash<H: ::hash::Hasher>(&self, state: &mut H) {
                        self.uc_sigmask.hash(state)
                        // Ignore padding
                    }
                }

                impl PartialEq for __c_anonymous_uc_sigmask {
                    fn eq(&self, other: &__c_anonymous_uc_sigmask) -> bool {
                        unsafe { self.uc_sigmask == other.uc_sigmask }
                    }
                }
                impl Eq for __c_anonymous_uc_sigmask {}
                impl ::fmt::Debug for __c_anonymous_uc_sigmask {
                    fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result {
                        f.debug_struct("uc_sigmask")
                            .field("uc_sigmask", unsafe { &self.uc_sigmask })
                            .finish()
                    }
                }
                impl ::hash::Hash for __c_anonymous_uc_sigmask {
                    fn hash<H: ::hash::Hasher>(&self, state: &mut H) {
                        unsafe { self.uc_sigmask.hash(state) }
                    }
                }

                impl PartialEq for ucontext_t {
                    fn eq(&self, other: &Self) -> bool {
                        self.uc_flags == other.uc_flags
                            && self.uc_link == other.uc_link
                            && self.uc_stack == other.uc_stack
                            && self.uc_mcontext == other.uc_mcontext
                            && self.uc_sigmask__c_anonymous_union
                                == other.uc_sigmask__c_anonymous_union
                            && &self.uc_regspace[..] == &other.uc_regspace[..]
                        // Ignore padding field
                    }
                }
                impl Eq for ucontext_t {}
                impl ::fmt::Debug for ucontext_t {
                    fn fmt(&self, f: &mut ::fmt::Formatter) -> ::fmt::Result {
                        f.debug_struct("ucontext_t")
                            .field("uc_flags", &self.uc_flags)
                            .field("uc_link", &self.uc_link)
                            .field("uc_stack", &self.uc_stack)
                            .field("uc_mcontext", &self.uc_mcontext)
                            .field(
                                "uc_sigmask__c_anonymous_union",
                                &self.uc_sigmask__c_anonymous_union
                            )
                            .field("uc_regspace", &&self.uc_regspace[..])
                            // Ignore padding field
                            .finish()
                    }
                }
                impl ::hash::Hash for ucontext_t {
                    fn hash<H: ::hash::Hasher>(&self, state: &mut H) {
                        self.uc_flags.hash(state);
                        self.uc_link.hash(state);
                        self.uc_stack.hash(state);
                        self.uc_mcontext.hash(state);
                        self.uc_sigmask__c_anonymous_union.hash(state);
                        self.uc_regspace[..].hash(state);
                        // Ignore padding field
                    }
                }
            }
        }
    }
}

pub const O_DIRECT: ::c_int = 0x10000;
pub const O_DIRECTORY: ::c_int = 0x4000;
pub const O_NOFOLLOW: ::c_int = 0x8000;
pub const O_LARGEFILE: ::c_int = 0o400000;

pub const SYS_restart_syscall: ::c_long = 0;
pub const SYS_exit: ::c_long = 1;
pub const SYS_fork: ::c_long = 2;
pub const SYS_read: ::c_long = 3;
pub const SYS_write: ::c_long = 4;
pub const SYS_open: ::c_long = 5;
pub const SYS_close: ::c_long = 6;
pub const SYS_creat: ::c_long = 8;
pub const SYS_link: ::c_long = 9;
pub const SYS_unlink: ::c_long = 10;
pub const SYS_execve: ::c_long = 11;
pub const SYS_chdir: ::c_long = 12;
pub const SYS_mknod: ::c_long = 14;
pub const SYS_chmod: ::c_long = 15;
pub const SYS_lchown: ::c_long = 16;
pub const SYS_lseek: ::c_long = 19;
pub const SYS_getpid: ::c_long = 20;
pub const SYS_mount: ::c_long = 21;
pub const SYS_setuid: ::c_long = 23;
pub const SYS_getuid: ::c_long = 24;
pub const SYS_ptrace: ::c_long = 26;
pub const SYS_pause: ::c_long = 29;
pub const SYS_access: ::c_long = 33;
pub const SYS_nice: ::c_long = 34;
pub const SYS_sync: ::c_long = 36;
pub const SYS_kill: ::c_long = 37;
pub const SYS_rename: ::c_long = 38;
pub const SYS_mkdir: ::c_long = 39;
pub const SYS_rmdir: ::c_long = 40;
pub const SYS_dup: ::c_long = 41;
pub const SYS_pipe: ::c_long = 42;
pub const SYS_times: ::c_long = 43;
pub const SYS_brk: ::c_long = 45;
pub const SYS_setgid: ::c_long = 46;
pub const SYS_getgid: ::c_long = 47;
pub const SYS_geteuid: ::c_long = 49;
pub const SYS_getegid: ::c_long = 50;
pub const SYS_acct: ::c_long = 51;
pub const SYS_umount2: ::c_long = 52;
pub const SYS_ioctl: ::c_long = 54;
pub const SYS_fcntl: ::c_long = 55;
pub const SYS_setpgid: ::c_long = 57;
pub const SYS_umask: ::c_long = 60;
pub const SYS_chroot: ::c_long = 61;
pub const SYS_ustat: ::c_long = 62;
pub const SYS_dup2: ::c_long = 63;
pub const SYS_getppid: ::c_long = 64;
pub const SYS_getpgrp: ::c_long = 65;
pub const SYS_setsid: ::c_long = 66;
pub const SYS_sigaction: ::c_long = 67;
pub const SYS_setreuid: ::c_long = 70;
pub const SYS_setregid: ::c_long = 71;
pub const SYS_sigsuspend: ::c_long = 72;
pub const SYS_sigpending: ::c_long = 73;
pub const SYS_sethostname: ::c_long = 74;
pub const SYS_setrlimit: ::c_long = 75;
pub const SYS_getrusage: ::c_long = 77;
pub const SYS_gettimeofday: ::c_long = 78;
pub const SYS_settimeofday: ::c_long = 79;
pub const SYS_getgroups: ::c_long = 80;
pub const SYS_setgroups: ::c_long = 81;
pub const SYS_symlink: ::c_long = 83;
pub const SYS_readlink: ::c_long = 85;
pub const SYS_uselib: ::c_long = 86;
pub const SYS_swapon: ::c_long = 87;
pub const SYS_reboot: ::c_long = 88;
pub const SYS_munmap: ::c_long = 91;
pub const SYS_truncate: ::c_long = 92;
pub const SYS_ftruncate: ::c_long = 93;
pub const SYS_fchmod: ::c_long = 94;
pub const SYS_fchown: ::c_long = 95;
pub const SYS_getpriority: ::c_long = 96;
pub const SYS_setpriority: ::c_long = 97;
pub const SYS_statfs: ::c_long = 99;
pub const SYS_fstatfs: ::c_long = 100;
pub const SYS_syslog: ::c_long = 103;
pub const SYS_setitimer: ::c_long = 104;
pub const SYS_getitimer: ::c_long = 105;
pub const SYS_stat: ::c_long = 106;
pub const SYS_lstat: ::c_long = 107;
pub const SYS_fstat: ::c_long = 108;
pub const SYS_vhangup: ::c_long = 111;
pub const SYS_wait4: ::c_long = 114;
pub const SYS_swapoff: ::c_long = 115;
pub const SYS_sysinfo: ::c_long = 116;
pub const SYS_fsync: ::c_long = 118;
pub const SYS_sigreturn: ::c_long = 119;
pub const SYS_clone: ::c_long = 120;
pub const SYS_setdomainname: ::c_long = 121;
pub const SYS_uname: ::c_long = 122;
pub const SYS_adjtimex: ::c_long = 124;
pub const SYS_mprotect: ::c_long = 125;
pub const SYS_sigprocmask: ::c_long = 126;
pub const SYS_init_module: ::c_long = 128;
pub const SYS_delete_module: ::c_long = 129;
pub const SYS_quotactl: ::c_long = 131;
pub const SYS_getpgid: ::c_long = 132;
pub const SYS_fchdir: ::c_long = 133;
pub const SYS_bdflush: ::c_long = 134;
pub const SYS_sysfs: ::c_long = 135;
pub const SYS_personality: ::c_long = 136;
pub const SYS_setfsuid: ::c_long = 138;
pub const SYS_setfsgid: ::c_long = 139;
pub const SYS_getdents: ::c_long = 141;
pub const SYS_flock: ::c_long = 143;
pub const SYS_msync: ::c_long = 144;
pub const SYS_readv: ::c_long = 145;
pub const SYS_writev: ::c_long = 146;
pub const SYS_getsid: ::c_long = 147;
pub const SYS_fdatasync: ::c_long = 148;
pub const SYS_mlock: ::c_long = 150;
pub const SYS_munlock: ::c_long = 151;
pub const SYS_mlockall: ::c_long = 152;
pub const SYS_munlockall: ::c_long = 153;
pub const SYS_sched_setparam: ::c_long = 154;
pub const SYS_sched_getparam: ::c_long = 155;
pub const SYS_sched_setscheduler: ::c_long = 156;
pub const SYS_sched_getscheduler: ::c_long = 157;
pub const SYS_sched_yield: ::c_long = 158;
pub const SYS_sched_get_priority_max: ::c_long = 159;
pub const SYS_sched_get_priority_min: ::c_long = 160;
pub const SYS_sched_rr_get_interval: ::c_long = 161;
pub const SYS_nanosleep: ::c_long = 162;
pub const SYS_mremap: ::c_long = 163;
pub const SYS_setresuid: ::c_long = 164;
pub const SYS_getresuid: ::c_long = 165;
pub const SYS_poll: ::c_long = 168;
pub const SYS_nfsservctl: ::c_long = 169;
pub const SYS_setresgid: ::c_long = 170;
pub const SYS_getresgid: ::c_long = 171;
pub const SYS_prctl: ::c_long = 172;
pub const SYS_rt_sigreturn: ::c_long = 173;
pub const SYS_rt_sigaction: ::c_long = 174;
pub const SYS_rt_sigprocmask: ::c_long = 175;
pub const SYS_rt_sigpending: ::c_long = 176;
pub const SYS_rt_sigtimedwait: ::c_long = 177;
pub const SYS_rt_sigqueueinfo: ::c_long = 178;
pub const SYS_rt_sigsuspend: ::c_long = 179;
pub const SYS_pread64: ::c_long = 180;
pub const SYS_pwrite64: ::c_long = 181;
pub const SYS_chown: ::c_long = 182;
pub const SYS_getcwd: ::c_long = 183;
pub const SYS_capget: ::c_long = 184;
pub const SYS_capset: ::c_long = 185;
pub const SYS_sigaltstack: ::c_long = 186;
pub const SYS_sendfile: ::c_long = 187;
pub const SYS_vfork: ::c_long = 190;
pub const SYS_ugetrlimit: ::c_long = 191;
pub const SYS_mmap2: ::c_long = 192;
pub const SYS_truncate64: ::c_long = 193;
pub const SYS_ftruncate64: ::c_long = 194;
pub const SYS_stat64: ::c_long = 195;
pub const SYS_lstat64: ::c_long = 196;
pub const SYS_fstat64: ::c_long = 197;
pub const SYS_lchown32: ::c_long = 198;
pub const SYS_getuid32: ::c_long = 199;
pub const SYS_getgid32: ::c_long = 200;
pub const SYS_geteuid32: ::c_long = 201;
pub const SYS_getegid32: ::c_long = 202;
pub const SYS_setreuid32: ::c_long = 203;
pub const SYS_setregid32: ::c_long = 204;
pub const SYS_getgroups32: ::c_long = 205;
pub const SYS_setgroups32: ::c_long = 206;
pub const SYS_fchown32: ::c_long = 207;
pub const SYS_setresuid32: ::c_long = 208;
pub const SYS_getresuid32: ::c_long = 209;
pub const SYS_setresgid32: ::c_long = 210;
pub const SYS_getresgid32: ::c_long = 211;
pub const SYS_chown32: ::c_long = 212;
pub const SYS_setuid32: ::c_long = 213;
pub const SYS_setgid32: ::c_long = 214;
pub const SYS_setfsuid32: ::c_long = 215;
pub const SYS_setfsgid32: ::c_long = 216;
pub const SYS_getdents64: ::c_long = 217;
pub const SYS_pivot_root: ::c_long = 218;
pub const SYS_mincore: ::c_long = 219;
pub const SYS_madvise: ::c_long = 220;
pub const SYS_fcntl64: ::c_long = 221;
pub const SYS_gettid: ::c_long = 224;
pub const SYS_readahead: ::c_long = 225;
pub const SYS_setxattr: ::c_long = 226;
pub const SYS_lsetxattr: ::c_long = 227;
pub const SYS_fsetxattr: ::c_long = 228;
pub const SYS_getxattr: ::c_long = 229;
pub const SYS_lgetxattr: ::c_long = 230;
pub const SYS_fgetxattr: ::c_long = 231;
pub const SYS_listxattr: ::c_long = 232;
pub const SYS_llistxattr: ::c_long = 233;
pub const SYS_flistxattr: ::c_long = 234;
pub const SYS_removexattr: ::c_long = 235;
pub const SYS_lremovexattr: ::c_long = 236;
pub const SYS_fremovexattr: ::c_long = 237;
pub const SYS_tkill: ::c_long = 238;
pub const SYS_sendfile64: ::c_long = 239;
pub const SYS_futex: ::c_long = 240;
pub const SYS_sched_setaffinity: ::c_long = 241;
pub const SYS_sched_getaffinity: ::c_long = 242;
pub const SYS_io_setup: ::c_long = 243;
pub const SYS_io_destroy: ::c_long = 244;
pub const SYS_io_getevents: ::c_long = 245;
pub const SYS_io_submit: ::c_long = 246;
pub const SYS_io_cancel: ::c_long = 247;
pub const SYS_exit_group: ::c_long = 248;
pub const SYS_lookup_dcookie: ::c_long = 249;
pub const SYS_epoll_create: ::c_long = 250;
pub const SYS_epoll_ctl: ::c_long = 251;
pub const SYS_epoll_wait: ::c_long = 252;
pub const SYS_remap_file_pages: ::c_long = 253;
pub const SYS_set_tid_address: ::c_long = 256;
pub const SYS_timer_create: ::c_long = 257;
pub const SYS_timer_settime: ::c_long = 258;
pub const SYS_timer_gettime: ::c_long = 259;
pub const SYS_timer_getoverrun: ::c_long = 260;
pub const SYS_timer_delete: ::c_long = 261;
pub const SYS_clock_settime: ::c_long = 262;
pub const SYS_clock_gettime: ::c_long = 263;
pub const SYS_clock_getres: ::c_long = 264;
pub const SYS_clock_nanosleep: ::c_long = 265;
pub const SYS_statfs64: ::c_long = 266;
pub const SYS_fstatfs64: ::c_long = 267;
pub const SYS_tgkill: ::c_long = 268;
pub const SYS_utimes: ::c_long = 269;
pub const SYS_arm_fadvise64_64: ::c_long = 270;
pub const SYS_pciconfig_iobase: ::c_long = 271;
pub const SYS_pciconfig_read: ::c_long = 272;
pub const SYS_pciconfig_write: ::c_long = 273;
pub const SYS_mq_open: ::c_long = 274;
pub const SYS_mq_unlink: ::c_long = 275;
pub const SYS_mq_timedsend: ::c_long = 276;
pub const SYS_mq_timedreceive: ::c_long = 277;
pub const SYS_mq_notify: ::c_long = 278;
pub const SYS_mq_getsetattr: ::c_long = 279;
pub const SYS_waitid: ::c_long = 280;
pub const SYS_socket: ::c_long = 281;
pub const SYS_bind: ::c_long = 282;
pub const SYS_connect: ::c_long = 283;
pub const SYS_listen: ::c_long = 284;
pub const SYS_accept: ::c_long = 285;
pub const SYS_getsockname: ::c_long = 286;
pub const SYS_getpeername: ::c_long = 287;
pub const SYS_socketpair: ::c_long = 288;
pub const SYS_send: ::c_long = 289;
pub const SYS_sendto: ::c_long = 290;
pub const SYS_recv: ::c_long = 291;
pub const SYS_recvfrom: ::c_long = 292;
pub const SYS_shutdown: ::c_long = 293;
pub const SYS_setsockopt: ::c_long = 294;
pub const SYS_getsockopt: ::c_long = 295;
pub const SYS_sendmsg: ::c_long = 296;
pub const SYS_recvmsg: ::c_long = 297;
pub const SYS_semop: ::c_long = 298;
pub const SYS_semget: ::c_long = 299;
pub const SYS_semctl: ::c_long = 300;
pub const SYS_msgsnd: ::c_long = 301;
pub const SYS_msgrcv: ::c_long = 302;
pub const SYS_msgget: ::c_long = 303;
pub const SYS_msgctl: ::c_long = 304;
pub const SYS_shmat: ::c_long = 305;
pub const SYS_shmdt: ::c_long = 306;
pub const SYS_shmget: ::c_long = 307;
pub const SYS_shmctl: ::c_long = 308;
pub const SYS_add_key: ::c_long = 309;
pub const SYS_request_key: ::c_long = 310;
pub const SYS_keyctl: ::c_long = 311;
pub const SYS_semtimedop: ::c_long = 312;
pub const SYS_vserver: ::c_long = 313;
pub const SYS_ioprio_set: ::c_long = 314;
pub const SYS_ioprio_get: ::c_long = 315;
pub const SYS_inotify_init: ::c_long = 316;
pub const SYS_inotify_add_watch: ::c_long = 317;
pub const SYS_inotify_rm_watch: ::c_long = 318;
pub const SYS_mbind: ::c_long = 319;
pub const SYS_get_mempolicy: ::c_long = 320;
pub const SYS_set_mempolicy: ::c_long = 321;
pub const SYS_openat: ::c_long = 322;
pub const SYS_mkdirat: ::c_long = 323;
pub const SYS_mknodat: ::c_long = 324;
pub const SYS_fchownat: ::c_long = 325;
pub const SYS_futimesat: ::c_long = 326;
pub const SYS_fstatat64: ::c_long = 327;
pub const SYS_unlinkat: ::c_long = 328;
pub const SYS_renameat: ::c_long = 329;
pub const SYS_linkat: ::c_long = 330;
pub const SYS_symlinkat: ::c_long = 331;
pub const SYS_readlinkat: ::c_long = 332;
pub const SYS_fchmodat: ::c_long = 333;
pub const SYS_faccessat: ::c_long = 334;
pub const SYS_pselect6: ::c_long = 335;
pub const SYS_ppoll: ::c_long = 336;
pub const SYS_unshare: ::c_long = 337;
pub const SYS_set_robust_list: ::c_long = 338;
pub const SYS_get_robust_list: ::c_long = 339;
pub const SYS_splice: ::c_long = 340;
pub const SYS_arm_sync_file_range: ::c_long = 341;
pub const SYS_tee: ::c_long = 342;
pub const SYS_vmsplice: ::c_long = 343;
pub const SYS_move_pages: ::c_long = 344;
pub const SYS_getcpu: ::c_long = 345;
pub const SYS_epoll_pwait: ::c_long = 346;
pub const SYS_kexec_load: ::c_long = 347;
pub const SYS_utimensat: ::c_long = 348;
pub const SYS_signalfd: ::c_long = 349;
pub const SYS_timerfd_create: ::c_long = 350;
pub const SYS_eventfd: ::c_long = 351;
pub const SYS_fallocate: ::c_long = 352;
pub const SYS_timerfd_settime: ::c_long = 353;
pub const SYS_timerfd_gettime: ::c_long = 354;
pub const SYS_signalfd4: ::c_long = 355;
pub const SYS_eventfd2: ::c_long = 356;
pub const SYS_epoll_create1: ::c_long = 357;
pub const SYS_dup3: ::c_long = 358;
pub const SYS_pipe2: ::c_long = 359;
pub const SYS_inotify_init1: ::c_long = 360;
pub const SYS_preadv: ::c_long = 361;
pub const SYS_pwritev: ::c_long = 362;
pub const SYS_rt_tgsigqueueinfo: ::c_long = 363;
pub const SYS_perf_event_open: ::c_long = 364;
pub const SYS_recvmmsg: ::c_long = 365;
pub const SYS_accept4: ::c_long = 366;
pub const SYS_fanotify_init: ::c_long = 367;
pub const SYS_fanotify_mark: ::c_long = 368;
pub const SYS_prlimit64: ::c_long = 369;
pub const SYS_name_to_handle_at: ::c_long = 370;
pub const SYS_open_by_handle_at: ::c_long = 371;
pub const SYS_clock_adjtime: ::c_long = 372;
pub const SYS_syncfs: ::c_long = 373;
pub const SYS_sendmmsg: ::c_long = 374;
pub const SYS_setns: ::c_long = 375;
pub const SYS_process_vm_readv: ::c_long = 376;
pub const SYS_process_vm_writev: ::c_long = 377;
pub const SYS_kcmp: ::c_long = 378;
pub const SYS_finit_module: ::c_long = 379;
pub const SYS_sched_setattr: ::c_long = 380;
pub const SYS_sched_getattr: ::c_long = 381;
pub const SYS_renameat2: ::c_long = 382;
pub const SYS_seccomp: ::c_long = 383;
pub const SYS_getrandom: ::c_long = 384;
pub const SYS_memfd_create: ::c_long = 385;
pub const SYS_bpf: ::c_long = 386;
pub const SYS_execveat: ::c_long = 387;
pub const SYS_userfaultfd: ::c_long = 388;
pub const SYS_membarrier: ::c_long = 389;
pub const SYS_mlock2: ::c_long = 390;
pub const SYS_copy_file_range: ::c_long = 391;
pub const SYS_preadv2: ::c_long = 392;
pub const SYS_pwritev2: ::c_long = 393;
pub const SYS_pkey_mprotect: ::c_long = 394;
pub const SYS_pkey_alloc: ::c_long = 395;
pub const SYS_pkey_free: ::c_long = 396;

// offsets in mcontext_t.gregs from sys/ucontext.h
pub const REG_R0: ::c_int = 0;
pub const REG_R1: ::c_int = 1;
pub const REG_R2: ::c_int = 2;
pub const REG_R3: ::c_int = 3;
pub const REG_R4: ::c_int = 4;
pub const REG_R5: ::c_int = 5;
pub const REG_R6: ::c_int = 6;
pub const REG_R7: ::c_int = 7;
pub const REG_R8: ::c_int = 8;
pub const REG_R9: ::c_int = 9;
pub const REG_R10: ::c_int = 10;
pub const REG_R11: ::c_int = 11;
pub const REG_R12: ::c_int = 12;
pub const REG_R13: ::c_int = 13;
pub const REG_R14: ::c_int = 14;
pub const REG_R15: ::c_int = 15;

pub const NGREG: ::c_int = 18;

f! {
    // Sadly, on Android before 5.0 (API level 21) the accept4 syscall is not
    // exposed by the libc. As a workaround, we implement it through `syscall`
    // directly. This workaround can be removed if the minimum version of
    // Android is bumped. When the workaround is removed, `accept4` can be
    // moved back to `linux_like/mod.rs`.
    pub fn accept4(
        fd: ::c_int,
        addr: *mut ::sockaddr,
        len: *mut ::socklen_t,
        flg: ::c_int
    ) -> ::c_int {
        ::syscall(SYS_accept4, fd, addr, len, flg) as ::c_int
    }
}
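
// Usage sketch (illustrative only): accepting a connection with close-on-exec
// semantics through the wrapper above. `listen_fd` stands in for an already
// listening socket descriptor and is an assumption of this example.
//
// let mut addr: ::sockaddr = unsafe { core::mem::zeroed() };
// let mut len = core::mem::size_of::<::sockaddr>() as ::socklen_t;
// let conn = unsafe { accept4(listen_fd, &mut addr, &mut len, ::SOCK_CLOEXEC) };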
39.916667
79
0.644073
76c1558e0213c9d0f86a68fd6586b51b0deb7bc2
2,558
use crate::{auth::Jwt, db::Db, error::Error, models};
use api::{
    auth::AuthWrapper,
    routes::friends::{Friend, FriendRequestsResponse, FriendsResponse},
};
use warp::{Rejection, Reply};

/// POST /api/friends/{username} [+Auth]
pub async fn add(username: String, db: Db, jwt: Jwt) -> Result<impl Reply, Rejection> {
    models::FriendRequest::insert(&db, jwt.id_user(), &username).await?;

    Ok(warp::reply::json(&AuthWrapper {
        token: Some(jwt.token()?),
        response: (),
    }))
}

/// DELETE /api/friends/{username} [+Auth]
pub async fn remove(username: String, db: Db, jwt: Jwt) -> Result<impl Reply, Rejection> {
    models::FriendRequest::delete(&db, jwt.id_user(), &username).await?;

    Ok(warp::reply::json(&AuthWrapper {
        token: Some(jwt.token()?),
        response: (),
    }))
}

/// GET /api/friends [+Auth]
pub async fn list(db: Db, jwt: Jwt) -> Result<impl Reply, Rejection> {
    let friends = sqlx::query_file!("sql/friends/list_friends.sql", jwt.id_user())
        .fetch_all(&db)
        .await
        .map_err(Error::Sqlx)?
        .into_iter()
        .filter_map(|row| {
            row.since.map(|since| Friend {
                username: row.username,
                since,
            })
        })
        .collect();

    Ok(warp::reply::json(&AuthWrapper {
        token: Some(jwt.token()?),
        response: FriendsResponse { friends },
    }))
}

/// GET /api/friends/requests/incoming [+Auth]
pub async fn list_incoming(db: Db, jwt: Jwt) -> Result<impl Reply, Rejection> {
    let requests = sqlx::query_file!("sql/friends/list_incoming.sql", jwt.id_user())
        .fetch_all(&db)
        .await
        .map_err(Error::Sqlx)?
        .into_iter()
        .map(|row| Friend {
            username: row.username,
            since: row.date_sent,
        })
        .collect();

    Ok(warp::reply::json(&AuthWrapper {
        token: Some(jwt.token()?),
        response: FriendRequestsResponse { requests },
    }))
}

/// GET /api/friends/requests/outgoing [+Auth]
pub async fn list_outgoing(db: Db, jwt: Jwt) -> Result<impl Reply, Rejection> {
    let requests = sqlx::query_file!("sql/friends/list_outgoing.sql", jwt.id_user())
        .fetch_all(&db)
        .await
        .map_err(Error::Sqlx)?
        .into_iter()
        .map(|row| Friend {
            username: row.username,
            since: row.date_sent,
        })
        .collect();

    Ok(warp::reply::json(&AuthWrapper {
        token: Some(jwt.token()?),
        response: FriendRequestsResponse { requests },
    }))
}
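
// Usage sketch: one way these handlers might be wired into warp routes.
// `with_db` and `with_jwt` are hypothetical extractor filters (defined
// elsewhere in such an app, not in this file).
//
// let add_friend = warp::path!("api" / "friends" / String)
//     .and(warp::post())
//     .and(with_db(db.clone()))
//     .and(with_jwt())
//     .and_then(add);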
29.744186
90
0.576231
ac4997371308eb8ef62fcb21f27088290950b4f8
1,756
use crate::{alloc::Allocator, collections::TryReserveError};

/// Extend a collection "fallibly" with the contents of an iterator.
pub trait TryExtend<A> {
    type Err;

    /// Extends a collection "fallibly" with the contents of an iterator.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// use alloc_wg::{iter::TryExtend, vec};
    ///
    /// // You can extend a Vec<char> with some chars:
    /// let mut message = vec!['a', 'b', 'c'];
    ///
    /// message.try_extend(['d', 'e', 'f'].iter())?;
    ///
    /// assert_eq!(vec!['a', 'b', 'c', 'd', 'e', 'f'], message);
    /// # Ok::<(), alloc_wg::collections::TryReserveError>(())
    /// ```
    fn try_extend<T: IntoIterator<Item = A>>(&mut self, iter: T) -> Result<(), Self::Err>;
}

pub trait FromIteratorIn<T, A: Allocator> {
    fn from_iter_in<I: IntoIterator<Item = T>>(iter: I, allocator: A) -> Self;

    fn try_from_iter_in<I: IntoIterator<Item = T>>(
        iter: I,
        allocator: A,
    ) -> Result<Self, TryReserveError>
    where
        Self: Sized;
}

pub trait IteratorExt: Iterator + Sized {
    #[inline]
    #[must_use = "if you really need to exhaust the iterator, consider `.for_each(drop)` instead"]
    fn collect_in<T: FromIteratorIn<Self::Item, A>, A: Allocator>(self, allocator: A) -> T {
        FromIteratorIn::from_iter_in(self, allocator)
    }

    #[inline]
    #[must_use = "if you really need to exhaust the iterator, consider `.for_each(drop)` instead"]
    fn try_collect_in<T: FromIteratorIn<Self::Item, A>, A: Allocator>(
        self,
        allocator: A,
    ) -> Result<T, TryReserveError> {
        FromIteratorIn::try_from_iter_in(self, allocator)
    }
}

impl<T> IteratorExt for T where T: Iterator {}
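
// Usage sketch: fallibly collecting into an allocator-aware vector. The
// `Global` allocator and the allocator-aware `Vec` are assumed to come from
// elsewhere in this crate; the values are purely illustrative.
//
// use alloc_wg::{alloc::Global, iter::IteratorExt, vec::Vec};
//
// let doubled: Vec<u32> = (0..4u32).map(|x| x * 2).try_collect_in(Global)?;
// assert_eq!(&doubled[..], &[0, 2, 4, 6]);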
31.357143
98
0.589977
917373a83077651198ad15d761b551401bee6168
11,752
//! Defines the versioning and logging functions.

use crate::{
    error::{
        CoreCommonError, CoreError, CoreExecutionError, CoreInstantiationError, CoreLoadError,
        CoreValidationError, WasmEdgeError,
    },
    ffi::{self, WasmEdge_Result, WasmEdge_ResultGetCode, WasmEdge_ResultOK},
    WasmEdgeResult,
};
use std::{
    ffi::{CStr, CString},
    path::Path,
};

#[cfg(unix)]
pub(crate) fn path_to_cstring(path: &Path) -> WasmEdgeResult<CString> {
    use std::os::unix::ffi::OsStrExt;
    Ok(CString::new(path.as_os_str().as_bytes())?)
}

#[cfg(windows)]
pub(crate) fn path_to_cstring(path: &Path) -> WasmEdgeResult<CString> {
    match path.to_str() {
        Some(s) => Ok(CString::new(s)?),
        None => {
            let message = format!("Couldn't convert path '{}' to UTF-8", path.display());
            Err(message.into())
        }
    }
}

pub(crate) fn string_to_c_char(arg: impl AsRef<str>) -> *const std::os::raw::c_char {
    let s = CString::new(arg.as_ref()).unwrap();
    // Leak the `CString` so the returned pointer stays valid. Returning
    // `s.as_ptr()` here would hand out a pointer into a temporary that is
    // freed when `s` is dropped at the end of this function.
    s.into_raw()
}

/// Full version.
///
/// ## Example
///
/// ```rust
/// // in `wasmedge.rs`
/// pub const WASMEDGE_VERSION: &'static [u8; 22usize] = b"0.8.2-rc.5-1-g809c746\0";
/// pub const WASMEDGE_VERSION_MAJOR: u32 = 0;
/// pub const WASMEDGE_VERSION_MINOR: u32 = 8;
/// pub const WASMEDGE_VERSION_PATCH: u32 = 2;
/// ```
pub fn full_version() -> WasmEdgeResult<&'static str> {
    Ok(CStr::from_bytes_with_nul(ffi::WASMEDGE_VERSION)?.to_str()?)
}

/// Semantic Version.
pub fn semv_version() -> String {
    format!(
        "{}.{}.{}",
        ffi::WASMEDGE_VERSION_MAJOR,
        ffi::WASMEDGE_VERSION_MINOR,
        ffi::WASMEDGE_VERSION_PATCH
    )
}

/// Logs the debug information.
pub fn log_debug_info() {
    unsafe { ffi::WasmEdge_LogSetDebugLevel() }
}

/// Logs the error information.
pub fn log_error_info() {
    unsafe { ffi::WasmEdge_LogSetErrorLevel() }
}

// Checks the result of a `FFI` function.
pub(crate) fn check(result: WasmEdge_Result) -> WasmEdgeResult<()> {
    let code = unsafe {
        if !WasmEdge_ResultOK(result) {
            WasmEdge_ResultGetCode(result)
        } else {
            0u32
        }
    };
    match code {
        // Success or terminated (exit and return success)
        0x00 | 0x01 => Ok(()),

        // Common errors
        0x02 => Err(WasmEdgeError::Core(CoreError::Common(CoreCommonError::RuntimeError))),
        0x03 => Err(WasmEdgeError::Core(CoreError::Common(CoreCommonError::CostLimitExceeded))),
        0x04 => Err(WasmEdgeError::Core(CoreError::Common(CoreCommonError::WrongVMWorkflow))),
        0x05 => Err(WasmEdgeError::Core(CoreError::Common(CoreCommonError::FuncNotFound))),
        0x06 => Err(WasmEdgeError::Core(CoreError::Common(CoreCommonError::AOTDisabled))),
        0x07 => Err(WasmEdgeError::Core(CoreError::Common(CoreCommonError::Interrupted))),

        // Load phase
        0x20 => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::IllegalPath))),
        0x21 => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::ReadError))),
        0x22 => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::UnexpectedEnd))),
        0x23 => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::MalformedMagic))),
        0x24 => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::MalformedVersion))),
        0x25 => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::MalformedSection))),
        0x26 => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::SectionSizeMismatch))),
        0x27 => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::NameSizeOutOfBounds))),
        0x28 => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::JunkSection))),
        0x29 => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::IncompatibleFuncCode))),
        0x2A => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::IncompatibleDataCount))),
        0x2B => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::DataCountRequired))),
        0x2C => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::MalformedImportKind))),
        0x2D => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::MalformedExportKind))),
        0x2E => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::ExpectedZeroByte))),
        0x2F => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::InvalidMut))),
        0x30 => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::TooManyLocals))),
        0x31 => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::MalformedValType))),
        0x32 => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::MalformedElemType))),
        0x33 => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::MalformedRefType))),
        0x34 => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::MalformedUTF8))),
        0x35 => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::IntegerTooLarge))),
        0x36 => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::IntegerTooLong))),
        0x37 => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::IllegalOpCode))),
        0x38 => Err(WasmEdgeError::Core(CoreError::Load(CoreLoadError::IllegalGrammar))),

        // Validation phase
        0x40 => Err(WasmEdgeError::Core(CoreError::Validation(CoreValidationError::InvalidAlignment))),
        0x41 => Err(WasmEdgeError::Core(CoreError::Validation(CoreValidationError::TypeCheckFailed))),
        0x42 => Err(WasmEdgeError::Core(CoreError::Validation(CoreValidationError::InvalidLabelIdx))),
        0x43 => Err(WasmEdgeError::Core(CoreError::Validation(CoreValidationError::InvalidLocalIdx))),
        0x44 => Err(WasmEdgeError::Core(CoreError::Validation(CoreValidationError::InvalidFuncTypeIdx))),
        0x45 => Err(WasmEdgeError::Core(CoreError::Validation(CoreValidationError::InvalidFuncIdx))),
        0x46 => Err(WasmEdgeError::Core(CoreError::Validation(CoreValidationError::InvalidTableIdx))),
        0x47 => Err(WasmEdgeError::Core(CoreError::Validation(CoreValidationError::InvalidMemoryIdx))),
        0x48 => Err(WasmEdgeError::Core(CoreError::Validation(CoreValidationError::InvalidGlobalIdx))),
        0x49 => Err(WasmEdgeError::Core(CoreError::Validation(CoreValidationError::InvalidElemIdx))),
        0x4A => Err(WasmEdgeError::Core(CoreError::Validation(CoreValidationError::InvalidDataIdx))),
        0x4B => Err(WasmEdgeError::Core(CoreError::Validation(CoreValidationError::InvalidRefIdx))),
        0x4C => Err(WasmEdgeError::Core(CoreError::Validation(CoreValidationError::ConstExprRequired))),
        0x4D => Err(WasmEdgeError::Core(CoreError::Validation(CoreValidationError::DupExportName))),
        0x4E => Err(WasmEdgeError::Core(CoreError::Validation(CoreValidationError::ImmutableGlobal))),
        0x4F => Err(WasmEdgeError::Core(CoreError::Validation(CoreValidationError::InvalidResultArity))),
        0x50 => Err(WasmEdgeError::Core(CoreError::Validation(CoreValidationError::MultiTables))),
        0x51 => Err(WasmEdgeError::Core(CoreError::Validation(CoreValidationError::MultiMemories))),
        0x52 => Err(WasmEdgeError::Core(CoreError::Validation(CoreValidationError::InvalidLimit))),
        0x53 => Err(WasmEdgeError::Core(CoreError::Validation(CoreValidationError::InvalidMemPages))),
        0x54 => Err(WasmEdgeError::Core(CoreError::Validation(CoreValidationError::InvalidStartFunc))),
        0x55 => Err(WasmEdgeError::Core(CoreError::Validation(CoreValidationError::InvalidLaneIdx))),

        // Instantiation phase
        0x60 => Err(WasmEdgeError::Core(CoreError::Instantiation(CoreInstantiationError::ModuleNameConflict))),
        0x61 => Err(WasmEdgeError::Core(CoreError::Instantiation(CoreInstantiationError::IncompatibleImportType))),
        0x62 => Err(WasmEdgeError::Core(CoreError::Instantiation(CoreInstantiationError::UnknownImport))),
        0x63 => Err(WasmEdgeError::Core(CoreError::Instantiation(CoreInstantiationError::DataSegDoesNotFit))),
        0x64 => Err(WasmEdgeError::Core(CoreError::Instantiation(CoreInstantiationError::ElemSegDoesNotFit))),

        // Execution phase
        0x80 => Err(WasmEdgeError::Core(CoreError::Execution(CoreExecutionError::WrongInstanceAddress))),
        0x81 => Err(WasmEdgeError::Core(CoreError::Execution(CoreExecutionError::WrongInstanceIndex))),
        0x82 => Err(WasmEdgeError::Core(CoreError::Execution(CoreExecutionError::InstrTypeMismatch))),
        0x83 => Err(WasmEdgeError::Core(CoreError::Execution(CoreExecutionError::FuncTypeMismatch))),
        0x84 => Err(WasmEdgeError::Core(CoreError::Execution(CoreExecutionError::DivideByZero))),
        0x85 => Err(WasmEdgeError::Core(CoreError::Execution(CoreExecutionError::IntegerOverflow))),
        0x86 => Err(WasmEdgeError::Core(CoreError::Execution(CoreExecutionError::InvalidConvToInt))),
        0x87 => Err(WasmEdgeError::Core(CoreError::Execution(CoreExecutionError::TableOutOfBounds))),
        0x88 => Err(WasmEdgeError::Core(CoreError::Execution(CoreExecutionError::MemoryOutOfBounds))),
        0x89 => Err(WasmEdgeError::Core(CoreError::Execution(CoreExecutionError::Unreachable))),
        0x8A => Err(WasmEdgeError::Core(CoreError::Execution(CoreExecutionError::UninitializedElement))),
        0x8B => Err(WasmEdgeError::Core(CoreError::Execution(CoreExecutionError::UndefinedElement))),
        0x8C => Err(WasmEdgeError::Core(CoreError::Execution(CoreExecutionError::IndirectCallTypeMismatch))),
        0x8D => Err(WasmEdgeError::Core(CoreError::Execution(CoreExecutionError::ExecutionFailed))),
        0x8E => Err(WasmEdgeError::Core(CoreError::Execution(CoreExecutionError::RefTypeMismatch))),

        _ => panic!("unknown error code: {}", code),
    }
}

/// Loads plugins from default paths.
///
/// The default paths include:
///
/// * The path specified by the `WASMEDGE_PLUGIN_PATH` environment variable.
///
/// * For Linux and MacOS, `$HOME/.wasmedge/plugins/wasmedge_process`.
///
/// * For Windows, `%USERPROFILE%\.wasmedge\plugins\wasmedge_process`.
pub fn load_plugin_from_default_paths() {
    unsafe { ffi::WasmEdge_Plugin_loadWithDefaultPluginPaths() }
}
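
// Usage sketch: querying the linked WasmEdge version with the functions above.
// The exact strings depend on whichever WasmEdge build is linked in; this only
// demonstrates the calls.
#[allow(dead_code)]
fn version_banner() -> WasmEdgeResult<String> {
    Ok(format!("WasmEdge {} ({})", semv_version(), full_version()?))
}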
35.504532
94
0.603897
01173e6596c6fce75ad11978699eafc46807e4b3
12,454
use crate::tree_hash::vec_tree_hash_root;
use crate::Error;
use serde_derive::{Deserialize, Serialize};
use std::marker::PhantomData;
use std::ops::{Deref, Index, IndexMut};
use std::slice::SliceIndex;
use tree_hash::Hash256;
use typenum::Unsigned;

pub use typenum;

/// Emulates a SSZ `Vector` (distinct from a Rust `Vec`).
///
/// An ordered, heap-allocated, fixed-length, homogeneous collection of `T`, with `N` values.
///
/// This struct is backed by a Rust `Vec` but constrained such that it must be instantiated with a
/// fixed number of elements and you may not add or remove elements, only modify.
///
/// The length of this struct is fixed at the type-level using
/// [typenum](https://crates.io/crates/typenum).
///
/// ## Note
///
/// Whilst it is possible with this library, SSZ declares that a `FixedVector` with a length of `0`
/// is illegal.
///
/// ## Example
///
/// ```
/// use ssz_types::{FixedVector, typenum};
///
/// let base: Vec<u64> = vec![1, 2, 3, 4];
///
/// // Create a `FixedVector` from a `Vec` that has the expected length.
/// let exact: FixedVector<_, typenum::U4> = FixedVector::from(base.clone());
/// assert_eq!(&exact[..], &[1, 2, 3, 4]);
///
/// // Create a `FixedVector` from a `Vec` that is too long and the `Vec` is truncated.
/// let short: FixedVector<_, typenum::U3> = FixedVector::from(base.clone());
/// assert_eq!(&short[..], &[1, 2, 3]);
///
/// // Create a `FixedVector` from a `Vec` that is too short and the missing values are created
/// // using `std::default::Default`.
/// let long: FixedVector<_, typenum::U5> = FixedVector::from(base);
/// assert_eq!(&long[..], &[1, 2, 3, 4, 0]);
/// ```
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
#[serde(transparent)]
pub struct FixedVector<T, N> {
    vec: Vec<T>,
    _phantom: PhantomData<N>,
}

impl<T, N: Unsigned> FixedVector<T, N> {
    /// Returns `Ok` if the given `vec` equals the fixed length of `Self`. Otherwise returns
    /// `Err`.
    pub fn new(vec: Vec<T>) -> Result<Self, Error> {
        if vec.len() == Self::capacity() {
            Ok(Self {
                vec,
                _phantom: PhantomData,
            })
        } else {
            Err(Error::OutOfBounds {
                i: vec.len(),
                len: Self::capacity(),
            })
        }
    }

    /// Create a new vector filled with clones of `elem`.
    pub fn from_elem(elem: T) -> Self
    where
        T: Clone,
    {
        Self {
            vec: vec![elem; N::to_usize()],
            _phantom: PhantomData,
        }
    }

    /// Identical to `self.capacity`, returns the type-level constant length.
    ///
    /// Exists for compatibility with `Vec`.
    pub fn len(&self) -> usize {
        self.vec.len()
    }

    /// True if the type-level constant length of `self` is zero.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Returns the type-level constant length.
    pub fn capacity() -> usize {
        N::to_usize()
    }
}

impl<T: Default, N: Unsigned> From<Vec<T>> for FixedVector<T, N> {
    fn from(mut vec: Vec<T>) -> Self {
        vec.resize_with(Self::capacity(), Default::default);

        Self {
            vec,
            _phantom: PhantomData,
        }
    }
}

impl<T, N: Unsigned> Into<Vec<T>> for FixedVector<T, N> {
    fn into(self) -> Vec<T> {
        self.vec
    }
}

impl<T, N: Unsigned> Default for FixedVector<T, N> {
    fn default() -> Self {
        Self {
            vec: Vec::default(),
            _phantom: PhantomData,
        }
    }
}

impl<T, N: Unsigned, I: SliceIndex<[T]>> Index<I> for FixedVector<T, N> {
    type Output = I::Output;

    #[inline]
    fn index(&self, index: I) -> &Self::Output {
        Index::index(&self.vec, index)
    }
}

impl<T, N: Unsigned, I: SliceIndex<[T]>> IndexMut<I> for FixedVector<T, N> {
    #[inline]
    fn index_mut(&mut self, index: I) -> &mut Self::Output {
        IndexMut::index_mut(&mut self.vec, index)
    }
}

impl<T, N: Unsigned> Deref for FixedVector<T, N> {
    type Target = [T];

    fn deref(&self) -> &[T] {
        &self.vec[..]
    }
}

impl<T, N: Unsigned> tree_hash::TreeHash for FixedVector<T, N>
where
    T: tree_hash::TreeHash,
{
    fn tree_hash_type() -> tree_hash::TreeHashType {
        tree_hash::TreeHashType::Vector
    }

    fn tree_hash_packed_encoding(&self) -> Vec<u8> {
        unreachable!("Vector should never be packed.")
    }

    fn tree_hash_packing_factor() -> usize {
        unreachable!("Vector should never be packed.")
    }

    fn tree_hash_root(&self) -> Hash256 {
        vec_tree_hash_root::<T, N>(&self.vec)
    }
}

impl<T, N: Unsigned> ssz::Encode for FixedVector<T, N>
where
    T: ssz::Encode,
{
    fn is_ssz_fixed_len() -> bool {
        T::is_ssz_fixed_len()
    }

    fn ssz_fixed_len() -> usize {
        if <Self as ssz::Encode>::is_ssz_fixed_len() {
            T::ssz_fixed_len() * N::to_usize()
        } else {
            ssz::BYTES_PER_LENGTH_OFFSET
        }
    }

    fn ssz_bytes_len(&self) -> usize {
        self.vec.ssz_bytes_len()
    }

    fn ssz_append(&self, buf: &mut Vec<u8>) {
        if T::is_ssz_fixed_len() {
            buf.reserve(T::ssz_fixed_len() * self.len());

            for item in &self.vec {
                item.ssz_append(buf);
            }
        } else {
            let mut encoder =
                ssz::SszEncoder::container(buf, self.len() * ssz::BYTES_PER_LENGTH_OFFSET);

            for item in &self.vec {
                encoder.append(item);
            }

            encoder.finalize();
        }
    }
}

impl<T, N: Unsigned> ssz::Decode for FixedVector<T, N>
where
    T: ssz::Decode + Default,
{
    fn is_ssz_fixed_len() -> bool {
        T::is_ssz_fixed_len()
    }

    fn ssz_fixed_len() -> usize {
        if <Self as ssz::Decode>::is_ssz_fixed_len() {
            T::ssz_fixed_len() * N::to_usize()
        } else {
            ssz::BYTES_PER_LENGTH_OFFSET
        }
    }

    fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, ssz::DecodeError> {
        let fixed_len = N::to_usize();

        if bytes.is_empty() {
            Err(ssz::DecodeError::InvalidByteLength {
                len: 0,
                expected: 1,
            })
        } else if T::is_ssz_fixed_len() {
            let num_items = bytes
                .len()
                .checked_div(T::ssz_fixed_len())
                .ok_or(ssz::DecodeError::ZeroLengthItem)?;

            if num_items != fixed_len {
                return Err(ssz::DecodeError::BytesInvalid(format!(
                    "FixedVector of {} items has {} items",
                    fixed_len, num_items
                )));
            }

            bytes
                .chunks(T::ssz_fixed_len())
                .map(|chunk| T::from_ssz_bytes(chunk))
                .collect::<Result<Vec<T>, _>>()
                .and_then(|vec| {
                    if vec.len() == fixed_len {
                        Ok(vec.into())
                    } else {
                        Err(ssz::DecodeError::BytesInvalid(format!(
                            "Wrong number of FixedVector elements, got: {}, expected: {}",
                            vec.len(),
                            N::to_usize()
                        )))
                    }
                })
        } else {
            ssz::decode_list_of_variable_length_items(bytes, Some(fixed_len)).map(|vec| vec.into())
        }
    }
}

#[cfg(feature = "arbitrary")]
impl<T: arbitrary::Arbitrary, N: 'static + Unsigned> arbitrary::Arbitrary for FixedVector<T, N> {
    fn arbitrary(u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
        let size = N::to_usize();
        let mut vec: Vec<T> = Vec::with_capacity(size);
        for _ in 0..size {
            vec.push(<T>::arbitrary(u)?);
        }
        Ok(Self::new(vec).map_err(|_| arbitrary::Error::IncorrectFormat)?)
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use ssz::*;
    use tree_hash::{merkle_root, TreeHash};
    use tree_hash_derive::TreeHash;
    use typenum::*;

    #[test]
    fn new() {
        let vec = vec![42; 5];
        let fixed: Result<FixedVector<u64, U4>, _> = FixedVector::new(vec);
        assert!(fixed.is_err());

        let vec = vec![42; 3];
        let fixed: Result<FixedVector<u64, U4>, _> = FixedVector::new(vec);
        assert!(fixed.is_err());

        let vec = vec![42; 4];
        let fixed: Result<FixedVector<u64, U4>, _> = FixedVector::new(vec);
        assert!(fixed.is_ok());
    }

    #[test]
    fn indexing() {
        let vec = vec![1, 2];

        let mut fixed: FixedVector<u64, U8192> = vec.clone().into();

        assert_eq!(fixed[0], 1);
        assert_eq!(&fixed[0..1], &vec[0..1]);
        assert_eq!((&fixed[..]).len(), 8192);

        fixed[1] = 3;
        assert_eq!(fixed[1], 3);
    }

    #[test]
    fn length() {
        let vec = vec![42; 5];
        let fixed: FixedVector<u64, U4> = FixedVector::from(vec.clone());
        assert_eq!(&fixed[..], &vec[0..4]);

        let vec = vec![42; 3];
        let fixed: FixedVector<u64, U4> = FixedVector::from(vec.clone());
        assert_eq!(&fixed[0..3], &vec[..]);
        assert_eq!(&fixed[..], &vec![42, 42, 42, 0][..]);

        let vec = vec![];
        let fixed: FixedVector<u64, U4> = FixedVector::from(vec);
        assert_eq!(&fixed[..], &vec![0, 0, 0, 0][..]);
    }

    #[test]
    fn deref() {
        let vec = vec![0, 2, 4, 6];
        let fixed: FixedVector<u64, U4> = FixedVector::from(vec);

        assert_eq!(fixed.get(0), Some(&0));
        assert_eq!(fixed.get(3), Some(&6));
        assert_eq!(fixed.get(4), None);
    }

    #[test]
    fn ssz_encode() {
        let vec: FixedVector<u16, U2> = vec![0; 2].into();
        assert_eq!(vec.as_ssz_bytes(), vec![0, 0, 0, 0]);
        assert_eq!(<FixedVector<u16, U2> as Encode>::ssz_fixed_len(), 4);
    }

    fn ssz_round_trip<T: Encode + Decode + std::fmt::Debug + PartialEq>(item: T) {
        let encoded = &item.as_ssz_bytes();
        assert_eq!(item.ssz_bytes_len(), encoded.len());
        assert_eq!(T::from_ssz_bytes(&encoded), Ok(item));
    }

    #[test]
    fn ssz_round_trip_u16_len_8() {
        ssz_round_trip::<FixedVector<u16, U8>>(vec![42; 8].into());
        ssz_round_trip::<FixedVector<u16, U8>>(vec![0; 8].into());
    }

    #[test]
    fn tree_hash_u8() {
        let fixed: FixedVector<u8, U0> = FixedVector::from(vec![]);
        assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 8], 0));

        let fixed: FixedVector<u8, U1> = FixedVector::from(vec![0; 1]);
        assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 8], 0));

        let fixed: FixedVector<u8, U8> = FixedVector::from(vec![0; 8]);
        assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 8], 0));

        let fixed: FixedVector<u8, U16> = FixedVector::from(vec![42; 16]);
        assert_eq!(fixed.tree_hash_root(), merkle_root(&[42; 16], 0));

        let source: Vec<u8> = (0..16).collect();
        let fixed: FixedVector<u8, U16> = FixedVector::from(source.clone());
        assert_eq!(fixed.tree_hash_root(), merkle_root(&source, 0));
    }

    #[derive(Clone, Copy, TreeHash, Default)]
    struct A {
        a: u32,
        b: u32,
    }

    fn repeat(input: &[u8], n: usize) -> Vec<u8> {
        let mut output = vec![];

        for _ in 0..n {
            output.append(&mut input.to_vec());
        }

        output
    }

    #[test]
    fn tree_hash_composite() {
        let a = A { a: 0, b: 1 };

        let fixed: FixedVector<A, U0> = FixedVector::from(vec![]);
        assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 32], 0));

        let fixed: FixedVector<A, U1> = FixedVector::from(vec![a]);
        assert_eq!(
            fixed.tree_hash_root(),
            merkle_root(a.tree_hash_root().as_bytes(), 0)
        );

        let fixed: FixedVector<A, U8> = FixedVector::from(vec![a; 8]);
        assert_eq!(
            fixed.tree_hash_root(),
            merkle_root(&repeat(a.tree_hash_root().as_bytes(), 8), 0)
        );

        let fixed: FixedVector<A, U13> = FixedVector::from(vec![a; 13]);
        assert_eq!(
            fixed.tree_hash_root(),
            merkle_root(&repeat(a.tree_hash_root().as_bytes(), 13), 0)
        );

        let fixed: FixedVector<A, U16> = FixedVector::from(vec![a; 16]);
        assert_eq!(
            fixed.tree_hash_root(),
            merkle_root(&repeat(a.tree_hash_root().as_bytes(), 16), 0)
        );
    }
}
28.962791
99
0.536856
6781b44a402d4bba12aa34319260524709cab38a
1,186
use bevy::prelude::*;

fn main() {
    App::build()
        .add_resource(Msaa { samples: 4 })
        .add_default_plugins()
        .add_startup_system(setup.system())
        .run();
}

fn setup(
    mut commands: Commands,
    asset_server: Res<AssetServer>,
    mut materials: ResMut<Assets<StandardMaterial>>,
) {
    // add entities to the world
    commands
        // mesh
        .spawn(PbrComponents {
            // load the mesh
            mesh: asset_server
                .load("assets/models/monkey/Monkey.gltf")
                .unwrap(),
            // create a material for the mesh
            material: materials.add(Color::rgb(0.5, 0.4, 0.3).into()),
            ..Default::default()
        })
        // light
        .spawn(LightComponents {
            translation: Translation::new(4.0, 5.0, 4.0),
            ..Default::default()
        })
        // camera
        .spawn(Camera3dComponents {
            transform: Transform::new_sync_disabled(Mat4::face_toward(
                Vec3::new(-2.0, 2.0, 6.0),
                Vec3::new(0.0, 0.0, 0.0),
                Vec3::new(0.0, 1.0, 0.0),
            )),
            ..Default::default()
        });
}
27.581395
70
0.49747
188243fcd14036ab20012b5646f4412b6240d06e
6,527
use super::error::{Result, ResultExt};
use super::{Call, Fault, Response, Value};

use base64;
use regex::Regex;
use serde_xml_rs::de::from_str;
use std;
use std::collections::HashMap;

fn wrap_in_string(content: String) -> String {
    lazy_static::lazy_static! {
        static ref RE1: Regex = Regex::new(r"<value\s*/>").unwrap();
        static ref RE2: Regex = Regex::new(r"<value\s*>\s*<string\s*/>\s*</value\s*>").unwrap();
        static ref RE3: Regex = Regex::new(r"<value\s*>(?P<rest>[^<>]*)</value\s*>").unwrap();
    }
    RE3.replace_all(
        &RE2.replace_all(
            &RE1.replace_all(&content, "<value><string></string></value>"),
            "<value><string></string></value>",
        ),
        "<value><string>$rest</string></value>",
    )
    .into()
}

#[allow(dead_code)]
pub fn xml<T: std::io::Read>(mut r: T) -> Result<Value> {
    let mut content = String::new();
    r.read_to_string(&mut content)
        .chain_err(|| "Failed to read data source.")?;
    let data: XmlValue =
        from_str(&wrap_in_string(content)).chain_err(|| "Failed to parse XML-RPC data.")?;
    data.into()
}

pub fn call<T: std::io::Read>(mut r: T) -> Result<Call> {
    let mut content = String::new();
    r.read_to_string(&mut content)
        .chain_err(|| "Failed to read data source.")?;
    let data: XmlCall =
        from_str(&wrap_in_string(content)).chain_err(|| "Failed to parse XML-RPC call.")?;
    data.into()
}

pub fn response<T: std::io::Read>(mut r: T) -> Result<Response> {
    let mut content = String::new();
    r.read_to_string(&mut content)
        .chain_err(|| "Failed to read data source.")?;
    let data: XmlResponse =
        from_str(&wrap_in_string(content)).chain_err(|| "Failed to parse XML-RPC response.")?;
    data.into()
}

#[derive(Debug, PartialEq, Deserialize)]
enum XmlValue {
    #[serde(rename = "i4")]
    I4(i32),
    #[serde(rename = "int")]
    Int(i32),
    #[serde(rename = "boolean")]
    Bool(i32),
    #[serde(rename = "string")]
    Str(String),
    #[serde(rename = "double")]
    Double(String),
    #[serde(rename = "dateTime.iso8601")]
    DateTime(String),
    #[serde(rename = "base64")]
    Base64(String),
    #[serde(rename = "array")]
    Array(XmlArray),
    #[serde(rename = "struct")]
    Struct(XmlStruct),
}

impl Into<Result<Value>> for XmlValue {
    fn into(self) -> Result<Value> {
        Ok(match self {
            XmlValue::I4(v) | XmlValue::Int(v) => Value::Int(v),
            XmlValue::Bool(v) => Value::Bool(v != 0),
            XmlValue::Str(v) => Value::String(v),
            XmlValue::Double(v) => {
                Value::Double(v.parse().chain_err(|| "Failed to parse double")?)
            }
            XmlValue::DateTime(v) => Value::DateTime(v),
            XmlValue::Base64(v) => {
                Value::Base64(base64::decode(v.as_bytes()).chain_err(|| "Failed to parse base64")?)
            }
            XmlValue::Array(v) => {
                let items: Result<Vec<Value>> = v.into();
                Value::Array(items?)
            }
            XmlValue::Struct(v) => {
                let items: Result<HashMap<String, Value>> = v.into();
                Value::Struct(items?)
            }
        })
    }
}

#[derive(Debug, PartialEq, Deserialize)]
#[serde(rename = "methodCall")]
struct XmlCall {
    #[serde(rename = "methodName")]
    pub name: String,
    pub params: XmlParams,
}

impl Into<Result<Call>> for XmlCall {
    fn into(self) -> Result<Call> {
        let params: Result<Vec<Value>> = self.params.into();
        Ok(Call {
            name: self.name,
            params: params?,
        })
    }
}

#[derive(Debug, PartialEq, Deserialize)]
enum XmlResponseResult {
    #[serde(rename = "params")]
    Success(XmlParams),
    #[serde(rename = "fault")]
    Failure { value: XmlValue },
}

impl Into<Result<Response>> for XmlResponseResult {
    fn into(self) -> Result<Response> {
        match self {
            XmlResponseResult::Success(params) => {
                let params: Result<Vec<Value>> = params.into();
                Ok(Ok(params?))
            }
            XmlResponseResult::Failure { value: v } => {
                use serde::Deserialize;
                let val: Result<Value> = v.into();
                Ok(Err(
                    Fault::deserialize(val?).chain_err(|| "Failed to decode fault structure")?
                ))
            }
        }
    }
}

#[derive(Debug, PartialEq, Deserialize)]
enum XmlResponse {
    #[serde(rename = "methodResponse")]
    Response(XmlResponseResult),
}

impl Into<Result<Response>> for XmlResponse {
    fn into(self) -> Result<Response> {
        match self {
            XmlResponse::Response(v) => v.into(),
        }
    }
}

#[derive(Debug, PartialEq, Deserialize)]
struct XmlParams {
    #[serde(rename = "param", default)]
    pub params: Vec<XmlParamData>,
}

impl Into<Result<Vec<Value>>> for XmlParams {
    fn into(self) -> Result<Vec<Value>> {
        self.params
            .into_iter()
            .map(Into::<Result<Value>>::into)
            .collect()
    }
}

#[derive(Debug, PartialEq, Deserialize)]
struct XmlParamData {
    pub value: XmlValue,
}

impl Into<Result<Value>> for XmlParamData {
    fn into(self) -> Result<Value> {
        self.value.into()
    }
}

#[derive(Debug, PartialEq, Deserialize)]
struct XmlArray {
    #[serde(rename = "data")]
    pub data: XmlArrayData,
}

impl Into<Result<Vec<Value>>> for XmlArray {
    fn into(self) -> Result<Vec<Value>> {
        self.data.into()
    }
}

#[derive(Debug, PartialEq, Deserialize)]
struct XmlArrayData {
    #[serde(default)]
    pub value: Vec<XmlValue>,
}

impl Into<Result<Vec<Value>>> for XmlArrayData {
    fn into(self) -> Result<Vec<Value>> {
        self.value
            .into_iter()
            .map(Into::<Result<Value>>::into)
            .collect()
    }
}

#[derive(Debug, PartialEq, Deserialize)]
struct XmlStruct {
    #[serde(rename = "member", default)]
    pub members: Vec<XmlStructItem>,
}

impl Into<Result<HashMap<String, Value>>> for XmlStruct {
    fn into(self) -> Result<HashMap<String, Value>> {
        self.members
            .into_iter()
            .map(Into::<Result<(String, Value)>>::into)
            .collect()
    }
}

#[derive(Debug, PartialEq, Deserialize)]
struct XmlStructItem {
    pub name: String,
    pub value: XmlValue,
}

impl Into<Result<(String, Value)>> for XmlStructItem {
    fn into(self) -> Result<(String, Value)> {
        let value: Result<Value> = self.value.into();
        Ok((self.name, value?))
    }
}
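
// Usage sketch: parsing a method call from a byte slice. The XML payload is an
// illustrative assumption, not a fixture from this crate; `call` accepts any
// `std::io::Read`, so a `&[u8]` works directly.
//
// let xml = r#"<?xml version="1.0"?>
// <methodCall>
//     <methodName>ping</methodName>
//     <params><param><value>hello</value></param></params>
// </methodCall>"#;
// let parsed = call(xml.as_bytes())?;
// assert_eq!(parsed.name, "ping");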
27.42437
100
0.568102
877b0aba473920fe697e9d6651d13222fe26056f
149
fn test<'x>(x: &'x isize) {
    drop::<Box<for<'z> FnMut(&'z isize) -> &'z isize>>(Box::new(|z| {
        x //~ ERROR E0312
    }));
}

fn main() {}
18.625
69
0.442953
181872f42ca3e014351aaaff1208ea411858baf1
6,517
//! This contains the [`Value`](Value) struct.

use std::fmt;

/// The value of an [`Instruction`](crate::Instruction).
#[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
pub enum Value {
    /// A singular value.
    Singular(String),
    /// A mapping (2 values).
    Mapping(String, String),
    /// A mapping with an optional key.
    OptionalKey(Option<String>, String),
    /// A mapping with an optional key. Once printed, the value won't have quotes.
    UnquotedOptionalKey(Option<String>, String),
    /// A mapping with an optional value.
    OptionalValue(String, Option<String>),
    /// A mapping with an optional value. Once printed, if the value exists, the value won't have
    /// quotes.
    UnquotedOptionalValue(String, Option<String>),
    /// A mapping with the value not having quotes.
    UnquotedMapping(String, String),
}

impl Value {
    /// Returns `true` if the value is a [`Singular`](Self::Singular) value.
    pub fn is_singular(&self) -> bool {
        matches!(*self, Self::Singular(_))
    }

    /// Returns `true` if the value is a [`Mapping`](Self::Mapping) value.
    pub fn is_mapping(&self) -> bool {
        matches!(*self, Self::Mapping(_, _))
    }

    /// Returns `true` if the value is an [`OptionalKey`](Self::OptionalKey) value.
    pub fn is_optional_key(&self) -> bool {
        matches!(*self, Self::OptionalKey(_, _))
    }

    /// Returns `true` if the value is an [`UnquotedOptionalKey`](Self::UnquotedOptionalKey) value.
    pub fn is_unquoted_optional_key(&self) -> bool {
        matches!(*self, Self::UnquotedOptionalKey(_, _))
    }

    /// Returns `true` if the value is an [`OptionalValue`](Self::OptionalValue) value.
    pub fn is_optional_value(&self) -> bool {
        matches!(*self, Self::OptionalValue(_, _))
    }

    /// Returns `true` if the value is an [`UnquotedOptionalValue`](Self::UnquotedOptionalValue) value.
    pub fn is_unquoted_optional_value(&self) -> bool {
        matches!(*self, Self::UnquotedOptionalValue(_, _))
    }

    /// Returns `true` if the value is an [`UnquotedMapping`](Self::UnquotedMapping) value.
    pub fn is_unquoted_mapping(&self) -> bool {
        matches!(*self, Self::UnquotedMapping(_, _))
    }
}

impl fmt::Display for Value {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Self::Singular(value) => write!(f, "{}", value),
            Self::Mapping(key, value) => write!(f, "{}=\"{}\"", key, value),
            Self::OptionalKey(key, value) => {
                if let Some(key) = key {
                    write!(f, "{}=\"{}\"", key, value)
                } else {
                    write!(f, "{}", value)
                }
            }
            Self::UnquotedOptionalKey(key, value) => {
                if let Some(key) = key {
                    write!(f, "{}={}", key, value)
                } else {
                    write!(f, "{}", value)
                }
            }
            Self::OptionalValue(key, value) => {
                if let Some(value) = value {
                    write!(f, "{}=\"{}\"", key, value)
                } else {
                    write!(f, "{}", key)
                }
            }
            Self::UnquotedOptionalValue(key, value) => {
                if let Some(value) = value {
                    write!(f, "{}={}", key, value)
                } else {
                    write!(f, "{}", key)
                }
            }
            Self::UnquotedMapping(key, value) => write!(f, "{}={}", key, value),
        }
    }
}

#[cfg(test)]
mod tests {
    use crate::Value;

    macro_rules! new_display_test {
        ($name:ident, $value:expr, $expected:literal) => {
            #[test]
            fn $name() {
                let value = $value;
                let output = format!("{}", value);
                let expected = $expected;
                assert_eq!(output, expected)
            }
        };
    }

    #[test]
    fn test_is_singular() {
        assert!(Value::Singular("".into()).is_singular())
    }

    new_display_test!(
        test_singular_display,
        Value::Singular("singular".into()),
        "singular"
    );

    #[test]
    fn test_is_mapping() {
        assert!(Value::Mapping("".into(), "".into()).is_mapping())
    }

    new_display_test!(
        test_mapping_display,
        Value::Mapping("key".into(), "value".into()),
        "key=\"value\""
    );

    #[test]
    fn test_is_optional_key() {
        assert!(Value::OptionalKey(None, "".into()).is_optional_key())
    }

    new_display_test!(
        test_optional_key_display,
        Value::OptionalKey(Some("key".into()), "value".into()),
        "key=\"value\""
    );
    new_display_test!(
        test_optional_key_display_none,
        Value::OptionalKey(None, "value".into()),
        "value"
    );

    #[test]
    fn test_is_unquoted_optional_key() {
        assert!(Value::UnquotedOptionalKey(None, "".into()).is_unquoted_optional_key())
    }

    new_display_test!(
        test_unquoted_optional_key_display,
        Value::UnquotedOptionalKey(Some("key".into()), "value".into()),
        "key=value"
    );
    new_display_test!(
        test_unquoted_optional_key_display_none,
        Value::UnquotedOptionalKey(None, "value".into()),
        "value"
    );

    #[test]
    fn test_is_optional_value() {
        assert!(Value::OptionalValue("".into(), None).is_optional_value())
    }

    new_display_test!(
        test_optional_value_display,
        Value::OptionalValue("key".into(), Some("value".into())),
        "key=\"value\""
    );
    new_display_test!(
        test_optional_value_display_none,
        Value::OptionalValue("key".into(), None),
        "key"
    );

    #[test]
    fn test_is_unquoted_optional_value() {
        assert!(Value::UnquotedOptionalValue("".into(), None).is_unquoted_optional_value())
    }

    new_display_test!(
        test_unquoted_optional_value_display,
        Value::UnquotedOptionalValue("key".into(), Some("value".into())),
        "key=value"
    );
    new_display_test!(
        test_unquoted_optional_value_display_none,
        Value::UnquotedOptionalValue("key".into(), None),
        "key"
    );

    #[test]
    fn test_is_unquoted_mapping() {
        assert!(Value::UnquotedMapping("".into(), "".into()).is_unquoted_mapping())
    }

    new_display_test!(
        test_unquoted_mapping_display,
        Value::UnquotedMapping("key".into(), "value".into()),
        "key=value"
    );
}
29.757991
103
0.549639
91d06e307f482f71e470dc2e3042ea89d09a5fbe
8,575
use crate::message_prelude::*;
use crate::tool::ToolType;

use super::{
    keyboard::{Key, KeyStates, NUMBER_OF_KEYS},
    InputPreprocessor,
};

#[impl_message(Message, InputMapper)]
#[derive(PartialEq, Clone, Debug)]
pub enum InputMapperMessage {
    PointerMove,
    KeyUp(Key),
    KeyDown(Key),
}

#[derive(PartialEq, Clone, Debug)]
struct MappingEntry {
    trigger: InputMapperMessage,
    modifiers: KeyStates,
    action: Message,
}

#[derive(Debug, Clone)]
struct KeyMappingEntries(Vec<MappingEntry>);

impl KeyMappingEntries {
    fn match_mapping(&self, keys: &KeyStates, actions: ActionList) -> Option<Message> {
        for entry in self.0.iter() {
            let all_required_modifiers_pressed =
                ((*keys & entry.modifiers) ^ entry.modifiers).is_empty();
            if all_required_modifiers_pressed
                && actions
                    .iter()
                    .flatten()
                    .any(|action| entry.action.to_discriminant() == *action)
            {
                return Some(entry.action.clone());
            }
        }
        None
    }

    fn push(&mut self, entry: MappingEntry) {
        self.0.push(entry)
    }

    const fn new() -> Self {
        Self(Vec::new())
    }

    fn key_array() -> [Self; NUMBER_OF_KEYS] {
        const DEFAULT: KeyMappingEntries = KeyMappingEntries::new();
        [DEFAULT; NUMBER_OF_KEYS]
    }
}

impl Default for KeyMappingEntries {
    fn default() -> Self {
        Self::new()
    }
}

#[derive(Debug, Clone)]
struct Mapping {
    up: [KeyMappingEntries; NUMBER_OF_KEYS],
    down: [KeyMappingEntries; NUMBER_OF_KEYS],
    pointer_move: KeyMappingEntries,
}

macro_rules! modifiers {
    ($($m:ident),*) => {{
        #[allow(unused_mut)]
        let mut state = KeyStates::new();
        $(
            state.set(Key::$m as usize);
        )*
        state
    }};
}

macro_rules! entry {
    {action=$action:expr, key_down=$key:ident $(, modifiers=[$($m:ident),* $(,)?])?} => {{
        entry!{action=$action, message=InputMapperMessage::KeyDown(Key::$key) $(, modifiers=[$($m),*])?}
    }};
    {action=$action:expr, key_up=$key:ident $(, modifiers=[$($m:ident),* $(,)?])?} => {{
        entry!{action=$action, message=InputMapperMessage::KeyUp(Key::$key) $(, modifiers=[$($m),* ])?}
    }};
    {action=$action:expr, message=$message:expr $(, modifiers=[$($m:ident),* $(,)?])?} => {{
        MappingEntry {trigger: $message, modifiers: modifiers!($($($m),*)?), action: $action.into()}
    }};
}

macro_rules! mapping {
    //[$(<action=$action:expr; message=$key:expr; $(modifiers=[$($m:ident),* $(,)?];)?>)*] => {{
    [$($entry:expr),* $(,)?] => {{
        let mut up = KeyMappingEntries::key_array();
        let mut down = KeyMappingEntries::key_array();
        let mut pointer_move: KeyMappingEntries = Default::default();
        $(
            let arr = match $entry.trigger {
                InputMapperMessage::KeyDown(key) => &mut down[key as usize],
                InputMapperMessage::KeyUp(key) => &mut up[key as usize],
                InputMapperMessage::PointerMove => &mut pointer_move,
            };
            arr.push($entry);
        )*
        (up, down, pointer_move)
    }};
}

impl Default for Mapping {
    fn default() -> Self {
        let (up, down, pointer_move) = mapping![
            // Rectangle
            entry! {action=RectangleMessage::Center, key_down=KeyAlt},
            entry! {action=RectangleMessage::UnCenter, key_up=KeyAlt},
            entry! {action=RectangleMessage::MouseMove, message=InputMapperMessage::PointerMove},
            entry! {action=RectangleMessage::DragStart, key_down=Lmb},
            entry! {action=RectangleMessage::DragStop, key_up=Lmb},
            entry! {action=RectangleMessage::Abort, key_down=Rmb},
            entry! {action=RectangleMessage::Abort, key_down=KeyEscape},
            entry! {action=RectangleMessage::LockAspectRatio, key_down=KeyShift},
            entry! {action=RectangleMessage::UnlockAspectRatio, key_up=KeyShift},
            // Ellipse
            entry! {action=EllipseMessage::Center, key_down=KeyAlt},
            entry! {action=EllipseMessage::UnCenter, key_up=KeyAlt},
            entry! {action=EllipseMessage::MouseMove, message=InputMapperMessage::PointerMove},
            entry! {action=EllipseMessage::DragStart, key_down=Lmb},
            entry! {action=EllipseMessage::DragStop, key_up=Lmb},
            entry! {action=EllipseMessage::Abort, key_down=Rmb},
            entry! {action=EllipseMessage::Abort, key_down=KeyEscape},
            entry! {action=EllipseMessage::LockAspectRatio, key_down=KeyShift},
            entry! {action=EllipseMessage::UnlockAspectRatio, key_up=KeyShift},
            // Shape
            entry! {action=ShapeMessage::Center, key_down=KeyAlt},
            entry! {action=ShapeMessage::UnCenter, key_up=KeyAlt},
            entry! {action=ShapeMessage::MouseMove, message=InputMapperMessage::PointerMove},
            entry! {action=ShapeMessage::DragStart, key_down=Lmb},
            entry! {action=ShapeMessage::DragStop, key_up=Lmb},
            entry! {action=ShapeMessage::Abort, key_down=Rmb},
            entry! {action=ShapeMessage::Abort, key_down=KeyEscape},
            entry! {action=ShapeMessage::LockAspectRatio, key_down=KeyShift},
            entry! {action=ShapeMessage::UnlockAspectRatio, key_up=KeyShift},
            // Line
            entry! {action=LineMessage::Center, key_down=KeyAlt},
            entry! {action=LineMessage::UnCenter, key_up=KeyAlt},
            entry! {action=LineMessage::MouseMove, message=InputMapperMessage::PointerMove},
            entry! {action=LineMessage::DragStart, key_down=Lmb},
            entry! {action=LineMessage::DragStop, key_up=Lmb},
            entry! {action=LineMessage::Abort, key_down=Rmb},
            entry! {action=LineMessage::Abort, key_down=KeyEscape},
            entry! {action=LineMessage::LockAngle, key_down=KeyControl},
            entry! {action=LineMessage::UnlockAngle, key_up=KeyControl},
            entry! {action=LineMessage::SnapToAngle, key_down=KeyShift},
            entry! {action=LineMessage::UnSnapToAngle, key_up=KeyShift},
            // Pen
            entry! {action=PenMessage::MouseMove, message=InputMapperMessage::PointerMove},
            entry! {action=PenMessage::DragStart, key_down=Lmb},
            entry! {action=PenMessage::DragStop, key_up=Lmb},
            entry! {action=PenMessage::Confirm, key_down=Rmb},
            entry! {action=PenMessage::Confirm, key_down=KeyEscape},
            entry! {action=PenMessage::Confirm, key_down=KeyEnter},
            // Tool Actions
            entry! {action=ToolMessage::SelectTool(ToolType::Rectangle), key_down=KeyM},
            entry! {action=ToolMessage::SelectTool(ToolType::Ellipse), key_down=KeyE},
            entry! {action=ToolMessage::SelectTool(ToolType::Select), key_down=KeyV},
            entry! {action=ToolMessage::SelectTool(ToolType::Line), key_down=KeyL},
            entry! {action=ToolMessage::SelectTool(ToolType::Pen), key_down=KeyP},
            entry! {action=ToolMessage::SelectTool(ToolType::Shape), key_down=KeyY},
            entry! {action=ToolMessage::ResetColors, key_down=KeyX, modifiers=[KeyShift, KeyControl]},
            entry! {action=ToolMessage::SwapColors, key_down=KeyX, modifiers=[KeyShift]},
            // Document Actions
            entry! {action=DocumentMessage::Undo, key_down=KeyZ, modifiers=[KeyControl]},
            entry! {action=DocumentMessage::DeleteSelectedLayers, key_down=KeyDelete},
            entry! {action=DocumentMessage::DeleteSelectedLayers, key_down=KeyX},
            entry! {action=DocumentMessage::DeleteSelectedLayers, key_down=KeyBackspace},
            entry! {action=DocumentMessage::ExportDocument, key_down=KeyS, modifiers=[KeyControl, KeyShift]},
            entry! {action=DocumentMessage::ExportDocument, key_down=KeyE, modifiers=[KeyControl]},
            entry! {action=DocumentMessage::MouseMove, message=InputMapperMessage::PointerMove},
            entry! {action=DocumentMessage::TranslateDown, key_down=Mmb},
            entry! {action=DocumentMessage::TranslateUp, key_up=Mmb},
            entry! {action=DocumentMessage::NewDocument, key_down=KeyN, modifiers=[KeyShift]},
            entry! {action=DocumentMessage::NextDocument, key_down=KeyTab, modifiers=[KeyShift]},
            entry! {action=DocumentMessage::CloseActiveDocument, key_down=KeyW, modifiers=[KeyShift]},
            // Global Actions
            entry! {action=GlobalMessage::LogInfo, key_down=Key1},
            entry! {action=GlobalMessage::LogDebug, key_down=Key2},
            entry! {action=GlobalMessage::LogTrace, key_down=Key3},
            entry! {action=DocumentMessage::DuplicateSelectedLayers, key_down=KeyD, modifiers=[KeyControl]},
        ];
        Self { up, down, pointer_move }
    }
}

impl Mapping {
    fn match_message(&self, message: InputMapperMessage, keys: &KeyStates, actions: ActionList) -> Option<Message> {
        use InputMapperMessage::*;
        let list = match message {
            KeyDown(key) => &self.down[key as usize],
            KeyUp(key) => &self.up[key as usize],
            PointerMove => &self.pointer_move,
        };
        list.match_mapping(keys, actions)
    }
}

#[derive(Debug, Default)]
pub struct InputMapper {
    mapping: Mapping,
}

impl MessageHandler<InputMapperMessage, (&InputPreprocessor, ActionList)> for InputMapper {
    fn process_action(&mut self, message: InputMapperMessage, data: (&InputPreprocessor, ActionList), responses: &mut VecDeque<Message>) {
        let (input, actions) = data;
        if let Some(message) = self.mapping.match_message(message, &input.keyboard, actions) {
            responses.push_back(message);
        }
    }
    advertise_actions!();
}
40.258216
135
0.718251
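The modifier check in `match_mapping` above rests on a single bitmask identity: `(pressed & required) ^ required` is zero exactly when every required bit is set, so extra held keys never block a binding. A minimal sketch of the same test with plain `u32` masks (the `SHIFT`/`CTRL`/`ALT` constants are illustrative, not from the crate):

// Each modifier key gets one bit, mirroring how KeyStates tracks keys.
const SHIFT: u32 = 0b001;
const CTRL: u32 = 0b010;
const ALT: u32 = 0b100;

/// True when every bit required by `required` is set in `pressed`.
/// `(pressed & required) ^ required` is zero exactly when no required
/// bit is missing -- the same identity used by `match_mapping`.
fn all_required_pressed(pressed: u32, required: u32) -> bool {
    ((pressed & required) ^ required) == 0
}

fn main() {
    assert!(all_required_pressed(SHIFT | CTRL, SHIFT | CTRL)); // exact match
    assert!(all_required_pressed(SHIFT | CTRL | ALT, CTRL)); // extra keys allowed
    assert!(!all_required_pressed(SHIFT, SHIFT | CTRL)); // Ctrl missing
}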
90c0998868aa036fce51d9296fef9a8cd3505033
2,587
use super::{Context, Module, RootModuleConfig}; use crate::configs::cmake::CMakeConfig; use crate::formatter::StringFormatter; use crate::utils; /// Creates a module with the current CMake version /// /// Will display the CMake version if any of the following criteria are met: /// - The current directory contains a `CMakeLists.txt` file pub fn module<'a>(context: &'a Context) -> Option<Module<'a>> { let is_cmake_project = context .try_begin_scan()? .set_files(&["CMakeLists.txt"]) .is_match(); if !is_cmake_project { return None; } let mut module = context.new_module("cmake"); let config = CMakeConfig::try_load(module.config); let parsed = StringFormatter::new(config.format).and_then(|formatter| { formatter .map_meta(|variable, _| match variable { "symbol" => Some(config.symbol), _ => None, }) .map_style(|variable| match variable { "style" => Some(Ok(config.style)), _ => None, }) .map(|variable| match variable { "version" => utils::exec_cmd("cmake", &["--version"]) .map(|output| format_cmake_version(&output.stdout)) .flatten() .map(Ok), _ => None, }) .parse(None) }); module.set_segments(match parsed { Ok(segments) => segments, Err(error) => { log::warn!("Error in module `cmake`: \n{}", error); return None; } }); Some(module) } fn format_cmake_version(cmake_version: &str) -> Option<String> { let version = cmake_version.split_whitespace().nth(2)?; Some(format!("v{}", version)) } #[cfg(test)] mod tests { use crate::modules::utils::test::render_module; use ansi_term::Color; use std::fs::File; use std::io; #[test] fn folder_without_cmake_lists() -> io::Result<()> { let dir = tempfile::tempdir()?; let actual = render_module("cmake", dir.path(), None); let expected = None; assert_eq!(expected, actual); dir.close() } #[test] fn folder_with_cmake_lists() -> io::Result<()> { let dir = tempfile::tempdir()?; File::create(dir.path().join("CMakeLists.txt"))?.sync_all()?; let actual = render_module("cmake", dir.path(), None); let expected = Some(format!("via {} ", Color::Blue.bold().paint("🛆 v3.17.3"))); assert_eq!(expected, actual); dir.close() } }
30.435294
87
0.555856
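The version extraction above assumes the first line of `cmake --version` output has the shape `cmake version 3.17.3`, so whitespace-splitting the whole output and taking the third token is enough. A standalone sketch of that parse (the sample stdout string is illustrative):

// The first line of `cmake --version` looks like: "cmake version 3.17.3".
// Whitespace-splitting and taking the third token pulls the version out;
// `?` turns a missing token into None instead of a panic.
fn format_cmake_version(cmake_version: &str) -> Option<String> {
    let version = cmake_version.split_whitespace().nth(2)?;
    Some(format!("v{}", version))
}

fn main() {
    let stdout = "cmake version 3.17.3\n\nCMake suite maintained by Kitware";
    assert_eq!(format_cmake_version(stdout), Some("v3.17.3".to_string()));
    // Output without a third token yields None.
    assert_eq!(format_cmake_version("cmake"), None);
}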
8ab21020a298c519bebc8836bf43ca77742bb683
2,315
//! //! Numeric equality of floating point values. //! //! Traits and implementations of _numeric_ equality--that is, //! equality with an "epsilon" or error term--for floating point //! types. //! use float_cmp::ApproxEq; /// /// Numeric equality predicates. /// pub trait NumEq: PartialEq { /// Epsilon type may be an independent type. type Error; /// Are two values numerically equal within `eps`? fn num_eq(&self, other: &Self, _eps: &Self::Error) -> bool { self == other } /// Are two values numerically unequal within `eps`? fn num_ne(&self, other: &Self, eps: &Self::Error) -> bool { !self.num_eq(other, eps) } /// Are two values numerically equal within default epsilon? fn eq(&self, other: &Self) -> bool { self == other } /// Are two values numerically unequal within default epsilon? fn ne(&self, other: &Self) -> bool { self != other } } /// /// Trait implementation macro for floating point types. /// /// A macro used to avoid writing repetitive, boilerplate `NumEq` /// implementations for built-in floating point types. We rely on the /// [`float_cmp`] crate to do the detailed comparisons using epsilon and /// ULPS error terms. /// /// [`float_cmp`]: https://crates.io/crates/float-cmp /// macro_rules! float_num_eq { ($type:ty, $eps:expr, $ulps:expr) => { impl NumEq for $type { /// Error type is the same floating point type. type Error = Self; /// Equality within _eps_ and ULPS error bounds. fn num_eq(&self, other: &Self, eps: &$type) -> bool { self.approx_eq(other, *eps, $ulps) } /// Inequality within _eps_ and ULPS error bounds. fn num_ne(&self, other: &Self, eps: &$type) -> bool { self.approx_ne(other, *eps, $ulps) } /// Equality within default _eps_ and ULPS error bounds. fn eq(&self, other: &Self) -> bool { self.approx_eq(other, $eps, $ulps) } /// Inequality within default _eps_ and ULPS error bounds. fn ne(&self, other: &Self) -> bool { self.approx_ne(other, $eps, $ulps) } } }; } // 32 bit IEEE floating point equality (4 ULPS, 1e-6 epsilon). float_num_eq! {f32, 1e-6, 4} // 64 bit IEEE floating point equality (4 ULPS, 1e-14 epsilon). float_num_eq! {f64, 1e-14, 4}
22.696078
72
0.625054
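The point of the `NumEq` trait above is that exact `==` on floats rejects values that differ only by rounding error. A minimal absolute-epsilon sketch, standing in for `float_cmp`'s richer epsilon-plus-ULPS comparison:

/// Absolute-epsilon comparison: a simplified stand-in for what
/// float_cmp's ApproxEq does (the real crate also checks ULPS).
fn num_eq(a: f64, b: f64, eps: f64) -> bool {
    (a - b).abs() <= eps
}

fn main() {
    let sum = 0.1 + 0.2; // 0.30000000000000004 in IEEE 754 f64
    assert!(sum != 0.3); // exact equality fails
    assert!(num_eq(sum, 0.3, 1e-14)); // numeric equality succeeds
}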
33714c4c822e8f007777ff135750a98d53b21fd2
6,638
//! The `streamer` module defines a set of services for efficiently pulling data from UDP sockets.
//!
use influx_db_client as influxdb;
use metrics;
use packet::{Blob, SharedBlobs, SharedPackets};
use result::{Error, Result};
use std::net::UdpSocket;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{Receiver, RecvTimeoutError, Sender};
use std::sync::Arc;
use std::thread::{Builder, JoinHandle};
use std::time::{Duration, Instant};
use timing::duration_as_ms;

pub type PacketReceiver = Receiver<SharedPackets>;
pub type PacketSender = Sender<SharedPackets>;
pub type BlobSender = Sender<SharedBlobs>;
pub type BlobReceiver = Receiver<SharedBlobs>;

fn recv_loop(
    sock: &UdpSocket,
    exit: &Arc<AtomicBool>,
    channel: &PacketSender,
    channel_tag: &'static str,
) -> Result<()> {
    loop {
        let msgs = SharedPackets::default();
        loop {
            // Check for exit signal, even if socket is busy
            // (for instance the leader transaction socket)
            if exit.load(Ordering::Relaxed) {
                return Ok(());
            }
            if msgs.write().unwrap().recv_from(sock).is_ok() {
                let len = msgs.read().unwrap().packets.len();
                metrics::submit(
                    influxdb::Point::new(channel_tag)
                        .add_field("count", influxdb::Value::Integer(len as i64))
                        .to_owned(),
                );
                channel.send(msgs)?;
                break;
            }
        }
    }
}

pub fn receiver(
    sock: Arc<UdpSocket>,
    exit: Arc<AtomicBool>,
    packet_sender: PacketSender,
    sender_tag: &'static str,
) -> JoinHandle<()> {
    sock.set_read_timeout(Some(Duration::new(1, 0)))
        .expect("streamer::receiver set_read_timeout error");
    Builder::new()
        .name("solana-receiver".to_string())
        .spawn(move || {
            let _ = recv_loop(&sock, &exit, &packet_sender, sender_tag);
        }).unwrap()
}

fn recv_send(sock: &UdpSocket, r: &BlobReceiver) -> Result<()> {
    let timer = Duration::new(1, 0);
    let msgs = r.recv_timeout(timer)?;
    Blob::send_to(sock, msgs)?;
    Ok(())
}

pub fn recv_batch(recvr: &PacketReceiver) -> Result<(Vec<SharedPackets>, usize, u64)> {
    let timer = Duration::new(1, 0);
    let msgs = recvr.recv_timeout(timer)?;
    let recv_start = Instant::now();
    trace!("got msgs");
    let mut len = msgs.read().unwrap().packets.len();
    let mut batch = vec![msgs];
    while let Ok(more) = recvr.try_recv() {
        trace!("got more msgs");
        len += more.read().unwrap().packets.len();
        batch.push(more);
        if len > 100_000 {
            break;
        }
    }
    trace!("batch len {}", batch.len());
    Ok((batch, len, duration_as_ms(&recv_start.elapsed())))
}

pub fn responder(name: &'static str, sock: Arc<UdpSocket>, r: BlobReceiver) -> JoinHandle<()> {
    Builder::new()
        .name(format!("solana-responder-{}", name))
        .spawn(move || loop {
            if let Err(e) = recv_send(&sock, &r) {
                match e {
                    Error::RecvTimeoutError(RecvTimeoutError::Disconnected) => break,
                    Error::RecvTimeoutError(RecvTimeoutError::Timeout) => (),
                    _ => warn!("{} responder error: {:?}", name, e),
                }
            }
        }).unwrap()
}

//TODO, we would need to stick block authentication before we create the
//window.
fn recv_blobs(sock: &UdpSocket, s: &BlobSender) -> Result<()> { trace!("recv_blobs: receiving on {}", sock.local_addr().unwrap()); let dq = Blob::recv_from(sock)?; if !dq.is_empty() { s.send(dq)?; } Ok(()) } pub fn blob_receiver(sock: Arc<UdpSocket>, exit: Arc<AtomicBool>, s: BlobSender) -> JoinHandle<()> { //DOCUMENTED SIDE-EFFECT //1 second timeout on socket read let timer = Duration::new(1, 0); sock.set_read_timeout(Some(timer)) .expect("set socket timeout"); Builder::new() .name("solana-blob_receiver".to_string()) .spawn(move || loop { if exit.load(Ordering::Relaxed) { break; } let _ = recv_blobs(&sock, &s); }).unwrap() } #[cfg(test)] mod test { use packet::{Blob, Packet, Packets, SharedBlob, PACKET_DATA_SIZE}; use std::io; use std::io::Write; use std::net::UdpSocket; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::channel; use std::sync::Arc; use std::time::Duration; use streamer::PacketReceiver; use streamer::{receiver, responder}; fn get_msgs(r: PacketReceiver, num: &mut usize) { for _t in 0..5 { let timer = Duration::new(1, 0); match r.recv_timeout(timer) { Ok(m) => *num += m.read().unwrap().packets.len(), _ => info!("get_msgs error"), } if *num == 10 { break; } } } #[test] pub fn streamer_debug() { write!(io::sink(), "{:?}", Packet::default()).unwrap(); write!(io::sink(), "{:?}", Packets::default()).unwrap(); write!(io::sink(), "{:?}", Blob::default()).unwrap(); } #[test] pub fn streamer_send_test() { let read = UdpSocket::bind("127.0.0.1:0").expect("bind"); read.set_read_timeout(Some(Duration::new(1, 0))).unwrap(); let addr = read.local_addr().unwrap(); let send = UdpSocket::bind("127.0.0.1:0").expect("bind"); let exit = Arc::new(AtomicBool::new(false)); let (s_reader, r_reader) = channel(); let t_receiver = receiver(Arc::new(read), exit.clone(), s_reader, "streamer-test"); let t_responder = { let (s_responder, r_responder) = channel(); let t_responder = responder("streamer_send_test", Arc::new(send), r_responder); let mut msgs = Vec::new(); for i in 0..10 { let mut b = SharedBlob::default(); { let mut w = b.write().unwrap(); w.data[0] = i as u8; w.meta.size = PACKET_DATA_SIZE; w.meta.set_addr(&addr); } msgs.push(b); } s_responder.send(msgs).expect("send"); t_responder }; let mut num = 0; get_msgs(r_reader, &mut num); assert_eq!(num, 10); exit.store(true, Ordering::Relaxed); t_receiver.join().expect("join"); t_responder.join().expect("join"); } }
33.024876
100
0.547906
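`recv_batch` above uses a common coalescing shape: block on the first item, then opportunistically drain whatever is already queued with `try_recv`, bounded by a size cap. The same shape with `std::sync::mpsc` and plain byte buffers (the cap and payloads here are illustrative):

use std::sync::mpsc::{channel, Receiver, RecvTimeoutError};
use std::thread;
use std::time::Duration;

// Block for the first item, then drain whatever else is already queued
// with try_recv, up to a size cap -- the same shape as recv_batch.
fn recv_batch(rx: &Receiver<Vec<u8>>) -> Result<(Vec<Vec<u8>>, usize), RecvTimeoutError> {
    let first = rx.recv_timeout(Duration::new(1, 0))?;
    let mut len = first.len();
    let mut batch = vec![first];
    while let Ok(more) = rx.try_recv() {
        len += more.len();
        batch.push(more);
        if len > 100_000 {
            break;
        }
    }
    Ok((batch, len))
}

fn main() {
    let (tx, rx) = channel();
    thread::spawn(move || {
        for i in 0..5u8 {
            tx.send(vec![i; 10]).unwrap();
        }
    });
    thread::sleep(Duration::from_millis(50)); // let the sender queue up
    let (batch, len) = recv_batch(&rx).unwrap();
    assert_eq!(len, 50);
    assert_eq!(batch.iter().map(Vec::len).sum::<usize>(), len);
}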
ffb2ac75486e49de188ef84f3e394ba77f641dc4
16
pub mod codecs;
8
15
0.75
91871ad5203a76b768aa46aa3227054df5fb5b31
3,044
use cpsnarks_set::transcript::{TranscriptProtocolChallenge, TranscriptProtocolInteger};
use cpsnarks_set::utils::ConvertibleUnknownOrderGroup;
use rug::Integer;

use merlin::Transcript;
use serde::Serialize;

const TRANSCRIPT_SEP: &[u8] = b"ZK_POK_EXP";
const STAT_SECURITY: usize = 128;

pub trait ProofOfExp<G: ConvertibleUnknownOrderGroup>: Serialize {
    fn new<I: Iterator<Item = Integer>>(base: &G::Elem, keys: I) -> Self;
    fn verify<I: Iterator<Item = Integer>>(&self, base: &G::Elem, keys: I) -> Option<G::Elem>;
}

#[derive(Debug, Serialize)]
pub struct TrivialProof();

impl<G: ConvertibleUnknownOrderGroup> ProofOfExp<G> for TrivialProof {
    fn new<I: Iterator<Item = Integer>>(_base: &G::Elem, _keys: I) -> Self {
        TrivialProof()
    }

    fn verify<I: Iterator<Item = Integer>>(&self, base: &G::Elem, keys: I) -> Option<G::Elem> {
        let mut res = base.clone();
        for key in keys {
            res = G::exp(&res, &key);
        }
        Some(res)
    }
}

/// This proof is just a proof-of-knowledge (not zero-knowledge)
#[derive(Debug, Serialize)]
pub struct Proof<G: ConvertibleUnknownOrderGroup> {
    Q: G::Elem,
    r: Integer,
    p: Integer,
}

impl<G: ConvertibleUnknownOrderGroup> ProofOfExp<G> for Proof<G> {
    fn new<I: Iterator<Item = Integer>>(base: &G::Elem, keys: I) -> Self {
        // compute exponent
        let mut exp = Integer::from(1);

        // commit to statement
        let mut transcript = Transcript::new(TRANSCRIPT_SEP);
        TranscriptProtocolInteger::<G>::append_integer_point(&mut transcript, b"base", base);
        for key in keys {
            exp = exp * &key;
            TranscriptProtocolInteger::<G>::append_integer_scalar(&mut transcript, b"keys", &key);
        }

        let p = TranscriptProtocolChallenge::challenge_scalar(
            &mut transcript,
            b"prime",
            STAT_SECURITY as u16,
        )
        .next_prime();

        let (q, r) = exp.div_rem(p.clone());
        Self {
            Q: G::exp(base, &q),
            r,
            p,
        }
    }

    fn verify<I: Iterator<Item = Integer>>(&self, base: &G::Elem, keys: I) -> Option<G::Elem> {
        // commit to statement
        let mut rem = Integer::from(1);
        let mut transcript = Transcript::new(TRANSCRIPT_SEP);
        TranscriptProtocolInteger::<G>::append_integer_point(&mut transcript, b"base", base);
        for key in keys {
            rem = rem * &key;
            let (_q, r) = rem.div_rem(self.p.clone());
            rem = r;
            TranscriptProtocolInteger::<G>::append_integer_scalar(&mut transcript, b"keys", &key);
        }

        // check consistency with prime
        let p = TranscriptProtocolChallenge::challenge_scalar(
            &mut transcript,
            b"prime",
            STAT_SECURITY as u16,
        )
        .next_prime();
        if self.p != p {
            return None;
        }

        let Qp = G::exp(&self.Q, &p);
        let Br = G::exp(base, &rem);
        Some(G::op(&Qp, &Br))
    }
}
31.061224
98
0.582457
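`Proof::new` above leans on the identity behind PoKE-style proofs: writing `exp = q*p + r` gives `base^exp = (base^q)^p * base^r`, so the verifier only ever exponentiates by the small `p` and `r`. A toy check of that identity over `Z_m` with `u64` arithmetic (the modulus and challenge prime are illustrative, not the real group of unknown order):

/// Modular exponentiation by squaring.
fn pow_mod(mut base: u64, mut exp: u64, m: u64) -> u64 {
    let mut acc = 1u64;
    base %= m;
    while exp > 0 {
        if exp & 1 == 1 {
            acc = acc * base % m;
        }
        base = base * base % m;
        exp >>= 1;
    }
    acc
}

fn main() {
    // exp = q*p + r implies base^exp = (base^q)^p * base^r,
    // here checked in a toy group Z_m.
    let (m, base, exp) = (1_000_003u64, 5u64, 123_456u64);
    let p = 97u64; // stands in for the Fiat-Shamir prime challenge
    let (q, r) = (exp / p, exp % p);
    let lhs = pow_mod(base, exp, m);
    let big_q = pow_mod(base, q, m); // the prover's Q = base^q
    let rhs = pow_mod(big_q, p, m) * pow_mod(base, r, m) % m;
    assert_eq!(lhs, rhs);
}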
50e19efe16b641749644a0e4fc3c75e592d38b07
1,891
use crate::prelude::*; pub struct RegradingSubmission { pub id: Uuid, pub submission_id: Uuid, pub grading_before_regrading: Uuid, pub grading_after_regrading: Option<Uuid>, } pub async fn insert( conn: &mut PgConnection, regrading_id: Uuid, submission_id: Uuid, grading_before_regrading_id: Uuid, ) -> ModelResult<Uuid> { let res = sqlx::query!( " INSERT INTO regrading_submissions ( regrading_id, submission_id, grading_before_regrading ) VALUES ($1, $2, $3) RETURNING id ", regrading_id, submission_id, grading_before_regrading_id ) .fetch_one(conn) .await?; Ok(res.id) } pub async fn get_regrading_submission( conn: &mut PgConnection, regrading_submission_id: Uuid, ) -> ModelResult<RegradingSubmission> { let res = sqlx::query_as!( RegradingSubmission, " SELECT id, submission_id, grading_before_regrading, grading_after_regrading FROM regrading_submissions WHERE id = $1 ", regrading_submission_id ) .fetch_one(conn) .await?; Ok(res) } pub async fn get_regrading_submissions( conn: &mut PgConnection, regrading_id: Uuid, ) -> ModelResult<Vec<RegradingSubmission>> { let res = sqlx::query_as!( RegradingSubmission, " SELECT id, submission_id, grading_before_regrading, grading_after_regrading FROM regrading_submissions WHERE regrading_id = $1 ", regrading_id ) .fetch_all(conn) .await?; Ok(res) } pub async fn set_grading_after_regrading( conn: &mut PgConnection, regrading_submission_id: Uuid, new_grading_id: Uuid, ) -> ModelResult<()> { sqlx::query!( " UPDATE regrading_submissions SET grading_after_regrading = $1 WHERE id = $2 ", new_grading_id, regrading_submission_id ) .execute(conn) .await?; Ok(()) }
19.905263
46
0.667901
3397f9763c2b7bccc3ef4c6a01f2b7f0bdef6aca
5,382
// (C) Copyright 2019 Hewlett Packard Enterprise Development LP

use std::cell::RefCell;
use std::rc::{Rc, Weak};
use std::sync::Arc;

use crate::config::Config;
use crate::filter::Filter;
use crate::renderer::types::*;

use super::log::LogState;
use super::bar::BarState;
use super::filter_bar::FilterBarState;
use super::search_bar::SearchBarState;

pub struct FilteredEntry {
    pub index: usize,
    pub entry: Weak<MessageEntry>,
}

/// shared state between all components
/// this struct is semi-immutable: each action should return a new clone, but
/// certain fields (entries and filtered_entries) are shared as cloning would
/// be very expensive
/// for perf reasons, this should generally be passed around inside an Rc
/// (see RcState)
#[derive(Clone)]
pub struct RenderState {
    pub config: Arc<Config>,

    pub width: u16,
    pub height: u16,

    /// A list of all parsed entries read from the input.
    ///
    /// This list may be quite large and is shared between otherwise immutable
    /// RenderState instances for performance.
    pub entries: Rc<RefCell<Vec<Rc<MessageEntry>>>>,

    /// A list of filters used to generate `filtered_entries` from `entries`
    pub filters: Rc<RefCell<Vec<Box<dyn Filter>>>>,

    /// A Vec of entries filtered from the main list.
    ///
    /// This list contains the subset of entries requested by the user
    pub filtered_entries: Rc<RefCell<Vec<FilteredEntry>>>,

    /// A cached temporary filter representing the user's current filter input,
    /// if it exists and is valid.
    ///
    /// in a refcell because we can't clone all filter types :/
    pub highlight_filter: Option<Rc<Box<dyn Filter>>>,

    /// If true, input EOF has been reached
    pub eof: bool,

    pub log: LogState,
    pub bar: BarState,
    pub filter: FilterBarState,
    pub search: SearchBarState
}

/// A RenderState wrapped in an Rc for perf reasons
pub type RcState = Rc<RenderState>;

// TODO should RenderState be passed to actions in a Cow?
// then we can skip cloning when nothing is changed, or if we only write to
// one of the refcell fields

impl RenderState {
    pub fn new(config: Arc<Config>) -> Self {
        RenderState {
            config,
            width: 0,
            height: 0,
            entries: Rc::new(RefCell::new(Vec::new())),
            filters: Rc::new(RefCell::new(Vec::new())),
            filtered_entries: Rc::new(RefCell::new(Vec::new())),
            highlight_filter: None,
            eof: false,
            log: LogState::new(),
            bar: BarState::new(),
            filter: FilterBarState::new(),
            search: SearchBarState::new()
        }
    }
}

pub fn filter_pass(state: RcState, entry: &MessageEntry) -> bool {
    let filters = state.filters.borrow();
    if filters.is_empty() {
        return true;
    }

    for filter in filters.iter() {
        if !filter.filter(&entry.message) {
            return false;
        }
    }

    true
}

pub mod actions {
    use super::*;

    pub fn add_filter(mut state: RcState, filter: Box<dyn Filter>) -> RcState {
        let state_mut = Rc::make_mut(&mut state);
        state_mut.filters.borrow_mut().push(filter);

        // TODO: figure out how to keep the selection while adjusting filters
        state_mut.log.selection = None;

        *state.filtered_entries.borrow_mut() = state.entries.borrow().iter()
            .enumerate()
            .filter(|(_, e)| filter_pass(Rc::clone(&state), e))
            .map(|(i, e)| FilteredEntry {
                index: i,
                entry: Rc::downgrade(e)
            })
            .collect();

        state
    }

    pub fn pop_filter(mut state: RcState) -> RcState {
        let state_mut = Rc::make_mut(&mut state);
        state_mut.log.selection = None;

        state.filters.borrow_mut().pop();

        // filter_pass already returns true when the filter list is empty, so
        // the same rebuild works whether or not any filters remain
        let new_filtered = state.entries.borrow().iter()
            .enumerate()
            .filter(|(_, e)| filter_pass(Rc::clone(&state), e))
            .map(|(i, e)| FilteredEntry {
                index: i,
                entry: Rc::downgrade(e)
            })
            .collect();

        *state.filtered_entries.borrow_mut() = new_filtered;

        state
    }

    /// updates the temp filter based on user input
    pub fn set_highlight_filter(
        mut state: RcState,
        filter: Option<Rc<Box<dyn Filter>>>
    ) -> RcState {
        let state_mut = Rc::make_mut(&mut state);
        state_mut.highlight_filter = filter;
        state
    }

    pub fn add_entry(state: RcState, entry: MessageEntry) -> RcState {
        {
            // this mut borrow needs to be dropped so we can return state
            let mut entries = state.entries.borrow_mut();

            if filter_pass(Rc::clone(&state), &entry) {
                entries.push(Rc::new(entry));

                state.filtered_entries.borrow_mut().push(FilteredEntry {
                    index: entries.len() - 1,
                    entry: Rc::downgrade(&entries[entries.len() - 1]),
                });
            } else {
                entries.push(Rc::new(entry));
            }
        }

        state
    }

    pub fn internal(state: RcState, text: &str) -> RcState {
        add_entry(state, MessageEntry::internal(text))
    }

    pub fn set_eof(mut state: RcState, eof: bool) -> RcState {
        let state_mut = Rc::make_mut(&mut state);
        state_mut.eof = eof;
        state
    }
}
26
77
0.636195
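The action functions above all follow one copy-on-write shape: `Rc::make_mut` clones the state only when another owner still holds the old snapshot, then the plain field is mutated in place. A self-contained sketch of that pattern (the `State` struct is illustrative):

use std::rc::Rc;

#[derive(Clone, Debug)]
struct State {
    eof: bool,
    width: u16,
}

// Take an Rc, clone-on-write only when the state is still shared, and
// hand the (possibly new) Rc back -- the same shape as the actions above.
fn set_eof(mut state: Rc<State>, eof: bool) -> Rc<State> {
    let state_mut = Rc::make_mut(&mut state); // clones only if shared
    state_mut.eof = eof;
    state
}

fn main() {
    let original = Rc::new(State { eof: false, width: 80 });
    let kept = Rc::clone(&original); // a second owner forces a real clone
    let updated = set_eof(original, true);
    assert!(updated.eof);
    assert!(!kept.eof); // the old snapshot is untouched
}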
188680ddd4b27c64b0d1cbcd334aa69c0be67072
2,120
mod utils;

fn main() {
    let contents = utils::load_input("inputs/day2.txt");
    let checksum = checksum(&contents);
    println!("{}", checksum);
}

fn count(box_id: &str) -> (bool, bool) {
    let mut found_2 = false;
    let mut found_3 = false;

    let mut chars: Vec<char> = box_id.chars().collect();
    chars.sort();

    let mut iter = chars.iter();
    let mut count = 1;
    let mut last_char = iter.next().unwrap();
    for character in iter {
        if last_char != character {
            // A run just ended; record its length before starting a new one.
            // (Checking only when the length is exactly 2 or 3 would miss
            // longer runs and leave `count` unreset.)
            if count == 2 {
                found_2 = true;
            } else if count == 3 {
                found_3 = true;
            }
            count = 1;
        } else {
            count += 1;
        }
        last_char = character;
    }

    if count == 2 {
        found_2 = true;
    } else if count == 3 {
        found_3 = true;
    }

    (found_2, found_3)
}

fn checksum(box_ids: &str) -> usize {
    let mut twos = 0;
    let mut threes = 0;
    for box_id in box_ids.lines() {
        let (two, three) = count(box_id);
        if two {
            twos += 1;
        }
        if three {
            threes += 1;
        }
    }
    println!("twos: {}, threes: {}", twos, threes);
    twos * threes
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn counts_no_doubles() {
        assert_eq!((false, false), count("abcdef"));
    }

    #[test]
    fn counts_one_double() {
        assert_eq!((true, false), count("abbcde"));
        assert_eq!((true, false), count("abcdee"));
    }

    #[test]
    fn counts_one_triple() {
        assert_eq!((false, true), count("abcccd"));
        assert_eq!((false, true), count("abcddd"));
    }

    #[test]
    fn counts_one_double_and_one_triple() {
        assert_eq!((true, true), count("bababc"));
    }

    #[test]
    fn counts_two_doubles_once() {
        assert_eq!((true, false), count("aabcdd"));
    }

    #[test]
    fn correct_checksum() {
        let box_ids = vec!["abcdef", "bababc", "abbcde", "abcccd", "aabcdd", "abcdee", "ababab"].join("\n");
        assert_eq!(12, checksum(&box_ids));
    }
}
23.043478
108
0.517453
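A frequency map is a simpler alternative to the sort-and-scan in `count` above, and it avoids the run-length bookkeeping that made the original easy to get wrong. A sketch:

use std::collections::HashMap;

// Count each character once, then ask whether any letter appears
// exactly two or exactly three times.
fn count(box_id: &str) -> (bool, bool) {
    let mut freq: HashMap<char, usize> = HashMap::new();
    for c in box_id.chars() {
        *freq.entry(c).or_insert(0) += 1;
    }
    let found_2 = freq.values().any(|&n| n == 2);
    let found_3 = freq.values().any(|&n| n == 3);
    (found_2, found_3)
}

fn main() {
    assert_eq!(count("bababc"), (true, true));
    assert_eq!(count("aabcdd"), (true, false));
    assert_eq!(count("aaaabb"), (true, false)); // the case a naive run scan can miss
}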
6a025c87c3f9a656bb0c5033c4b9e78af625572b
10,714
#[derive(Clone, Copy, Debug)] pub struct Elevator { pub pak_name: &'static str, pub name: &'static str, pub mlvl: u32, pub mrea: u32, pub mrea_idx: u32, pub scly_id: u32, pub room_id: u32, pub room_strg: u32, pub hologram_strg: u32, pub control_strg: u32, pub default_dest: u8, } impl Elevator { pub fn end_game_elevator() -> Elevator { Elevator { pak_name: "Metroid8.pak", name: "End of Game", mlvl: 0x13d79165, mrea: 0xb4b41c48, mrea_idx: 0, scly_id: 0xFFFFFFFF, room_id: 0, room_strg: 0xFFFFFFFF, hologram_strg: 0xFFFFFFFF, control_strg: 0xFFFFFFFF, default_dest: 0xFF, } } } #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct SpawnRoom { pub pak_name: &'static str, pub mlvl: u32, pub mrea: u32, pub mrea_idx: u32, pub room_id: u32 } impl SpawnRoom { pub fn from_room_idx(idx: usize) -> SpawnRoom { if idx == 20 { SpawnRoom::landing_site_spawn_room() } else { let elv = &ELEVATORS[idx]; SpawnRoom { pak_name: elv.pak_name, mlvl: elv.mlvl, mrea: elv.mrea, mrea_idx: elv.mrea_idx, room_id: elv.room_id, } } } pub fn landing_site_spawn_room() -> SpawnRoom { SpawnRoom { pak_name: "Metroid4.pak", mlvl: 0x39f2de28, mrea: 0xb2701146, mrea_idx: 0, room_id: 0x8FF17910, } } pub fn frigate_spawn_room() -> SpawnRoom { SpawnRoom { pak_name: "Metroid1.pak", mlvl: 0x158EFE17, mrea: 0xD1241219, mrea_idx: 0, room_id: 0, // Not referenced and also not needed } } } pub const ELEVATORS: &[Elevator] = &[ Elevator { pak_name: "Metroid2.pak", name: "Chozo Ruins West\0(Main Plaza)",// "Transport to Tallon Overworld North", mlvl: 0x83f6ff6f, mrea: 0x3e6b2bb7, mrea_idx: 0, scly_id: 0x007d, room_id: 0xDBED08BA, room_strg: 0xF747143D, hologram_strg: 0xD3F29D19, control_strg: 0x3C6FF426, default_dest: 6, }, Elevator { pak_name: "Metroid2.pak", name: "Chozo Ruins North\0(Sun Tower)",// "Transport to Magmoor Caverns North", mlvl: 0x83f6ff6f, mrea: 0x8316edf5, mrea_idx: 24, scly_id: 0x180027, room_id: 0x372F1027, room_strg: 0x71D36693, hologram_strg: 0xB4B44968, control_strg: 0xC610DFE6, default_dest: 14, }, Elevator { pak_name: "Metroid2.pak", name: "Chozo Ruins East\0(Reflecting Pool, Save Station)",// "Transport to Tallon Overworld East", mlvl: 0x83f6ff6f, mrea: 0xa5fa69a1, mrea_idx: 62, scly_id: 0x3e002c, room_id: 0xC705A398, room_strg: 0x1CE1DDBC, hologram_strg: 0x598EF87A, control_strg: 0xFCD69EB0, default_dest: 8, }, Elevator { pak_name: "Metroid2.pak", name: "Chozo Ruins South\0(Reflecting Pool, Far End)",// "Transport to Tallon Overworld South", mlvl: 0x83f6ff6f, mrea: 0x236e1b0f, mrea_idx: 63, scly_id: 0x3f0028, room_id: 0x23F35FE1, room_strg: 0x9A75AF12, hologram_strg: 0x48F39203, control_strg: 0x411CF27E, default_dest: 10, }, Elevator { pak_name: "Metroid3.pak", name: "Phendrana Drifts North\0(Phendrana Shorelines)",// "Transport to Magmoor Caverns West", mlvl: 0xa8be6291, mrea: 0xc00e3781, mrea_idx: 0, scly_id: 0x002d, room_id: 0xB2E861AC, room_strg: 0xF7D14F4D, hologram_strg: 0x38F9BAC5, control_strg: 0x2DDB22E1, default_dest: 15, }, Elevator { pak_name: "Metroid3.pak", name: "Phendrana Drifts South\0(Quarantine Cave)",// "Transport to Magmoor Caverns South", mlvl: 0xa8be6291, mrea: 0xdd0b0739, mrea_idx: 29, scly_id: 0x1d005a, room_id: 0x31D08ACB, room_strg: 0xEAD47FF5, hologram_strg: 0x0CEE0B66, control_strg: 0x993CEFE8, default_dest: 18, }, Elevator { pak_name: "Metroid4.pak", name: "Tallon Overworld North\0(Tallon Canyon)",// "Transport to Chozo Ruins West", mlvl: 0x39f2de28, mrea: 0x11a02448, mrea_idx: 14, scly_id: 0xe0005, room_id: 0x6FD3B9AB, room_strg: 0x9EE2172A, hologram_strg: 0x04685AE9, control_strg: 
0x73A833EB, default_dest: 0, }, // XXX Two? /* Elevator { pak_name: "Metroid4.pak", mlvl: 0x39f2de28, mrea: 0x2398e906, mrea_idx: 0, scly_id: 0x1002d1, // Artifact Temple room_id: 0xCD2B0EA2, room_strg: 0xFFFFFFFF, hologram_strg: 0x00000000, control_strg: 0xFFFFFFFF, }, */ Elevator { pak_name: "Metroid4.pak", name: "Artifact Temple", mlvl: 0x39f2de28, mrea: 0x2398e906, mrea_idx: 16, scly_id: 0x1002da, room_id: 0xCD2B0EA2, room_strg: 0xFFFFFFFF, hologram_strg: 0xFFFFFFFF, control_strg: 0xFFFFFFFF, default_dest: 19, }, Elevator { pak_name: "Metroid4.pak", name: "Tallon Overworld East\0(Frigate Crash Site)",// "Transport to Chozo Ruins East", mlvl: 0x39f2de28, mrea: 0x8a31665e, mrea_idx: 22, scly_id: 0x160038, room_id: 0xB0C789B5, room_strg: 0x0573553C, hologram_strg: 0x55A27CA9, control_strg: 0x51DCA8D9, default_dest: 2, }, Elevator { pak_name: "Metroid4.pak", name: "Tallon Overworld West\0(Root Cave)",// "Transport to Magmoor Caverns East", mlvl: 0x39f2de28, mrea: 0x15d6ff8b, mrea_idx: 23, scly_id: 0x170032, room_id: 0x6D105C48, room_strg: 0xF92C2264, hologram_strg: 0xD658ADBD, control_strg: 0x8EA61E34, default_dest: 16, }, Elevator { pak_name: "Metroid4.pak", name: "Tallon Overworld South\0(Great Tree Hall, Upper)",// "Transport to Chozo Ruins South", mlvl: 0x39f2de28, mrea: 0xca514f0, mrea_idx: 41, scly_id: 0x290024, room_id: 0x5301E9D, room_strg: 0x630EA5FC, hologram_strg: 0xCC401AA8, control_strg: 0xEC16C417, default_dest: 3, }, Elevator { pak_name: "Metroid4.pak", name: "Tallon Overworld South\0(Great Tree Hall, Lower)",// "Transport to Phazon Mines East", mlvl: 0x39f2de28, mrea: 0x7d106670, mrea_idx: 43, scly_id: 0x2b0023, room_id: 0xBC2A964C, room_strg: 0xF2525512, hologram_strg: 0x4921B661, control_strg: 0x294EC2B2, default_dest: 12, }, Elevator { pak_name: "metroid5.pak", name: "Phazon Mines East\0(Main Quarry)",// "Transport to Tallon Overworld South", mlvl: 0xb1ac4d65, mrea: 0x430e999c, mrea_idx: 0, scly_id: 0x001c, room_id: 0x2AC6EC36, room_strg: 0x8D7B16B4, hologram_strg: 0xB60F6ADF, control_strg: 0xA00EF446, default_dest: 11, }, Elevator { pak_name: "metroid5.pak", name: "Phazon Mines West\0(Phazon Processing Center)",// "Transport to Magmoor Caverns South", mlvl: 0xb1ac4d65, mrea: 0xe2c2cf38, mrea_idx: 25, scly_id: 0x190011, room_id: 0x91C144BF, room_strg: 0x47C4108D, hologram_strg: 0xDFD2AE6D, control_strg: 0x1D8BB16C, default_dest: 17, }, Elevator { pak_name: "Metroid6.pak", name: "Magmoor Caverns North\0(Lava Lake)",// "Transport to Chozo Ruins North", mlvl: 0x3ef8237c, mrea: 0x3beaadc9, mrea_idx: 0, scly_id: 0x001f, room_id: 0x7DC0D75B, room_strg: 0x1BEFC19B, hologram_strg: 0x8EA3FD98, control_strg: 0x0D3EC7DC, default_dest: 1, }, Elevator { pak_name: "Metroid6.pak", name: "Magmoor Caverns West\0(Monitor Station)",// "Transport to Phendrana Drifts North", mlvl: 0x3ef8237c, mrea: 0xdca9a28b, mrea_idx: 13, scly_id: 0xd0022, room_id: 0x4318F156, room_strg: 0xE0E1C4DA, hologram_strg: 0x4F2D2258, control_strg: 0xD0A81E59, default_dest: 4, }, Elevator { pak_name: "Metroid6.pak", name: "Magmoor Caverns East\0(Twin Fires)",// "Transport to Tallon Overworld West", mlvl: 0x3ef8237c, mrea: 0x4c3d244c, mrea_idx: 16, scly_id: 0x100020, room_id: 0xB3128CF6, room_strg: 0xBD4E14B9, hologram_strg: 0x58DA42EA, control_strg: 0x4BE9A4CC, default_dest: 9, }, Elevator { pak_name: "Metroid6.pak", name: "Magmoor Caverns South\0(Magmoor Workstation, Debris)",// "Transport to Phazon Mines West", mlvl: 0x3ef8237c, mrea: 0xef2f1440, mrea_idx: 26, scly_id: 0x1a0024, room_id: 0x921FFEDB, room_strg: 
0xFF5F6594, hologram_strg: 0x28E3D615, control_strg: 0x2FAF7EDA, default_dest: 13, }, Elevator { pak_name: "Metroid6.pak", name: "Magmoor Caverns South\0(Magmoor Workstation, Save Station)",// "Transport to Phendrana Drifts South", mlvl: 0x3ef8237c, mrea: 0xc1ac9233, mrea_idx: 27, scly_id: 0x1b0028, room_id: 0xC0201A31, room_strg: 0x66DEBE97, hologram_strg: 0x61805AFF, control_strg: 0x6F30E3D4, default_dest: 5, }, Elevator { pak_name: "Metroid7.pak", name: "Crater Entry Point", mlvl: 0xc13b09d1, mrea: 0x93668996, mrea_idx: 0, scly_id: 0x0098, room_id: 0x2B878F78, room_strg: 0xFFFFFFFF, hologram_strg: 0xFFFFFFFF, control_strg: 0xFFFFFFFF, default_dest: 7, }, /* Elevator { pak_name: "Metroid7.pak", mlvl: 0xc13b09d1, mrea: 0x1a666c55, mrea_idx: 0, scly_id: 0xb0182,// Metroid Prime Lair room_id: 0xE420D94B, room_strg: 0xFFFFFFFF, hologram_strg: 0x00000000, control_strg: 0xFFFFFFFF, }, */ ];
25.03271
116
0.568228
e9e78031bd6c9a985fa5a4d85e87084a03a6aacd
3,915
// Copyright 2016 Joe Wilm, The Alacritty Project Contributors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use ansi::{NamedColor, Color}; use grid; use index::Column; bitflags! { pub struct Flags: u32 { const INVERSE = 0b0000_0001; const BOLD = 0b0000_0010; const ITALIC = 0b0000_0100; const UNDERLINE = 0b0000_1000; const WRAPLINE = 0b0001_0000; const WIDE_CHAR = 0b0010_0000; const WIDE_CHAR_SPACER = 0b0100_0000; const DIM = 0b1000_0000; const DIM_BOLD = 0b1000_0010; } } #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub struct Cell { pub c: char, pub fg: Color, pub bg: Color, pub flags: Flags, } impl Default for Cell { fn default() -> Cell { Cell::new( ' ', Color::Named(NamedColor::Foreground), Color::Named(NamedColor::Background) ) } } /// Get the length of occupied cells in a line pub trait LineLength { /// Calculate the occupied line length fn line_length(&self) -> Column; } impl LineLength for grid::Row<Cell> { fn line_length(&self) -> Column { let mut length = Column(0); if self[Column(self.len() - 1)].flags.contains(Flags::WRAPLINE) { return Column(self.len()); } for (index, cell) in self[..].iter().rev().enumerate() { if cell.c != ' ' { length = Column(self.len() - index); break; } } length } } impl Cell { #[inline] pub fn bold(&self) -> bool { self.flags.contains(Flags::BOLD) } #[inline] pub fn inverse(&self) -> bool { self.flags.contains(Flags::INVERSE) } #[inline] pub fn dim(&self) -> bool { self.flags.contains(Flags::DIM) } pub fn new(c: char, fg: Color, bg: Color) -> Cell { Cell { c, bg, fg, flags: Flags::empty(), } } #[inline] pub fn is_empty(&self) -> bool { self.c == ' ' && self.bg == Color::Named(NamedColor::Background) && !self.flags.intersects(Flags::INVERSE | Flags::UNDERLINE) } #[inline] pub fn reset(&mut self, template: &Cell) { // memcpy template to self *self = *template; } } #[cfg(test)] mod tests { use super::{Cell, LineLength}; use grid::Row; use index::Column; #[test] fn line_length_works() { let template = Cell::default(); let mut row = Row::new(Column(10), &template); row[Column(5)].c = 'a'; assert_eq!(row.line_length(), Column(6)); } #[test] fn line_length_works_with_wrapline() { let template = Cell::default(); let mut row = Row::new(Column(10), &template); row[Column(9)].flags.insert(super::Flags::WRAPLINE); assert_eq!(row.line_length(), Column(10)); } } #[cfg(all(test, feature = "bench"))] mod benches { extern crate test; use super::Cell; #[bench] fn cell_reset(b: &mut test::Bencher) { b.iter(|| { let mut cell = Cell::default(); for _ in 0..100 { cell.reset(test::black_box(&Cell::default())); } test::black_box(cell); }); } }
24.622642
75
0.551213
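`line_length` above scans the row from the end and stops at the first occupied cell (after short-circuiting when the last cell carries WRAPLINE). The same trailing-scan idea reduced to a plain slice of chars:

// Walk from the end of the row and stop at the first non-blank cell;
// trailing blanks therefore never count toward the occupied length.
fn line_length(row: &[char]) -> usize {
    for (index, c) in row.iter().rev().enumerate() {
        if *c != ' ' {
            return row.len() - index;
        }
    }
    0
}

fn main() {
    let row: Vec<char> = "hello     ".chars().collect();
    assert_eq!(row.len(), 10);
    assert_eq!(line_length(&row), 5); // trailing blanks don't count
    assert_eq!(line_length(&[' '; 4]), 0);
}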
393eeddd877ad7ddf2aaf6ce6750513c0d6a4ae0
2,851
#![allow(unused_imports)]

use core::intrinsics;

// NOTE These functions are implemented using assembly because they use a custom
// calling convention which can't be implemented using a normal Rust function

// NOTE These functions are never mangled as they are not tested against compiler-rt
// and mangling ___chkstk would break the `jmp ___chkstk` instruction in __alloca

intrinsics! {
    #[naked]
    #[cfg(all(
        windows,
        target_env = "gnu",
        not(feature = "no-asm")
    ))]
    pub unsafe extern "C" fn ___chkstk_ms() {
        core::arch::asm!(
            "push %rcx",
            "push %rax",
            "cmp $0x1000,%rax",
            "lea 24(%rsp),%rcx",
            "jb 1f",
            "2:",
            "sub $0x1000,%rcx",
            "test %rcx,(%rcx)",
            "sub $0x1000,%rax",
            "cmp $0x1000,%rax",
            "ja 2b",
            "1:",
            "sub %rax,%rcx",
            "test %rcx,(%rcx)",
            "pop %rax",
            "pop %rcx",
            "ret",
            options(noreturn, att_syntax)
        );
    }

    #[naked]
    #[cfg(all(
        windows,
        target_env = "gnu",
        not(feature = "no-asm")
    ))]
    pub unsafe extern "C" fn __alloca() {
        core::arch::asm!(
            "mov %rcx,%rax", // x64 _alloca is a normal function with parameter in rcx
            "jmp ___chkstk", // Jump to ___chkstk since fallthrough may be unreliable
            options(noreturn, att_syntax)
        );
    }

    #[naked]
    #[cfg(all(
        windows,
        target_env = "gnu",
        not(feature = "no-asm")
    ))]
    pub unsafe extern "C" fn ___chkstk() {
        core::arch::asm!(
            "push %rcx",
            "cmp $0x1000,%rax",
            "lea 16(%rsp),%rcx", // rsp before calling this routine -> rcx
            "jb 1f",
            "2:",
            "sub $0x1000,%rcx",
            "test %rcx,(%rcx)",
            "sub $0x1000,%rax",
            "cmp $0x1000,%rax",
            "ja 2b",
            "1:",
            "sub %rax,%rcx",
            "test %rcx,(%rcx)",
            "lea 8(%rsp),%rax", // load pointer to the return address into rax
            "mov %rcx,%rsp", // install the new top of stack pointer into rsp
            "mov -8(%rax),%rcx", // restore rcx
            "push (%rax)", // push return address onto the stack
            "sub %rsp,%rax", // restore the original value in rax
            "ret",
            options(noreturn, att_syntax)
        );
    }
}

// HACK(https://github.com/rust-lang/rust/issues/62785): x86_64-unknown-uefi needs special LLVM
// support unless we emit the _fltused
mod _fltused {
    #[no_mangle]
    #[used]
    #[cfg(target_os = "uefi")]
    static _fltused: i32 = 0;
}
30.010526
95
0.478429
bfef6b7e81fbd8abef907c84a524f617cca24e47
3,509
use std::ops::Deref; use std::net::IpAddr; use db::Pool; use diesel; use diesel::prelude::*; use r2d2; use r2d2_diesel::ConnectionManager; use rocket; use rocket_contrib::Json; use rocket::http::Status; use rocket::request::{self, FromRequest}; use rocket::{Outcome, Request, State}; use models::Minion; use config::Config; const API_KEY_HEADER: &str = "X-API-KEY"; /// Database request guard struct DbConn(r2d2::PooledConnection<ConnectionManager<SqliteConnection>>); /// Client IP request guard struct Ip(IpAddr); /// Expected data for a minion's registration #[derive(Deserialize)] struct Registration { username: String, // TODO: how to handle both string and integer? port: i32, directory: String, } impl<'a, 'r> FromRequest<'a, 'r> for DbConn { type Error = (); fn from_request(request: &'a Request<'r>) -> request::Outcome<DbConn, ()> { let pool = request.guard::<State<Pool>>()?; match pool.get() { Ok(conn) => Outcome::Success(DbConn(conn)), Err(_) => Outcome::Failure((Status::ServiceUnavailable, ())), } } } impl<'a, 'r> FromRequest<'a, 'r> for Ip { type Error = (); fn from_request(request: &'a Request<'r>) -> request::Outcome<Ip, ()> { match request.remote() { Some(addr) => Outcome::Success(Ip(addr.ip())), None => Outcome::Failure((Status::BadRequest, ())), } } } /// Convenience Deref implementation to use DbConn as a SqliteConnection impl Deref for DbConn { type Target = SqliteConnection; fn deref(&self) -> &Self::Target { &self.0 } } impl<'a, 'r> FromRequest<'a, 'r> for Minion { type Error = (); fn from_request(request: &'a Request<'r>) -> request::Outcome<Minion, ()> { use schema::minions::dsl; let keys: Vec<_> = request.headers().get(API_KEY_HEADER).collect(); if keys.len() != 1 { return Outcome::Failure((Status::Unauthorized, ())); } let key = keys[0]; let pool = request.guard::<State<Pool>>()?; if let Ok(conn) = pool.get() { match dsl::minions.filter(dsl::key.eq(key)).first(&*conn) { Ok(minion) => Outcome::Success(minion), Err(_) => Outcome::Failure((Status::Forbidden, ())), } } else { Outcome::Failure((Status::ServiceUnavailable, ())) } } } // Register a minion as active #[post("/minion", data = "<input>")] fn register( conn: DbConn, minion: Minion, ip: Ip, input: Json<Registration>, config: State<Config>, ) -> String { use schema::minions::dsl; diesel::update(&minion) .set(( dsl::active.eq(true), dsl::ip.eq(format!("{}", ip.0)), dsl::username.eq(&input.username), dsl::port.eq(&input.port), dsl::directory.eq(&input.directory), )) .execute(&*conn) .expect(&format!("Could not update {}", &minion.name)); config.pubkey.clone() } // Set a minion as inactive #[delete("/minion")] fn unregister(conn: DbConn, minion: Minion) { use schema::minions::dsl; diesel::update(&minion) .set((dsl::active.eq(false), dsl::ip.eq(""))) .execute(&*conn) .expect(&format!("Could not update {}", &minion.name)); } pub fn serve(pool: Pool, config: Config) { rocket::ignite() .mount("/", routes![register, unregister]) .manage(pool) .manage(config) .launch(); }
26.78626
79
0.577372
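The `DbConn` guard above gets its ergonomics from a one-line `Deref` impl: the newtype hands out the wrapped connection transparently. The pattern in miniature, with a stub connection type standing in for the pooled `SqliteConnection`:

use std::ops::Deref;

struct Conn(String); // stands in for a pooled database connection

impl Conn {
    fn execute(&self, sql: &str) -> usize {
        println!("[{}] {}", self.0, sql);
        1
    }
}

// The DbConn pattern: a newtype that derefs to the wrapped connection
// so callers can use it wherever &Conn is expected.
struct DbConn(Conn);

impl Deref for DbConn {
    type Target = Conn;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

fn main() {
    let conn = DbConn(Conn("pool-0".to_string()));
    // Deref coercion routes the method call through to the inner Conn.
    assert_eq!(conn.execute("UPDATE minions SET active = 1"), 1);
}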
64318a19442b5ca1fb9f1c592d0669fb178f9666
257
#[cfg(test)]
mod tests {
    use super::super::modules::*;

    #[test]
    fn test_not() {
        let mut m = Not::new();

        m.in_ = false;
        m.prop();
        assert_eq!(m.in_, false);
        assert_eq!(m.out, true);

        m.in_ = true;
        m.prop();
        assert_eq!(m.in_, true);
        assert_eq!(m.out, false);
    }
}
15.117647
29
0.618677
16e60919f23eb0da74e1961b7258077a11489176
2,147
// Copyright 2015-2020 Parity Technologies (UK) Ltd. // This file is part of Open Ethereum. // Open Ethereum is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Open Ethereum is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Open Ethereum. If not, see <http://www.gnu.org/licenses/>. //! EIP-191 compliant decoding + hashing use v1::types::{EIP191Version, Bytes, PresignedTransaction}; use eip_712::{hash_structured_data, EIP712}; use serde_json::{Value, from_value}; use v1::helpers::errors; use jsonrpc_core::Error; use v1::helpers::dispatch::eth_data_hash; use hash::keccak; use std::fmt::Display; use ethereum_types::H256; /// deserializes and hashes the message depending on the version specifier pub fn hash_message(version: EIP191Version, message: Value) -> Result<H256, Error> { let data = match version { EIP191Version::StructuredData => { let typed_data = from_value::<EIP712>(message) .map_err(map_serde_err("StructuredData"))?; hash_structured_data(typed_data) .map_err(|err| errors::invalid_call_data(err.kind()))? } EIP191Version::PresignedTransaction => { let data = from_value::<PresignedTransaction>(message) .map_err(map_serde_err("WithValidator"))?; let prefix = b"\x19\x00"; let data = [&prefix[..], &data.validator.0[..], &data.data.0[..]].concat(); keccak(data) } EIP191Version::PersonalMessage => { let bytes = from_value::<Bytes>(message) .map_err(map_serde_err("Bytes"))?; eth_data_hash(bytes.0) } }; Ok(data) } fn map_serde_err<T: Display>(struct_name: &'static str) -> impl Fn(T) -> Error { move |error: T| { errors::invalid_call_data(format!("Error deserializing '{}': {}", struct_name, error)) } }
34.629032
88
0.719609
79df187214d96dc3e632e74cd01520a9cf782228
17,182
// Copyright 2018 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use fidl_fuchsia_wlan_common as fidl_common; use fidl_fuchsia_wlan_mlme as fidl_mlme; use log::error; use wlan_common::{ channel::{Cbw, Channel, Phy}, ie::*, RadioConfig, }; use crate::DeviceInfo; fn convert_chanwidth_to_cbw(chan_width_set: ChanWidthSet) -> fidl_common::Cbw { if chan_width_set == ChanWidthSet::TWENTY_ONLY { fidl_common::Cbw::Cbw20 } else { fidl_common::Cbw::Cbw40Below } } fn convert_secchan_offset_to_cbw(secchan_offset: SecChanOffset) -> fidl_common::Cbw { if secchan_offset == SecChanOffset::SECONDARY_ABOVE { fidl_common::Cbw::Cbw40 } else if secchan_offset == SecChanOffset::SECONDARY_BELOW { fidl_common::Cbw::Cbw40Below } else { fidl_common::Cbw::Cbw20 } } fn convert_vht_segments_to_cbw(seg0: u8, seg1: u8) -> fidl_common::Cbw { // See IEEE Std 802.11-2016, Table 9-253 let gap = if seg0 >= seg1 { seg0 - seg1 } else { seg1 - seg0 }; if seg1 == 0 { fidl_common::Cbw::Cbw80 } else if gap == 8 { fidl_common::Cbw::Cbw160 } else if gap > 16 { fidl_common::Cbw::Cbw80P80 } else { fidl_common::Cbw::Cbw80 } } fn derive_cbw_ht(client_ht_cap: &HtCapabilities, bss_ht_op: &HtOperation) -> fidl_common::Cbw { let client_ht_cap_info = client_ht_cap.ht_cap_info; let client_cbw = convert_chanwidth_to_cbw(client_ht_cap_info.chan_width_set()); let ap_cbw = convert_secchan_offset_to_cbw({ bss_ht_op.ht_op_info_head }.secondary_chan_offset()); std::cmp::min(client_cbw, ap_cbw) } fn derive_cbw_vht( client_ht_cap: &HtCapabilities, _client_vht_cap: &VhtCapabilities, bss_ht_op: &HtOperation, bss_vht_op: &VhtOperation, ) -> fidl_common::Cbw { // Derive CBW from AP's VHT IEs let ap_cbw = if bss_vht_op.vht_cbw == VhtChannelBandwidth::CBW_80_160_80P80 { convert_vht_segments_to_cbw(bss_vht_op.center_freq_seg0, bss_vht_op.center_freq_seg1) } else { derive_cbw_ht(client_ht_cap, bss_ht_op) }; // TODO(NET-1575): Support CBW160 and CBW80P80 // See IEEE Std 802.11-2016 table 9-250 for full decoding let client_cbw = fidl_common::Cbw::Cbw80; std::cmp::min(client_cbw, ap_cbw) } fn get_band_id(primary_chan: u8) -> fidl_common::Band { if primary_chan <= 14 { fidl_common::Band::WlanBand2Ghz } else { fidl_common::Band::WlanBand5Ghz } } pub fn get_device_band_info( device_info: &DeviceInfo, channel: u8, ) -> Option<&fidl_mlme::BandCapabilities> { let target = get_band_id(channel); device_info.bands.iter().find(|b| b.band_id == target) } /// Derive PHY and CBW for Client role pub fn derive_phy_cbw( bss: &fidl_mlme::BssDescription, device_info: &DeviceInfo, radio_cfg: &RadioConfig, ) -> (fidl_common::Phy, fidl_common::Cbw) { let band_cap = match get_device_band_info(device_info, bss.chan.primary) { None => { error!( "Could not find the device capability corresponding to the \ channel {} of the selected AP {:?} \ Falling back to ERP with 20 MHz bandwidth", bss.chan.primary, bss.bssid ); // Fallback to a common ground of Fuchsia return (fidl_common::Phy::Erp, fidl_common::Cbw::Cbw20); } Some(bc) => bc, }; let supported_phy = if band_cap.ht_cap.is_none() || bss.ht_cap.is_none() || bss.ht_op.is_none() { fidl_common::Phy::Erp } else if band_cap.vht_cap.is_none() || bss.vht_cap.is_none() || bss.vht_op.is_none() { fidl_common::Phy::Ht } else { fidl_common::Phy::Vht }; let phy_to_use = match radio_cfg.phy { None => supported_phy, Some(override_phy) => std::cmp::min(override_phy.to_fidl(), supported_phy), }; // Safe to unwrap below because phy_to_use guarantees that IEs 
exist. // TODO(38205): Clean up this part to remove all the expect(...). let best_cbw = match phy_to_use { fidl_common::Phy::Hr => fidl_common::Cbw::Cbw20, fidl_common::Phy::Erp => fidl_common::Cbw::Cbw20, fidl_common::Phy::Ht => derive_cbw_ht( &parse_ht_capabilities(&band_cap.ht_cap.as_ref().unwrap().bytes[..]) .expect("band capability needs ht_cap"), &parse_ht_operation(&bss.ht_op.as_ref().unwrap().bytes[..]) .expect("bss is expected to have ht_op"), ), fidl_common::Phy::Vht | fidl_common::Phy::Hew => derive_cbw_vht( &parse_ht_capabilities(&band_cap.ht_cap.as_ref().unwrap().bytes[..]) .expect("band capability needs ht_cap"), &parse_vht_capabilities(&band_cap.vht_cap.as_ref().unwrap().bytes[..]) .expect("band capability needs vht_cap"), &parse_ht_operation(&bss.ht_op.as_ref().unwrap().bytes[..]).expect("bss needs ht_op"), &parse_vht_operation(&bss.vht_op.as_ref().unwrap().bytes[..]) .expect("bss needs vht_op"), ), }; let cbw_to_use = match radio_cfg.cbw { None => best_cbw, Some(override_cbw) => { let (cbw, _) = override_cbw.to_fidl(); std::cmp::min(best_cbw, cbw) } }; (phy_to_use, cbw_to_use) } /// Derive PHY to use for AP or Mesh role. Input config_phy and chan are required to be valid. pub fn derive_phy_cbw_for_ap( device_info: &DeviceInfo, config_phy: &Phy, chan: &Channel, ) -> (Phy, Cbw) { let band_cap = match get_device_band_info(device_info, chan.primary) { None => { error!( "Could not find the device capability corresponding to the \ channel {} Falling back to HT with 20 MHz bandwidth", chan.primary ); return (Phy::Ht, Cbw::Cbw20); } Some(bc) => bc, }; let supported_phy = if band_cap.ht_cap.is_none() { Phy::Erp } else if band_cap.vht_cap.is_none() { Phy::Ht } else { Phy::Vht }; let phy_to_use = std::cmp::min(*config_phy, supported_phy); let best_cbw = match phy_to_use { Phy::Hr | Phy::Erp => Cbw::Cbw20, Phy::Ht => { // Consider the input chan of this function can be Channel // { primary: 48, cbw: Cbw80 }, which is valid. If phy_to_use is HT, however, // Cbw80 becomes infeasible, and a next feasible CBW needs to be found. let ht_cap = parse_ht_capabilities(&band_cap.ht_cap.as_ref().unwrap().bytes[..]) .expect("band capability needs ht_cap"); let ht_cap_info = ht_cap.ht_cap_info; if ht_cap_info.chan_width_set() == ChanWidthSet::TWENTY_ONLY { Cbw::Cbw20 } else { // Only one is feasible. 
let c = Channel::new(chan.primary, Cbw::Cbw40); if c.is_valid() { Cbw::Cbw40 } else { Cbw::Cbw40Below } } } // TODO(porce): CBW160, CBW80P80, HEW support Phy::Vht | Phy::Hew => Cbw::Cbw80, }; let cbw_to_use = std::cmp::min(chan.cbw, best_cbw); (phy_to_use, cbw_to_use) } #[cfg(test)] mod tests { use { super::*, crate::{client::test_utils::fake_vht_bss_description, test_utils::*}, wlan_common::{ channel::{Cbw, Channel, Phy}, RadioConfig, }, }; #[test] fn band_id() { assert_eq!(fidl_common::Band::WlanBand2Ghz, get_band_id(1)); assert_eq!(fidl_common::Band::WlanBand2Ghz, get_band_id(14)); assert_eq!(fidl_common::Band::WlanBand5Ghz, get_band_id(36)); assert_eq!(fidl_common::Band::WlanBand5Ghz, get_band_id(165)); } #[test] fn test_convert_chanwidth_to_cbw() { assert_eq!(fidl_common::Cbw::Cbw20, convert_chanwidth_to_cbw(ChanWidthSet::TWENTY_ONLY)); assert_eq!( fidl_common::Cbw::Cbw40Below, convert_chanwidth_to_cbw(ChanWidthSet::TWENTY_FORTY) ); } #[test] fn test_convert_secchan_offset_to_cbw() { assert_eq!( fidl_common::Cbw::Cbw20, convert_secchan_offset_to_cbw(SecChanOffset::SECONDARY_NONE) ); assert_eq!( fidl_common::Cbw::Cbw40, convert_secchan_offset_to_cbw(SecChanOffset::SECONDARY_ABOVE) ); assert_eq!( fidl_common::Cbw::Cbw40Below, convert_secchan_offset_to_cbw(SecChanOffset::SECONDARY_BELOW) ); } #[test] fn test_convert_vht_segments_to_cbw() { assert_eq!(fidl_common::Cbw::Cbw80, convert_vht_segments_to_cbw(255, 0)); assert_eq!(fidl_common::Cbw::Cbw160, convert_vht_segments_to_cbw(255, 247)); assert_eq!(fidl_common::Cbw::Cbw80P80, convert_vht_segments_to_cbw(255, 200)); assert_eq!(fidl_common::Cbw::Cbw80, convert_vht_segments_to_cbw(255, 250)); } #[test] fn test_derive_cbw_ht() { { let want = fidl_common::Cbw::Cbw20; let got = derive_cbw_ht( &fake_ht_cap_chanwidth(ChanWidthSet::TWENTY_FORTY), &fake_ht_op_sec_offset(SecChanOffset::SECONDARY_NONE), ); assert_eq!(want, got); } { let want = fidl_common::Cbw::Cbw20; let got = derive_cbw_ht( &fake_ht_cap_chanwidth(ChanWidthSet::TWENTY_ONLY), &fake_ht_op_sec_offset(SecChanOffset::SECONDARY_ABOVE), ); assert_eq!(want, got); } { let want = fidl_common::Cbw::Cbw40; let got = derive_cbw_ht( &fake_ht_cap_chanwidth(ChanWidthSet::TWENTY_FORTY), &fake_ht_op_sec_offset(SecChanOffset::SECONDARY_ABOVE), ); assert_eq!(want, got); } { let want = fidl_common::Cbw::Cbw40Below; let got = derive_cbw_ht( &fake_ht_cap_chanwidth(ChanWidthSet::TWENTY_FORTY), &fake_ht_op_sec_offset(SecChanOffset::SECONDARY_BELOW), ); assert_eq!(want, got); } } #[test] fn test_derive_cbw_vht() { { let want = fidl_common::Cbw::Cbw80; let got = derive_cbw_vht( &fake_ht_cap_chanwidth(ChanWidthSet::TWENTY_FORTY), &fake_vht_capabilities(), &fake_ht_op_sec_offset(SecChanOffset::SECONDARY_ABOVE), &fake_vht_op_cbw(VhtChannelBandwidth::CBW_80_160_80P80), ); assert_eq!(want, got); } { let want = fidl_common::Cbw::Cbw40; let got = derive_cbw_vht( &fake_ht_cap_chanwidth(ChanWidthSet::TWENTY_FORTY), &fake_vht_capabilities(), &fake_ht_op_sec_offset(SecChanOffset::SECONDARY_ABOVE), &fake_vht_op_cbw(VhtChannelBandwidth::CBW_20_40), ); assert_eq!(want, got); } } #[test] fn test_get_band_id() { assert_eq!(fidl_common::Band::WlanBand2Ghz, get_band_id(14)); assert_eq!(fidl_common::Band::WlanBand5Ghz, get_band_id(36)); } #[test] fn test_get_device_band_info() { assert_eq!( fidl_common::Band::WlanBand5Ghz, get_device_band_info(&fake_device_info_ht(ChanWidthSet::TWENTY_FORTY), 36) .unwrap() .band_id ); } #[test] fn test_derive_phy_cbw() { { let want = (fidl_common::Phy::Vht, fidl_common::Cbw::Cbw80); let got = 
derive_phy_cbw( &fake_vht_bss_description(), &fake_device_info_vht(ChanWidthSet::TWENTY_FORTY), &RadioConfig::default(), ); assert_eq!(want, got); } { let want = (fidl_common::Phy::Ht, fidl_common::Cbw::Cbw40); let got = derive_phy_cbw( &fake_vht_bss_description(), &fake_device_info_vht(ChanWidthSet::TWENTY_FORTY), &fake_overrider(fidl_common::Phy::Ht, fidl_common::Cbw::Cbw80), ); assert_eq!(want, got); } { let want = (fidl_common::Phy::Ht, fidl_common::Cbw::Cbw20); let got = derive_phy_cbw( &fake_vht_bss_description(), &fake_device_info_ht(ChanWidthSet::TWENTY_ONLY), &fake_overrider(fidl_common::Phy::Vht, fidl_common::Cbw::Cbw80), ); assert_eq!(want, got); } } struct UserCfg { phy: Phy, chan: Channel, } impl UserCfg { fn new(phy: Phy, primary_chan: u8, cbw: Cbw) -> Self { UserCfg { phy, chan: Channel::new(primary_chan, cbw) } } } #[test] fn test_derive_phy_cbw_for_ap() { // VHT config, VHT device { let usr_cfg = UserCfg::new(Phy::Vht, 36, Cbw::Cbw80); let want = (Phy::Vht, Cbw::Cbw80); let got = derive_phy_cbw_for_ap( &fake_device_info_vht(ChanWidthSet::TWENTY_FORTY), &usr_cfg.phy, &usr_cfg.chan, ); assert_eq!(want, got); } { let usr_cfg = UserCfg::new(Phy::Vht, 36, Cbw::Cbw40); let want = (Phy::Vht, Cbw::Cbw40); let got = derive_phy_cbw_for_ap( &fake_device_info_vht(ChanWidthSet::TWENTY_FORTY), &usr_cfg.phy, &usr_cfg.chan, ); assert_eq!(want, got); } { let usr_cfg = UserCfg::new(Phy::Vht, 36, Cbw::Cbw20); let want = (Phy::Vht, Cbw::Cbw20); let got = derive_phy_cbw_for_ap( &fake_device_info_vht(ChanWidthSet::TWENTY_FORTY), &usr_cfg.phy, &usr_cfg.chan, ); assert_eq!(want, got); } { let usr_cfg = UserCfg::new(Phy::Vht, 40, Cbw::Cbw40Below); let want = (Phy::Vht, Cbw::Cbw40Below); let got = derive_phy_cbw_for_ap( &fake_device_info_vht(ChanWidthSet::TWENTY_FORTY), &usr_cfg.phy, &usr_cfg.chan, ); assert_eq!(want, got); } // HT config, VHT device { let usr_cfg = UserCfg::new(Phy::Ht, 36, Cbw::Cbw40); let want = (Phy::Ht, Cbw::Cbw40); let got = derive_phy_cbw_for_ap( &fake_device_info_vht(ChanWidthSet::TWENTY_FORTY), &usr_cfg.phy, &usr_cfg.chan, ); assert_eq!(want, got); } { let usr_cfg = UserCfg::new(Phy::Ht, 40, Cbw::Cbw40Below); let want = (Phy::Ht, Cbw::Cbw40Below); let got = derive_phy_cbw_for_ap( &fake_device_info_vht(ChanWidthSet::TWENTY_FORTY), &usr_cfg.phy, &usr_cfg.chan, ); assert_eq!(want, got); } { let usr_cfg = UserCfg::new(Phy::Ht, 36, Cbw::Cbw40); let want = (Phy::Ht, Cbw::Cbw40); let got = derive_phy_cbw_for_ap( &fake_device_info_ht(ChanWidthSet::TWENTY_FORTY), &usr_cfg.phy, &usr_cfg.chan, ); assert_eq!(want, got); } { let usr_cfg = UserCfg::new(Phy::Ht, 36, Cbw::Cbw20); let want = (Phy::Ht, Cbw::Cbw20); let got = derive_phy_cbw_for_ap( &fake_device_info_ht(ChanWidthSet::TWENTY_FORTY), &usr_cfg.phy, &usr_cfg.chan, ); assert_eq!(want, got); } // VHT config, HT device { let usr_cfg = UserCfg::new(Phy::Vht, 36, Cbw::Cbw80); let want = (Phy::Ht, Cbw::Cbw40); let got = derive_phy_cbw_for_ap( &fake_device_info_ht(ChanWidthSet::TWENTY_FORTY), &usr_cfg.phy, &usr_cfg.chan, ); assert_eq!(want, got); } { let usr_cfg = UserCfg::new(Phy::Vht, 40, Cbw::Cbw80); let want = (Phy::Ht, Cbw::Cbw40Below); let got = derive_phy_cbw_for_ap( &fake_device_info_ht(ChanWidthSet::TWENTY_FORTY), &usr_cfg.phy, &usr_cfg.chan, ); assert_eq!(want, got); } { let usr_cfg = UserCfg::new(Phy::Vht, 36, Cbw::Cbw80); let want = (Phy::Ht, Cbw::Cbw20); let got = derive_phy_cbw_for_ap( &fake_device_info_ht(ChanWidthSet::TWENTY_ONLY), &usr_cfg.phy, &usr_cfg.chan, ); assert_eq!(want, got); } } }
34.09127
99
0.560412
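`derive_phy_cbw` above repeatedly takes `std::cmp::min` of a client capability and an AP capability; that works because the PHY enum orders weakest-first, so `min` means "fall back to what both sides support". A sketch with an illustrative `Phy` enum (derived `Ord` follows declaration order):

// Weakest-first declaration order makes derived Ord usable for
// capability negotiation: min(client, ap) is the common denominator.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum Phy {
    Erp,
    Ht,
    Vht,
}

fn main() {
    let client = Phy::Vht;
    let ap = Phy::Ht;
    assert_eq!(std::cmp::min(client, ap), Phy::Ht); // fall back to HT
    assert_eq!(std::cmp::min(Phy::Erp, Phy::Vht), Phy::Erp);
}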
f96895c23fc47e12eec1a3e5730492673bb4fd2e
413
/* cargo run -p vectors --bin vector3 cargo fmt --verbose --package vectors */ fn value(n: Option<&i32>) { match n { Some(n) => println!("Fourth element of a vector is {}", n), None => println!("None"), } } fn main() { let v = vec![20, 30, 40, 50]; let a: Option<&i32> = v.get(3); value(a); for i in 0..v.len() { value(v.get(i)); } value(Option::None); }
18.772727
67
0.510896
618e5f249e7235dfb86118df9b29caedcf8d57ec
11,270
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// Service config.
///
///
/// Service configuration allows for customization of endpoints, region, credentials providers,
/// and retry configuration. Generally, it is constructed automatically for you from a shared
/// configuration loaded by the `aws-config` crate. For example:
///
/// ```ignore
/// // Load a shared config from the environment
/// let shared_config = aws_config::from_env().load().await;
/// // The client constructor automatically converts the shared config into the service config
/// let client = Client::new(&shared_config);
/// ```
///
/// The service config can also be constructed manually using its builder.
///
pub struct Config {
    app_name: Option<aws_types::app_name::AppName>,
    pub(crate) timeout_config: Option<aws_smithy_types::timeout::TimeoutConfig>,
    pub(crate) sleep_impl: Option<std::sync::Arc<dyn aws_smithy_async::rt::sleep::AsyncSleep>>,
    pub(crate) retry_config: Option<aws_smithy_types::retry::RetryConfig>,
    pub(crate) endpoint_resolver: ::std::sync::Arc<dyn aws_endpoint::ResolveAwsEndpoint>,
    pub(crate) region: Option<aws_types::region::Region>,
    pub(crate) credentials_provider: aws_types::credentials::SharedCredentialsProvider,
}
impl std::fmt::Debug for Config {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut config = f.debug_struct("Config");
        config.finish()
    }
}
impl Config {
    /// Constructs a config builder.
    pub fn builder() -> Builder {
        Builder::default()
    }
    /// Returns the name of the app that is using the client, if it was provided.
    ///
    /// This _optional_ name is used to identify the application in the user agent that
    /// gets sent along with requests.
    pub fn app_name(&self) -> Option<&aws_types::app_name::AppName> {
        self.app_name.as_ref()
    }
    /// Creates a new [service config](crate::Config) from a [shared `config`](aws_types::config::Config).
    pub fn new(config: &aws_types::config::Config) -> Self {
        Builder::from(config).build()
    }
    /// The signature version 4 service signing name to use in the credential scope when signing requests.
    ///
    /// The signing service may be overridden by the `Endpoint`, or by specifying a custom
    /// [`SigningService`](aws_types::SigningService) during operation construction
    pub fn signing_service(&self) -> &'static str {
        "awsmobilehubservice"
    }
}
/// Builder for creating a `Config`.
#[derive(Default)]
pub struct Builder {
    app_name: Option<aws_types::app_name::AppName>,
    timeout_config: Option<aws_smithy_types::timeout::TimeoutConfig>,
    sleep_impl: Option<std::sync::Arc<dyn aws_smithy_async::rt::sleep::AsyncSleep>>,
    retry_config: Option<aws_smithy_types::retry::RetryConfig>,
    endpoint_resolver: Option<::std::sync::Arc<dyn aws_endpoint::ResolveAwsEndpoint>>,
    region: Option<aws_types::region::Region>,
    credentials_provider: Option<aws_types::credentials::SharedCredentialsProvider>,
}
impl Builder {
    /// Constructs a config builder.
    pub fn new() -> Self {
        Self::default()
    }
    /// Sets the name of the app that is using the client.
    ///
    /// This _optional_ name is used to identify the application in the user agent that
    /// gets sent along with requests.
    pub fn app_name(mut self, app_name: aws_types::app_name::AppName) -> Self {
        self.set_app_name(Some(app_name));
        self
    }
    /// Sets the name of the app that is using the client.
    ///
    /// This _optional_ name is used to identify the application in the user agent that
    /// gets sent along with requests.
    pub fn set_app_name(&mut self, app_name: Option<aws_types::app_name::AppName>) -> &mut Self {
        self.app_name = app_name;
        self
    }
    /// Set the timeout_config for the builder
    ///
    /// # Examples
    ///
    /// ```no_run
    /// # use std::time::Duration;
    /// use aws_sdk_mobile::config::Config;
    /// use aws_smithy_types::timeout::TimeoutConfig;
    ///
    /// let timeout_config = TimeoutConfig::new()
    ///     .with_api_call_attempt_timeout(Some(Duration::from_secs(1)));
    /// let config = Config::builder().timeout_config(timeout_config).build();
    /// ```
    pub fn timeout_config(
        mut self,
        timeout_config: aws_smithy_types::timeout::TimeoutConfig,
    ) -> Self {
        self.set_timeout_config(Some(timeout_config));
        self
    }
    /// Set the timeout_config for the builder
    ///
    /// # Examples
    ///
    /// ```no_run
    /// # use std::time::Duration;
    /// use aws_sdk_mobile::config::{Builder, Config};
    /// use aws_smithy_types::timeout::TimeoutConfig;
    ///
    /// fn set_request_timeout(builder: &mut Builder) {
    ///     let timeout_config = TimeoutConfig::new()
    ///         .with_api_call_timeout(Some(Duration::from_secs(3)));
    ///     builder.set_timeout_config(Some(timeout_config));
    /// }
    ///
    /// let mut builder = Config::builder();
    /// set_request_timeout(&mut builder);
    /// let config = builder.build();
    /// ```
    pub fn set_timeout_config(
        &mut self,
        timeout_config: Option<aws_smithy_types::timeout::TimeoutConfig>,
    ) -> &mut Self {
        self.timeout_config = timeout_config;
        self
    }
    /// Set the sleep_impl for the builder
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use aws_sdk_mobile::config::Config;
    /// use aws_smithy_async::rt::sleep::AsyncSleep;
    /// use aws_smithy_async::rt::sleep::Sleep;
    ///
    /// #[derive(Debug)]
    /// pub struct ForeverSleep;
    ///
    /// impl AsyncSleep for ForeverSleep {
    ///     fn sleep(&self, duration: std::time::Duration) -> Sleep {
    ///         Sleep::new(std::future::pending())
    ///     }
    /// }
    ///
    /// let sleep_impl = std::sync::Arc::new(ForeverSleep);
    /// let config = Config::builder().sleep_impl(sleep_impl).build();
    /// ```
    pub fn sleep_impl(
        mut self,
        sleep_impl: std::sync::Arc<dyn aws_smithy_async::rt::sleep::AsyncSleep>,
    ) -> Self {
        self.set_sleep_impl(Some(sleep_impl));
        self
    }
    /// Set the sleep_impl for the builder
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use aws_sdk_mobile::config::{Builder, Config};
    /// use aws_smithy_async::rt::sleep::AsyncSleep;
    /// use aws_smithy_async::rt::sleep::Sleep;
    ///
    /// #[derive(Debug)]
    /// pub struct ForeverSleep;
    ///
    /// impl AsyncSleep for ForeverSleep {
    ///     fn sleep(&self, duration: std::time::Duration) -> Sleep {
    ///         Sleep::new(std::future::pending())
    ///     }
    /// }
    ///
    /// fn set_never_ending_sleep_impl(builder: &mut Builder) {
    ///     let sleep_impl = std::sync::Arc::new(ForeverSleep);
    ///     builder.set_sleep_impl(Some(sleep_impl));
    /// }
    ///
    /// let mut builder = Config::builder();
    /// set_never_ending_sleep_impl(&mut builder);
    /// let config = builder.build();
    /// ```
    pub fn set_sleep_impl(
        &mut self,
        sleep_impl: Option<std::sync::Arc<dyn aws_smithy_async::rt::sleep::AsyncSleep>>,
    ) -> &mut Self {
        self.sleep_impl = sleep_impl;
        self
    }
    /// Set the retry_config for the builder
    ///
    /// # Examples
    /// ```no_run
    /// use aws_sdk_mobile::config::Config;
    /// use aws_smithy_types::retry::RetryConfig;
    ///
    /// let retry_config = RetryConfig::new().with_max_attempts(5);
    /// let config = Config::builder().retry_config(retry_config).build();
    /// ```
    pub fn retry_config(mut self, retry_config: aws_smithy_types::retry::RetryConfig) -> Self {
        self.set_retry_config(Some(retry_config));
        self
    }
    /// Set the retry_config for the builder
    ///
    /// # Examples
    /// ```no_run
    ///
    /// use aws_sdk_mobile::config::{Builder, Config};
    /// use aws_smithy_types::retry::RetryConfig;
    ///
    /// fn disable_retries(builder: &mut Builder) {
    ///     let retry_config = RetryConfig::new().with_max_attempts(1);
    ///     builder.set_retry_config(Some(retry_config));
    /// }
    ///
    /// let mut builder = Config::builder();
    /// disable_retries(&mut builder);
    /// let config = builder.build();
    /// ```
    pub fn set_retry_config(
        &mut self,
        retry_config: Option<aws_smithy_types::retry::RetryConfig>,
    ) -> &mut Self {
        self.retry_config = retry_config;
        self
    }
    // TODO(docs): include an example of using a static endpoint
    /// Sets the endpoint resolver to use when making requests.
    pub fn endpoint_resolver(
        mut self,
        endpoint_resolver: impl aws_endpoint::ResolveAwsEndpoint + 'static,
    ) -> Self {
        self.endpoint_resolver = Some(::std::sync::Arc::new(endpoint_resolver));
        self
    }
    /// Sets the AWS region to use when making requests.
    pub fn region(mut self, region: impl Into<Option<aws_types::region::Region>>) -> Self {
        self.region = region.into();
        self
    }
    /// Sets the credentials provider for this service
    pub fn credentials_provider(
        mut self,
        credentials_provider: impl aws_types::credentials::ProvideCredentials + 'static,
    ) -> Self {
        self.credentials_provider = Some(aws_types::credentials::SharedCredentialsProvider::new(
            credentials_provider,
        ));
        self
    }
    /// Sets the credentials provider for this service
    pub fn set_credentials_provider(
        &mut self,
        credentials_provider: Option<aws_types::credentials::SharedCredentialsProvider>,
    ) -> &mut Self {
        self.credentials_provider = credentials_provider;
        self
    }
    /// Builds a [`Config`].
    pub fn build(self) -> Config {
        Config {
            app_name: self.app_name,
            timeout_config: self.timeout_config,
            sleep_impl: self.sleep_impl,
            retry_config: self.retry_config,
            endpoint_resolver: self
                .endpoint_resolver
                .unwrap_or_else(|| ::std::sync::Arc::new(crate::aws_endpoint::endpoint_resolver())),
            region: self.region,
            credentials_provider: self.credentials_provider.unwrap_or_else(|| {
                aws_types::credentials::SharedCredentialsProvider::new(
                    crate::no_credentials::NoCredentials,
                )
            }),
        }
    }
}
impl From<&aws_types::config::Config> for Builder {
    fn from(input: &aws_types::config::Config) -> Self {
        let mut builder = Builder::default();
        builder = builder.region(input.region().cloned());
        builder.set_retry_config(input.retry_config().cloned());
        builder.set_timeout_config(input.timeout_config().cloned());
        builder.set_sleep_impl(input.sleep_impl().clone());
        builder.set_credentials_provider(input.credentials_provider().cloned());
        builder.set_app_name(input.app_name().cloned());
        builder
    }
}
impl From<&aws_types::config::Config> for Config {
    fn from(config: &aws_types::config::Config) -> Self {
        Builder::from(config).build()
    }
}
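As a quick illustration of the builder surface above, here is a minimal hand-built config sketch. It only uses methods and types shown in this file and its doc examples; the region string and attempt count are placeholder values of ours:

use aws_sdk_mobile::config::Config;
use aws_smithy_types::retry::RetryConfig;
use aws_types::region::Region;

fn make_config() -> Config {
    // Region and max_attempts here are illustrative, not recommendations.
    Config::builder()
        .region(Region::new("us-east-1"))
        .retry_config(RetryConfig::new().with_max_attempts(3))
        .build()
}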
36.472492
106
0.626708
bbdc2672d59c4dbe34aea0beb6b7ea7f7f0e9c9f
2,372
use cargo_snippet::snippet;

#[snippet("UFT")]
#[allow(dead_code)]
/// Union Find Tree
pub struct UFT {
    pub par: Vec<usize>,
    pub rank: Vec<usize>,
}

#[snippet("UFT")]
impl UFT {
    #[allow(dead_code)]
    pub fn new(n: usize) -> Self {
        UFT {
            par: (0..n).collect(),
            rank: vec![0; n],
        }
    }
    #[allow(dead_code)]
    pub fn root(&mut self, x: usize) -> usize {
        if self.par[x] == x {
            x
        } else {
            let p = self.par[x];
            let pp = self.root(p);
            self.par[x] = pp;
            pp
        }
    }
    #[allow(dead_code)]
    pub fn merge(&mut self, x: usize, y: usize) {
        let x = self.root(x);
        let y = self.root(y);
        if x == y {
            return;
        }
        if self.rank[x] < self.rank[y] {
            self.par[x] = y;
        } else {
            self.par[y] = x;
            if self.rank[x] == self.rank[y] {
                self.rank[x] += 1;
            }
        }
    }
}

#[snippet("WeightedUFT")]
/// https://qiita.com/drken/items/cce6fc5c579051e64fab
pub struct WeightedUFT {
    pub par: Vec<usize>,
    pub rank: Vec<usize>,
    pub diff_weight: Vec<i64>,
}

#[snippet("WeightedUFT")]
impl WeightedUFT {
    pub fn new(size: usize) -> WeightedUFT {
        WeightedUFT {
            par: (0..size).collect(),
            rank: vec![0; size],
            diff_weight: vec![0; size],
        }
    }
    pub fn root(&mut self, x: usize) -> usize {
        if self.par[x] == x {
            x
        } else {
            let p = self.par[x];
            let r = self.root(p);
            self.diff_weight[x] += self.diff_weight[p];
            self.par[x] = r;
            r
        }
    }
    pub fn weight(&mut self, x: usize) -> i64 {
        self.root(x);
        self.diff_weight[x]
    }
    pub fn merge(&mut self, mut x: usize, mut y: usize, mut w: i64) -> bool {
        w += self.weight(x);
        w -= self.weight(y);
        x = self.root(x);
        y = self.root(y);
        if x == y {
            return false;
        }
        if self.rank[x] < self.rank[y] {
            std::mem::swap(&mut x, &mut y);
            w = -w;
        }
        if self.rank[x] == self.rank[y] {
            self.rank[y] += 1;
        }
        self.par[y] = x;
        self.diff_weight[y] = w;
        true
    }
}
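A short usage sketch for the two union-find types above (the `main` below is ours and assumes both definitions are in scope):

fn main() {
    let mut uft = UFT::new(5);
    uft.merge(0, 1);
    uft.merge(3, 4);
    assert_eq!(uft.root(0), uft.root(1)); // 0 and 1 are now connected
    assert_ne!(uft.root(0), uft.root(3)); // 0 and 3 remain in different sets

    // WeightedUFT additionally tracks a potential difference per union:
    // merge(x, y, w) records weight(y) - weight(x) == w.
    let mut w = WeightedUFT::new(3);
    w.merge(0, 1, 5);
    w.merge(1, 2, 2);
    assert_eq!(w.weight(2) - w.weight(0), 7); // differences compose along the tree
}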
21.563636
77
0.436341
dea82a987ea8a1eab4ec55ccae349c8203b6afd4
2,437
use crate::util::is_valid_ident;
use swc_ecma_ast::*;
use swc_ecma_visit::{noop_fold_type, Fold, FoldWith};

/// babel: `transform-property-literals`
///
/// # Input
/// ```js
/// var foo = {
///     // changed
///     "bar": function () {},
///     "1": function () {},
///
///     // not changed
///     "default": 1,
///     [a]: 2,
///     foo: 1
/// };
/// ```
///
/// # Output
/// ```js
/// var foo = {
///     bar: function () {},
///     1: function () {},
///
///     "default": 1,
///     [a]: 2,
///     foo: 1
/// };
/// ```
pub fn property_literals() -> impl Fold {
    PropertyLiteral
}

struct PropertyLiteral;

impl Fold for PropertyLiteral {
    noop_fold_type!();

    fn fold_prop_name(&mut self, n: PropName) -> PropName {
        let n = validate!(n.fold_children_with(self));

        match n {
            PropName::Str(Str {
                value: sym, span, ..
            }) => {
                if sym.is_reserved_for_es3() || !is_valid_ident(&sym) {
                    PropName::Str(Str {
                        span,
                        value: sym,
                        has_escape: false,
                    })
                } else {
                    PropName::Ident(Ident::new(sym, span))
                }
            }
            PropName::Ident(i) => {
                let Ident { sym, span, .. } = i;
                if sym.is_reserved_for_es3() || sym.contains('-') || sym.contains('.') {
                    PropName::Str(Str {
                        span,
                        value: sym,
                        has_escape: false,
                    })
                } else {
                    PropName::Ident(Ident { span, sym, ..i })
                }
            }
            _ => n,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    test!(
        ::swc_ecma_parser::Syntax::default(),
        |_| PropertyLiteral,
        babel_basic,
        r#"var foo = {
    // changed
    "bar": function () {},
    "1": function () {},

    // not changed
    "default": 1,
    [a]: 2,
    foo: 1
};"#,
        r#"var foo = {
    bar: function () {},
    '1': function () {},

    "default": 1,
    [a]: 2,
    foo: 1
};"#,
        ok_if_code_eq
    );

    test!(
        ::swc_ecma_parser::Syntax::default(),
        |_| PropertyLiteral,
        str_lit,
        r#"'use strict';
var x = {
    'foo.bar': true
};"#,
        r#"'use strict';
var x = {
    'foo.bar': true
};"#,
        ok_if_code_eq
    );
}
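`is_valid_ident` comes from the crate's private util module and is not shown here. As a rough, ASCII-only approximation of the property-name rule the transform depends on (our simplification, ignoring Unicode identifier rules and reserved words):

// Rough stand-in for the real check: a string key can be unquoted only if it
// looks like an identifier rather than, say, "1" or "foo.bar".
fn looks_like_ident(s: &str) -> bool {
    let mut chars = s.chars();
    match chars.next() {
        Some(c) if c.is_ascii_alphabetic() || c == '_' || c == '$' => {}
        _ => return false,
    }
    chars.all(|c| c.is_ascii_alphanumeric() || c == '_' || c == '$')
}

fn main() {
    assert!(looks_like_ident("bar"));      // "bar": ... can become bar: ...
    assert!(!looks_like_ident("1"));       // stays a literal key
    assert!(!looks_like_ident("foo.bar")); // must stay quoted
}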
20.478992
88
0.415265
f84572056cd3e74e5a6d485f77ea001b299256b7
156,250
#![doc = "generated by AutoRust 0.1.0"] #![allow(unused_mut)] #![allow(unused_variables)] #![allow(unused_imports)] use super::{models, models::*, API_VERSION}; pub mod operations { use super::{models, models::*, API_VERSION}; pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<OperationEntityListResult, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!("{}/providers/Microsoft.StorageSync/operations", operation_config.base_path(),); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: OperationEntityListResult = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Err(list::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod storage_sync_services { use super::{models, models::*, API_VERSION}; pub async fn check_name_availability( operation_config: &crate::OperationConfig, location_name: &str, subscription_id: &str, parameters: &CheckNameAvailabilityParameters, ) -> std::result::Result<CheckNameAvailabilityResult, check_name_availability::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.StorageSync/locations/{}/checkNameAvailability", operation_config.base_path(), subscription_id, location_name ); let mut url = url::Url::parse(url_str).map_err(check_name_availability::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential 
.get_token(operation_config.token_credential_resource()) .await .map_err(check_name_availability::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(check_name_availability::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(check_name_availability::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(check_name_availability::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: CheckNameAvailabilityResult = serde_json::from_slice(rsp_body) .map_err(|source| check_name_availability::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(check_name_availability::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod check_name_availability { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, ) -> std::result::Result<StorageSyncService, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: StorageSyncService = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let 
rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, parameters: &StorageSyncServiceCreateParameters, ) -> std::result::Result<create::Response, create::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name ); let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(create::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: StorageSyncService = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(create::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Err(create::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(StorageSyncService), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute 
request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, parameters: Option<&StorageSyncServiceUpdateParameters>, ) -> std::result::Result<update::Response, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = if let Some(parameters) = parameters { req_builder = req_builder.header("content-type", "application/json"); azure_core::to_json(parameters).map_err(update::Error::SerializeError)? } else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: StorageSyncService = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update::Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(update::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Err(update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod update { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(StorageSyncService), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, ) -> 
std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, ) -> std::result::Result<StorageSyncServiceArray, list_by_resource_group::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices", operation_config.base_path(), subscription_id, resource_group_name ); let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_resource_group::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", 
super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_resource_group::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_resource_group::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: StorageSyncServiceArray = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_resource_group::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_resource_group { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_subscription( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<StorageSyncServiceArray, list_by_subscription::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.StorageSync/storageSyncServices", operation_config.base_path(), subscription_id ); let mut url = url::Url::parse(url_str).map_err(list_by_subscription::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_subscription::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_subscription::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_subscription::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: StorageSyncServiceArray = serde_json::from_slice(rsp_body) .map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body) .map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_subscription::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod 
list_by_subscription { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod private_link_resources { use super::{models, models::*, API_VERSION}; pub async fn list_by_storage_sync_service( operation_config: &crate::OperationConfig, resource_group_name: &str, storage_sync_service_name: &str, subscription_id: &str, ) -> std::result::Result<PrivateLinkResourceListResult, list_by_storage_sync_service::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/privateLinkResources", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name ); let mut url = url::Url::parse(url_str).map_err(list_by_storage_sync_service::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_storage_sync_service::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_storage_sync_service::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_storage_sync_service::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: PrivateLinkResourceListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_by_storage_sync_service::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_by_storage_sync_service::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_by_storage_sync_service { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] 
GetTokenError(azure_core::Error), } } } pub mod private_endpoint_connections { use super::{models, models::*, API_VERSION}; pub async fn get( operation_config: &crate::OperationConfig, resource_group_name: &str, storage_sync_service_name: &str, subscription_id: &str, private_endpoint_connection_name: &str, ) -> std::result::Result<PrivateEndpointConnection, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/privateEndpointConnections/{}", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, private_endpoint_connection_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: PrivateEndpointConnection = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create( operation_config: &crate::OperationConfig, resource_group_name: &str, storage_sync_service_name: &str, subscription_id: &str, private_endpoint_connection_name: &str, properties: &PrivateEndpointConnection, ) -> std::result::Result<create::Response, create::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/privateEndpointConnections/{}", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, private_endpoint_connection_name ); let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?; let mut req_builder = 
http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(properties).map_err(create::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: PrivateEndpointConnection = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(create::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Err(create::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(PrivateEndpointConnection), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, resource_group_name: &str, storage_sync_service_name: &str, subscription_id: &str, private_endpoint_connection_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/privateEndpointConnections/{}", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, private_endpoint_connection_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); 
req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_storage_sync_service( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, ) -> std::result::Result<PrivateEndpointConnectionListResult, list_by_storage_sync_service::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/privateEndpointConnections", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name ); let mut url = url::Url::parse(url_str).map_err(list_by_storage_sync_service::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_storage_sync_service::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_storage_sync_service::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_storage_sync_service::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: PrivateEndpointConnectionListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_by_storage_sync_service::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body) .map_err(|source| list_by_storage_sync_service::Error::DeserializeError(source, rsp_body.clone()))?; 
Err(list_by_storage_sync_service::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_storage_sync_service { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod sync_groups { use super::{models, models::*, API_VERSION}; pub async fn list_by_storage_sync_service( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, ) -> std::result::Result<SyncGroupArray, list_by_storage_sync_service::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/syncGroups", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name ); let mut url = url::Url::parse(url_str).map_err(list_by_storage_sync_service::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_storage_sync_service::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_storage_sync_service::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_storage_sync_service::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: SyncGroupArray = serde_json::from_slice(rsp_body) .map_err(|source| list_by_storage_sync_service::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body) .map_err(|source| list_by_storage_sync_service::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_storage_sync_service::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_storage_sync_service { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request 
body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, sync_group_name: &str, ) -> std::result::Result<SyncGroup, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/syncGroups/{}", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, sync_group_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: SyncGroup = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, sync_group_name: &str, parameters: &SyncGroupCreateParameters, ) -> std::result::Result<SyncGroup, create::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/syncGroups/{}", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, sync_group_name ); let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); 
req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(create::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: SyncGroup = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Err(create::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, sync_group_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/syncGroups/{}", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, sync_group_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => 
Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod cloud_endpoints { use super::{models, models::*, API_VERSION}; pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, sync_group_name: &str, cloud_endpoint_name: &str, ) -> std::result::Result<CloudEndpoint, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/syncGroups/{}/cloudEndpoints/{}", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, sync_group_name, cloud_endpoint_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: CloudEndpoint = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] 
BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, sync_group_name: &str, cloud_endpoint_name: &str, parameters: &CloudEndpointCreateParameters, ) -> std::result::Result<create::Response, create::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/syncGroups/{}/cloudEndpoints/{}", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, sync_group_name, cloud_endpoint_name ); let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(create::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: CloudEndpoint = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(create::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Err(create::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(CloudEndpoint), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, 
storage_sync_service_name: &str, sync_group_name: &str, cloud_endpoint_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/syncGroups/{}/cloudEndpoints/{}", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, sync_group_name, cloud_endpoint_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_sync_group( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, sync_group_name: &str, ) -> std::result::Result<CloudEndpointArray, list_by_sync_group::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/syncGroups/{}/cloudEndpoints", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, sync_group_name ); let mut url = url::Url::parse(url_str).map_err(list_by_sync_group::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential 
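// When a credential is configured, it is exchanged for an AAD access token scoped
// to `token_credential_resource()`, which is then attached as a `Bearer` header.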
.get_token(operation_config.token_credential_resource()) .await .map_err(list_by_sync_group::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_sync_group::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_sync_group::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: CloudEndpointArray = serde_json::from_slice(rsp_body) .map_err(|source| list_by_sync_group::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body) .map_err(|source| list_by_sync_group::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_sync_group::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_sync_group { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn pre_backup( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, sync_group_name: &str, cloud_endpoint_name: &str, parameters: &BackupRequest, ) -> std::result::Result<pre_backup::Response, pre_backup::Error> { let http_client = operation_config.http_client();
let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/syncGroups/{}/cloudEndpoints/{}/prebackup", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, sync_group_name, cloud_endpoint_name ); let mut url = url::Url::parse(url_str).map_err(pre_backup::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(pre_backup::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(pre_backup::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(pre_backup::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(pre_backup::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(pre_backup::Response::Ok200), http::StatusCode::ACCEPTED => Ok(pre_backup::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| pre_backup::Error::DeserializeError(source, rsp_body.clone()))?; Err(pre_backup::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod pre_backup { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn post_backup( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, sync_group_name: &str, cloud_endpoint_name: &str, parameters: &BackupRequest, ) -> std::result::Result<post_backup::Response, post_backup::Error> { let http_client = operation_config.http_client();
let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/syncGroups/{}/cloudEndpoints/{}/postbackup", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, sync_group_name, cloud_endpoint_name ); let mut url = url::Url::parse(url_str).map_err(post_backup::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(post_backup::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(post_backup::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(post_backup::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(post_backup::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: PostBackupResponse = serde_json::from_slice(rsp_body).map_err(|source| post_backup::Error::DeserializeError(source, rsp_body.clone()))?; Ok(post_backup::Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(post_backup::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| post_backup::Error::DeserializeError(source, rsp_body.clone()))?; Err(post_backup::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod post_backup { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(PostBackupResponse), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn pre_restore( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, sync_group_name: &str, cloud_endpoint_name: &str, parameters: &PreRestoreRequest, ) -> std::result::Result<pre_restore::Response, pre_restore::Error> { let http_client = operation_config.http_client();
let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/syncGroups/{}/cloudEndpoints/{}/prerestore", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, sync_group_name, cloud_endpoint_name ); let mut url = url::Url::parse(url_str).map_err(pre_restore::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(pre_restore::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(pre_restore::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(pre_restore::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(pre_restore::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(pre_restore::Response::Ok200), http::StatusCode::ACCEPTED => Ok(pre_restore::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| pre_restore::Error::DeserializeError(source, rsp_body.clone()))?; Err(pre_restore::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod pre_restore { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn restoreheartbeat( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, sync_group_name: &str, cloud_endpoint_name: &str, ) -> std::result::Result<(), restoreheartbeat::Error> { let http_client = operation_config.http_client();
let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/syncGroups/{}/cloudEndpoints/{}/restoreheartbeat", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, sync_group_name, cloud_endpoint_name ); let mut url = url::Url::parse(url_str).map_err(restoreheartbeat::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(restoreheartbeat::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(restoreheartbeat::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(restoreheartbeat::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(()), status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body) .map_err(|source| restoreheartbeat::Error::DeserializeError(source, rsp_body.clone()))?; Err(restoreheartbeat::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod restoreheartbeat { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn post_restore( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, sync_group_name: &str, cloud_endpoint_name: &str, parameters: &PostRestoreRequest, ) -> std::result::Result<post_restore::Response, post_restore::Error> { let http_client = operation_config.http_client();
let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/syncGroups/{}/cloudEndpoints/{}/postrestore", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, sync_group_name, cloud_endpoint_name ); let mut url = url::Url::parse(url_str).map_err(post_restore::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(post_restore::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(post_restore::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(post_restore::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(post_restore::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(post_restore::Response::Ok200), http::StatusCode::ACCEPTED => Ok(post_restore::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| post_restore::Error::DeserializeError(source, rsp_body.clone()))?; Err(post_restore::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod post_restore { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn trigger_change_detection( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, sync_group_name: &str, cloud_endpoint_name: &str, parameters: &TriggerChangeDetectionParameters, ) -> std::result::Result<trigger_change_detection::Response, trigger_change_detection::Error> { let http_client = operation_config.http_client();
let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/syncGroups/{}/cloudEndpoints/{}/triggerChangeDetection", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, sync_group_name, cloud_endpoint_name ); let mut url = url::Url::parse(url_str).map_err(trigger_change_detection::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(trigger_change_detection::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(trigger_change_detection::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(trigger_change_detection::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(trigger_change_detection::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(trigger_change_detection::Response::Ok200), http::StatusCode::ACCEPTED => Ok(trigger_change_detection::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body) .map_err(|source| trigger_change_detection::Error::DeserializeError(source, rsp_body.clone()))?; Err(trigger_change_detection::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod trigger_change_detection { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod server_endpoints { use super::{models, models::*, API_VERSION}; pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, sync_group_name: &str, server_endpoint_name: &str, ) -> std::result::Result<ServerEndpoint, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/syncGroups/{}/serverEndpoints/{}", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, sync_group_name, server_endpoint_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await
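// A failed token exchange surfaces as `get::Error::GetTokenError` below.
//
// Example for this `server_endpoints::get` operation (illustrative sketch; `config`
// and the string literals are caller-supplied placeholders):
//
//     let server_endpoint = server_endpoints::get(
//         &config, "subscription-id", "my-rg", "my-sync-service", "my-sync-group", "my-server-endpoint",
//     ).await?;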
.map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ServerEndpoint = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, sync_group_name: &str, server_endpoint_name: &str, parameters: &ServerEndpointCreateParameters, ) -> std::result::Result<create::Response, create::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/syncGroups/{}/serverEndpoints/{}", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, sync_group_name, server_endpoint_name ); let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(create::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ServerEndpoint = serde_json::from_slice(rsp_body).map_err(|source| 
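// Deserialization failures keep a copy of the raw body bytes so callers can
// inspect the payload that failed to parse.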
create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(create::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Err(create::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(ServerEndpoint), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, sync_group_name: &str, server_endpoint_name: &str, parameters: Option<&ServerEndpointUpdateParameters>, ) -> std::result::Result<update::Response, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/syncGroups/{}/serverEndpoints/{}", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, sync_group_name, server_endpoint_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = if let Some(parameters) = parameters { req_builder = req_builder.header("content-type", "application/json"); azure_core::to_json(parameters).map_err(update::Error::SerializeError)? 
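// `parameters` is optional for this PATCH: when present, the body is JSON and a
// content-type header is set; the `else` arm below sends an empty body instead.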
} else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ServerEndpoint = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update::Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(update::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Err(update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod update { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(ServerEndpoint), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, sync_group_name: &str, server_endpoint_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/syncGroups/{}/serverEndpoints/{}", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, sync_group_name, server_endpoint_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { 
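// any unexpected status code is reported together with the service's error payload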
status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_sync_group( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, sync_group_name: &str, ) -> std::result::Result<ServerEndpointArray, list_by_sync_group::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/syncGroups/{}/serverEndpoints", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, sync_group_name ); let mut url = url::Url::parse(url_str).map_err(list_by_sync_group::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_sync_group::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_sync_group::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_sync_group::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ServerEndpointArray = serde_json::from_slice(rsp_body) .map_err(|source| list_by_sync_group::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body) .map_err(|source| list_by_sync_group::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_sync_group::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_sync_group { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] 
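// Example for `server_endpoints::list_by_sync_group` (illustrative sketch; the
// literals are caller-supplied placeholders):
//
//     let endpoints = server_endpoints::list_by_sync_group(
//         &config, "subscription-id", "my-rg", "my-sync-service", "my-sync-group",
//     ).await?;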
DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn recall_action( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, sync_group_name: &str, server_endpoint_name: &str, parameters: &RecallActionParameters, ) -> std::result::Result<recall_action::Response, recall_action::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/syncGroups/{}/serverEndpoints/{}/recallAction", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, sync_group_name, server_endpoint_name ); let mut url = url::Url::parse(url_str).map_err(recall_action::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(recall_action::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(recall_action::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(recall_action::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(recall_action::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(recall_action::Response::Ok200), http::StatusCode::ACCEPTED => Ok(recall_action::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| recall_action::Error::DeserializeError(source, rsp_body.clone()))?; Err(recall_action::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod recall_action { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod registered_servers { use super::{models, models::*, API_VERSION}; pub async fn list_by_storage_sync_service( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, ) -> std::result::Result<RegisteredServerArray, list_by_storage_sync_service::Error> { let http_client = operation_config.http_client(); let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/registeredServers", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name ); let mut url = url::Url::parse(url_str).map_err(list_by_storage_sync_service::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_storage_sync_service::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_storage_sync_service::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_storage_sync_service::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: RegisteredServerArray = serde_json::from_slice(rsp_body) .map_err(|source| list_by_storage_sync_service::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body) .map_err(|source| list_by_storage_sync_service::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_storage_sync_service::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_storage_sync_service { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, server_id: &str, ) -> std::result::Result<RegisteredServer, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/registeredServers/{}", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, server_id ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", 
token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: RegisteredServer = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, server_id: &str, parameters: &RegisteredServerCreateParameters, ) -> std::result::Result<create::Response, create::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/registeredServers/{}", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, server_id ); let mut url = url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(create::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: RegisteredServer = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => Ok(create::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let 
rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Err(create::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(RegisteredServer), Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, server_id: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/registeredServers/{}", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, server_id ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] 
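// Example for `registered_servers::delete` (illustrative sketch; the literals are
// caller-supplied placeholders, and `server-guid` stands in for a registered server id):
//
//     registered_servers::delete(&config, "subscription-id", "my-rg", "my-sync-service", "server-guid").await?;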
SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn trigger_rollover( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, server_id: &str, parameters: &TriggerRolloverRequest, ) -> std::result::Result<trigger_rollover::Response, trigger_rollover::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/registeredServers/{}/triggerRollover", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, server_id ); let mut url = url::Url::parse(url_str).map_err(trigger_rollover::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(trigger_rollover::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(trigger_rollover::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(trigger_rollover::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(trigger_rollover::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(trigger_rollover::Response::Ok200), http::StatusCode::ACCEPTED => Ok(trigger_rollover::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body) .map_err(|source| trigger_rollover::Error::DeserializeError(source, rsp_body.clone()))?; Err(trigger_rollover::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod trigger_rollover { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod workflows { use super::{models, models::*, API_VERSION}; pub async fn list_by_storage_sync_service( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, ) -> std::result::Result<WorkflowArray, list_by_storage_sync_service::Error> { let http_client = operation_config.http_client(); let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/workflows", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name ); let mut url = url::Url::parse(url_str).map_err(list_by_storage_sync_service::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_storage_sync_service::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_storage_sync_service::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_storage_sync_service::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: WorkflowArray = serde_json::from_slice(rsp_body) .map_err(|source| list_by_storage_sync_service::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body) .map_err(|source| list_by_storage_sync_service::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_storage_sync_service::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_storage_sync_service { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, workflow_id: &str, ) -> std::result::Result<Workflow, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/workflows/{}", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, workflow_id ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } 
url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: Workflow = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn abort( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, storage_sync_service_name: &str, workflow_id: &str, ) -> std::result::Result<(), abort::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/storageSyncServices/{}/workflows/{}/abort", operation_config.base_path(), subscription_id, resource_group_name, storage_sync_service_name, workflow_id ); let mut url = url::Url::parse(url_str).map_err(abort::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(abort::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(abort::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(abort::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(()), status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| abort::Error::DeserializeError(source, rsp_body.clone()))?; Err(abort::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod abort { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: 
http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod operation_status { use super::{models, models::*, API_VERSION}; pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, location_name: &str, workflow_id: &str, operation_id: &str, ) -> std::result::Result<OperationStatus, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.StorageSync/locations/{}/workflows/{}/operations/{}", operation_config.base_path(), subscription_id, resource_group_name, location_name, workflow_id, operation_id ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: OperationStatus = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub async fn location_operation_status( operation_config: &crate::OperationConfig, subscription_id: &str, location_name: &str, operation_id: &str, ) -> std::result::Result<LocationOperationStatus, location_operation_status::Error> { let http_client = 
operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.StorageSync/locations/{}/operations/{}", operation_config.base_path(), subscription_id, location_name, operation_id ); let mut url = url::Url::parse(url_str).map_err(location_operation_status::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(location_operation_status::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(location_operation_status::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(location_operation_status::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: LocationOperationStatus = serde_json::from_slice(rsp_body) .map_err(|source| location_operation_status::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: StorageSyncError = serde_json::from_slice(rsp_body) .map_err(|source| location_operation_status::Error::DeserializeError(source, rsp_body.clone()))?; Err(location_operation_status::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod location_operation_status { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::StorageSyncError, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
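// A usage sketch, not part of the generated file: every operation above takes
// the crate's `OperationConfig` plus the URL path parameters and is awaited.
// The argument values below are hypothetical placeholders, and printing with
// `{:?}` assumes the generated models derive `Debug`.
async fn example_list_workflows(
    operation_config: &crate::OperationConfig,
) -> Result<(), workflows::list_by_storage_sync_service::Error> {
    let result = workflows::list_by_storage_sync_service(
        operation_config,
        "00000000-0000-0000-0000-000000000000", // subscription_id (placeholder)
        "example-rg",                           // resource_group_name (placeholder)
        "example-sync-service",                 // storage_sync_service_name (placeholder)
    )
    .await?;
    println!("{:?}", result);
    Ok(())
}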
48.570096
328
0.593466
dee6c847d82d7623e07c7a1c6de3693e15aac089
112
#![no_std]

pub mod fb_color;
pub mod fonts;
pub mod framebuffer;

pub use fb_color::*;
pub use framebuffer::*;
12.444444
23
0.705357
f762859d08f467c021347423157e85979dd9865f
2,433
// verification-helper: PROBLEM https://yukicoder.me/problems/no/880

use lib::ds::segtree::beats::*;
use lib::int::gcd::*;
use lib::io::*;
use lib::iter::Itertools;

#[derive(Debug, Clone, Copy)]
struct E {
    len: usize,
    sum: u64,
    max: u32,
    lcm: u32,
}

#[derive(Debug, Clone, Copy)]
enum F {
    Asgn(u32),
    Gcd(u32),
    Unit,
}
use F::*;

struct M;
impl Monoid<E> for M {
    fn unit(&self) -> E {
        E { len: 0, sum: 0, max: 0, lcm: 1 }
    }
    fn op(&self, x: E, y: E) -> E {
        if x.len == 0 {
            y
        } else if y.len == 0 {
            x
        } else {
            E {
                len: x.len + y.len,
                sum: x.sum + y.sum,
                max: x.max.max(y.max),
                lcm: lcm(x.lcm, y.lcm),
            }
        }
    }
}

struct A;
impl Monoid<F> for A {
    fn unit(&self) -> F {
        Unit
    }
    fn op(&self, x: F, y: F) -> F {
        match y {
            Asgn(_) => y,
            Gcd(y) => match x {
                Asgn(a) => Asgn(gcd(a, y)),
                Gcd(x) => Gcd(gcd(x, y)),
                _ => Gcd(y),
            },
            _ => x,
        }
    }
}

fn lcm(x: u32, y: u32) -> u32 {
    let lcm = x as u64 * y as u64 / gcd(x, y) as u64;
    (1 << 30).min(lcm) as u32
}

fn fill(a: u32, len: usize) -> E {
    E { len, sum: a as u64 * len as u64, max: a, lcm: a }
}

fn act(e: E, a: F) -> Option<E> {
    match a {
        Asgn(a) => Some(fill(a, e.len)),
        Gcd(a) => {
            if e.len == 1 {
                Some(fill(gcd(e.max, a), 1))
            } else if e.lcm != 1 << 30 && a % e.lcm == 0 {
                Some(e)
            } else {
                None
            }
        }
        _ => Some(e),
    }
}

fn main() {
    let mut io = IO::new();
    let [n, q]: [usize; 2] = io.scan();
    let a = io
        .scan_iter::<u32>(n)
        .map(|a| E { len: 1, sum: a as u64, max: a, lcm: a })
        .collect_vec();
    let mut st = SegmentTreeBeats::from_slice(&a, M, A, act);
    for _ in 0..q {
        let (c, Usize1(l), r) = io.scan();
        match c {
            1 => {
                st.act_over(l, r, Asgn(io.scan()));
            }
            2 => {
                st.act_over(l, r, Gcd(io.scan()));
            }
            3 => {
                io.println(st.ask(l, r).max);
            }
            _ => {
                io.println(st.ask(l, r).sum);
            }
        }
    }
}
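// Standalone illustration (not part of the solution above) of why `lcm` is
// clamped at 2^30: once the true lcm exceeds the sentinel, `act` can no longer
// take the "a % lcm == 0 => node unchanged" shortcut, so the beats recursion
// descends instead of returning a wrong answer. A local gcd keeps it self-contained.
#[cfg(test)]
mod lcm_clamp_sketch {
    fn gcd(a: u32, b: u32) -> u32 {
        if b == 0 { a } else { gcd(b, a % b) }
    }
    fn lcm(x: u32, y: u32) -> u32 {
        let l = x as u64 * y as u64 / gcd(x, y) as u64;
        (1u64 << 30).min(l) as u32
    }

    #[test]
    fn clamps_on_overflow() {
        // Small values: the exact lcm is representable and returned as-is.
        assert_eq!(lcm(6, 10), 30);
        // 2^20 and 2^20 + 1 are coprime, so the true lcm is about 2^40;
        // it is clamped to the 2^30 sentinel that `act` checks for.
        assert_eq!(lcm(1 << 20, (1 << 20) + 1), 1 << 30);
    }
}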
21.530973
68
0.37649
0aa6bda922fea3db847f1831055073fdf131f7cd
1,959
use std::env;
use std::{fs, str};
use std::fs::File;
use std::path::PathBuf;
use std::process::{Command, Stdio};
use std::io::{Write, BufRead, BufReader};

pub fn create_tmp_folder() -> PathBuf {
    let mut tmp_dir = env::temp_dir();
    let r: u64 = rand::random();
    tmp_dir.push(format!("gaps_rs_{}", r));
    fs::create_dir(&tmp_dir).unwrap();
    tmp_dir
}

// Requires single_quartet_check to be in path
pub fn qcheck(qtreefile: &str, fastafile: &str, nwkfile: &str) -> (u64, u64) {
    let output = Command::new("single_quartet_check")
        .arg(fastafile)
        .arg(qtreefile)
        .arg(nwkfile)
        .output()
        .expect("Failed to execute qcheck");
    let lines = str::from_utf8(&output.stdout).unwrap()
        .split('\n').collect::<Vec<_>>();
    lines[0..lines.len() - 1].iter()
        .fold((0, 0), |acc, line| {
            if line == &"0" {
                return (acc.0 + 1, acc.1);
            }
            if line == &"1" {
                return (acc.0, acc.1 + 1);
            }
            panic!("Invalid qcheck output")
        })
}

pub fn rfdist(infile: &str) -> u64 {
    // Create temporary folder
    let tmp_folder = create_tmp_folder();

    // Copy input file to intree
    let mut intree_file = tmp_folder.clone();
    intree_file.push("intree");
    fs::copy(infile, intree_file).unwrap();

    // Execute phylip treedist
    let mut child = Command::new("treedist")
        // .arg("treedist")
        .current_dir(&tmp_folder)
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .spawn().unwrap();

    // Write commands to stdin
    let child_stdin = child.stdin.as_mut().unwrap();
    child_stdin.write_all(b"D\nY\n").unwrap();
    child.wait_with_output().unwrap();

    // Extract result
    let mut outfile = tmp_folder.clone();
    outfile.push("outfile");
    let lines = BufReader::new(File::open(outfile).expect("Unable to open file")).lines();
    let last_line = lines.last().unwrap().unwrap();
    let result = last_line.split(' ')
        .collect::<Vec<&str>>()
        .last()
        .unwrap()
        .parse::<u64>()
        .unwrap();

    // Delete temporary folder
    fs::remove_dir_all(tmp_folder).unwrap();
    result
}
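// A minimal usage sketch (hypothetical file paths): both helpers shell out to
// external tools, so `single_quartet_check` and phylip's `treedist` must be on
// PATH as noted in the comments above.
pub fn example_usage() {
    let (agree, disagree) = qcheck("quartets.qtree", "alignment.fasta", "species.nwk");
    println!("quartets agreeing: {}, disagreeing: {}", agree, disagree);

    // `rfdist` expects a file containing the trees to compare.
    println!("Robinson-Foulds distance: {}", rfdist("pair_of_trees.nwk"));
}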
24.797468
87
0.647779
e2fedb2ab6cff1e882e41ced7f2db6ac134cf6dc
251
use grb::prelude::*;

mod utils;

fn main() -> grb::Result<()> {
    create_model!(_g, m, x, y, z);
    c!(x + y == 1 - z);
    c!(x + y >= 1 - z);
    c!(x + y <= 1 - z);
    c!(x - y in 0..1);
    c!(x in ..1);
    c!(y - x in ..);
    c!(x in -2.3..1);
    Ok(())
}
15.6875
32
0.378486
d59e57cae9452f580271bb806b4b3dc98f3d261f
1,590
use std::cmp;

/// Sorts a mutable slice using in-place insertion sort algorithm.
///
/// Time complexity is `O(n^2)`, where `n` is the number of elements.
/// Space complexity is `O(1)` as it sorts elements in-place.
pub fn insertion_sort<T>(arr: &mut [T])
where
    T: cmp::PartialOrd + Copy,
{
    for i in 1..arr.len() {
        let cur = arr[i];
        let mut j = i - 1;
        while arr[j] > cur {
            arr.swap(j + 1, j);
            if j == 0 {
                break;
            }
            j -= 1;
        }
    }
}

#[cfg(test)]
mod tests {
    use super::super::is_sorted;
    use super::*;

    #[test]
    fn empty() {
        let mut arr: [u8; 0] = [];
        insertion_sort(&mut arr);
        assert!(is_sorted(&arr));
    }

    #[test]
    fn one_element() {
        let mut arr: [char; 1] = ['a'];
        insertion_sort(&mut arr);
        assert!(is_sorted(&arr));
    }

    #[test]
    fn already_sorted() {
        let mut arr: [&str; 3] = ["a", "b", "c"];
        insertion_sort(&mut arr);
        assert!(is_sorted(&arr));
    }

    #[test]
    fn basic() {
        let mut arr: [&str; 4] = ["d", "a", "c", "b"];
        insertion_sort(&mut arr);
        assert!(is_sorted(&arr));
    }

    #[test]
    fn odd_number_of_elements() {
        let mut arr: Vec<&str> = vec!["d", "a", "c", "e", "b"];
        insertion_sort(&mut arr);
        assert!(is_sorted(&arr));
    }

    #[test]
    fn repeated_elements() {
        let mut arr: Vec<usize> = vec![542, 542, 542, 542];
        insertion_sort(&mut arr);
        assert!(is_sorted(&arr));
    }
}
22.083333
69
0.479874
5d521913b48f3b1d30ce2689d90a784ee949ef87
4,997
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

extern crate build_helper;
extern crate gcc;

use std::env;
use std::path::PathBuf;
use std::process::Command;
use build_helper::run;

fn main() {
    println!("cargo:rustc-cfg=cargobuild");

    let target = env::var("TARGET").unwrap();
    let host = env::var("HOST").unwrap();
    let build_dir = PathBuf::from(env::var_os("OUT_DIR").unwrap());
    let src_dir = env::current_dir().unwrap();

    if let Some(jemalloc) = env::var_os("JEMALLOC_OVERRIDE") {
        let jemalloc = PathBuf::from(jemalloc);
        println!("cargo:rustc-link-search=native={}",
                 jemalloc.parent().unwrap().display());
        let stem = jemalloc.file_stem().unwrap().to_str().unwrap();
        let name = jemalloc.file_name().unwrap().to_str().unwrap();
        let kind = if name.ends_with(".a") { "static" } else { "dylib" };
        println!("cargo:rustc-link-lib={}={}", kind, &stem[3..]);
        return
    }

    let compiler = gcc::Config::new().get_compiler();
    let ar = build_helper::cc2ar(compiler.path(), &target);
    let cflags = compiler.args().iter().map(|s| s.to_str().unwrap())
                         .collect::<Vec<_>>().join(" ");

    let mut cmd = Command::new("sh");
    cmd.arg(src_dir.join("../jemalloc/configure").to_str().unwrap()
                   .replace("C:\\", "/c/")
                   .replace("\\", "/"))
       .current_dir(&build_dir)
       .env("CC", compiler.path())
       .env("EXTRA_CFLAGS", cflags)
       .env("AR", &ar)
       .env("RANLIB", format!("{} s", ar.display()));

    if target.contains("windows") {
        // A bit of history here, this used to be --enable-lazy-lock added in
        // #14006 which was filed with jemalloc in jemalloc/jemalloc#83 which
        // was also reported to MinGW:
        //
        // http://sourceforge.net/p/mingw-w64/bugs/395/
        //
        // When updating jemalloc to 4.0, however, it was found that binaries
        // would exit with the status code STATUS_RESOURCE_NOT_OWNED indicating
        // that a thread was unlocking a mutex it never locked. Disabling this
        // "lazy lock" option seems to fix the issue, but it was enabled by
        // default for MinGW targets in 13473c7 for jemalloc.
        //
        // As a result of all that, force disabling lazy lock on Windows, and
        // after reading some code it at least *appears* that the initialization
        // of mutexes is otherwise ok in jemalloc, so shouldn't cause problems
        // hopefully...
        //
        // tl;dr: make windows behave like other platforms by disabling lazy
        //        locking, but requires passing an option due to a historical
        //        default with jemalloc.
        cmd.arg("--disable-lazy-lock");
    } else if target.contains("ios") {
        cmd.arg("--disable-tls");
    } else if target.contains("android") {
        // We force android to have prefixed symbols because apparently
        // replacement of the libc allocator doesn't quite work. When this was
        // tested (unprefixed symbols), it was found that the `realpath`
        // function in libc would allocate with libc malloc (not jemalloc
        // malloc), and then the standard library would free with jemalloc free,
        // causing a segfault.
        //
        // If the test suite passes, however, without symbol prefixes then we
        // should be good to go!
        cmd.arg("--with-jemalloc-prefix=je_");
        cmd.arg("--disable-tls");
    } else if target.contains("dragonfly") {
        cmd.arg("--with-jemalloc-prefix=je_");
    }

    if cfg!(feature = "debug-jemalloc") {
        cmd.arg("--enable-debug");
    }

    // Turn off broken quarantine (see jemalloc/jemalloc#161)
    cmd.arg("--disable-fill");
    cmd.arg(format!("--host={}", build_helper::gnu_target(&target)));
    cmd.arg(format!("--build={}", build_helper::gnu_target(&host)));

    run(&mut cmd);
    run(Command::new("make")
                .current_dir(&build_dir)
                .arg("build_lib_static")
                .arg("-j").arg(env::var("NUM_JOBS").unwrap()));

    if target.contains("windows") {
        println!("cargo:rustc-link-lib=static=jemalloc");
    } else {
        println!("cargo:rustc-link-lib=static=jemalloc_pic");
    }
    println!("cargo:rustc-link-search=native={}/lib", build_dir.display());
    if target.contains("android") {
        println!("cargo:rustc-link-lib=gcc");
    } else if !target.contains("windows") && !target.contains("musl") {
        println!("cargo:rustc-link-lib=pthread");
    }
}
41.641667
80
0.610566
5699a7e147cdf4646158fc844d93de56d40c448b
2,461
use super::bst::*;
use super::fmt_display::*;

#[test]
fn insert() {
    let mut bst = Bst::<i32>::new();
    bst.insert(55);
    bst.insert(5);
    bst.insert(27);
    bst.insert(100);
    bst.insert(23);
    assert_eq!(format!("{}", bst), "5 23 27 55 100 ");
}

#[test]
fn delete() {
    let mut bst = Bst::<i32>::new();
    bst.insert(15);
    bst.insert(22);
    bst.insert(7);
    bst.insert(5);
    bst.insert(11);
    bst.insert(9);
    bst.insert(13);
    bst.insert(8);
    bst.insert(10);
    bst.insert(12);
    bst.insert(14);
    assert_eq!(format!("{}", bst), "5 7 8 9 10 11 12 13 14 15 22 ");
    bst.delete(11);
    assert_eq!(format!("{}", bst), "5 7 8 9 10 12 13 14 15 22 ");
    bst.delete(22);
    assert_eq!(format!("{}", bst), "5 7 8 9 10 12 13 14 15 ");
    bst.delete(10);
    assert_eq!(format!("{}", bst), "5 7 8 9 12 13 14 15 ");
    bst.delete(9);
    assert_eq!(format!("{}", bst), "5 7 8 12 13 14 15 ");
    bst.delete(15);
    assert_eq!(format!("{}", bst), "5 7 8 12 13 14 ");
}

#[test]
fn search() {
    let mut bst = Bst::<i32>::new();
    assert_eq!(bst.search(1), false);
    bst.insert(15);
    bst.insert(22);
    bst.insert(7);
    bst.insert(5);
    bst.insert(11);
    bst.insert(9);
    bst.insert(13);
    bst.insert(8);
    bst.insert(10);
    bst.insert(12);
    bst.insert(14);
    assert_eq!(format!("{}", bst), "5 7 8 9 10 11 12 13 14 15 22 ");
    assert_eq!(bst.search(1), false);
    assert_eq!(bst.search(5), true);
    assert_eq!(bst.search(7), true);
    assert_eq!(bst.search(8), true);
    assert_eq!(bst.search(9), true);
    assert_eq!(bst.search(10), true);
    assert_eq!(bst.search(11), true);
    assert_eq!(bst.search(12), true);
    assert_eq!(bst.search(13), true);
    assert_eq!(bst.search(14), true);
    assert_eq!(bst.search(15), true);
    assert_eq!(bst.search(22), true);
    bst.delete(11);
    assert_eq!(format!("{}", bst), "5 7 8 9 10 12 13 14 15 22 ");
    assert_eq!(bst.search(11), false);
    bst.delete(22);
    assert_eq!(format!("{}", bst), "5 7 8 9 10 12 13 14 15 ");
    assert_eq!(bst.search(22), false);
    bst.delete(10);
    assert_eq!(format!("{}", bst), "5 7 8 9 12 13 14 15 ");
    assert_eq!(bst.search(10), false);
    bst.delete(9);
    assert_eq!(format!("{}", bst), "5 7 8 12 13 14 15 ");
    assert_eq!(bst.search(9), false);
    bst.delete(15);
    assert_eq!(format!("{}", bst), "5 7 8 12 13 14 ");
    assert_eq!(bst.search(15), false);
}
23.663462
68
0.549777
716566ef33ae17254eaeb4041687e5c069c89b4d
792
extern crate agg;
use agg::Render;

#[test]
fn t12_clip_box() {
    let (w, h) = (100, 100);
    let pixf = agg::Pixfmt::<agg::Rgb8>::new(w, h);
    let mut ren_base = agg::RenderingBase::new(pixf);
    ren_base.clear(agg::Rgba8::new(255, 255, 255, 255));

    let mut ren = agg::RenderingScanlineAASolid::with_base(&mut ren_base);
    ren.color(agg::Rgba8::new(255, 0, 0, 255));

    let mut ras = agg::RasterizerScanline::new();
    ras.clip_box(40.0, 0.0, w as f64 - 40.0, h as f64);
    ras.move_to(10.0, 10.0);
    ras.line_to(50.0, 90.0);
    ras.line_to(90.0, 10.0);

    agg::render_scanlines(&mut ras, &mut ren);
    ren.to_file("tests/tmp/agg_test_12.png").unwrap();
    assert_eq!(
        agg::ppm::img_diff("tests/tmp/agg_test_12.png", "images/agg_test_12.png").unwrap(),
        true
    );
}
22.628571
105
0.625
28fd4d5d342fd89a2fbce475781e80021145db9c
185
Express the length in the specified unit.
@repeat(5)@
@center@ @answ@ @hspacept(3)@ = @hspacept(3)@ @lib.check_number(value,40)@ @measure[indr]@
@/repeat@
18.5
90
0.567568
fc6a83e5a140f95bc3ac60b9d972806ae6a0950f
2,091
#![no_std]
#![feature(proc_macro_hygiene)]
extern crate ontio_std as ostd;
use ostd::abi::{Sink, Source};
use ostd::contract::ong;
use ostd::contract::ont;
use ostd::runtime;
use ostd::runtime::panic;

#[no_mangle]
pub fn invoke() {
    let input = runtime::input();
    let mut source = Source::new(&input);
    let action: &[u8] = source.read().unwrap();
    let mut sink = Sink::new(12);
    match action {
        b"ontTransferV2" => {
            let (from, to, amount) = source.read().unwrap();
            sink.write(ont::v2::transfer(from, to, amount));
        }
        b"ontBalanceOfV2" => {
            let from = source.read().unwrap();
            sink.write(ont::v2::balance_of(from));
        }
        b"ontApproveV2" => {
            let (from, to, amount) = source.read().unwrap();
            sink.write(ont::v2::approve(from, to, amount));
        }
        b"ontAllowanceV2" => {
            let (from, to) = source.read().unwrap();
            sink.write(ont::v2::allowance(from, to));
        }
        b"ontTransferFromV2" => {
            let (spender, from, to, amount) = source.read().unwrap();
            sink.write(ont::v2::transfer_from(spender, from, to, amount));
        }
        b"ongTransferV2" => {
            let (from, to, amount) = source.read().unwrap();
            sink.write(ong::v2::transfer(from, to, amount));
        }
        b"ongBalanceOfV2" => {
            let from = source.read().unwrap();
            sink.write(ong::v2::balance_of(from));
        }
        b"ongApproveV2" => {
            let (from, to, amount) = source.read().unwrap();
            sink.write(ong::v2::approve(from, to, amount));
        }
        b"ongAllowanceV2" => {
            let (from, to) = source.read().unwrap();
            sink.write(ong::v2::allowance(from, to));
        }
        b"ongTransferFromV2" => {
            let (spender, from, to, amount) = source.read().unwrap();
            sink.write(ong::v2::transfer_from(spender, from, to, amount));
        }
        _ => panic("unsupported action!"),
    }
    runtime::ret(sink.bytes())
}
32.671875
74
0.524629
ac38da7d8bebe45269b41d637e71e2bcdf67f1f8
383
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR MIT

#[kani::proof]
fn main() {
    let bitpattern = unsafe { std::mem::transmute::<f32, u32>(1.0) };
    assert!(bitpattern == 0x3F800000);

    let f = unsafe {
        let i: u32 = 0x3F800000;
        std::mem::transmute::<u32, f32>(i)
    };
    assert!(f == 1.0);
}
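// Worked arithmetic behind the constant checked above, as a sketch using the
// safe std equivalents of the transmutes: IEEE-754 single precision packs
// sign (1 bit) | exponent (8 bits, bias 127) | mantissa (23 bits), and
// 1.0 = (-1)^0 * 1.0 * 2^(127 - 127), so the fields are
// 0 | 0111_1111 | 000...0, i.e. 0x3F80_0000.
#[test]
fn bit_pattern_sketch() {
    assert_eq!(1.0f32.to_bits(), 0x3F80_0000);
    assert_eq!(f32::from_bits(0x3F80_0000), 1.0);
}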
25.533333
69
0.5953
0ea6c17d45c88a2800ad9d8289ba61af52229695
1,466
use crate::{utils::println, User};
use std::{env, net};

pub enum Command {
    Connect(String),
    Open,
}

pub enum InputError {
    InvalidCmd(String),
    NoCommand,
}

impl InputError {
    pub fn to_string(&self) -> String {
        match self {
            InputError::InvalidCmd(msg) => msg.to_string(),
            InputError::NoCommand => "No command given".to_string(),
        }
    }
}

pub fn parse_command(input: &mut env::Args) -> Result<Command, InputError> {
    input.next();
    match input.next().as_deref() {
        Some("open") => Ok(Command::Open),
        Some("connect") => {
            let host = input.next().ok_or(InputError::InvalidCmd(
                "connect requires IP address".to_string(),
            ))?;
            Ok(Command::Connect(host))
        }
        Some(cmd) => Err(InputError::InvalidCmd(format!("invalid command: {}", cmd))),
        None => Err(InputError::NoCommand),
    }
}

pub fn connect(addr: &str) {
    let stream = net::TcpStream::connect(addr).expect("Failed to connect to server");
    let user = User::new_get_name(stream);
    user.start_session();
}

pub fn open() {
    let url = format!("{}:{}", "0.0.0.0", 8080);
    let socket = net::TcpListener::bind(url).unwrap();
    println("Waiting for connection...");
    let (stream, addr) = socket.accept().unwrap();
    println(format!("Connected to {}", addr).as_str());
    let user = User::new_get_name(stream);
    user.start_session()
}
25.275862
86
0.582538
3345cdce71dcef4406a394fae17024380b873dfc
228
# this is a single line comment

##
block comments
this should be commented out
##

# The at sign is aliased to this
@myProperty = false

# It also works correctly when a period is appended
@.myProperty = false
15.2
51
0.710526
fffa1447f80a10ee12544e8dfd6e7ab421d49f58
2,325
use std::io::Cursor;
use std::net::ToSocketAddrs;

use byteorder::{LittleEndian, ReadBytesExt};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};

use crate::errors::{Error, Result};
use crate::{A2SClient, ReadCString};

const PLAYER_REQUEST: [u8; 5] = [0xff, 0xff, 0xff, 0xff, 0x55];

#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
pub struct Players {
    pub count: u8,
    pub players: Vec<Player>,
}

#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
pub struct Player {
    // Index of player chunk starting from 0.
    // This seems to be always 0?
    pub index: u8,

    // Name of the player.
    pub name: String,

    // Player's score (usually "frags" or "kills".)
    pub score: u32,

    // Time (in seconds) player has been connected to the server.
    pub duration: f32,

    // The Ship additional player info
    pub the_ship: Option<TheShipPlayer>,
}

#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
pub struct TheShipPlayer {
    pub deaths: u32,
    pub money: u32,
}

impl A2SClient {
    pub fn players<A: ToSocketAddrs>(&self, addr: A) -> Result<Players> {
        let data = self.do_challenge_request(addr, &PLAYER_REQUEST)?;

        let mut data = Cursor::new(data);

        if data.read_u8()? != 0x44 {
            return Err(Error::InvalidResponse);
        }

        let player_count = data.read_u8()?;

        let mut players: Vec<Player> = Vec::with_capacity(player_count as usize);

        for _ in 0..player_count {
            players.push(Player {
                index: data.read_u8()?,
                name: data.read_cstring()?,
                score: data.read_u32::<LittleEndian>()?,
                duration: data.read_f32::<LittleEndian>()?,
                the_ship: {
                    if self.app_id == 2400 {
                        Some(TheShipPlayer {
                            deaths: data.read_u32::<LittleEndian>()?,
                            money: data.read_u32::<LittleEndian>()?,
                        })
                    } else {
                        None
                    }
                },
            })
        }

        Ok(Players {
            count: player_count,
            players,
        })
    }
}
26.123596
81
0.552688
9100e58be17b9ca1776dbc508e9ab8bc436ddc24
4,868
use crate::{func::FuncRef, module::check_limits, Error};
use alloc::{rc::Rc, vec::Vec};
use core::{cell::RefCell, fmt, u32};
use parity_wasm::elements::ResizableLimits;

/// Reference to a table (See [`TableInstance`] for details).
///
/// This reference has a reference-counting semantics.
///
/// [`TableInstance`]: struct.TableInstance.html
///
#[derive(Clone, Debug)]
pub struct TableRef(Rc<TableInstance>);

impl ::core::ops::Deref for TableRef {
    type Target = TableInstance;
    fn deref(&self) -> &TableInstance {
        &self.0
    }
}

/// Runtime representation of a table.
///
/// A table is an array of untyped functions. It allows wasm code to call functions
/// indirectly through a dynamic index into a table. For example, this allows emulating function
/// pointers by way of table indices.
///
/// Table is created with an initial size but can be grown dynamically via [`grow`] method.
/// Growth can be limited by an optional maximum size.
///
/// In future, a table might be extended to be able to hold not only functions but different types.
///
/// [`grow`]: #method.grow
///
pub struct TableInstance {
    /// Table limits.
    limits: ResizableLimits,
    /// Table memory buffer.
    buffer: RefCell<Vec<Option<FuncRef>>>,
}

impl fmt::Debug for TableInstance {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("TableInstance")
            .field("limits", &self.limits)
            .field("buffer.len", &self.buffer.borrow().len())
            .finish()
    }
}

impl TableInstance {
    /// Allocate a table instance.
    ///
    /// The table allocated with initial size, specified by `initial_size`.
    /// Maximum size can be specified by `maximum_size`.
    ///
    /// All table elements are allocated uninitialized.
    ///
    /// # Errors
    ///
    /// Returns `Err` if `initial_size` is greater than `maximum_size`.
    pub fn alloc(initial_size: u32, maximum_size: Option<u32>) -> Result<TableRef, Error> {
        let table = TableInstance::new(ResizableLimits::new(initial_size, maximum_size))?;
        Ok(TableRef(Rc::new(table)))
    }

    fn new(limits: ResizableLimits) -> Result<TableInstance, Error> {
        check_limits(&limits)?;
        Ok(TableInstance {
            buffer: RefCell::new(vec![None; limits.initial() as usize]),
            limits,
        })
    }

    /// Return table limits.
    pub(crate) fn limits(&self) -> &ResizableLimits {
        &self.limits
    }

    /// Returns size this table was created with.
    pub fn initial_size(&self) -> u32 {
        self.limits.initial()
    }

    /// Returns maximum size `TableInstance` can grow to.
    pub fn maximum_size(&self) -> Option<u32> {
        self.limits.maximum()
    }

    /// Returns current size of the table.
    pub fn current_size(&self) -> u32 {
        self.buffer.borrow().len() as u32
    }

    /// Increases the size of the table by given number of elements.
    ///
    /// # Errors
    ///
    /// Returns `Err` if tried to allocate more elements than permitted by limit.
    pub fn grow(&self, by: u32) -> Result<(), Error> {
        let mut buffer = self.buffer.borrow_mut();
        let maximum_size = self.maximum_size().unwrap_or(u32::MAX);
        let new_size = self
            .current_size()
            .checked_add(by)
            .and_then(|new_size| {
                if maximum_size < new_size {
                    None
                } else {
                    Some(new_size)
                }
            })
            .ok_or_else(|| {
                Error::Table(format!(
                    "Trying to grow table by {} items when there are already {} items",
                    by,
                    self.current_size(),
                ))
            })?;
        buffer.resize(new_size as usize, None);
        Ok(())
    }

    /// Get the specific value in the table
    pub fn get(&self, offset: u32) -> Result<Option<FuncRef>, Error> {
        let buffer = self.buffer.borrow();
        let buffer_len = buffer.len();
        let table_elem = buffer.get(offset as usize).cloned().ok_or_else(|| {
            Error::Table(format!(
                "trying to read table item with index {} when there are only {} items",
                offset, buffer_len
            ))
        })?;
        Ok(table_elem)
    }

    /// Set the table element to the specified function.
    pub fn set(&self, offset: u32, value: Option<FuncRef>) -> Result<(), Error> {
        let mut buffer = self.buffer.borrow_mut();
        let buffer_len = buffer.len();
        let table_elem = buffer.get_mut(offset as usize).ok_or_else(|| {
            Error::Table(format!(
                "trying to update table item with index {} when there are only {} items",
                offset, buffer_len
            ))
        })?;
        *table_elem = value;
        Ok(())
    }
}
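// A usage sketch based only on the API above: allocate a table with a maximum,
// grow it up to that limit, and observe that growing past the limit errors.
// (`TableRef` derefs to `TableInstance`, so the methods are called directly;
// unwrapping assumes `Error` implements `Debug`.)
#[cfg(test)]
mod grow_sketch {
    use super::TableInstance;

    #[test]
    fn grow_respects_maximum() {
        let table = TableInstance::alloc(2, Some(4)).unwrap();
        assert_eq!(table.current_size(), 2);
        table.grow(2).unwrap(); // new size 4 <= maximum, ok
        assert!(table.grow(1).is_err()); // 5 > maximum, rejected
    }
}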
32.238411
99
0.581348
ef33eb5944c7255c57918cca0128851a7aa1e17a
90,478
//! Implementation of the various distribution aspects of the compiler. //! //! This module is responsible for creating tarballs of the standard library, //! compiler, and documentation. This ends up being what we distribute to //! everyone as well. //! //! No tarball is actually created literally in this file, but rather we shell //! out to `rust-installer` still. This may one day be replaced with bits and //! pieces of `rustup.rs`! use std::env; use std::fs; use std::io::Write; use std::path::{Path, PathBuf}; use std::process::{Command, Stdio}; use build_helper::{output, t}; use crate::builder::{Builder, RunConfig, ShouldRun, Step}; use crate::cache::{Interned, INTERNER}; use crate::channel; use crate::compile; use crate::tool::{self, Tool}; use crate::util::{exe, is_dylib, timeit}; use crate::{Compiler, Mode, LLVM_TOOLS}; use time::{self, Timespec}; pub fn pkgname(builder: &Builder<'_>, component: &str) -> String { if component == "cargo" { format!("{}-{}", component, builder.cargo_package_vers()) } else if component == "rls" { format!("{}-{}", component, builder.rls_package_vers()) } else if component == "clippy" { format!("{}-{}", component, builder.clippy_package_vers()) } else if component == "miri" { format!("{}-{}", component, builder.miri_package_vers()) } else if component == "rustfmt" { format!("{}-{}", component, builder.rustfmt_package_vers()) } else if component == "llvm-tools" { format!("{}-{}", component, builder.llvm_tools_package_vers()) } else if component == "lldb" { format!("{}-{}", component, builder.lldb_package_vers()) } else { assert!(component.starts_with("rust")); format!("{}-{}", component, builder.rust_package_vers()) } } fn distdir(builder: &Builder<'_>) -> PathBuf { builder.out.join("dist") } pub fn tmpdir(builder: &Builder<'_>) -> PathBuf { builder.out.join("tmp/dist") } fn rust_installer(builder: &Builder<'_>) -> Command { builder.tool_cmd(Tool::RustInstaller) } fn missing_tool(tool_name: &str, skip: bool) { if skip { println!("Unable to build {}, skipping dist", tool_name) } else { panic!("Unable to build {}", tool_name) } } #[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct Docs { pub host: Interned<String>, } impl Step for Docs { type Output = PathBuf; const DEFAULT: bool = true; fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/doc") } fn make_run(run: RunConfig<'_>) { run.builder.ensure(Docs { host: run.target }); } /// Builds the `rust-docs` installer component. 
fn run(self, builder: &Builder<'_>) -> PathBuf { let host = self.host; let name = pkgname(builder, "rust-docs"); if !builder.config.docs { return distdir(builder).join(format!("{}-{}.tar.gz", name, host)); } builder.default_doc(None); builder.info(&format!("Dist docs ({})", host)); let _time = timeit(builder); let image = tmpdir(builder).join(format!("{}-{}-image", name, host)); let _ = fs::remove_dir_all(&image); let dst = image.join("share/doc/rust/html"); t!(fs::create_dir_all(&dst)); let src = builder.doc_out(host); builder.cp_r(&src, &dst); builder.install(&builder.src.join("src/doc/robots.txt"), &dst, 0o644); let mut cmd = rust_installer(builder); cmd.arg("generate") .arg("--product-name=Rust-Documentation") .arg("--rel-manifest-dir=rustlib") .arg("--success-message=Rust-documentation-is-installed.") .arg("--image-dir") .arg(&image) .arg("--work-dir") .arg(&tmpdir(builder)) .arg("--output-dir") .arg(&distdir(builder)) .arg(format!("--package-name={}-{}", name, host)) .arg("--component-name=rust-docs") .arg("--legacy-manifest-dirs=rustlib,cargo") .arg("--bulk-dirs=share/doc/rust/html"); builder.run(&mut cmd); builder.remove_dir(&image); distdir(builder).join(format!("{}-{}.tar.gz", name, host)) } } #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub struct RustcDocs { pub host: Interned<String>, } impl Step for RustcDocs { type Output = PathBuf; const DEFAULT: bool = true; fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/librustc") } fn make_run(run: RunConfig<'_>) { run.builder.ensure(RustcDocs { host: run.target }); } /// Builds the `rustc-docs` installer component. fn run(self, builder: &Builder<'_>) -> PathBuf { let host = self.host; let name = pkgname(builder, "rustc-docs"); if !builder.config.compiler_docs { return distdir(builder).join(format!("{}-{}.tar.gz", name, host)); } builder.default_doc(None); let image = tmpdir(builder).join(format!("{}-{}-image", name, host)); let _ = fs::remove_dir_all(&image); let dst = image.join("share/doc/rust/html"); t!(fs::create_dir_all(&dst)); let src = builder.compiler_doc_out(host); builder.cp_r(&src, &dst); let mut cmd = rust_installer(builder); cmd.arg("generate") .arg("--product-name=Rustc-Documentation") .arg("--rel-manifest-dir=rustlib") .arg("--success-message=Rustc-documentation-is-installed.") .arg("--image-dir") .arg(&image) .arg("--work-dir") .arg(&tmpdir(builder)) .arg("--output-dir") .arg(&distdir(builder)) .arg(format!("--package-name={}-{}", name, host)) .arg("--component-name=rustc-docs") .arg("--legacy-manifest-dirs=rustlib,cargo") .arg("--bulk-dirs=share/doc/rust/html"); builder.info(&format!("Dist compiler docs ({})", host)); let _time = timeit(builder); builder.run(&mut cmd); builder.remove_dir(&image); distdir(builder).join(format!("{}-{}.tar.gz", name, host)) } } fn find_files(files: &[&str], path: &[PathBuf]) -> Vec<PathBuf> { let mut found = Vec::with_capacity(files.len()); for file in files { let file_path = path.iter().map(|dir| dir.join(file)).find(|p| p.exists()); if let Some(file_path) = file_path { found.push(file_path); } else { panic!("Could not find '{}' in {:?}", file, path); } } found } fn make_win_dist( rust_root: &Path, plat_root: &Path, target_triple: Interned<String>, builder: &Builder<'_>, ) { //Ask gcc where it keeps its stuff let mut cmd = Command::new(builder.cc(target_triple)); cmd.arg("-print-search-dirs"); let gcc_out = output(&mut cmd); let mut bin_path: Vec<_> = env::split_paths(&env::var_os("PATH").unwrap_or_default()).collect(); let mut lib_path = Vec::new(); for line in 
gcc_out.lines() { let idx = line.find(':').unwrap(); let key = &line[..idx]; let trim_chars: &[_] = &[' ', '=']; let value = line[(idx + 1)..].trim_start_matches(trim_chars).split(';').map(PathBuf::from); if key == "programs" { bin_path.extend(value); } else if key == "libraries" { lib_path.extend(value); } } let compiler = if target_triple == "i686-pc-windows-gnu" { "i686-w64-mingw32-gcc.exe" } else if target_triple == "x86_64-pc-windows-gnu" { "x86_64-w64-mingw32-gcc.exe" } else { "gcc.exe" }; let target_tools = [compiler, "ld.exe", "dlltool.exe", "libwinpthread-1.dll"]; let mut rustc_dlls = vec!["libwinpthread-1.dll"]; if target_triple.starts_with("i686-") { rustc_dlls.push("libgcc_s_dw2-1.dll"); } else { rustc_dlls.push("libgcc_s_seh-1.dll"); } let target_libs = [ //MinGW libs "libgcc.a", "libgcc_eh.a", "libgcc_s.a", "libm.a", "libmingw32.a", "libmingwex.a", "libstdc++.a", "libiconv.a", "libmoldname.a", "libpthread.a", //Windows import libs "libadvapi32.a", "libbcrypt.a", "libcomctl32.a", "libcomdlg32.a", "libcredui.a", "libcrypt32.a", "libdbghelp.a", "libgdi32.a", "libimagehlp.a", "libiphlpapi.a", "libkernel32.a", "libmsimg32.a", "libmsvcrt.a", "libodbc32.a", "libole32.a", "liboleaut32.a", "libopengl32.a", "libpsapi.a", "librpcrt4.a", "libsecur32.a", "libsetupapi.a", "libshell32.a", "libsynchronization.a", "libuser32.a", "libuserenv.a", "libuuid.a", "libwinhttp.a", "libwinmm.a", "libwinspool.a", "libws2_32.a", "libwsock32.a", ]; //Find mingw artifacts we want to bundle let target_tools = find_files(&target_tools, &bin_path); let rustc_dlls = find_files(&rustc_dlls, &bin_path); let target_libs = find_files(&target_libs, &lib_path); // Copy runtime dlls next to rustc.exe let dist_bin_dir = rust_root.join("bin/"); fs::create_dir_all(&dist_bin_dir).expect("creating dist_bin_dir failed"); for src in rustc_dlls { builder.copy_to_folder(&src, &dist_bin_dir); } //Copy platform tools to platform-specific bin directory let target_bin_dir = plat_root.join("lib").join("rustlib").join(target_triple).join("bin"); fs::create_dir_all(&target_bin_dir).expect("creating target_bin_dir failed"); for src in target_tools { builder.copy_to_folder(&src, &target_bin_dir); } // Warn windows-gnu users that the bundled GCC cannot compile C files builder.create( &target_bin_dir.join("GCC-WARNING.txt"), "gcc.exe contained in this folder cannot be used for compiling C files - it is only\ used as a linker. In order to be able to compile projects containing C code use\ the GCC provided by MinGW or Cygwin.", ); //Copy platform libs to platform-specific lib directory let target_lib_dir = plat_root.join("lib").join("rustlib").join(target_triple).join("lib"); fs::create_dir_all(&target_lib_dir).expect("creating target_lib_dir failed"); for src in target_libs { builder.copy_to_folder(&src, &target_lib_dir); } } #[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct Mingw { pub host: Interned<String>, } impl Step for Mingw { type Output = Option<PathBuf>; const DEFAULT: bool = true; fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.never() } fn make_run(run: RunConfig<'_>) { run.builder.ensure(Mingw { host: run.target }); } /// Builds the `rust-mingw` installer component. /// /// This contains all the bits and pieces to run the MinGW Windows targets /// without any extra installed software (e.g., we bundle gcc, libraries, etc). 
fn run(self, builder: &Builder<'_>) -> Option<PathBuf> { let host = self.host; if !host.contains("pc-windows-gnu") { return None; } builder.info(&format!("Dist mingw ({})", host)); let _time = timeit(builder); let name = pkgname(builder, "rust-mingw"); let image = tmpdir(builder).join(format!("{}-{}-image", name, host)); let _ = fs::remove_dir_all(&image); t!(fs::create_dir_all(&image)); // The first argument is a "temporary directory" which is just // thrown away (this contains the runtime DLLs included in the rustc package // above) and the second argument is where to place all the MinGW components // (which is what we want). make_win_dist(&tmpdir(builder), &image, host, &builder); let mut cmd = rust_installer(builder); cmd.arg("generate") .arg("--product-name=Rust-MinGW") .arg("--rel-manifest-dir=rustlib") .arg("--success-message=Rust-MinGW-is-installed.") .arg("--image-dir") .arg(&image) .arg("--work-dir") .arg(&tmpdir(builder)) .arg("--output-dir") .arg(&distdir(builder)) .arg(format!("--package-name={}-{}", name, host)) .arg("--component-name=rust-mingw") .arg("--legacy-manifest-dirs=rustlib,cargo"); builder.run(&mut cmd); t!(fs::remove_dir_all(&image)); Some(distdir(builder).join(format!("{}-{}.tar.gz", name, host))) } } #[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct Rustc { pub compiler: Compiler, } impl Step for Rustc { type Output = PathBuf; const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/librustc") } fn make_run(run: RunConfig<'_>) { run.builder .ensure(Rustc { compiler: run.builder.compiler(run.builder.top_stage, run.target) }); } /// Creates the `rustc` installer component. fn run(self, builder: &Builder<'_>) -> PathBuf { let compiler = self.compiler; let host = self.compiler.host; let name = pkgname(builder, "rustc"); let image = tmpdir(builder).join(format!("{}-{}-image", name, host)); let _ = fs::remove_dir_all(&image); let overlay = tmpdir(builder).join(format!("{}-{}-overlay", name, host)); let _ = fs::remove_dir_all(&overlay); // Prepare the rustc "image", what will actually end up getting installed prepare_image(builder, compiler, &image); // Prepare the overlay which is part of the tarball but won't actually be // installed let cp = |file: &str| { builder.install(&builder.src.join(file), &overlay, 0o644); }; cp("COPYRIGHT"); cp("LICENSE-APACHE"); cp("LICENSE-MIT"); cp("README.md"); // tiny morsel of metadata is used by rust-packaging let version = builder.rust_version(); builder.create(&overlay.join("version"), &version); if let Some(sha) = builder.rust_sha() { builder.create(&overlay.join("git-commit-hash"), &sha); } // On MinGW we've got a few runtime DLL dependencies that we need to // include. The first argument to this script is where to put these DLLs // (the image we're creating), and the second argument is a junk directory // to ignore all other MinGW stuff the script creates. // // On 32-bit MinGW we're always including a DLL which needs some extra // licenses to distribute. On 64-bit MinGW we don't actually distribute // anything requiring us to distribute a license, but it's likely the // install will *also* include the rust-mingw package, which also needs // licenses, so to be safe we just include it here in all MinGW packages. 
if host.contains("pc-windows-gnu") { make_win_dist(&image, &tmpdir(builder), host, builder); let dst = image.join("share/doc"); t!(fs::create_dir_all(&dst)); builder.cp_r(&builder.src.join("src/etc/third-party"), &dst); } // Finally, wrap everything up in a nice tarball! let mut cmd = rust_installer(builder); cmd.arg("generate") .arg("--product-name=Rust") .arg("--rel-manifest-dir=rustlib") .arg("--success-message=Rust-is-ready-to-roll.") .arg("--image-dir") .arg(&image) .arg("--work-dir") .arg(&tmpdir(builder)) .arg("--output-dir") .arg(&distdir(builder)) .arg("--non-installed-overlay") .arg(&overlay) .arg(format!("--package-name={}-{}", name, host)) .arg("--component-name=rustc") .arg("--legacy-manifest-dirs=rustlib,cargo"); builder.info(&format!("Dist rustc stage{} ({})", compiler.stage, host)); let _time = timeit(builder); builder.run(&mut cmd); builder.remove_dir(&image); builder.remove_dir(&overlay); return distdir(builder).join(format!("{}-{}.tar.gz", name, host)); fn prepare_image(builder: &Builder<'_>, compiler: Compiler, image: &Path) { let host = compiler.host; let src = builder.sysroot(compiler); // Copy rustc/rustdoc binaries t!(fs::create_dir_all(image.join("bin"))); builder.cp_r(&src.join("bin"), &image.join("bin")); builder.install(&builder.rustdoc(compiler), &image.join("bin"), 0o755); let libdir_relative = builder.libdir_relative(compiler); // Copy runtime DLLs needed by the compiler if libdir_relative.to_str() != Some("bin") { let libdir = builder.rustc_libdir(compiler); for entry in builder.read_dir(&libdir) { let name = entry.file_name(); if let Some(s) = name.to_str() { if is_dylib(s) { // Don't use custom libdir here because ^lib/ will be resolved again // with installer builder.install(&entry.path(), &image.join("lib"), 0o644); } } } } // Copy libLLVM.so to the lib dir as well, if needed. While not // technically needed by rustc itself it's needed by lots of other // components like the llvm tools and LLD. LLD is included below and // tools/LLDB come later, so let's just throw it in the rustc // component for now. maybe_install_llvm_dylib(builder, host, image); // Copy over lld if it's there if builder.config.lld_enabled { let exe = exe("rust-lld", &compiler.host); let src = builder.sysroot_libdir(compiler, host).parent().unwrap().join("bin").join(&exe); // for the rationale about this rename check `compile::copy_lld_to_sysroot` let dst = image.join("lib/rustlib").join(&*host).join("bin").join(&exe); t!(fs::create_dir_all(&dst.parent().unwrap())); builder.copy(&src, &dst); } // Man pages t!(fs::create_dir_all(image.join("share/man/man1"))); let man_src = builder.src.join("src/doc/man"); let man_dst = image.join("share/man/man1"); // Reproducible builds: If SOURCE_DATE_EPOCH is set, use that as the time. 
let time = env::var("SOURCE_DATE_EPOCH") .map(|timestamp| { let epoch = timestamp .parse() .map_err(|err| format!("could not parse SOURCE_DATE_EPOCH: {}", err)) .unwrap(); time::at(Timespec::new(epoch, 0)) }) .unwrap_or_else(|_| time::now()); let month_year = t!(time::strftime("%B %Y", &time)); // don't use our `bootstrap::util::{copy, cp_r}`, because those try // to hardlink, and we don't want to edit the source templates for file_entry in builder.read_dir(&man_src) { let page_src = file_entry.path(); let page_dst = man_dst.join(file_entry.file_name()); t!(fs::copy(&page_src, &page_dst)); // template in month/year and version number builder.replace_in_file( &page_dst, &[ ("<INSERT DATE HERE>", &month_year), ("<INSERT VERSION HERE>", channel::CFG_RELEASE_NUM), ], ); } // Debugger scripts builder .ensure(DebuggerScripts { sysroot: INTERNER.intern_path(image.to_owned()), host }); // Misc license info let cp = |file: &str| { builder.install(&builder.src.join(file), &image.join("share/doc/rust"), 0o644); }; cp("COPYRIGHT"); cp("LICENSE-APACHE"); cp("LICENSE-MIT"); cp("README.md"); } } } #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub struct DebuggerScripts { pub sysroot: Interned<PathBuf>, pub host: Interned<String>, } impl Step for DebuggerScripts { type Output = (); fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/lldb_batchmode.py") } fn make_run(run: RunConfig<'_>) { run.builder.ensure(DebuggerScripts { sysroot: run.builder.sysroot(run.builder.compiler(run.builder.top_stage, run.host)), host: run.target, }); } /// Copies debugger scripts for `target` into the `sysroot` specified. fn run(self, builder: &Builder<'_>) { let host = self.host; let sysroot = self.sysroot; let dst = sysroot.join("lib/rustlib/etc"); t!(fs::create_dir_all(&dst)); let cp_debugger_script = |file: &str| { builder.install(&builder.src.join("src/etc/").join(file), &dst, 0o644); }; if host.contains("windows-msvc") { // windbg debugger scripts builder.install( &builder.src.join("src/etc/rust-windbg.cmd"), &sysroot.join("bin"), 0o755, ); cp_debugger_script("natvis/intrinsic.natvis"); cp_debugger_script("natvis/liballoc.natvis"); cp_debugger_script("natvis/libcore.natvis"); cp_debugger_script("natvis/libstd.natvis"); } else { cp_debugger_script("debugger_pretty_printers_common.py"); // gdb debugger scripts builder.install(&builder.src.join("src/etc/rust-gdb"), &sysroot.join("bin"), 0o755); builder.install(&builder.src.join("src/etc/rust-gdbgui"), &sysroot.join("bin"), 0o755); cp_debugger_script("gdb_load_rust_pretty_printers.py"); cp_debugger_script("gdb_rust_pretty_printing.py"); // lldb debugger scripts builder.install(&builder.src.join("src/etc/rust-lldb"), &sysroot.join("bin"), 0o755); cp_debugger_script("lldb_rust_formatters.py"); } } } fn skip_host_target_lib(builder: &Builder<'_>, compiler: Compiler) -> bool { // The only true set of target libraries came from the build triple, so // let's reduce redundant work by only producing archives from that host. if compiler.host != builder.config.build { builder.info("\tskipping, not a build host"); true } else { false } } /// Copy stamped files into an image's `target/lib` directory. 
fn copy_target_libs(builder: &Builder<'_>, target: &str, image: &Path, stamp: &Path) { let dst = image.join("lib/rustlib").join(target).join("lib"); t!(fs::create_dir_all(&dst)); for (path, host) in builder.read_stamp_file(stamp) { if !host || builder.config.build == target { builder.copy(&path, &dst.join(path.file_name().unwrap())); } } } #[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct Std { pub compiler: Compiler, pub target: Interned<String>, } impl Step for Std { type Output = PathBuf; const DEFAULT: bool = true; fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/libstd") } fn make_run(run: RunConfig<'_>) { run.builder.ensure(Std { compiler: run.builder.compiler_for( run.builder.top_stage, run.builder.config.build, run.target, ), target: run.target, }); } fn run(self, builder: &Builder<'_>) -> PathBuf { let compiler = self.compiler; let target = self.target; let name = pkgname(builder, "rust-std"); let archive = distdir(builder).join(format!("{}-{}.tar.gz", name, target)); if skip_host_target_lib(builder, compiler) { return archive; } builder.ensure(compile::Std { compiler, target }); let image = tmpdir(builder).join(format!("{}-{}-image", name, target)); let _ = fs::remove_dir_all(&image); let compiler_to_use = builder.compiler_for(compiler.stage, compiler.host, target); let stamp = compile::libstd_stamp(builder, compiler_to_use, target); copy_target_libs(builder, &target, &image, &stamp); let mut cmd = rust_installer(builder); cmd.arg("generate") .arg("--product-name=Rust") .arg("--rel-manifest-dir=rustlib") .arg("--success-message=std-is-standing-at-the-ready.") .arg("--image-dir") .arg(&image) .arg("--work-dir") .arg(&tmpdir(builder)) .arg("--output-dir") .arg(&distdir(builder)) .arg(format!("--package-name={}-{}", name, target)) .arg(format!("--component-name=rust-std-{}", target)) .arg("--legacy-manifest-dirs=rustlib,cargo"); builder .info(&format!("Dist std stage{} ({} -> {})", compiler.stage, &compiler.host, target)); let _time = timeit(builder); builder.run(&mut cmd); builder.remove_dir(&image); archive } } #[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct RustcDev { pub compiler: Compiler, pub target: Interned<String>, } impl Step for RustcDev { type Output = PathBuf; const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("rustc-dev") } fn make_run(run: RunConfig<'_>) { run.builder.ensure(RustcDev { compiler: run.builder.compiler_for( run.builder.top_stage, run.builder.config.build, run.target, ), target: run.target, }); } fn run(self, builder: &Builder<'_>) -> PathBuf { let compiler = self.compiler; let target = self.target; let name = pkgname(builder, "rustc-dev"); let archive = distdir(builder).join(format!("{}-{}.tar.gz", name, target)); if skip_host_target_lib(builder, compiler) { return archive; } builder.ensure(compile::Rustc { compiler, target }); let image = tmpdir(builder).join(format!("{}-{}-image", name, target)); let _ = fs::remove_dir_all(&image); let compiler_to_use = builder.compiler_for(compiler.stage, compiler.host, target); let stamp = compile::librustc_stamp(builder, compiler_to_use, target); copy_target_libs(builder, &target, &image, &stamp); let mut cmd = rust_installer(builder); cmd.arg("generate") .arg("--product-name=Rust") .arg("--rel-manifest-dir=rustlib") .arg("--success-message=Rust-is-ready-to-develop.") .arg("--image-dir") .arg(&image) .arg("--work-dir") .arg(&tmpdir(builder)) .arg("--output-dir") 
.arg(&distdir(builder)) .arg(format!("--package-name={}-{}", name, target)) .arg(format!("--component-name=rustc-dev-{}", target)) .arg("--legacy-manifest-dirs=rustlib,cargo"); builder.info(&format!( "Dist rustc-dev stage{} ({} -> {})", compiler.stage, &compiler.host, target )); let _time = timeit(builder); builder.run(&mut cmd); builder.remove_dir(&image); archive } } #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub struct Analysis { pub compiler: Compiler, pub target: Interned<String>, } impl Step for Analysis { type Output = PathBuf; const DEFAULT: bool = true; fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { let builder = run.builder; run.path("analysis").default_condition(builder.config.extended) } fn make_run(run: RunConfig<'_>) { run.builder.ensure(Analysis { // Find the actual compiler (handling the full bootstrap option) which // produced the save-analysis data because that data isn't copied // through the sysroot uplifting. compiler: run.builder.compiler_for( run.builder.top_stage, run.builder.config.build, run.target, ), target: run.target, }); } /// Creates a tarball of save-analysis metadata, if available. fn run(self, builder: &Builder<'_>) -> PathBuf { let compiler = self.compiler; let target = self.target; assert!(builder.config.extended); let name = pkgname(builder, "rust-analysis"); if compiler.host != builder.config.build { return distdir(builder).join(format!("{}-{}.tar.gz", name, target)); } builder.ensure(compile::Std { compiler, target }); let image = tmpdir(builder).join(format!("{}-{}-image", name, target)); let src = builder .stage_out(compiler, Mode::Std) .join(target) .join(builder.cargo_dir()) .join("deps"); let image_src = src.join("save-analysis"); let dst = image.join("lib/rustlib").join(target).join("analysis"); t!(fs::create_dir_all(&dst)); builder.info(&format!("image_src: {:?}, dst: {:?}", image_src, dst)); builder.cp_r(&image_src, &dst); let mut cmd = rust_installer(builder); cmd.arg("generate") .arg("--product-name=Rust") .arg("--rel-manifest-dir=rustlib") .arg("--success-message=save-analysis-saved.") .arg("--image-dir") .arg(&image) .arg("--work-dir") .arg(&tmpdir(builder)) .arg("--output-dir") .arg(&distdir(builder)) .arg(format!("--package-name={}-{}", name, target)) .arg(format!("--component-name=rust-analysis-{}", target)) .arg("--legacy-manifest-dirs=rustlib,cargo"); builder.info("Dist analysis"); let _time = timeit(builder); builder.run(&mut cmd); builder.remove_dir(&image); distdir(builder).join(format!("{}-{}.tar.gz", name, target)) } } fn copy_src_dirs(builder: &Builder<'_>, src_dirs: &[&str], exclude_dirs: &[&str], dst_dir: &Path) { fn filter_fn(exclude_dirs: &[&str], dir: &str, path: &Path) -> bool { let spath = match path.to_str() { Some(path) => path, None => return false, }; if spath.ends_with('~') || spath.ends_with(".pyc") { return false; } const LLVM_PROJECTS: &[&str] = &[ "llvm-project/clang", "llvm-project\\clang", "llvm-project/libunwind", "llvm-project\\libunwind", "llvm-project/lld", "llvm-project\\lld", "llvm-project/lldb", "llvm-project\\lldb", "llvm-project/llvm", "llvm-project\\llvm", "llvm-project/compiler-rt", "llvm-project\\compiler-rt", ]; if spath.contains("llvm-project") && !spath.ends_with("llvm-project") && !LLVM_PROJECTS.iter().any(|path| spath.contains(path)) { return false; } const LLVM_TEST: &[&str] = &["llvm-project/llvm/test", "llvm-project\\llvm\\test"]; if LLVM_TEST.iter().any(|path| spath.contains(path)) && (spath.ends_with(".ll") || spath.ends_with(".td") || spath.ends_with(".s")) { return false; 
} let full_path = Path::new(dir).join(path); if exclude_dirs.iter().any(|excl| full_path == Path::new(excl)) { return false; } let excludes = [ "CVS", "RCS", "SCCS", ".git", ".gitignore", ".gitmodules", ".gitattributes", ".cvsignore", ".svn", ".arch-ids", "{arch}", "=RELEASE-ID", "=meta-update", "=update", ".bzr", ".bzrignore", ".bzrtags", ".hg", ".hgignore", ".hgtags", "_darcs", ]; !path.iter().map(|s| s.to_str().unwrap()).any(|s| excludes.contains(&s)) } // Copy the directories using our filter for item in src_dirs { let dst = &dst_dir.join(item); t!(fs::create_dir_all(dst)); builder .cp_filtered(&builder.src.join(item), dst, &|path| filter_fn(exclude_dirs, item, path)); } } #[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct Src; impl Step for Src { /// The output path of the src installer tarball type Output = PathBuf; const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src") } fn make_run(run: RunConfig<'_>) { run.builder.ensure(Src); } /// Creates the `rust-src` installer component fn run(self, builder: &Builder<'_>) -> PathBuf { let name = pkgname(builder, "rust-src"); let image = tmpdir(builder).join(format!("{}-image", name)); let _ = fs::remove_dir_all(&image); let dst = image.join("lib/rustlib/src"); let dst_src = dst.join("rust"); t!(fs::create_dir_all(&dst_src)); let src_files = ["Cargo.lock"]; // This is the reduced set of paths which will become the rust-src component // (essentially libstd and all of its path dependencies) let std_src_dirs = [ "src/build_helper", "src/liballoc", "src/libcore", "src/libpanic_abort", "src/libpanic_unwind", "src/libstd", "src/libunwind", "src/libtest", "src/libterm", "src/libprofiler_builtins", "src/stdarch", "src/libproc_macro", "src/tools/rustc-std-workspace-core", "src/tools/rustc-std-workspace-alloc", "src/tools/rustc-std-workspace-std", ]; copy_src_dirs(builder, &std_src_dirs[..], &[], &dst_src); for file in src_files.iter() { builder.copy(&builder.src.join(file), &dst_src.join(file)); } // Create source tarball in rust-installer format let mut cmd = rust_installer(builder); cmd.arg("generate") .arg("--product-name=Rust") .arg("--rel-manifest-dir=rustlib") .arg("--success-message=Awesome-Source.") .arg("--image-dir") .arg(&image) .arg("--work-dir") .arg(&tmpdir(builder)) .arg("--output-dir") .arg(&distdir(builder)) .arg(format!("--package-name={}", name)) .arg("--component-name=rust-src") .arg("--legacy-manifest-dirs=rustlib,cargo"); builder.info("Dist src"); let _time = timeit(builder); builder.run(&mut cmd); builder.remove_dir(&image); distdir(builder).join(&format!("{}.tar.gz", name)) } } #[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct PlainSourceTarball; impl Step for PlainSourceTarball { /// Produces the location of the tarball generated type Output = PathBuf; const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { let builder = run.builder; run.path("src").default_condition(builder.config.rust_dist_src) } fn make_run(run: RunConfig<'_>) { run.builder.ensure(PlainSourceTarball); } /// Creates the plain source tarball fn run(self, builder: &Builder<'_>) -> PathBuf { // Make sure that the root folder of the tarball has the correct name let plain_name = format!("{}-src", pkgname(builder, "rustc")); let plain_dst_src = tmpdir(builder).join(&plain_name); let _ = fs::remove_dir_all(&plain_dst_src); t!(fs::create_dir_all(&plain_dst_src)); // This is the set of
root paths which will become part of the source package let src_files = [ "COPYRIGHT", "LICENSE-APACHE", "LICENSE-MIT", "CONTRIBUTING.md", "README.md", "RELEASES.md", "configure", "x.py", "config.toml.example", "Cargo.toml", "Cargo.lock", ]; let src_dirs = ["src"]; copy_src_dirs(builder, &src_dirs[..], &[], &plain_dst_src); // Copy the files normally for item in &src_files { builder.copy(&builder.src.join(item), &plain_dst_src.join(item)); } // Create the version file builder.create(&plain_dst_src.join("version"), &builder.rust_version()); if let Some(sha) = builder.rust_sha() { builder.create(&plain_dst_src.join("git-commit-hash"), &sha); } // If we're building from git sources, we need to vendor a complete distribution. if builder.rust_info.is_git() { // Vendor all Cargo dependencies let mut cmd = Command::new(&builder.initial_cargo); cmd.arg("vendor").current_dir(&plain_dst_src); builder.run(&mut cmd); } // Create plain source tarball let plain_name = format!("rustc-{}-src", builder.rust_package_vers()); let mut tarball = distdir(builder).join(&format!("{}.tar.gz", plain_name)); tarball.set_extension(""); // strip .gz tarball.set_extension(""); // strip .tar if let Some(dir) = tarball.parent() { builder.create_dir(&dir); } builder.info("running installer"); let mut cmd = rust_installer(builder); cmd.arg("tarball") .arg("--input") .arg(&plain_name) .arg("--output") .arg(&tarball) .arg("--work-dir=.") .current_dir(tmpdir(builder)); builder.info("Create plain source tarball"); let _time = timeit(builder); builder.run(&mut cmd); distdir(builder).join(&format!("{}.tar.gz", plain_name)) } } // We have to run a few shell scripts, which choke quite a bit on both `\` // characters and on `C:\` paths, so normalize both of them away. pub fn sanitize_sh(path: &Path) -> String { let path = path.to_str().unwrap().replace("\\", "/"); return change_drive(&path).unwrap_or(path); fn change_drive(s: &str) -> Option<String> { let mut ch = s.chars(); let drive = ch.next().unwrap_or('C'); if ch.next() != Some(':') { return None; } if ch.next() != Some('/') { return None; } Some(format!("/{}/{}", drive, &s[drive.len_utf8() + 2..])) } } #[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct Cargo { pub compiler: Compiler, pub target: Interned<String>, } impl Step for Cargo { type Output = PathBuf; const ONLY_HOSTS: bool = true; fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("cargo") } fn make_run(run: RunConfig<'_>) { run.builder.ensure(Cargo { compiler: run.builder.compiler_for( run.builder.top_stage, run.builder.config.build, run.target, ), target: run.target, }); } fn run(self, builder: &Builder<'_>) -> PathBuf { let compiler = self.compiler; let target = self.target; let src = builder.src.join("src/tools/cargo"); let etc = src.join("src/etc"); let release_num = builder.release_num("cargo"); let name = pkgname(builder, "cargo"); let version = builder.cargo_info.version(builder, &release_num); let tmp = tmpdir(builder); let image = tmp.join("cargo-image"); drop(fs::remove_dir_all(&image)); builder.create_dir(&image); // Prepare the image directory builder.create_dir(&image.join("share/zsh/site-functions")); builder.create_dir(&image.join("etc/bash_completion.d")); let cargo = builder.ensure(tool::Cargo { compiler, target }); builder.install(&cargo, &image.join("bin"), 0o755); for man in t!(etc.join("man").read_dir()) { let man = t!(man); builder.install(&man.path(), &image.join("share/man/man1"), 0o644); } builder.install(&etc.join("_cargo"), 
&image.join("share/zsh/site-functions"), 0o644); builder.copy(&etc.join("cargo.bashcomp.sh"), &image.join("etc/bash_completion.d/cargo")); let doc = image.join("share/doc/cargo"); builder.install(&src.join("README.md"), &doc, 0o644); builder.install(&src.join("LICENSE-MIT"), &doc, 0o644); builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644); builder.install(&src.join("LICENSE-THIRD-PARTY"), &doc, 0o644); // Prepare the overlay let overlay = tmp.join("cargo-overlay"); drop(fs::remove_dir_all(&overlay)); builder.create_dir(&overlay); builder.install(&src.join("README.md"), &overlay, 0o644); builder.install(&src.join("LICENSE-MIT"), &overlay, 0o644); builder.install(&src.join("LICENSE-APACHE"), &overlay, 0o644); builder.install(&src.join("LICENSE-THIRD-PARTY"), &overlay, 0o644); builder.create(&overlay.join("version"), &version); // Generate the installer tarball let mut cmd = rust_installer(builder); cmd.arg("generate") .arg("--product-name=Rust") .arg("--rel-manifest-dir=rustlib") .arg("--success-message=Rust-is-ready-to-roll.") .arg("--image-dir") .arg(&image) .arg("--work-dir") .arg(&tmpdir(builder)) .arg("--output-dir") .arg(&distdir(builder)) .arg("--non-installed-overlay") .arg(&overlay) .arg(format!("--package-name={}-{}", name, target)) .arg("--component-name=cargo") .arg("--legacy-manifest-dirs=rustlib,cargo"); builder.info(&format!("Dist cargo stage{} ({})", compiler.stage, target)); let _time = timeit(builder); builder.run(&mut cmd); distdir(builder).join(format!("{}-{}.tar.gz", name, target)) } } #[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct Rls { pub compiler: Compiler, pub target: Interned<String>, } impl Step for Rls { type Output = Option<PathBuf>; const ONLY_HOSTS: bool = true; fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("rls") } fn make_run(run: RunConfig<'_>) { run.builder.ensure(Rls { compiler: run.builder.compiler_for( run.builder.top_stage, run.builder.config.build, run.target, ), target: run.target, }); } fn run(self, builder: &Builder<'_>) -> Option<PathBuf> { let compiler = self.compiler; let target = self.target; assert!(builder.config.extended); let src = builder.src.join("src/tools/rls"); let release_num = builder.release_num("rls"); let name = pkgname(builder, "rls"); let version = builder.rls_info.version(builder, &release_num); let tmp = tmpdir(builder); let image = tmp.join("rls-image"); drop(fs::remove_dir_all(&image)); t!(fs::create_dir_all(&image)); // Prepare the image directory // We expect RLS to build, because we've exited this step above if tool // state for RLS isn't testing. 
let rls = builder .ensure(tool::Rls { compiler, target, extra_features: Vec::new() }) .or_else(|| { missing_tool("RLS", builder.build.config.missing_tools); None })?; builder.install(&rls, &image.join("bin"), 0o755); let doc = image.join("share/doc/rls"); builder.install(&src.join("README.md"), &doc, 0o644); builder.install(&src.join("LICENSE-MIT"), &doc, 0o644); builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644); // Prepare the overlay let overlay = tmp.join("rls-overlay"); drop(fs::remove_dir_all(&overlay)); t!(fs::create_dir_all(&overlay)); builder.install(&src.join("README.md"), &overlay, 0o644); builder.install(&src.join("LICENSE-MIT"), &overlay, 0o644); builder.install(&src.join("LICENSE-APACHE"), &overlay, 0o644); builder.create(&overlay.join("version"), &version); // Generate the installer tarball let mut cmd = rust_installer(builder); cmd.arg("generate") .arg("--product-name=Rust") .arg("--rel-manifest-dir=rustlib") .arg("--success-message=RLS-ready-to-serve.") .arg("--image-dir") .arg(&image) .arg("--work-dir") .arg(&tmpdir(builder)) .arg("--output-dir") .arg(&distdir(builder)) .arg("--non-installed-overlay") .arg(&overlay) .arg(format!("--package-name={}-{}", name, target)) .arg("--legacy-manifest-dirs=rustlib,cargo") .arg("--component-name=rls-preview"); builder.info(&format!("Dist RLS stage{} ({})", compiler.stage, target)); let _time = timeit(builder); builder.run(&mut cmd); Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target))) } } #[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct Clippy { pub compiler: Compiler, pub target: Interned<String>, } impl Step for Clippy { type Output = Option<PathBuf>; const ONLY_HOSTS: bool = true; fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("clippy") } fn make_run(run: RunConfig<'_>) { run.builder.ensure(Clippy { compiler: run.builder.compiler_for( run.builder.top_stage, run.builder.config.build, run.target, ), target: run.target, }); } fn run(self, builder: &Builder<'_>) -> Option<PathBuf> { let compiler = self.compiler; let target = self.target; assert!(builder.config.extended); let src = builder.src.join("src/tools/clippy"); let release_num = builder.release_num("clippy"); let name = pkgname(builder, "clippy"); let version = builder.clippy_info.version(builder, &release_num); let tmp = tmpdir(builder); let image = tmp.join("clippy-image"); drop(fs::remove_dir_all(&image)); builder.create_dir(&image); // Prepare the image directory // We expect clippy to build, because we've exited this step above if tool // state for clippy isn't testing. 
let clippy = builder .ensure(tool::Clippy { compiler, target, extra_features: Vec::new() }) .or_else(|| { missing_tool("clippy", builder.build.config.missing_tools); None })?; let cargoclippy = builder .ensure(tool::CargoClippy { compiler, target, extra_features: Vec::new() }) .or_else(|| { missing_tool("cargo clippy", builder.build.config.missing_tools); None })?; builder.install(&clippy, &image.join("bin"), 0o755); builder.install(&cargoclippy, &image.join("bin"), 0o755); let doc = image.join("share/doc/clippy"); builder.install(&src.join("README.md"), &doc, 0o644); builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644); builder.install(&src.join("LICENSE-MIT"), &doc, 0o644); // Prepare the overlay let overlay = tmp.join("clippy-overlay"); drop(fs::remove_dir_all(&overlay)); t!(fs::create_dir_all(&overlay)); builder.install(&src.join("README.md"), &overlay, 0o644); builder.install(&src.join("LICENSE-APACHE"), &overlay, 0o644); builder.install(&src.join("LICENSE-MIT"), &overlay, 0o644); builder.create(&overlay.join("version"), &version); // Generate the installer tarball let mut cmd = rust_installer(builder); cmd.arg("generate") .arg("--product-name=Rust") .arg("--rel-manifest-dir=rustlib") .arg("--success-message=clippy-ready-to-serve.") .arg("--image-dir") .arg(&image) .arg("--work-dir") .arg(&tmpdir(builder)) .arg("--output-dir") .arg(&distdir(builder)) .arg("--non-installed-overlay") .arg(&overlay) .arg(format!("--package-name={}-{}", name, target)) .arg("--legacy-manifest-dirs=rustlib,cargo") .arg("--component-name=clippy-preview"); builder.info(&format!("Dist clippy stage{} ({})", compiler.stage, target)); let _time = timeit(builder); builder.run(&mut cmd); Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target))) } } #[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct Miri { pub compiler: Compiler, pub target: Interned<String>, } impl Step for Miri { type Output = Option<PathBuf>; const ONLY_HOSTS: bool = true; fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("miri") } fn make_run(run: RunConfig<'_>) { run.builder.ensure(Miri { compiler: run.builder.compiler_for( run.builder.top_stage, run.builder.config.build, run.target, ), target: run.target, }); } fn run(self, builder: &Builder<'_>) -> Option<PathBuf> { let compiler = self.compiler; let target = self.target; assert!(builder.config.extended); let src = builder.src.join("src/tools/miri"); let release_num = builder.release_num("miri"); let name = pkgname(builder, "miri"); let version = builder.miri_info.version(builder, &release_num); let tmp = tmpdir(builder); let image = tmp.join("miri-image"); drop(fs::remove_dir_all(&image)); builder.create_dir(&image); // Prepare the image directory // We're always fine with miri not building; we don't ship it on // beta/stable anyway.
let miri = builder .ensure(tool::Miri { compiler, target, extra_features: Vec::new() }) .or_else(|| { missing_tool("miri", true); None })?; let cargomiri = builder .ensure(tool::CargoMiri { compiler, target, extra_features: Vec::new() }) .or_else(|| { missing_tool("cargo miri", true); None })?; builder.install(&miri, &image.join("bin"), 0o755); builder.install(&cargomiri, &image.join("bin"), 0o755); let doc = image.join("share/doc/miri"); builder.install(&src.join("README.md"), &doc, 0o644); builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644); builder.install(&src.join("LICENSE-MIT"), &doc, 0o644); // Prepare the overlay let overlay = tmp.join("miri-overlay"); drop(fs::remove_dir_all(&overlay)); t!(fs::create_dir_all(&overlay)); builder.install(&src.join("README.md"), &overlay, 0o644); builder.install(&src.join("LICENSE-APACHE"), &overlay, 0o644); builder.install(&src.join("LICENSE-MIT"), &overlay, 0o644); builder.create(&overlay.join("version"), &version); // Generate the installer tarball let mut cmd = rust_installer(builder); cmd.arg("generate") .arg("--product-name=Rust") .arg("--rel-manifest-dir=rustlib") .arg("--success-message=miri-ready-to-serve.") .arg("--image-dir") .arg(&image) .arg("--work-dir") .arg(&tmpdir(builder)) .arg("--output-dir") .arg(&distdir(builder)) .arg("--non-installed-overlay") .arg(&overlay) .arg(format!("--package-name={}-{}", name, target)) .arg("--legacy-manifest-dirs=rustlib,cargo") .arg("--component-name=miri-preview"); builder.info(&format!("Dist miri stage{} ({})", compiler.stage, target)); let _time = timeit(builder); builder.run(&mut cmd); Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target))) } } #[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct Rustfmt { pub compiler: Compiler, pub target: Interned<String>, } impl Step for Rustfmt { type Output = Option<PathBuf>; const ONLY_HOSTS: bool = true; fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("rustfmt") } fn make_run(run: RunConfig<'_>) { run.builder.ensure(Rustfmt { compiler: run.builder.compiler_for( run.builder.top_stage, run.builder.config.build, run.target, ), target: run.target, }); } fn run(self, builder: &Builder<'_>) -> Option<PathBuf> { let compiler = self.compiler; let target = self.target; let src = builder.src.join("src/tools/rustfmt"); let release_num = builder.release_num("rustfmt"); let name = pkgname(builder, "rustfmt"); let version = builder.rustfmt_info.version(builder, &release_num); let tmp = tmpdir(builder); let image = tmp.join("rustfmt-image"); drop(fs::remove_dir_all(&image)); builder.create_dir(&image); // Prepare the image directory let rustfmt = builder .ensure(tool::Rustfmt { compiler, target, extra_features: Vec::new() }) .or_else(|| { missing_tool("Rustfmt", builder.build.config.missing_tools); None })?; let cargofmt = builder .ensure(tool::Cargofmt { compiler, target, extra_features: Vec::new() }) .or_else(|| { missing_tool("Cargofmt", builder.build.config.missing_tools); None })?; builder.install(&rustfmt, &image.join("bin"), 0o755); builder.install(&cargofmt, &image.join("bin"), 0o755); let doc = image.join("share/doc/rustfmt"); builder.install(&src.join("README.md"), &doc, 0o644); builder.install(&src.join("LICENSE-MIT"), &doc, 0o644); builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644); // Prepare the overlay let overlay = tmp.join("rustfmt-overlay"); drop(fs::remove_dir_all(&overlay)); builder.create_dir(&overlay); builder.install(&src.join("README.md"), &overlay, 0o644);
builder.install(&src.join("LICENSE-MIT"), &overlay, 0o644); builder.install(&src.join("LICENSE-APACHE"), &overlay, 0o644); builder.create(&overlay.join("version"), &version); // Generate the installer tarball let mut cmd = rust_installer(builder); cmd.arg("generate") .arg("--product-name=Rust") .arg("--rel-manifest-dir=rustlib") .arg("--success-message=rustfmt-ready-to-fmt.") .arg("--image-dir") .arg(&image) .arg("--work-dir") .arg(&tmpdir(builder)) .arg("--output-dir") .arg(&distdir(builder)) .arg("--non-installed-overlay") .arg(&overlay) .arg(format!("--package-name={}-{}", name, target)) .arg("--legacy-manifest-dirs=rustlib,cargo") .arg("--component-name=rustfmt-preview"); builder.info(&format!("Dist Rustfmt stage{} ({})", compiler.stage, target)); let _time = timeit(builder); builder.run(&mut cmd); Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target))) } } #[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct Extended { stage: u32, host: Interned<String>, target: Interned<String>, } impl Step for Extended { type Output = (); const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { let builder = run.builder; run.path("extended").default_condition(builder.config.extended) } fn make_run(run: RunConfig<'_>) { run.builder.ensure(Extended { stage: run.builder.top_stage, host: run.builder.config.build, target: run.target, }); } /// Creates a combined installer for the specified target in the provided stage. fn run(self, builder: &Builder<'_>) { let target = self.target; let stage = self.stage; let compiler = builder.compiler_for(self.stage, self.host, self.target); builder.info(&format!("Dist extended stage{} ({})", compiler.stage, target)); let rustc_installer = builder.ensure(Rustc { compiler: builder.compiler(stage, target) }); let cargo_installer = builder.ensure(Cargo { compiler, target }); let rustfmt_installer = builder.ensure(Rustfmt { compiler, target }); let rls_installer = builder.ensure(Rls { compiler, target }); let llvm_tools_installer = builder.ensure(LlvmTools { target }); let clippy_installer = builder.ensure(Clippy { compiler, target }); let miri_installer = builder.ensure(Miri { compiler, target }); let lldb_installer = builder.ensure(Lldb { target }); let mingw_installer = builder.ensure(Mingw { host: target }); let analysis_installer = builder.ensure(Analysis { compiler, target }); let docs_installer = builder.ensure(Docs { host: target }); let std_installer = builder.ensure(Std { compiler: builder.compiler(stage, target), target }); let tmp = tmpdir(builder); let overlay = tmp.join("extended-overlay"); let etc = builder.src.join("src/etc/installer"); let work = tmp.join("work"); let _ = fs::remove_dir_all(&overlay); builder.install(&builder.src.join("COPYRIGHT"), &overlay, 0o644); builder.install(&builder.src.join("LICENSE-APACHE"), &overlay, 0o644); builder.install(&builder.src.join("LICENSE-MIT"), &overlay, 0o644); let version = builder.rust_version(); builder.create(&overlay.join("version"), &version); if let Some(sha) = builder.rust_sha() { builder.create(&overlay.join("git-commit-hash"), &sha); } builder.install(&etc.join("README.md"), &overlay, 0o644); // When rust-std package split from rustc, we needed to ensure that during // upgrades rustc was upgraded before rust-std. To avoid rustc clobbering // the std files during uninstall. To do this ensure that rustc comes // before rust-std in the list below. 
let mut tarballs = Vec::new(); tarballs.push(rustc_installer); tarballs.push(cargo_installer); tarballs.extend(rls_installer.clone()); tarballs.extend(clippy_installer.clone()); tarballs.extend(miri_installer.clone()); tarballs.extend(rustfmt_installer.clone()); tarballs.extend(llvm_tools_installer); tarballs.extend(lldb_installer); tarballs.push(analysis_installer); tarballs.push(std_installer); if builder.config.docs { tarballs.push(docs_installer); } if target.contains("pc-windows-gnu") { tarballs.push(mingw_installer.unwrap()); } let mut input_tarballs = tarballs[0].as_os_str().to_owned(); for tarball in &tarballs[1..] { input_tarballs.push(","); input_tarballs.push(tarball); } builder.info("building combined installer"); let mut cmd = rust_installer(builder); cmd.arg("combine") .arg("--product-name=Rust") .arg("--rel-manifest-dir=rustlib") .arg("--success-message=Rust-is-ready-to-roll.") .arg("--work-dir") .arg(&work) .arg("--output-dir") .arg(&distdir(builder)) .arg(format!("--package-name={}-{}", pkgname(builder, "rust"), target)) .arg("--legacy-manifest-dirs=rustlib,cargo") .arg("--input-tarballs") .arg(input_tarballs) .arg("--non-installed-overlay") .arg(&overlay); let time = timeit(&builder); builder.run(&mut cmd); drop(time); let mut license = String::new(); license += &builder.read(&builder.src.join("COPYRIGHT")); license += &builder.read(&builder.src.join("LICENSE-APACHE")); license += &builder.read(&builder.src.join("LICENSE-MIT")); license.push_str("\n"); license.push_str("\n"); let rtf = r"{\rtf1\ansi\deff0{\fonttbl{\f0\fnil\fcharset0 Arial;}}\nowwrap\fs18"; let mut rtf = rtf.to_string(); rtf.push_str("\n"); for line in license.lines() { rtf.push_str(line); rtf.push_str("\\line "); } rtf.push_str("}"); fn filter(contents: &str, marker: &str) -> String { let start = format!("tool-{}-start", marker); let end = format!("tool-{}-end", marker); let mut lines = Vec::new(); let mut omitted = false; for line in contents.lines() { if line.contains(&start) { omitted = true; } else if line.contains(&end) { omitted = false; } else if !omitted { lines.push(line); } } lines.join("\n") } let xform = |p: &Path| { let mut contents = t!(fs::read_to_string(p)); if rls_installer.is_none() { contents = filter(&contents, "rls"); } if clippy_installer.is_none() { contents = filter(&contents, "clippy"); } if miri_installer.is_none() { contents = filter(&contents, "miri"); } if rustfmt_installer.is_none() { contents = filter(&contents, "rustfmt"); } let ret = tmp.join(p.file_name().unwrap()); t!(fs::write(&ret, &contents)); ret }; if target.contains("apple-darwin") { builder.info("building pkg installer"); let pkg = tmp.join("pkg"); let _ = fs::remove_dir_all(&pkg); let pkgbuild = |component: &str| { let mut cmd = Command::new("pkgbuild"); cmd.arg("--identifier") .arg(format!("org.rust-lang.{}", component)) .arg("--scripts") .arg(pkg.join(component)) .arg("--nopayload") .arg(pkg.join(component).with_extension("pkg")); builder.run(&mut cmd); }; let prepare = |name: &str| { builder.create_dir(&pkg.join(name)); builder.cp_r( &work.join(&format!("{}-{}", pkgname(builder, name), target)), &pkg.join(name), ); builder.install(&etc.join("pkg/postinstall"), &pkg.join(name), 0o755); pkgbuild(name); }; prepare("rustc"); prepare("cargo"); prepare("rust-docs"); prepare("rust-std"); prepare("rust-analysis"); if rls_installer.is_some() { prepare("rls"); } if clippy_installer.is_some() { prepare("clippy"); } if miri_installer.is_some() { prepare("miri"); } // create an 'uninstall' package 
builder.install(&etc.join("pkg/postinstall"), &pkg.join("uninstall"), 0o755); pkgbuild("uninstall"); builder.create_dir(&pkg.join("res")); builder.create(&pkg.join("res/LICENSE.txt"), &license); builder.install(&etc.join("gfx/rust-logo.png"), &pkg.join("res"), 0o644); let mut cmd = Command::new("productbuild"); cmd.arg("--distribution") .arg(xform(&etc.join("pkg/Distribution.xml"))) .arg("--resources") .arg(pkg.join("res")) .arg(distdir(builder).join(format!("{}-{}.pkg", pkgname(builder, "rust"), target))) .arg("--package-path") .arg(&pkg); let _time = timeit(builder); builder.run(&mut cmd); } if target.contains("windows") { let exe = tmp.join("exe"); let _ = fs::remove_dir_all(&exe); let prepare = |name: &str| { builder.create_dir(&exe.join(name)); let dir = if name == "rust-std" || name == "rust-analysis" { format!("{}-{}", name, target) } else if name == "rls" { "rls-preview".to_string() } else if name == "clippy" { "clippy-preview".to_string() } else if name == "miri" { "miri-preview".to_string() } else { name.to_string() }; builder.cp_r( &work.join(&format!("{}-{}", pkgname(builder, name), target)).join(dir), &exe.join(name), ); builder.remove(&exe.join(name).join("manifest.in")); }; prepare("rustc"); prepare("cargo"); prepare("rust-analysis"); prepare("rust-docs"); prepare("rust-std"); if rls_installer.is_some() { prepare("rls"); } if clippy_installer.is_some() { prepare("clippy"); } if miri_installer.is_some() { prepare("miri"); } if target.contains("windows-gnu") { prepare("rust-mingw"); } builder.install(&xform(&etc.join("exe/rust.iss")), &exe, 0o644); builder.install(&etc.join("exe/modpath.iss"), &exe, 0o644); builder.install(&etc.join("exe/upgrade.iss"), &exe, 0o644); builder.install(&etc.join("gfx/rust-logo.ico"), &exe, 0o644); builder.create(&exe.join("LICENSE.txt"), &license); // Generate exe installer builder.info("building `exe` installer with `iscc`"); let mut cmd = Command::new("iscc"); cmd.arg("rust.iss").arg("/Q").current_dir(&exe); if target.contains("windows-gnu") { cmd.arg("/dMINGW"); } add_env(builder, &mut cmd, target); let time = timeit(builder); builder.run(&mut cmd); drop(time); builder.install( &exe.join(format!("{}-{}.exe", pkgname(builder, "rust"), target)), &distdir(builder), 0o755, ); // Generate msi installer let wix = PathBuf::from(env::var_os("WIX").unwrap()); let heat = wix.join("bin/heat.exe"); let candle = wix.join("bin/candle.exe"); let light = wix.join("bin/light.exe"); let heat_flags = ["-nologo", "-gg", "-sfrag", "-srd", "-sreg"]; builder.run( Command::new(&heat) .current_dir(&exe) .arg("dir") .arg("rustc") .args(&heat_flags) .arg("-cg") .arg("RustcGroup") .arg("-dr") .arg("Rustc") .arg("-var") .arg("var.RustcDir") .arg("-out") .arg(exe.join("RustcGroup.wxs")), ); builder.run( Command::new(&heat) .current_dir(&exe) .arg("dir") .arg("rust-docs") .args(&heat_flags) .arg("-cg") .arg("DocsGroup") .arg("-dr") .arg("Docs") .arg("-var") .arg("var.DocsDir") .arg("-out") .arg(exe.join("DocsGroup.wxs")) .arg("-t") .arg(etc.join("msi/squash-components.xsl")), ); builder.run( Command::new(&heat) .current_dir(&exe) .arg("dir") .arg("cargo") .args(&heat_flags) .arg("-cg") .arg("CargoGroup") .arg("-dr") .arg("Cargo") .arg("-var") .arg("var.CargoDir") .arg("-out") .arg(exe.join("CargoGroup.wxs")) .arg("-t") .arg(etc.join("msi/remove-duplicates.xsl")), ); builder.run( Command::new(&heat) .current_dir(&exe) .arg("dir") .arg("rust-std") .args(&heat_flags) .arg("-cg") .arg("StdGroup") .arg("-dr") .arg("Std") .arg("-var") .arg("var.StdDir") .arg("-out") 
.arg(exe.join("StdGroup.wxs")), ); if rls_installer.is_some() { builder.run( Command::new(&heat) .current_dir(&exe) .arg("dir") .arg("rls") .args(&heat_flags) .arg("-cg") .arg("RlsGroup") .arg("-dr") .arg("Rls") .arg("-var") .arg("var.RlsDir") .arg("-out") .arg(exe.join("RlsGroup.wxs")) .arg("-t") .arg(etc.join("msi/remove-duplicates.xsl")), ); } if clippy_installer.is_some() { builder.run( Command::new(&heat) .current_dir(&exe) .arg("dir") .arg("clippy") .args(&heat_flags) .arg("-cg") .arg("ClippyGroup") .arg("-dr") .arg("Clippy") .arg("-var") .arg("var.ClippyDir") .arg("-out") .arg(exe.join("ClippyGroup.wxs")) .arg("-t") .arg(etc.join("msi/remove-duplicates.xsl")), ); } if miri_installer.is_some() { builder.run( Command::new(&heat) .current_dir(&exe) .arg("dir") .arg("miri") .args(&heat_flags) .arg("-cg") .arg("MiriGroup") .arg("-dr") .arg("Miri") .arg("-var") .arg("var.MiriDir") .arg("-out") .arg(exe.join("MiriGroup.wxs")) .arg("-t") .arg(etc.join("msi/remove-duplicates.xsl")), ); } builder.run( Command::new(&heat) .current_dir(&exe) .arg("dir") .arg("rust-analysis") .args(&heat_flags) .arg("-cg") .arg("AnalysisGroup") .arg("-dr") .arg("Analysis") .arg("-var") .arg("var.AnalysisDir") .arg("-out") .arg(exe.join("AnalysisGroup.wxs")) .arg("-t") .arg(etc.join("msi/remove-duplicates.xsl")), ); if target.contains("windows-gnu") { builder.run( Command::new(&heat) .current_dir(&exe) .arg("dir") .arg("rust-mingw") .args(&heat_flags) .arg("-cg") .arg("GccGroup") .arg("-dr") .arg("Gcc") .arg("-var") .arg("var.GccDir") .arg("-out") .arg(exe.join("GccGroup.wxs")), ); } let candle = |input: &Path| { let output = exe.join(input.file_stem().unwrap()).with_extension("wixobj"); let arch = if target.contains("x86_64") { "x64" } else { "x86" }; let mut cmd = Command::new(&candle); cmd.current_dir(&exe) .arg("-nologo") .arg("-dRustcDir=rustc") .arg("-dDocsDir=rust-docs") .arg("-dCargoDir=cargo") .arg("-dStdDir=rust-std") .arg("-dAnalysisDir=rust-analysis") .arg("-arch") .arg(&arch) .arg("-out") .arg(&output) .arg(&input); add_env(builder, &mut cmd, target); if rls_installer.is_some() { cmd.arg("-dRlsDir=rls"); } if clippy_installer.is_some() { cmd.arg("-dClippyDir=clippy"); } if miri_installer.is_some() { cmd.arg("-dMiriDir=miri"); } if target.contains("windows-gnu") { cmd.arg("-dGccDir=rust-mingw"); } builder.run(&mut cmd); }; candle(&xform(&etc.join("msi/rust.wxs"))); candle(&etc.join("msi/ui.wxs")); candle(&etc.join("msi/rustwelcomedlg.wxs")); candle("RustcGroup.wxs".as_ref()); candle("DocsGroup.wxs".as_ref()); candle("CargoGroup.wxs".as_ref()); candle("StdGroup.wxs".as_ref()); if rls_installer.is_some() { candle("RlsGroup.wxs".as_ref()); } if clippy_installer.is_some() { candle("ClippyGroup.wxs".as_ref()); } if miri_installer.is_some() { candle("MiriGroup.wxs".as_ref()); } candle("AnalysisGroup.wxs".as_ref()); if target.contains("windows-gnu") { candle("GccGroup.wxs".as_ref()); } builder.create(&exe.join("LICENSE.rtf"), &rtf); builder.install(&etc.join("gfx/banner.bmp"), &exe, 0o644); builder.install(&etc.join("gfx/dialogbg.bmp"), &exe, 0o644); builder.info(&format!("building `msi` installer with {:?}", light)); let filename = format!("{}-{}.msi", pkgname(builder, "rust"), target); let mut cmd = Command::new(&light); cmd.arg("-nologo") .arg("-ext") .arg("WixUIExtension") .arg("-ext") .arg("WixUtilExtension") .arg("-out") .arg(exe.join(&filename)) .arg("rust.wixobj") .arg("ui.wixobj") .arg("rustwelcomedlg.wixobj") .arg("RustcGroup.wixobj") .arg("DocsGroup.wixobj") .arg("CargoGroup.wixobj") 
.arg("StdGroup.wixobj") .arg("AnalysisGroup.wixobj") .current_dir(&exe); if rls_installer.is_some() { cmd.arg("RlsGroup.wixobj"); } if clippy_installer.is_some() { cmd.arg("ClippyGroup.wixobj"); } if miri_installer.is_some() { cmd.arg("MiriGroup.wixobj"); } if target.contains("windows-gnu") { cmd.arg("GccGroup.wixobj"); } // ICE57 wrongly complains about the shortcuts cmd.arg("-sice:ICE57"); let _time = timeit(builder); builder.run(&mut cmd); if !builder.config.dry_run { t!(fs::rename(exe.join(&filename), distdir(builder).join(&filename))); } } } } fn add_env(builder: &Builder<'_>, cmd: &mut Command, target: Interned<String>) { let mut parts = channel::CFG_RELEASE_NUM.split('.'); cmd.env("CFG_RELEASE_INFO", builder.rust_version()) .env("CFG_RELEASE_NUM", channel::CFG_RELEASE_NUM) .env("CFG_RELEASE", builder.rust_release()) .env("CFG_VER_MAJOR", parts.next().unwrap()) .env("CFG_VER_MINOR", parts.next().unwrap()) .env("CFG_VER_PATCH", parts.next().unwrap()) .env("CFG_VER_BUILD", "0") // just needed to build .env("CFG_PACKAGE_VERS", builder.rust_package_vers()) .env("CFG_PACKAGE_NAME", pkgname(builder, "rust")) .env("CFG_BUILD", target) .env("CFG_CHANNEL", &builder.config.channel); if target.contains("windows-gnu") { cmd.env("CFG_MINGW", "1").env("CFG_ABI", "GNU"); } else { cmd.env("CFG_MINGW", "0").env("CFG_ABI", "MSVC"); } if target.contains("x86_64") { cmd.env("CFG_PLATFORM", "x64"); } else { cmd.env("CFG_PLATFORM", "x86"); } } #[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct HashSign; impl Step for HashSign { type Output = (); const ONLY_HOSTS: bool = true; fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("hash-and-sign") } fn make_run(run: RunConfig<'_>) { run.builder.ensure(HashSign); } fn run(self, builder: &Builder<'_>) { // This gets called by `promote-release` // (https://github.com/rust-lang/rust-central-station/tree/master/promote-release). let mut cmd = builder.tool_cmd(Tool::BuildManifest); if builder.config.dry_run { return; } let sign = builder.config.dist_sign_folder.as_ref().unwrap_or_else(|| { panic!("\n\nfailed to specify `dist.sign-folder` in `config.toml`\n\n") }); let addr = builder.config.dist_upload_addr.as_ref().unwrap_or_else(|| { panic!("\n\nfailed to specify `dist.upload-addr` in `config.toml`\n\n") }); let pass = if env::var("BUILD_MANIFEST_DISABLE_SIGNING").is_err() { let file = builder.config.dist_gpg_password_file.as_ref().unwrap_or_else(|| { panic!("\n\nfailed to specify `dist.gpg-password-file` in `config.toml`\n\n") }); t!(fs::read_to_string(&file)) } else { String::new() }; let today = output(Command::new("date").arg("+%Y-%m-%d")); cmd.arg(sign); cmd.arg(distdir(builder)); cmd.arg(today.trim()); cmd.arg(builder.rust_package_vers()); cmd.arg(addr); cmd.arg(builder.package_vers(&builder.release_num("cargo"))); cmd.arg(builder.package_vers(&builder.release_num("rls"))); cmd.arg(builder.package_vers(&builder.release_num("clippy"))); cmd.arg(builder.package_vers(&builder.release_num("miri"))); cmd.arg(builder.package_vers(&builder.release_num("rustfmt"))); cmd.arg(builder.llvm_tools_package_vers()); cmd.arg(builder.lldb_package_vers()); builder.create_dir(&distdir(builder)); let mut child = t!(cmd.stdin(Stdio::piped()).spawn()); t!(child.stdin.take().unwrap().write_all(pass.as_bytes())); let status = t!(child.wait()); assert!(status.success()); } } // Maybe add libLLVM.so to the lib-dir. It will only have been built if // LLVM tools are linked dynamically. 
// // We add this to both the libdir of the rustc binary itself (for it to load at // runtime) and also to the target directory so it can find it at link-time. // // Note: This function does not yet support Windows, but we also don't support // linking LLVM tools dynamically on Windows yet. pub fn maybe_install_llvm_dylib(builder: &Builder<'_>, target: Interned<String>, sysroot: &Path) { let src_libdir = builder.llvm_out(target).join("lib"); let dst_libdir1 = sysroot.join("lib/rustlib").join(&*target).join("lib"); let dst_libdir2 = sysroot.join(builder.sysroot_libdir_relative(Compiler { stage: 1, host: target })); t!(fs::create_dir_all(&dst_libdir1)); t!(fs::create_dir_all(&dst_libdir2)); if target.contains("apple-darwin") { let llvm_dylib_path = src_libdir.join("libLLVM.dylib"); if llvm_dylib_path.exists() { builder.install(&llvm_dylib_path, &dst_libdir1, 0o644); builder.install(&llvm_dylib_path, &dst_libdir2, 0o644); } return; } // Usually libLLVM.so is a symlink to something like libLLVM-6.0.so. // Since tools link to the latter rather than the former, we have to // follow the symlink to find out what to distribute. let llvm_dylib_path = src_libdir.join("libLLVM.so"); if llvm_dylib_path.exists() { let llvm_dylib_path = llvm_dylib_path.canonicalize().unwrap_or_else(|e| { panic!("dist: Error calling canonicalize path `{}`: {}", llvm_dylib_path.display(), e); }); builder.install(&llvm_dylib_path, &dst_libdir1, 0o644); builder.install(&llvm_dylib_path, &dst_libdir2, 0o644); } } #[derive(Clone, Debug, Eq, Hash, PartialEq)] pub struct LlvmTools { pub target: Interned<String>, } impl Step for LlvmTools { type Output = Option<PathBuf>; const ONLY_HOSTS: bool = true; fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("llvm-tools") } fn make_run(run: RunConfig<'_>) { run.builder.ensure(LlvmTools { target: run.target }); } fn run(self, builder: &Builder<'_>) -> Option<PathBuf> { let target = self.target; assert!(builder.config.extended); /* run only if llvm-config isn't used */ if let Some(config) = builder.config.target_config.get(&target) { if let Some(ref _s) = config.llvm_config { builder.info(&format!("Skipping LlvmTools ({}): external LLVM", target)); return None; } } builder.info(&format!("Dist LlvmTools ({})", target)); let _time = timeit(builder); let src = builder.src.join("src/llvm-project/llvm"); let name = pkgname(builder, "llvm-tools"); let tmp = tmpdir(builder); let image = tmp.join("llvm-tools-image"); drop(fs::remove_dir_all(&image)); // Prepare the image directory let src_bindir = builder.llvm_out(target).join("bin"); let dst_bindir = image.join("lib/rustlib").join(&*target).join("bin"); t!(fs::create_dir_all(&dst_bindir)); for tool in LLVM_TOOLS { let exe = src_bindir.join(exe(tool, &target)); builder.install(&exe, &dst_bindir, 0o755); } // Prepare the overlay let overlay = tmp.join("llvm-tools-overlay"); drop(fs::remove_dir_all(&overlay)); builder.create_dir(&overlay); builder.install(&src.join("README.txt"), &overlay, 0o644); builder.install(&src.join("LICENSE.TXT"), &overlay, 0o644); builder.create(&overlay.join("version"), &builder.llvm_tools_vers()); // Generate the installer tarball let mut cmd = rust_installer(builder); cmd.arg("generate") .arg("--product-name=Rust") .arg("--rel-manifest-dir=rustlib") .arg("--success-message=llvm-tools-installed.") .arg("--image-dir") .arg(&image) .arg("--work-dir") .arg(&tmpdir(builder)) .arg("--output-dir") .arg(&distdir(builder)) .arg("--non-installed-overlay") .arg(&overlay) .arg(format!("--package-name={}-{}", name, target))
.arg("--legacy-manifest-dirs=rustlib,cargo") .arg("--component-name=llvm-tools-preview"); builder.run(&mut cmd); Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target))) } } #[derive(Clone, Debug, Eq, Hash, PartialEq)] pub struct Lldb { pub target: Interned<String>, } impl Step for Lldb { type Output = Option<PathBuf>; const ONLY_HOSTS: bool = true; const DEFAULT: bool = true; fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/llvm-project/lldb").path("src/tools/lldb") } fn make_run(run: RunConfig<'_>) { run.builder.ensure(Lldb { target: run.target }); } fn run(self, builder: &Builder<'_>) -> Option<PathBuf> { let target = self.target; if builder.config.dry_run { return None; } let bindir = builder.llvm_out(target).join("bin"); let lldb_exe = bindir.join(exe("lldb", &target)); if !lldb_exe.exists() { return None; } builder.info(&format!("Dist Lldb ({})", target)); let src = builder.src.join("src/llvm-project/lldb"); let name = pkgname(builder, "lldb"); let tmp = tmpdir(builder); let image = tmp.join("lldb-image"); drop(fs::remove_dir_all(&image)); // Prepare the image directory let root = image.join("lib/rustlib").join(&*target); let dst = root.join("bin"); t!(fs::create_dir_all(&dst)); for program in &["lldb", "lldb-argdumper", "lldb-mi", "lldb-server"] { let exe = bindir.join(exe(program, &target)); builder.install(&exe, &dst, 0o755); } // The libraries. let libdir = builder.llvm_out(target).join("lib"); let dst = root.join("lib"); t!(fs::create_dir_all(&dst)); for entry in t!(fs::read_dir(&libdir)) { let entry = entry.unwrap(); if let Ok(name) = entry.file_name().into_string() { if name.starts_with("liblldb.") && !name.ends_with(".a") { if t!(entry.file_type()).is_symlink() { builder.copy_to_folder(&entry.path(), &dst); } else { builder.install(&entry.path(), &dst, 0o755); } } } } // The lldb scripts might be installed in lib/python$version // or in lib64/python$version. If lib64 exists, use it; // otherwise lib. let libdir = builder.llvm_out(target).join("lib64"); let (libdir, libdir_name) = if libdir.exists() { (libdir, "lib64") } else { (builder.llvm_out(target).join("lib"), "lib") }; for entry in t!(fs::read_dir(&libdir)) { let entry = t!(entry); if let Ok(name) = entry.file_name().into_string() { if name.starts_with("python") { let dst = root.join(libdir_name).join(entry.file_name()); t!(fs::create_dir_all(&dst)); builder.cp_r(&entry.path(), &dst); break; } } } // Prepare the overlay let overlay = tmp.join("lldb-overlay"); drop(fs::remove_dir_all(&overlay)); builder.create_dir(&overlay); builder.install(&src.join("LICENSE.TXT"), &overlay, 0o644); builder.create(&overlay.join("version"), &builder.lldb_vers()); // Generate the installer tarball let mut cmd = rust_installer(builder); cmd.arg("generate") .arg("--product-name=Rust") .arg("--rel-manifest-dir=rustlib") .arg("--success-message=lldb-installed.") .arg("--image-dir") .arg(&image) .arg("--work-dir") .arg(&tmpdir(builder)) .arg("--output-dir") .arg(&distdir(builder)) .arg("--non-installed-overlay") .arg(&overlay) .arg(format!("--package-name={}-{}", name, target)) .arg("--legacy-manifest-dirs=rustlib,cargo") .arg("--component-name=lldb-preview"); builder.run(&mut cmd); Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target))) } }
36.351145
100
0.533025
dd7c05aef9692e10f385d23ac771a1bf9de3fa55
17,675
use super::{apply::apply, intros::intros, Result}; use crate::{ analysis::logic::{LogicArena, LogicBuilder, LogicValue}, app_ref, brain::{Term, TermRef}, interactive::Frame, library::prelude::set, term_ref, }; use typed_arena::Arena; #[derive(Debug, Clone)] enum EnsembleTree<'a> { Set(TermRef), Empty, Singleton(TermRef), Union(&'a EnsembleTree<'a>, &'a EnsembleTree<'a>), Intersection(&'a EnsembleTree<'a>, &'a EnsembleTree<'a>), Setminus(&'a EnsembleTree<'a>, &'a EnsembleTree<'a>), Eq(&'a EnsembleTree<'a>, &'a EnsembleTree<'a>), Included(&'a EnsembleTree<'a>, &'a EnsembleTree<'a>), Inset(TermRef, &'a EnsembleTree<'a>), Outset(TermRef, &'a EnsembleTree<'a>), } use EnsembleTree::*; type EnsembleArena<'a> = &'a Arena<EnsembleTree<'a>>; #[derive(Debug, Clone)] enum EnsembleStatement { IsMember(TermRef, TermRef), IsNotMember(TermRef, TermRef), IsSubset(TermRef, TermRef), IsNotSubset(TermRef, TermRef), } use std::collections::HashMap; struct Identifier { map: HashMap<TermRef, usize>, id_counter: usize, } impl Identifier { pub fn new() -> Identifier { Identifier { map: HashMap::new(), id_counter: 1, } } pub fn get(&mut self, v: &TermRef) -> usize { match self.map.get(v) { Some(&x) => x, None => { self.map.insert(v.clone(), self.id_counter); self.id_counter += 1; self.id_counter - 1 } } } } fn from_set_type<'a>( t: &TermRef, arena: EnsembleArena<'a>, sets_id: &mut Identifier, ) -> &'a EnsembleTree<'a> { if let Term::App { func, op: op2 } = t.as_ref() { if let Term::Axiom { unique_name, .. } = func.as_ref() { if unique_name == "set_empty" { return arena.alloc(EnsembleTree::Empty); } } if let Term::App { func, op: op1 } = func.as_ref() { if let Term::Axiom { unique_name, .. } = func.as_ref() { if unique_name == "set_singleton" { return arena.alloc(EnsembleTree::Singleton(op2.clone())); } } if let Term::App { func, op: _ } = func.as_ref() { if let Term::Axiom { unique_name, .. } = func.as_ref() { if unique_name == "union" { let set_a = from_set_type(op1, arena, sets_id); let set_b = from_set_type(op2, arena, sets_id); return arena.alloc(Union(set_a, set_b)); } if unique_name == "intersection" { let set_a = from_set_type(op1, arena, sets_id); let set_b = from_set_type(op2, arena, sets_id); return arena.alloc(Intersection(set_a, set_b)); } if unique_name == "setminus" { let set_a = from_set_type(op1, arena, sets_id); let set_b = from_set_type(op2, arena, sets_id); return arena.alloc(Setminus(set_a, set_b)); } } } } } return arena.alloc(Set(t.clone())); } fn from_prop_type<'a>( t: TermRef, arena: EnsembleArena<'a>, elements_id: &mut Identifier, sets_id: &mut Identifier, ) -> Option<(&'a EnsembleTree<'a>, TermRef)> { if let Term::Forall(a) = t.as_ref() { if let Term::Axiom { unique_name, .. } = a.body.as_ref() { if unique_name == "False" { if let Some((Inset(x, set_a), ty)) = from_prop_type(a.var_ty.clone(), arena, elements_id, sets_id) { return Some((arena.alloc(Outset(x.clone(), set_a)), ty)); } } // TODO: props of the shape `Included -> False` or `eq -> False` are future work } } if let Term::App { func, op: op2 } = t.as_ref() { if let Term::App { func, op: op1 } = func.as_ref() { if let Term::App { func, op: ty } = func.as_ref() { if let Term::Axiom { unique_name, ..
} = func.as_ref() { if unique_name == "inset" { let tree = Inset(op1.clone(), from_set_type(op2, arena, sets_id)); return Some((arena.alloc(tree), term_ref!(app_ref!(set(), ty)))); } if unique_name == "included" { let tree = Included( from_set_type(op1, arena, sets_id), from_set_type(op2, arena, sets_id), ); return Some((arena.alloc(tree), term_ref!(app_ref!(set(), ty)))); } if unique_name == "eq" { let tree = Eq( from_set_type(op1, arena, sets_id), from_set_type(op2, arena, sets_id), ); return Some((arena.alloc(tree), ty.clone())); } } } } None } fn convert( term: TermRef, logic_arena: LogicArena<'_, EnsembleStatement>, ) -> LogicValue<'_, EnsembleStatement> { let my_arena = Arena::new(); let exp = if let Some(x) = from_prop_type( term, &my_arena, &mut Identifier::new(), &mut Identifier::new(), ) { x.0 } else { return LogicValue::unknown(); }; fn f<'a>( exp: &EnsembleTree<'_>, arena: LogicArena<'a, EnsembleStatement>, ) -> LogicValue<'a, EnsembleStatement> { match exp { Empty | Singleton(_) | Set(_) | Union(_, _) | Intersection(_, _) | Setminus(_, _) => { unreachable!() } Eq(x, y) => { let l = f(&Included(x, y), arena); let r = f(&Included(y, x), arena); l.and(r, arena) } Included(Empty, _) => LogicValue::True, Included(Singleton(a), x) => f(&Inset(a.clone(), x), arena), Included(x, Intersection(a, b)) => { let l = f(&Included(x, a), arena); let r = f(&Included(x, b), arena); l.and(r, arena) } Included(Union(a, b), x) => { let l = f(&Included(a, x), arena); let r = f(&Included(b, x), arena); l.and(r, arena) } Included(Set(a), Set(b)) => { LogicValue::from(EnsembleStatement::IsSubset(a.clone(), b.clone())) } Included(_, Union(..) | Setminus(..) | Empty | Singleton(_)) | Included(Intersection(..) | Setminus(..), _) => LogicValue::unknown(), Included(..) => unreachable!(), Inset(_, Empty) => LogicValue::False, Inset(a, Singleton(b)) => { if a == b { LogicValue::True } else { // Not everything here is a set, but it is harmless to treat it as one: // the input is type checked, so sets and non-sets are never mixed incorrectly. let l = LogicValue::from(EnsembleStatement::IsSubset(a.clone(), b.clone())); let r = LogicValue::from(EnsembleStatement::IsSubset(b.clone(), a.clone())); l.and(r, arena) } } Inset(x, Union(a, b)) => { let l = f(&Inset(x.clone(), a), arena); let r = f(&Inset(x.clone(), b), arena); l.or(r, arena) } Inset(x, Intersection(a, b)) => { let l = f(&Inset(x.clone(), a), arena); let r = f(&Inset(x.clone(), b), arena); l.and(r, arena) } Inset(x, Setminus(a, b)) => { let l = f(&Inset(x.clone(), a), arena); let r = f(&Inset(x.clone(), b), arena); l.and(r.not(arena), arena) } Inset(x, Set(a)) => LogicValue::from(EnsembleStatement::IsMember(x.clone(), a.clone())), Inset(..)
=> unreachable!(), Outset(_, _) => todo!(), } } f(exp, logic_arena) } enum InternedStatement { IsMember(usize, usize), IsNotMember(usize, usize), IsSubset(usize, usize), IsNotSubset(usize, usize), } impl InternedStatement { fn intern_array(a: &[EnsembleStatement]) -> (usize, Vec<Self>) { let mut interner = Identifier::new(); let r = a .iter() .map(|x| match x { EnsembleStatement::IsMember(a, b) => { let a = interner.get(a); let b = interner.get(b); Self::IsMember(a, b) } EnsembleStatement::IsNotMember(a, b) => { let a = interner.get(a); let b = interner.get(b); Self::IsNotMember(a, b) } EnsembleStatement::IsSubset(a, b) => { let a = interner.get(a); let b = interner.get(b); Self::IsSubset(a, b) } EnsembleStatement::IsNotSubset(a, b) => { let a = interner.get(a); let b = interner.get(b); Self::IsNotSubset(a, b) } }) .collect(); (interner.id_counter, r) } } fn check_contradiction(a: &[EnsembleStatement]) -> bool { use InternedStatement::*; let (mut cnt_vars, a) = InternedStatement::intern_array(a); let mut m = HashMap::<(usize, usize), bool>::new(); let mut edges = vec![vec![]; cnt_vars]; for x in &a { match x { IsMember(a, b) => { if m.get(&(*a, *b)) == Some(&false) { return true; } m.insert((*a, *b), true); } IsNotMember(a, b) => { if m.get(&(*a, *b)) == Some(&true) { return true; } m.insert((*a, *b), false); } IsSubset(a, b) => { edges[*a].push(*b); } IsNotSubset(a, b) => { let new_var = cnt_vars; cnt_vars += 1; m.insert((new_var, *a), true); m.insert((new_var, *b), false); } } } let (mut scc, edges) = strong_components(&edges); for x in scc.len()..cnt_vars { scc.push(x); } let it = m.into_iter().map(|((x, y), b)| ((scc[x], scc[y]), b)); let mut m = HashMap::new(); for ((a, b), v) in it { if m.get(&(a, b)) == Some(&!v) { return true; } m.insert((a, b), v); } for _ in 0..cnt_vars { let it = m.clone().into_iter(); for ((a, b), v) in it { if v { for c in &edges[b] { if m.get(&(a, *c)) == Some(&false) { return true; } m.insert((a, *c), true); } } } } false } fn strong_components(edges: &[Vec<usize>]) -> (Vec<usize>, Vec<Vec<usize>>) { let n = edges.len(); let mut reachable = vec![vec![false; n]; n]; fn dfs(i: usize, edges: &[Vec<usize>], mark: &mut Vec<bool>) { if mark[i] { return; } mark[i] = true; for j in &edges[i] { dfs(*j, edges, mark); } } for (x, r) in reachable.iter_mut().enumerate() { dfs(x, edges, r); } let scc = (0..n) .map(|x| { (0..x) .find(|y| reachable[x][*y] && reachable[*y][x]) .unwrap_or(x) }) .collect::<Vec<_>>(); let reach_edges = (0..n) .map(|x| { if scc[x] != x { return vec![]; } scc.iter() .enumerate() .filter(|(a, b)| a == *b && reachable[x][*a]) .map(|x| x.0) .collect() }) .collect(); (scc, reach_edges) } fn negator(x: EnsembleStatement) -> EnsembleStatement { use EnsembleStatement::*; match x { IsMember(a, b) => IsNotMember(a, b), IsNotMember(a, b) => IsMember(a, b), IsSubset(a, b) => IsNotSubset(a, b), IsNotSubset(a, b) => IsSubset(a, b), } } fn pre_process_frame(frame: Frame) -> Frame { let mut intros_flag = false; let frame = match apply(frame.clone(), vec!["included_fold"].into_iter()) { Ok(x) if x.len() == 1 => { intros_flag = true; x.into_iter().next().unwrap() } _ => frame, }; let frame = match apply(frame.clone(), vec!["set_equality_forall"].into_iter()) { Ok(x) if x.len() == 1 => { intros_flag = true; x.into_iter().next().unwrap() } _ => frame, }; if intros_flag { match intros(frame.clone(), vec![].into_iter()) { Ok(x) if x.len() == 1 => x.into_iter().next().unwrap(), _ => frame, } } else { frame } } pub fn auto_set(frame: Frame) -> Result<Vec<Frame>> { let 
frame = pre_process_frame(frame); LogicBuilder::build_tactic("auto_set", frame, convert, check_contradiction, negator) } #[cfg(test)] mod tests { use crate::interactive::tests::{run_interactive_to_end, run_interactive_to_fail}; fn success(goal: &str) { run_interactive_to_end(goal, "intros\nauto_set"); } fn fail(goal: &str) { run_interactive_to_fail(goal, "intros", "auto_set"); } #[test] fn success1() { success("∀ T: U, ∀ a: T, ∀ A B C: set T, a ∈ C ∩ B -> a ∈ A ∩ C -> a ∈ C ∩ B ∩ A"); } #[test] fn subset_trans() { success("∀ T: U, ∀ A B C: set T, A ⊆ B → B ⊆ C → A ⊆ C"); } #[test] fn union() { success("∀ T: U, ∀ a: T, ∀ A B: set T, a ∈ A ∪ B → a ∈ A ∨ a ∈ B"); success("∀ T: U, ∀ a: T, ∀ A B: set T, a ∈ A ∨ a ∈ B → a ∈ A ∪ B"); fail("∀ T: U, ∀ A B C: set T, A ⊆ C ∪ B -> A ⊆ C ∨ A ⊆ B"); success("∀ T: U, ∀ A B: set T, A ⊆ A ∪ B"); } #[test] fn intersect() { success("∀ T: U, ∀ a: T, ∀ A B: set T, a ∈ A ∩ B → a ∈ A"); fail("∀ T: U, ∀ a: T, ∀ A B: set T, a ∈ A → a ∈ A ∩ B"); success("∀ T: U, ∀ a: T, ∀ A B: set T, a ∈ A ∩ B → a ∈ B"); success("∀ T: U, ∀ a: T, ∀ A B: set T, a ∈ A ∧ a ∈ B → a ∈ A ∩ B"); success("∀ T: U, ∀ A B C: set T, A ⊆ C ∩ B -> A ⊆ C ∧ A ⊆ B"); } #[test] fn intersect_union() { success("∀ T: U, ∀ a: T, ∀ A B: set T, a ∈ A ∩ B -> a ∈ A ∪ B "); } #[test] fn with_logic() { success( "∀ T: U, ∀ a: T, ∀ A1 A2 B1 B2: set T,\ a ∈ A1 ∩ B1 ∨ a ∈ A2 ∩ B2 -> a ∈ A1 ∪ A2 ∧ a ∈ B1 ∪ B2", ); } #[test] fn set_minus() { success("∀ T: U, ∀ A B: set T, B ⊆ A -> ∀ x: T, x ∈ A -> x ∈ B ∪ A ∖ B"); } #[test] fn empty() { success("∀ T: U, ∀ a: T, a ∈ {} -> False"); success("∀ T: U, ∀ A: set T, {} ⊆ A"); } #[test] fn equality() { success("∀ T: U, ∀ A B: set T, A = B -> A ∈ {B}"); success("∀ T: U, ∀ a: T, ∀ A B: set T, A = B -> a ∈ A -> a ∈ B"); success("∀ T: U, ∀ S: set (set T), ∀ A B: set T, A = B -> A ∈ S -> B ∈ S"); success( "∀ T: U, ∀ S: set (set T), ∀ A B C: set T,\ A ⊆ B -> B ⊆ C -> C ⊆ A -> A ∈ S -> B ∈ S ∧ C ∈ S", ); } #[test] fn singleton() { success("2 ∈ {2}"); success("2 ∈ {1, 2, 3}"); success("∀ T: U, ∀ a: T, a ∈ {a}"); fail("∀ T: U, ∀ a b: T, a ∈ {b}"); success("∀ T: U, ∀ a b: T, a ∈ {a, b}"); success("∀ T: U, ∀ a: T, ∀ A B: set T, a ∈ A -> {a} ⊆ A"); success("∀ T: U, ∀ a b: T, ∀ A B: set T, a ∈ A -> b ∈ A -> {a, b} ⊆ A"); success("{2} ⊆ {2}"); success("{2} ⊆ {2, 3}"); fail("{2, 3} ⊆ {2}"); success("{2, 3} ⊆ {2, 3}"); success("{2, 3} ⊆ {2, 5, 3}"); success("∀ T: U, ∀ a b: T, a ∈ {b} -> a = b"); success("∀ a: ℤ, a ∈ {2} -> a = 2"); success("∀ a: ℤ, a ∈ {2, 3} -> a = 2 ∨ a = 3"); } #[test] fn a_random_test() { success( "∀ T: U, ∀ a: T, ∀ A B C D E F: set T,\ a ∈ C -> a ∈ E -> a ∈ F -> a ∈ (A ∪ (B ∪ C)) ∩ (D ∪ (E ∩ F))", ); fail( "∀ T: U, ∀ a: T, ∀ A B C D E F: set T,\ a ∈ C -> a ∈ E -> a ∈ (A ∪ (B ∪ C)) ∩ (D ∪ (E ∩ F))", ); } #[test] fn remove_element() { success("∀ T: U, ∀ a: T, ∀ S: set T, a ∈ S → {a} ∪ S ∖ {a} = S") } #[test] fn dont_solve_forall_without_intros() { run_interactive_to_fail( "∀ T: U, ∀ A B C: set T, A ⊆ B -> B ⊆ C -> A ⊆ C", "", "auto_set", ); } #[test] fn imp_and_subset() { success( "∀ T: U, ∀ A B C D: set T, A ⊆ B -> (A ⊆ B -> B ⊆ C) ->\ (B ⊆ C -> C ⊆ D) -> A ⊆ D", ); fail( "∀ T: U, ∀ A B C D: set T, (A ⊆ B -> B ⊆ C) ->\ (B ⊆ C -> C ⊆ D) -> A ⊆ D", ); success( "∀ A: U, ∀ P Q R S: set A, ∀ a: A, (a ∈ R -> a ∈ S) ->\ a ∈ R -> ((a ∈ S -> False) ∨ a ∈ Q) -> a ∈ Q ", ); } }
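// A small worked example of `strong_components` above, written as a sketch:
// `scc[v]` is the smallest index mutually reachable with `v` (the component
// representative), and the condensed edge lists are kept only on representatives
// and include the representative itself. Module and test names are hypothetical.
#[cfg(test)]
mod strong_components_example {
    use super::strong_components;

    #[test]
    fn cycle_with_tail() {
        // 0 <-> 1 form one strongly connected component; 2 only reaches them.
        let edges = vec![vec![1], vec![0], vec![0]];
        let (scc, condensed) = strong_components(&edges);
        // 0 and 1 share representative 0; 2 is its own component.
        assert_eq!(scc, vec![0, 0, 2]);
        // Condensation edges, reflexive and populated only on representatives.
        assert_eq!(condensed, vec![vec![0], vec![], vec![0, 2]]);
    }
}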
32.731481
100
0.425177
0e39399ae808e4c618525c7eab685a6f3870104e
1,898
//! Types for the [`m.room.message.feedback`] event. //! //! [`m.room.message.feedback`]: https://spec.matrix.org/v1.1/client-server-api/#mroommessagefeedback use ruma_events_macros::EventContent; use ruma_identifiers::EventId; use ruma_serde::StringEnum; use serde::{Deserialize, Serialize}; /// The content of an `m.room.message.feedback` event. /// /// An acknowledgement of a message. /// /// N.B.: Usage of this event is discouraged in favor of the receipts module. Most clients will /// not recognize this event. #[derive(Clone, Debug, Deserialize, Serialize, EventContent)] #[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)] #[ruma_event(type = "m.room.message.feedback", kind = Message)] pub struct RoomMessageFeedbackEventContent { /// The event that this feedback is related to. pub target_event_id: Box<EventId>, /// The type of feedback. #[serde(rename = "type")] pub feedback_type: FeedbackType, } impl RoomMessageFeedbackEventContent { /// Create a `RoomMessageFeedbackEventContent` from the given target event id and feedback type. pub fn new(target_event_id: Box<EventId>, feedback_type: FeedbackType) -> Self { Self { target_event_id, feedback_type } } } /// A type of feedback. /// /// This type can hold an arbitrary string. To check for formats that are not available as a /// documented variant here, use its string representation, obtained through `.as_str()`. #[derive(Clone, Debug, PartialEq, Eq, StringEnum)] #[ruma_enum(rename_all = "snake_case")] #[non_exhaustive] pub enum FeedbackType { /// Sent when a message is received. Delivered, /// Sent when a message has been observed by the end user. Read, #[doc(hidden)] _Custom(String), } impl FeedbackType { /// Creates a string slice from this `FeedbackType`. pub fn as_str(&self) -> &str { self.as_ref() } }
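For reference, the wire form this content type targets is a two-field JSON object in which `feedback_type` is serialized under the key `type`. A self-contained round-trip sketch with a simplified stand-in struct (plain `String`s in place of `Box<EventId>` and `FeedbackType`; assumes only `serde` and `serde_json`, not the actual ruma test setup) might look like:

```rust
use serde::{Deserialize, Serialize};

// Hypothetical mirror of RoomMessageFeedbackEventContent's JSON shape,
// not the real ruma type.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct FeedbackContent {
    target_event_id: String,
    #[serde(rename = "type")]
    feedback_type: String,
}

fn main() {
    // `StringEnum` with `rename_all = "snake_case"` maps Delivered <-> "delivered".
    let json = r#"{"target_event_id":"$example:localhost","type":"delivered"}"#;
    let content: FeedbackContent = serde_json::from_str(json).unwrap();
    assert_eq!(content.feedback_type, "delivered");
    assert_eq!(serde_json::to_string(&content).unwrap(), json);
}
```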
32.169492
101
0.705479
1dad45bf62547b1dfe79763b32b595f4bee68708
151,661
use std::collections::HashMap; use std::cell::RefCell; use std::default::Default; use std::collections::BTreeMap; use serde_json as json; use std::io; use std::fs; use std::mem; use std::thread::sleep; use crate::client; // ############## // UTILITIES ### // ############ /// Identifies an OAuth2 authorization scope. /// A scope is needed when requesting an /// [authorization token](https://developers.google.com/youtube/v3/guides/authentication). #[derive(PartialEq, Eq, Hash)] pub enum Scope { /// View and manage your advertising data in DoubleClick Search Full, } impl AsRef<str> for Scope { fn as_ref(&self) -> &str { match *self { Scope::Full => "https://www.googleapis.com/auth/doubleclicksearch", } } } impl Default for Scope { fn default() -> Scope { Scope::Full } } // ######## // HUB ### // ###### /// Central instance to access all Doubleclicksearch related resource activities /// /// # Examples /// /// Instantiate a new hub /// /// ```test_harness,no_run /// extern crate hyper; /// extern crate hyper_rustls; /// extern crate yup_oauth2 as oauth2; /// extern crate google_doubleclicksearch2 as doubleclicksearch2; /// use doubleclicksearch2::{Result, Error}; /// # async fn dox() { /// use std::default::Default; /// use oauth2; /// use doubleclicksearch2::Doubleclicksearch; /// /// // Get an ApplicationSecret instance by some means. It contains the `client_id` and /// // `client_secret`, among other things. /// let secret: oauth2::ApplicationSecret = Default::default(); /// // Instantiate the authenticator. It will choose a suitable authentication flow for you, /// // unless you replace `None` with the desired Flow. /// // Provide your own `AuthenticatorDelegate` to adjust the way it operates and get feedback about /// // what's going on. You probably want to bring in your own `TokenStorage` to persist tokens and /// // retrieve them from storage. /// let auth = yup_oauth2::InstalledFlowAuthenticator::builder( /// secret, /// yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect, /// ).build().await.unwrap(); /// let mut hub = Doubleclicksearch::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.reports().get_file("reportId", -27) /// .doit().await; /// /// match result { /// Err(e) => match e { /// // The Error enum provides details about what exactly happened. 
/// // You can also just use its `Debug`, `Display` or `Error` traits /// Error::HttpError(_) /// |Error::Io(_) /// |Error::MissingAPIKey /// |Error::MissingToken(_) /// |Error::Cancelled /// |Error::UploadSizeLimitExceeded(_, _) /// |Error::Failure(_) /// |Error::BadRequest(_) /// |Error::FieldClash(_) /// |Error::JsonDecodeError(_, _) => println!("{}", e), /// }, /// Ok(res) => println!("Success: {:?}", res), /// } /// # } /// ``` #[derive(Clone)] pub struct Doubleclicksearch<> { client: hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>, auth: oauth2::authenticator::Authenticator<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>>, _user_agent: String, _base_url: String, _root_url: String, } impl<'a, > client::Hub for Doubleclicksearch<> {} impl<'a, > Doubleclicksearch<> { pub fn new(client: hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>, authenticator: oauth2::authenticator::Authenticator<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>>) -> Doubleclicksearch<> { Doubleclicksearch { client, auth: authenticator, _user_agent: "google-api-rust-client/2.0.8".to_string(), _base_url: "https://doubleclicksearch.googleapis.com/".to_string(), _root_url: "https://doubleclicksearch.googleapis.com/".to_string(), } } pub fn conversion(&'a self) -> ConversionMethods<'a> { ConversionMethods { hub: &self } } pub fn reports(&'a self) -> ReportMethods<'a> { ReportMethods { hub: &self } } pub fn saved_columns(&'a self) -> SavedColumnMethods<'a> { SavedColumnMethods { hub: &self } } /// Set the user-agent header field to use in all requests to the server. /// It defaults to `google-api-rust-client/2.0.8`. /// /// Returns the previously set user-agent. pub fn user_agent(&mut self, agent_name: String) -> String { mem::replace(&mut self._user_agent, agent_name) } /// Set the base url to use in all requests to the server. /// It defaults to `https://doubleclicksearch.googleapis.com/`. /// /// Returns the previously set base url. pub fn base_url(&mut self, new_base_url: String) -> String { mem::replace(&mut self._base_url, new_base_url) } /// Set the root url to use in all requests to the server. /// It defaults to `https://doubleclicksearch.googleapis.com/`. /// /// Returns the previously set root url. pub fn root_url(&mut self, new_root_url: String) -> String { mem::replace(&mut self._root_url, new_root_url) } } // ############ // SCHEMAS ### // ########## /// A message containing availability data relevant to DoubleClick Search. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct Availability { /// DS advertiser ID. #[serde(rename="advertiserId")] pub advertiser_id: Option<String>, /// DS agency ID. #[serde(rename="agencyId")] pub agency_id: Option<String>, /// The time by which all conversions have been uploaded, in epoch millis UTC. #[serde(rename="availabilityTimestamp")] pub availability_timestamp: Option<String>, /// The numeric segmentation identifier (for example, DoubleClick Search Floodlight activity ID). #[serde(rename="segmentationId")] pub segmentation_id: Option<String>, /// The friendly segmentation identifier (for example, DoubleClick Search Floodlight activity name). #[serde(rename="segmentationName")] pub segmentation_name: Option<String>, /// The segmentation type that this availability is for (its default value is `FLOODLIGHT`). 
#[serde(rename="segmentationType")] pub segmentation_type: Option<String>, } impl client::Part for Availability {} /// A conversion containing data relevant to DoubleClick Search. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct Conversion { /// DS ad group ID. #[serde(rename="adGroupId")] pub ad_group_id: Option<String>, /// DS ad ID. #[serde(rename="adId")] pub ad_id: Option<String>, /// DS advertiser ID. #[serde(rename="advertiserId")] pub advertiser_id: Option<String>, /// DS agency ID. #[serde(rename="agencyId")] pub agency_id: Option<String>, /// Available to advertisers only after contacting DoubleClick Search customer support. #[serde(rename="attributionModel")] pub attribution_model: Option<String>, /// DS campaign ID. #[serde(rename="campaignId")] pub campaign_id: Option<String>, /// Sales channel for the product. Acceptable values are: - "`local`": a physical store - "`online`": an online store pub channel: Option<String>, /// DS click ID for the conversion. #[serde(rename="clickId")] pub click_id: Option<String>, /// For offline conversions, advertisers provide this ID. Advertisers can specify any ID that is meaningful to them. Each conversion in a request must specify a unique ID, and the combination of ID and timestamp must be unique amongst all conversions within the advertiser. For online conversions, DS copies the `dsConversionId` or `floodlightOrderId` into this property depending on the advertiser's Floodlight instructions. #[serde(rename="conversionId")] pub conversion_id: Option<String>, /// The time at which the conversion was last modified, in epoch millis UTC. #[serde(rename="conversionModifiedTimestamp")] pub conversion_modified_timestamp: Option<String>, /// The time at which the conversion took place, in epoch millis UTC. #[serde(rename="conversionTimestamp")] pub conversion_timestamp: Option<String>, /// Available to advertisers only after contacting DoubleClick Search customer support. #[serde(rename="countMillis")] pub count_millis: Option<String>, /// DS criterion (keyword) ID. #[serde(rename="criterionId")] pub criterion_id: Option<String>, /// The currency code for the conversion's revenue. Should be in ISO 4217 alphabetic (3-char) format. #[serde(rename="currencyCode")] pub currency_code: Option<String>, /// Custom dimensions for the conversion, which can be used to filter data in a report. #[serde(rename="customDimension")] pub custom_dimension: Option<Vec<CustomDimension>>, /// Custom metrics for the conversion. #[serde(rename="customMetric")] pub custom_metric: Option<Vec<CustomMetric>>, /// The type of device on which the conversion occurred. #[serde(rename="deviceType")] pub device_type: Option<String>, /// ID that DoubleClick Search generates for each conversion. #[serde(rename="dsConversionId")] pub ds_conversion_id: Option<String>, /// DS engine account ID. #[serde(rename="engineAccountId")] pub engine_account_id: Option<String>, /// The Floodlight order ID provided by the advertiser for the conversion. #[serde(rename="floodlightOrderId")] pub floodlight_order_id: Option<String>, /// ID that DS generates and uses to uniquely identify the inventory account that contains the product. #[serde(rename="inventoryAccountId")] pub inventory_account_id: Option<String>, /// The country registered for the Merchant Center feed that contains the product. Use an ISO 3166 code to specify a country. 
#[serde(rename="productCountry")] pub product_country: Option<String>, /// DS product group ID. #[serde(rename="productGroupId")] pub product_group_id: Option<String>, /// The product ID (SKU). #[serde(rename="productId")] pub product_id: Option<String>, /// The language registered for the Merchant Center feed that contains the product. Use an ISO 639 code to specify a language. #[serde(rename="productLanguage")] pub product_language: Option<String>, /// The quantity of this conversion, in millis. #[serde(rename="quantityMillis")] pub quantity_millis: Option<String>, /// The revenue amount of this `TRANSACTION` conversion, in micros (value multiplied by 1000000, no decimal). For example, to specify a revenue value of "10" enter "10000000" (10 million) in your request. #[serde(rename="revenueMicros")] pub revenue_micros: Option<String>, /// The numeric segmentation identifier (for example, DoubleClick Search Floodlight activity ID). #[serde(rename="segmentationId")] pub segmentation_id: Option<String>, /// The friendly segmentation identifier (for example, DoubleClick Search Floodlight activity name). #[serde(rename="segmentationName")] pub segmentation_name: Option<String>, /// The segmentation type of this conversion (for example, `FLOODLIGHT`). #[serde(rename="segmentationType")] pub segmentation_type: Option<String>, /// The state of the conversion, that is, either `ACTIVE` or `REMOVED`. Note: state DELETED is deprecated. pub state: Option<String>, /// The ID of the local store for which the product was advertised. Applicable only when the channel is "`local`". #[serde(rename="storeId")] pub store_id: Option<String>, /// The type of the conversion, that is, either `ACTION` or `TRANSACTION`. An `ACTION` conversion is an action by the user that has no monetarily quantifiable value, while a `TRANSACTION` conversion is an action that does have a monetarily quantifiable value. Examples are email list signups (`ACTION`) versus ecommerce purchases (`TRANSACTION`). #[serde(rename="type")] pub type_: Option<String>, } impl client::Part for Conversion {} /// A list of conversions. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [get conversion](ConversionGetCall) (response) /// * [insert conversion](ConversionInsertCall) (request|response) /// * [update conversion](ConversionUpdateCall) (request|response) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct ConversionList { /// The conversions being requested. pub conversion: Option<Vec<Conversion>>, /// Identifies this as a ConversionList resource. Value: the fixed string doubleclicksearch#conversionList. pub kind: Option<String>, } impl client::RequestValue for ConversionList {} impl client::ResponseResult for ConversionList {} /// A message containing the custom dimension. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct CustomDimension { /// Custom dimension name. pub name: Option<String>, /// Custom dimension value. pub value: Option<String>, } impl client::Part for CustomDimension {} /// A message containing the custom metric. /// /// This type is not used in any activity, and only used as *part* of another schema. 
/// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct CustomMetric { /// Custom metric name. pub name: Option<String>, /// Custom metric numeric value. pub value: Option<f64>, } impl client::Part for CustomMetric {} /// A DoubleClick Search report. This object contains the report request, some report metadata such as currency code, and the generated report rows or report files. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [generate reports](ReportGenerateCall) (response) /// * [get reports](ReportGetCall) (response) /// * [get file reports](ReportGetFileCall) (none) /// * [request reports](ReportRequestCall) (response) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct Report { /// Asynchronous report only. Contains a list of generated report files once the report has successfully completed. pub files: Option<Vec<ReportFiles>>, /// Asynchronous report only. Id of the report. pub id: Option<String>, /// Asynchronous report only. True if and only if the report has completed successfully and the report files are ready to be downloaded. #[serde(rename="isReportReady")] pub is_report_ready: Option<bool>, /// Identifies this as a Report resource. Value: the fixed string `doubleclicksearch#report`. pub kind: Option<String>, /// The request that created the report. Optional fields not specified in the original request are filled with default values. pub request: Option<ReportRequest>, /// The number of report rows generated by the report, not including headers. #[serde(rename="rowCount")] pub row_count: Option<i32>, /// Synchronous report only. Generated report rows. pub rows: Option<Vec<ReportRow>>, /// The currency code of all monetary values produced in the report, including values that are set by users (e.g., keyword bid settings) and metrics (e.g., cost and revenue). The currency code of a report is determined by the `statisticsCurrency` field of the report request. #[serde(rename="statisticsCurrencyCode")] pub statistics_currency_code: Option<String>, /// If all statistics of the report are sourced from the same time zone, this would be it. Otherwise the field is unset. #[serde(rename="statisticsTimeZone")] pub statistics_time_zone: Option<String>, } impl client::Resource for Report {} impl client::ResponseResult for Report {} /// A request object used to create a DoubleClick Search report. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct ReportApiColumnSpec { /// Name of a DoubleClick Search column to include in the report. #[serde(rename="columnName")] pub column_name: Option<String>, /// Segments a report by a custom dimension. The report must be scoped to an advertiser or lower, and the custom dimension must already be set up in DoubleClick Search. The custom dimension name, which appears in DoubleClick Search, is case sensitive.\ If used in a conversion report, returns the value of the specified custom dimension for the given conversion, if set. This column does not segment the conversion report. #[serde(rename="customDimensionName")] pub custom_dimension_name: Option<String>, /// Name of a custom metric to include in the report. 
The report must be scoped to an advertiser or lower, and the custom metric must already be set up in DoubleClick Search. The custom metric name, which appears in DoubleClick Search, is case sensitive. #[serde(rename="customMetricName")] pub custom_metric_name: Option<String>, /// Inclusive day in YYYY-MM-DD format. When provided, this overrides the overall time range of the report for this column only. Must be provided together with `startDate`. #[serde(rename="endDate")] pub end_date: Option<String>, /// Synchronous report only. Set to `true` to group by this column. Defaults to `false`. #[serde(rename="groupByColumn")] pub group_by_column: Option<bool>, /// Text used to identify this column in the report output; defaults to `columnName` or `savedColumnName` when not specified. This can be used to prevent collisions between DoubleClick Search columns and saved columns with the same name. #[serde(rename="headerText")] pub header_text: Option<String>, /// The platform that is used to provide data for the custom dimension. Acceptable values are "floodlight". #[serde(rename="platformSource")] pub platform_source: Option<String>, /// Returns metrics only for a specific type of product activity. Accepted values are: - "`sold`": returns metrics only for products that were sold - "`advertised`": returns metrics only for products that were advertised in a Shopping campaign, and that might or might not have been sold #[serde(rename="productReportPerspective")] pub product_report_perspective: Option<String>, /// Name of a saved column to include in the report. The report must be scoped at advertiser or lower, and this saved column must already be created in the DoubleClick Search UI. #[serde(rename="savedColumnName")] pub saved_column_name: Option<String>, /// Inclusive date in YYYY-MM-DD format. When provided, this overrides the overall time range of the report for this column only. Must be provided together with `endDate`. #[serde(rename="startDate")] pub start_date: Option<String>, } impl client::Part for ReportApiColumnSpec {} /// A request object used to create a DoubleClick Search report. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [generate reports](ReportGenerateCall) (request) /// * [request reports](ReportRequestCall) (request) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct ReportRequest { /// The columns to include in the report. This includes both DoubleClick Search columns and saved columns. For DoubleClick Search columns, only the `columnName` parameter is required. For saved columns only the `savedColumnName` parameter is required. Both `columnName` and `savedColumnName` cannot be set in the same stanza.\ The maximum number of columns per request is 300. pub columns: Option<Vec<ReportApiColumnSpec>>, /// Format that the report should be returned in. Currently `csv` or `tsv` is supported. #[serde(rename="downloadFormat")] pub download_format: Option<String>, /// A list of filters to be applied to the report.\ The maximum number of filters per request is 300. pub filters: Option<Vec<ReportRequestFilters>>, /// Determines if removed entities should be included in the report. Defaults to `false`. Deprecated, please use `includeRemovedEntities` instead. 
#[serde(rename="includeDeletedEntities")] pub include_deleted_entities: Option<bool>, /// Determines if removed entities should be included in the report. Defaults to `false`. #[serde(rename="includeRemovedEntities")] pub include_removed_entities: Option<bool>, /// Asynchronous report only. The maximum number of rows per report file. A large report is split into many files based on this field. Acceptable values are `1000000` to `100000000`, inclusive. #[serde(rename="maxRowsPerFile")] pub max_rows_per_file: Option<i32>, /// Synchronous report only. A list of columns and directions defining sorting to be performed on the report rows.\ The maximum number of orderings per request is 300. #[serde(rename="orderBy")] pub order_by: Option<Vec<ReportRequestOrderBy>>, /// The reportScope is a set of IDs that are used to determine which subset of entities will be returned in the report. The full lineage of IDs from the lowest scoped level desired up through agency is required. #[serde(rename="reportScope")] pub report_scope: Option<ReportRequestReportScope>, /// Determines the type of rows that are returned in the report. For example, if you specify `reportType: keyword`, each row in the report will contain data about a keyword. See the [Types of Reports](/search-ads/v2/report-types/) reference for the columns that are available for each type. #[serde(rename="reportType")] pub report_type: Option<String>, /// Synchronous report only. The maximum number of rows to return; additional rows are dropped. Acceptable values are `0` to `10000`, inclusive. Defaults to `10000`. #[serde(rename="rowCount")] pub row_count: Option<i32>, /// Synchronous report only. Zero-based index of the first row to return. Acceptable values are `0` to `50000`, inclusive. Defaults to `0`. #[serde(rename="startRow")] pub start_row: Option<i32>, /// Specifies the currency in which monetary will be returned. Possible values are: `usd`, `agency` (valid if the report is scoped to agency or lower), `advertiser` (valid if the report is scoped to * advertiser or lower), or `account` (valid if the report is scoped to engine account or lower). #[serde(rename="statisticsCurrency")] pub statistics_currency: Option<String>, /// If metrics are requested in a report, this argument will be used to restrict the metrics to a specific time range. #[serde(rename="timeRange")] pub time_range: Option<ReportRequestTimeRange>, /// If `true`, the report would only be created if all the requested stat data are sourced from a single timezone. Defaults to `false`. #[serde(rename="verifySingleTimeZone")] pub verify_single_time_zone: Option<bool>, } impl client::RequestValue for ReportRequest {} /// A row in a DoubleClick Search report. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct ReportRow(Option<HashMap<String, String>>); impl client::Part for ReportRow {} /// A saved column /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [list saved columns](SavedColumnListCall) (none) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct SavedColumn { /// Identifies this as a SavedColumn resource. Value: the fixed string doubleclicksearch#savedColumn. pub kind: Option<String>, /// The name of the saved column. 
#[serde(rename="savedColumnName")] pub saved_column_name: Option<String>, /// The type of data this saved column will produce. #[serde(rename="type")] pub type_: Option<String>, } impl client::Resource for SavedColumn {} /// A list of saved columns. Advertisers create saved columns to report on Floodlight activities, Google Analytics goals, or custom KPIs. To request reports with saved columns, you'll need the saved column names that are available from this list. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [list saved columns](SavedColumnListCall) (response) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct SavedColumnList { /// The saved columns being requested. pub items: Option<Vec<SavedColumn>>, /// Identifies this as a SavedColumnList resource. Value: the fixed string doubleclicksearch#savedColumnList. pub kind: Option<String>, } impl client::ResponseResult for SavedColumnList {} /// The request to update availability. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [update availability conversion](ConversionUpdateAvailabilityCall) (request) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct UpdateAvailabilityRequest { /// The availabilities being requested. pub availabilities: Option<Vec<Availability>>, } impl client::RequestValue for UpdateAvailabilityRequest {} /// The response to a update availability request. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [update availability conversion](ConversionUpdateAvailabilityCall) (response) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct UpdateAvailabilityResponse { /// The availabilities being returned. pub availabilities: Option<Vec<Availability>>, } impl client::ResponseResult for UpdateAvailabilityResponse {} /// Asynchronous report only. Contains a list of generated report files once the report has successfully completed. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct ReportFiles { /// The size of this report file in bytes. #[serde(rename="byteCount")] pub byte_count: Option<String>, /// Use this url to download the report file. pub url: Option<String>, } impl client::NestedType for ReportFiles {} impl client::Part for ReportFiles {} /// A list of filters to be applied to the report.\ The maximum number of filters per request is 300. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct ReportRequestFilters { /// Column to perform the filter on. This can be a DoubleClick Search column or a saved column. pub column: Option<ReportApiColumnSpec>, /// Operator to use in the filter. See the filter reference for a list of available operators. 
pub operator: Option<String>, /// A list of values to filter the column value against.\ The maximum number of filter values per request is 300. pub values: Option<Vec<String>>, } impl client::NestedType for ReportRequestFilters {} impl client::Part for ReportRequestFilters {} /// Synchronous report only. A list of columns and directions defining sorting to be performed on the report rows.\ The maximum number of orderings per request is 300. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct ReportRequestOrderBy { /// Column to perform the sort on. This can be a DoubleClick Search-defined column or a saved column. pub column: Option<ReportApiColumnSpec>, /// The sort direction, which is either `ascending` or `descending`. #[serde(rename="sortOrder")] pub sort_order: Option<String>, } impl client::NestedType for ReportRequestOrderBy {} impl client::Part for ReportRequestOrderBy {} /// The reportScope is a set of IDs that are used to determine which subset of entities will be returned in the report. The full lineage of IDs from the lowest scoped level desired up through agency is required. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct ReportRequestReportScope { /// DS ad group ID. #[serde(rename="adGroupId")] pub ad_group_id: Option<String>, /// DS ad ID. #[serde(rename="adId")] pub ad_id: Option<String>, /// DS advertiser ID. #[serde(rename="advertiserId")] pub advertiser_id: Option<String>, /// DS agency ID. #[serde(rename="agencyId")] pub agency_id: Option<String>, /// DS campaign ID. #[serde(rename="campaignId")] pub campaign_id: Option<String>, /// DS engine account ID. #[serde(rename="engineAccountId")] pub engine_account_id: Option<String>, /// DS keyword ID. #[serde(rename="keywordId")] pub keyword_id: Option<String>, } impl client::NestedType for ReportRequestReportScope {} impl client::Part for ReportRequestReportScope {} /// If metrics are requested in a report, this argument will be used to restrict the metrics to a specific time range. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct ReportRequestTimeRange { /// Inclusive UTC timestamp in RFC format, e.g., `2013-07-16T10:16:23.555Z`. See additional references on how changed attribute reports work. #[serde(rename="changedAttributesSinceTimestamp")] pub changed_attributes_since_timestamp: Option<String>, /// Inclusive UTC timestamp in RFC format, e.g., `2013-07-16T10:16:23.555Z`. See additional references on how changed metrics reports work. #[serde(rename="changedMetricsSinceTimestamp")] pub changed_metrics_since_timestamp: Option<String>, /// Inclusive date in YYYY-MM-DD format. #[serde(rename="endDate")] pub end_date: Option<String>, /// Inclusive date in YYYY-MM-DD format. #[serde(rename="startDate")] pub start_date: Option<String>, } impl client::NestedType for ReportRequestTimeRange {} impl client::Part for ReportRequestTimeRange {} // ################### // MethodBuilders ### // ################# /// A builder providing access to all methods supported on *conversion* resources. /// It is not used directly, but through the `Doubleclicksearch` hub. 
/// /// # Example /// /// Instantiate a resource builder /// /// ```test_harness,no_run /// extern crate hyper; /// extern crate hyper_rustls; /// extern crate yup_oauth2 as oauth2; /// extern crate google_doubleclicksearch2 as doubleclicksearch2; /// /// # async fn dox() { /// use std::default::Default; /// use oauth2; /// use doubleclicksearch2::Doubleclicksearch; /// /// let secret: oauth2::ApplicationSecret = Default::default(); /// let auth = yup_oauth2::InstalledFlowAuthenticator::builder( /// secret, /// yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect, /// ).build().await.unwrap(); /// let mut hub = Doubleclicksearch::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth); /// // Usually you wouldn't bind this to a variable, but keep calling *CallBuilders* /// // like `get(...)`, `insert(...)`, `update(...)` and `update_availability(...)` /// // to build up your call. /// let rb = hub.conversion(); /// # } /// ``` pub struct ConversionMethods<'a> where { hub: &'a Doubleclicksearch<>, } impl<'a> client::MethodsBuilder for ConversionMethods<'a> {} impl<'a> ConversionMethods<'a> { /// Create a builder to help you perform the following task: /// /// Retrieves a list of conversions from a DoubleClick Search engine account. /// /// # Arguments /// /// * `agencyId` - Numeric ID of the agency. /// * `advertiserId` - Numeric ID of the advertiser. /// * `engineAccountId` - Numeric ID of the engine account. /// * `endDate` - Last date (inclusive) on which to retrieve conversions. Format is yyyymmdd. /// * `rowCount` - The number of conversions to return per call. /// * `startDate` - First date (inclusive) on which to retrieve conversions. Format is yyyymmdd. /// * `startRow` - The 0-based starting index for retrieving conversions results. pub fn get(&self, agency_id: &str, advertiser_id: &str, engine_account_id: &str, end_date: i32, row_count: i32, start_date: i32, start_row: u32) -> ConversionGetCall<'a> { ConversionGetCall { hub: self.hub, _agency_id: agency_id.to_string(), _advertiser_id: advertiser_id.to_string(), _engine_account_id: engine_account_id.to_string(), _end_date: end_date, _row_count: row_count, _start_date: start_date, _start_row: start_row, _criterion_id: Default::default(), _campaign_id: Default::default(), _ad_id: Default::default(), _ad_group_id: Default::default(), _delegate: Default::default(), _additional_params: Default::default(), _scopes: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Inserts a batch of new conversions into DoubleClick Search. /// /// # Arguments /// /// * `request` - No description provided. pub fn insert(&self, request: ConversionList) -> ConversionInsertCall<'a> { ConversionInsertCall { hub: self.hub, _request: request, _delegate: Default::default(), _additional_params: Default::default(), _scopes: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Updates a batch of conversions in DoubleClick Search. /// /// # Arguments /// /// * `request` - No description provided. pub fn update(&self, request: ConversionList) -> ConversionUpdateCall<'a> { ConversionUpdateCall { hub: self.hub, _request: request, _delegate: Default::default(), _additional_params: Default::default(), _scopes: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Updates the availabilities of a batch of floodlight activities in DoubleClick Search. /// /// # Arguments /// /// * `request` - No description provided. 
pub fn update_availability(&self, request: UpdateAvailabilityRequest) -> ConversionUpdateAvailabilityCall<'a> { ConversionUpdateAvailabilityCall { hub: self.hub, _request: request, _delegate: Default::default(), _additional_params: Default::default(), _scopes: Default::default(), } } } /// A builder providing access to all methods supported on *report* resources. /// It is not used directly, but through the `Doubleclicksearch` hub. /// /// # Example /// /// Instantiate a resource builder /// /// ```test_harness,no_run /// extern crate hyper; /// extern crate hyper_rustls; /// extern crate yup_oauth2 as oauth2; /// extern crate google_doubleclicksearch2 as doubleclicksearch2; /// /// # async fn dox() { /// use std::default::Default; /// use oauth2; /// use doubleclicksearch2::Doubleclicksearch; /// /// let secret: oauth2::ApplicationSecret = Default::default(); /// let auth = yup_oauth2::InstalledFlowAuthenticator::builder( /// secret, /// yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect, /// ).build().await.unwrap(); /// let mut hub = Doubleclicksearch::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth); /// // Usually you wouldn't bind this to a variable, but keep calling *CallBuilders* /// // like `generate(...)`, `get(...)`, `get_file(...)` and `request(...)` /// // to build up your call. /// let rb = hub.reports(); /// # } /// ``` pub struct ReportMethods<'a> where { hub: &'a Doubleclicksearch<>, } impl<'a> client::MethodsBuilder for ReportMethods<'a> {} impl<'a> ReportMethods<'a> { /// Create a builder to help you perform the following task: /// /// Generates and returns a report immediately. /// /// # Arguments /// /// * `request` - No description provided. pub fn generate(&self, request: ReportRequest) -> ReportGenerateCall<'a> { ReportGenerateCall { hub: self.hub, _request: request, _delegate: Default::default(), _additional_params: Default::default(), _scopes: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Polls for the status of a report request. /// /// # Arguments /// /// * `reportId` - ID of the report request being polled. pub fn get(&self, report_id: &str) -> ReportGetCall<'a> { ReportGetCall { hub: self.hub, _report_id: report_id.to_string(), _delegate: Default::default(), _additional_params: Default::default(), _scopes: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Downloads a report file encoded in UTF-8. /// /// # Arguments /// /// * `reportId` - ID of the report. /// * `reportFragment` - The index of the report fragment to download. pub fn get_file(&self, report_id: &str, report_fragment: i32) -> ReportGetFileCall<'a> { ReportGetFileCall { hub: self.hub, _report_id: report_id.to_string(), _report_fragment: report_fragment, _delegate: Default::default(), _additional_params: Default::default(), _scopes: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Inserts a report request into the reporting system. /// /// # Arguments /// /// * `request` - No description provided. pub fn request(&self, request: ReportRequest) -> ReportRequestCall<'a> { ReportRequestCall { hub: self.hub, _request: request, _delegate: Default::default(), _additional_params: Default::default(), _scopes: Default::default(), } } } /// A builder providing access to all methods supported on *savedColumn* resources. /// It is not used directly, but through the `Doubleclicksearch` hub. 
/// /// # Example /// /// Instantiate a resource builder /// /// ```test_harness,no_run /// extern crate hyper; /// extern crate hyper_rustls; /// extern crate yup_oauth2 as oauth2; /// extern crate google_doubleclicksearch2 as doubleclicksearch2; /// /// # async fn dox() { /// use std::default::Default; /// use oauth2; /// use doubleclicksearch2::Doubleclicksearch; /// /// let secret: oauth2::ApplicationSecret = Default::default(); /// let auth = yup_oauth2::InstalledFlowAuthenticator::builder( /// secret, /// yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect, /// ).build().await.unwrap(); /// let mut hub = Doubleclicksearch::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth); /// // Usually you wouldn't bind this to a variable, but keep calling *CallBuilders* /// // like `list(...)` /// // to build up your call. /// let rb = hub.saved_columns(); /// # } /// ``` pub struct SavedColumnMethods<'a> where { hub: &'a Doubleclicksearch<>, } impl<'a> client::MethodsBuilder for SavedColumnMethods<'a> {} impl<'a> SavedColumnMethods<'a> { /// Create a builder to help you perform the following task: /// /// Retrieve the list of saved columns for a specified advertiser. /// /// # Arguments /// /// * `agencyId` - DS ID of the agency. /// * `advertiserId` - DS ID of the advertiser. pub fn list(&self, agency_id: &str, advertiser_id: &str) -> SavedColumnListCall<'a> { SavedColumnListCall { hub: self.hub, _agency_id: agency_id.to_string(), _advertiser_id: advertiser_id.to_string(), _delegate: Default::default(), _additional_params: Default::default(), _scopes: Default::default(), } } } // ################### // CallBuilders ### // ################# /// Retrieves a list of conversions from a DoubleClick Search engine account. /// /// A builder for the *get* method supported by a *conversion* resource. /// It is not used directly, but through a `ConversionMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_doubleclicksearch2 as doubleclicksearch2; /// # async fn dox() { /// # use std::default::Default; /// # use oauth2; /// # use doubleclicksearch2::Doubleclicksearch; /// /// # let secret: oauth2::ApplicationSecret = Default::default(); /// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder( /// # secret, /// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect, /// # ).build().await.unwrap(); /// # let mut hub = Doubleclicksearch::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! 
/// let result = hub.conversion().get("agencyId", "advertiserId", "engineAccountId", -59, -52, -20, 46) /// .criterion_id("gubergren") /// .campaign_id("Lorem") /// .ad_id("gubergren") /// .ad_group_id("eos") /// .doit().await; /// # } /// ``` pub struct ConversionGetCall<'a> where { hub: &'a Doubleclicksearch<>, _agency_id: String, _advertiser_id: String, _engine_account_id: String, _end_date: i32, _row_count: i32, _start_date: i32, _start_row: u32, _criterion_id: Option<String>, _campaign_id: Option<String>, _ad_id: Option<String>, _ad_group_id: Option<String>, _delegate: Option<&'a mut dyn client::Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a> client::CallBuilder for ConversionGetCall<'a> {} impl<'a> ConversionGetCall<'a> { /// Perform the operation you have built so far. pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ConversionList)> { use std::io::{Read, Seek}; use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION}; use client::ToParts; let mut dd = client::DefaultDelegate; let mut dlg: &mut dyn client::Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(client::MethodInfo { id: "doubleclicksearch.conversion.get", http_method: hyper::Method::GET }); let mut params: Vec<(&str, String)> = Vec::with_capacity(13 + self._additional_params.len()); params.push(("agencyId", self._agency_id.to_string())); params.push(("advertiserId", self._advertiser_id.to_string())); params.push(("engineAccountId", self._engine_account_id.to_string())); params.push(("endDate", self._end_date.to_string())); params.push(("rowCount", self._row_count.to_string())); params.push(("startDate", self._start_date.to_string())); params.push(("startRow", self._start_row.to_string())); if let Some(value) = self._criterion_id { params.push(("criterionId", value.to_string())); } if let Some(value) = self._campaign_id { params.push(("campaignId", value.to_string())); } if let Some(value) = self._ad_id { params.push(("adId", value.to_string())); } if let Some(value) = self._ad_group_id { params.push(("adGroupId", value.to_string())); } for &field in ["alt", "agencyId", "advertiserId", "engineAccountId", "endDate", "rowCount", "startDate", "startRow", "criterionId", "campaignId", "adId", "adGroupId"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(client::Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "doubleclicksearch/v2/agency/{agencyId}/advertiser/{advertiserId}/engine/{engineAccountId}/conversion"; if self._scopes.len() == 0 { self._scopes.insert(Scope::Full.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{agencyId}", "agencyId"), ("{advertiserId}", "advertiserId"), ("{engineAccountId}", "engineAccountId")].iter() { let mut replace_with: Option<&str> = None; for &(name, ref value) in params.iter() { if name == param_name { replace_with = Some(value); break; } } url = url.replace(find_this, replace_with.expect("to find substitution value in params")); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(3); for param_name in ["engineAccountId", "advertiserId", "agencyId"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } 
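// Note on the removal above: the path parameters were pushed in the order
// agencyId, advertiserId, engineAccountId, so looking them up in the reverse
// order yields descending indices; removing them back-to-front keeps each
// remaining index valid.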
let url = url::Url::parse_with_params(&url, params).unwrap(); loop { let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await { Ok(token) => token.clone(), Err(err) => { match dlg.token(&err) { Some(token) => token, None => { dlg.finished(false); return Err(client::Error::MissingToken(err)) } } } }; let mut req_result = { let client = &self.hub.client; dlg.pre_request(); let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string()) .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str())); let request = req_builder .body(hyper::body::Body::empty()); client.request(request.unwrap()).await }; match req_result { Err(err) => { if let client::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(client::Error::HttpError(err)) } Ok(mut res) => { if !res.status().is_success() { let res_body_string = client::get_body_as_string(res.body_mut()).await; let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok(); let server_error = json::from_str::<client::ServerError>(&res_body_string) .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error)) .ok(); if let client::Retry::After(d) = dlg.http_failure(&res, json_server_error, server_error) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<client::ErrorResponse>(&res_body_string){ Err(_) => Err(client::Error::Failure(res)), Ok(serr) => Err(client::Error::BadRequest(serr)) } } let result_value = { let res_body_string = client::get_body_as_string(res.body_mut()).await; match json::from_str(&res_body_string) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&res_body_string, &err); return Err(client::Error::JsonDecodeError(res_body_string, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// Numeric ID of the agency. /// /// Sets the *agency id* path property to the given value. /// /// Even though the property has already been set when instantiating this call, /// we provide this method for API completeness. pub fn agency_id(mut self, new_value: &str) -> ConversionGetCall<'a> { self._agency_id = new_value.to_string(); self } /// Numeric ID of the advertiser. /// /// Sets the *advertiser id* path property to the given value. /// /// Even though the property has already been set when instantiating this call, /// we provide this method for API completeness. pub fn advertiser_id(mut self, new_value: &str) -> ConversionGetCall<'a> { self._advertiser_id = new_value.to_string(); self } /// Numeric ID of the engine account. /// /// Sets the *engine account id* path property to the given value. /// /// Even though the property has already been set when instantiating this call, /// we provide this method for API completeness. pub fn engine_account_id(mut self, new_value: &str) -> ConversionGetCall<'a> { self._engine_account_id = new_value.to_string(); self } /// Last date (inclusive) on which to retrieve conversions. Format is yyyymmdd. /// /// Sets the *end date* query property to the given value. /// /// Even though the property has already been set when instantiating this call, /// we provide this method for API completeness. pub fn end_date(mut self, new_value: i32) -> ConversionGetCall<'a> { self._end_date = new_value; self } /// The number of conversions to return per call. /// /// Sets the *row count* query property to the given value. 
/// /// Even though the property has already been set when instantiating this call, /// we provide this method for API completeness. pub fn row_count(mut self, new_value: i32) -> ConversionGetCall<'a> { self._row_count = new_value; self } /// First date (inclusive) on which to retrieve conversions. Format is yyyymmdd. /// /// Sets the *start date* query property to the given value. /// /// Even though the property has already been set when instantiating this call, /// we provide this method for API completeness. pub fn start_date(mut self, new_value: i32) -> ConversionGetCall<'a> { self._start_date = new_value; self } /// The 0-based starting index for retrieving conversions results. /// /// Sets the *start row* query property to the given value. /// /// Even though the property has already been set when instantiating this call, /// we provide this method for API completeness. pub fn start_row(mut self, new_value: u32) -> ConversionGetCall<'a> { self._start_row = new_value; self } /// Numeric ID of the criterion. /// /// Sets the *criterion id* query property to the given value. pub fn criterion_id(mut self, new_value: &str) -> ConversionGetCall<'a> { self._criterion_id = Some(new_value.to_string()); self } /// Numeric ID of the campaign. /// /// Sets the *campaign id* query property to the given value. pub fn campaign_id(mut self, new_value: &str) -> ConversionGetCall<'a> { self._campaign_id = Some(new_value.to_string()); self } /// Numeric ID of the ad. /// /// Sets the *ad id* query property to the given value. pub fn ad_id(mut self, new_value: &str) -> ConversionGetCall<'a> { self._ad_id = Some(new_value.to_string()); self } /// Numeric ID of the ad group. /// /// Sets the *ad group id* query property to the given value. pub fn ad_group_id(mut self, new_value: &str) -> ConversionGetCall<'a> { self._ad_group_id = Some(new_value.to_string()); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ConversionGetCall<'a> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known parameters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *$.xgafv* (query-string) - V1 error format. /// * *access_token* (query-string) - OAuth access token. /// * *alt* (query-string) - Data format for response. /// * *callback* (query-string) - JSONP /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. 
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). pub fn param<T>(mut self, name: T, value: T) -> ConversionGetCall<'a> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant /// `Scope::Full`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ConversionGetCall<'a> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Inserts a batch of new conversions into DoubleClick Search. /// /// A builder for the *insert* method supported by a *conversion* resource. /// It is not used directly, but through a `ConversionMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_doubleclicksearch2 as doubleclicksearch2; /// use doubleclicksearch2::api::ConversionList; /// # async fn dox() { /// # use std::default::Default; /// # use oauth2; /// # use doubleclicksearch2::Doubleclicksearch; /// /// # let secret: oauth2::ApplicationSecret = Default::default(); /// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder( /// # secret, /// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect, /// # ).build().await.unwrap(); /// # let mut hub = Doubleclicksearch::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = ConversionList::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.conversion().insert(req) /// .doit().await; /// # } /// ``` pub struct ConversionInsertCall<'a> where { hub: &'a Doubleclicksearch<>, _request: ConversionList, _delegate: Option<&'a mut dyn client::Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a> client::CallBuilder for ConversionInsertCall<'a> {} impl<'a> ConversionInsertCall<'a> { /// Perform the operation you have built so far. 
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ConversionList)> { use std::io::{Read, Seek}; use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION}; use client::ToParts; let mut dd = client::DefaultDelegate; let mut dlg: &mut dyn client::Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(client::MethodInfo { id: "doubleclicksearch.conversion.insert", http_method: hyper::Method::POST }); let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len()); for &field in ["alt"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(client::Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "doubleclicksearch/v2/conversion"; if self._scopes.len() == 0 { self._scopes.insert(Scope::Full.as_ref().to_string(), ()); } let url = url::Url::parse_with_params(&url, params).unwrap(); let mut json_mime_type: mime::Mime = "application/json".parse().unwrap(); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); client::remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await { Ok(token) => token.clone(), Err(err) => { match dlg.token(&err) { Some(token) => token, None => { dlg.finished(false); return Err(client::Error::MissingToken(err)) } } } }; request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let client = &self.hub.client; dlg.pre_request(); let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string()) .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str())); let request = req_builder .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string())) .header(CONTENT_LENGTH, request_size as u64) .body(hyper::body::Body::from(request_value_reader.get_ref().clone())); client.request(request.unwrap()).await }; match req_result { Err(err) => { if let client::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(client::Error::HttpError(err)) } Ok(mut res) => { if !res.status().is_success() { let res_body_string = client::get_body_as_string(res.body_mut()).await; let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok(); let server_error = json::from_str::<client::ServerError>(&res_body_string) .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error)) .ok(); if let client::Retry::After(d) = dlg.http_failure(&res, json_server_error, server_error) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<client::ErrorResponse>(&res_body_string){ Err(_) => Err(client::Error::Failure(res)), Ok(serr) => Err(client::Error::BadRequest(serr)) } } let result_value = { let res_body_string = client::get_body_as_string(res.body_mut()).await; match json::from_str(&res_body_string) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&res_body_string, &err); return 
                                Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: ConversionList) -> ConversionInsertCall<'a> {
        self._request = new_value;
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ConversionInsertCall<'a> {
        self._delegate = Some(new_value);
        self
    }

    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ConversionInsertCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }

    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::Full`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, though a read-write scope will do as well.
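    ///
    /// A usage sketch; the scope URL shown is assumed for illustration, consult the
    /// `Scope` enum for the authoritative values:
    ///
    /// ```ignore
    /// let call = call.add_scope("https://www.googleapis.com/auth/doubleclicksearch");
    /// ```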
    pub fn add_scope<T, S>(mut self, scope: T) -> ConversionInsertCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}

/// Updates a batch of conversions in DoubleClick Search.
///
/// A builder for the *update* method supported by a *conversion* resource.
/// It is not used directly, but through a `ConversionMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_doubleclicksearch2 as doubleclicksearch2;
/// use doubleclicksearch2::api::ConversionList;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use doubleclicksearch2::Doubleclicksearch;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #         secret,
/// #         yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// #     ).build().await.unwrap();
/// # let mut hub = Doubleclicksearch::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable!
/// // Values shown here are possibly random and not representative!
/// let mut req = ConversionList::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.conversion().update(req)
///              .doit().await;
/// # }
/// ```
pub struct ConversionUpdateCall<'a>
    where {

    hub: &'a Doubleclicksearch<>,
    _request: ConversionList,
    _delegate: Option<&'a mut dyn client::Delegate>,
    _additional_params: HashMap<String, String>,
    _scopes: BTreeMap<String, ()>
}

impl<'a> client::CallBuilder for ConversionUpdateCall<'a> {}

impl<'a> ConversionUpdateCall<'a> {

    /// Perform the operation you have built so far.
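    ///
    /// A rough sketch of driving the call (binding names are illustrative only):
    ///
    /// ```ignore
    /// let (response, updated) = hub.conversion().update(req).doit().await?;
    /// // `updated` is the `ConversionList` as stored after the update.
    /// ```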
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ConversionList)> { use std::io::{Read, Seek}; use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION}; use client::ToParts; let mut dd = client::DefaultDelegate; let mut dlg: &mut dyn client::Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(client::MethodInfo { id: "doubleclicksearch.conversion.update", http_method: hyper::Method::PUT }); let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len()); for &field in ["alt"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(client::Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "doubleclicksearch/v2/conversion"; if self._scopes.len() == 0 { self._scopes.insert(Scope::Full.as_ref().to_string(), ()); } let url = url::Url::parse_with_params(&url, params).unwrap(); let mut json_mime_type: mime::Mime = "application/json".parse().unwrap(); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); client::remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await { Ok(token) => token.clone(), Err(err) => { match dlg.token(&err) { Some(token) => token, None => { dlg.finished(false); return Err(client::Error::MissingToken(err)) } } } }; request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let client = &self.hub.client; dlg.pre_request(); let mut req_builder = hyper::Request::builder().method(hyper::Method::PUT).uri(url.clone().into_string()) .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str())); let request = req_builder .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string())) .header(CONTENT_LENGTH, request_size as u64) .body(hyper::body::Body::from(request_value_reader.get_ref().clone())); client.request(request.unwrap()).await }; match req_result { Err(err) => { if let client::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(client::Error::HttpError(err)) } Ok(mut res) => { if !res.status().is_success() { let res_body_string = client::get_body_as_string(res.body_mut()).await; let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok(); let server_error = json::from_str::<client::ServerError>(&res_body_string) .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error)) .ok(); if let client::Retry::After(d) = dlg.http_failure(&res, json_server_error, server_error) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<client::ErrorResponse>(&res_body_string){ Err(_) => Err(client::Error::Failure(res)), Ok(serr) => Err(client::Error::BadRequest(serr)) } } let result_value = { let res_body_string = client::get_body_as_string(res.body_mut()).await; match json::from_str(&res_body_string) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&res_body_string, &err); return 
                                Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: ConversionList) -> ConversionUpdateCall<'a> {
        self._request = new_value;
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ConversionUpdateCall<'a> {
        self._delegate = Some(new_value);
        self
    }

    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ConversionUpdateCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }

    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::Full`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, though a read-write scope will do as well.
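    ///
    /// For example (scope URL assumed for illustration):
    /// `call.add_scope("https://www.googleapis.com/auth/doubleclicksearch")`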
    pub fn add_scope<T, S>(mut self, scope: T) -> ConversionUpdateCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}

/// Updates the availabilities of a batch of floodlight activities in DoubleClick Search.
///
/// A builder for the *updateAvailability* method supported by a *conversion* resource.
/// It is not used directly, but through a `ConversionMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_doubleclicksearch2 as doubleclicksearch2;
/// use doubleclicksearch2::api::UpdateAvailabilityRequest;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use doubleclicksearch2::Doubleclicksearch;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #         secret,
/// #         yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// #     ).build().await.unwrap();
/// # let mut hub = Doubleclicksearch::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable!
/// // Values shown here are possibly random and not representative!
/// let mut req = UpdateAvailabilityRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.conversion().update_availability(req)
///              .doit().await;
/// # }
/// ```
pub struct ConversionUpdateAvailabilityCall<'a>
    where {

    hub: &'a Doubleclicksearch<>,
    _request: UpdateAvailabilityRequest,
    _delegate: Option<&'a mut dyn client::Delegate>,
    _additional_params: HashMap<String, String>,
    _scopes: BTreeMap<String, ()>
}

impl<'a> client::CallBuilder for ConversionUpdateAvailabilityCall<'a> {}

impl<'a> ConversionUpdateAvailabilityCall<'a> {

    /// Perform the operation you have built so far.
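    ///
    /// A rough sketch (binding names are illustrative only):
    ///
    /// ```ignore
    /// let (response, availability) = hub.conversion().update_availability(req).doit().await?;
    /// // `availability` is the `UpdateAvailabilityResponse` describing the new state.
    /// ```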
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, UpdateAvailabilityResponse)> { use std::io::{Read, Seek}; use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION}; use client::ToParts; let mut dd = client::DefaultDelegate; let mut dlg: &mut dyn client::Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(client::MethodInfo { id: "doubleclicksearch.conversion.updateAvailability", http_method: hyper::Method::POST }); let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len()); for &field in ["alt"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(client::Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "doubleclicksearch/v2/conversion/updateAvailability"; if self._scopes.len() == 0 { self._scopes.insert(Scope::Full.as_ref().to_string(), ()); } let url = url::Url::parse_with_params(&url, params).unwrap(); let mut json_mime_type: mime::Mime = "application/json".parse().unwrap(); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); client::remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await { Ok(token) => token.clone(), Err(err) => { match dlg.token(&err) { Some(token) => token, None => { dlg.finished(false); return Err(client::Error::MissingToken(err)) } } } }; request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let client = &self.hub.client; dlg.pre_request(); let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string()) .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str())); let request = req_builder .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string())) .header(CONTENT_LENGTH, request_size as u64) .body(hyper::body::Body::from(request_value_reader.get_ref().clone())); client.request(request.unwrap()).await }; match req_result { Err(err) => { if let client::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(client::Error::HttpError(err)) } Ok(mut res) => { if !res.status().is_success() { let res_body_string = client::get_body_as_string(res.body_mut()).await; let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok(); let server_error = json::from_str::<client::ServerError>(&res_body_string) .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error)) .ok(); if let client::Retry::After(d) = dlg.http_failure(&res, json_server_error, server_error) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<client::ErrorResponse>(&res_body_string){ Err(_) => Err(client::Error::Failure(res)), Ok(serr) => Err(client::Error::BadRequest(serr)) } } let result_value = { let res_body_string = client::get_body_as_string(res.body_mut()).await; match json::from_str(&res_body_string) { Ok(decoded) => (res, decoded), Err(err) => { 
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: UpdateAvailabilityRequest) -> ConversionUpdateAvailabilityCall<'a> {
        self._request = new_value;
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ConversionUpdateAvailabilityCall<'a> {
        self._delegate = Some(new_value);
        self
    }

    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ConversionUpdateAvailabilityCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }

    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::Full`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, though a read-write scope will do as well.
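    ///
    /// For example (scope URL assumed for illustration):
    /// `call.add_scope("https://www.googleapis.com/auth/doubleclicksearch")`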
    pub fn add_scope<T, S>(mut self, scope: T) -> ConversionUpdateAvailabilityCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}

/// Generates and returns a report immediately.
///
/// A builder for the *generate* method supported by a *report* resource.
/// It is not used directly, but through a `ReportMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_doubleclicksearch2 as doubleclicksearch2;
/// use doubleclicksearch2::api::ReportRequest;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use doubleclicksearch2::Doubleclicksearch;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #         secret,
/// #         yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// #     ).build().await.unwrap();
/// # let mut hub = Doubleclicksearch::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable!
/// // Values shown here are possibly random and not representative!
/// let mut req = ReportRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.reports().generate(req)
///              .doit().await;
/// # }
/// ```
pub struct ReportGenerateCall<'a>
    where {

    hub: &'a Doubleclicksearch<>,
    _request: ReportRequest,
    _delegate: Option<&'a mut dyn client::Delegate>,
    _additional_params: HashMap<String, String>,
    _scopes: BTreeMap<String, ()>
}

impl<'a> client::CallBuilder for ReportGenerateCall<'a> {}

impl<'a> ReportGenerateCall<'a> {

    /// Perform the operation you have built so far.
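    ///
    /// A rough sketch (binding names are illustrative only):
    ///
    /// ```ignore
    /// let (response, report) = hub.reports().generate(req).doit().await?;
    /// // For this synchronous variant the returned `Report` already carries the data.
    /// ```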
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Report)> { use std::io::{Read, Seek}; use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION}; use client::ToParts; let mut dd = client::DefaultDelegate; let mut dlg: &mut dyn client::Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(client::MethodInfo { id: "doubleclicksearch.reports.generate", http_method: hyper::Method::POST }); let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len()); for &field in ["alt"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(client::Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "doubleclicksearch/v2/reports/generate"; if self._scopes.len() == 0 { self._scopes.insert(Scope::Full.as_ref().to_string(), ()); } let url = url::Url::parse_with_params(&url, params).unwrap(); let mut json_mime_type: mime::Mime = "application/json".parse().unwrap(); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); client::remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await { Ok(token) => token.clone(), Err(err) => { match dlg.token(&err) { Some(token) => token, None => { dlg.finished(false); return Err(client::Error::MissingToken(err)) } } } }; request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let client = &self.hub.client; dlg.pre_request(); let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string()) .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str())); let request = req_builder .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string())) .header(CONTENT_LENGTH, request_size as u64) .body(hyper::body::Body::from(request_value_reader.get_ref().clone())); client.request(request.unwrap()).await }; match req_result { Err(err) => { if let client::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(client::Error::HttpError(err)) } Ok(mut res) => { if !res.status().is_success() { let res_body_string = client::get_body_as_string(res.body_mut()).await; let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok(); let server_error = json::from_str::<client::ServerError>(&res_body_string) .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error)) .ok(); if let client::Retry::After(d) = dlg.http_failure(&res, json_server_error, server_error) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<client::ErrorResponse>(&res_body_string){ Err(_) => Err(client::Error::Failure(res)), Ok(serr) => Err(client::Error::BadRequest(serr)) } } let result_value = { let res_body_string = client::get_body_as_string(res.body_mut()).await; match json::from_str(&res_body_string) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&res_body_string, &err); return 
                                Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: ReportRequest) -> ReportGenerateCall<'a> {
        self._request = new_value;
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ReportGenerateCall<'a> {
        self._delegate = Some(new_value);
        self
    }

    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ReportGenerateCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }

    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::Full`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, though a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ReportGenerateCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}

/// Polls for the status of a report request.
/// /// A builder for the *get* method supported by a *report* resource. /// It is not used directly, but through a `ReportMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_doubleclicksearch2 as doubleclicksearch2; /// # async fn dox() { /// # use std::default::Default; /// # use oauth2; /// # use doubleclicksearch2::Doubleclicksearch; /// /// # let secret: oauth2::ApplicationSecret = Default::default(); /// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder( /// # secret, /// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect, /// # ).build().await.unwrap(); /// # let mut hub = Doubleclicksearch::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.reports().get("reportId") /// .doit().await; /// # } /// ``` pub struct ReportGetCall<'a> where { hub: &'a Doubleclicksearch<>, _report_id: String, _delegate: Option<&'a mut dyn client::Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a> client::CallBuilder for ReportGetCall<'a> {} impl<'a> ReportGetCall<'a> { /// Perform the operation you have build so far. pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Report)> { use std::io::{Read, Seek}; use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION}; use client::ToParts; let mut dd = client::DefaultDelegate; let mut dlg: &mut dyn client::Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(client::MethodInfo { id: "doubleclicksearch.reports.get", http_method: hyper::Method::GET }); let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len()); params.push(("reportId", self._report_id.to_string())); for &field in ["alt", "reportId"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(client::Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "doubleclicksearch/v2/reports/{reportId}"; if self._scopes.len() == 0 { self._scopes.insert(Scope::Full.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{reportId}", "reportId")].iter() { let mut replace_with: Option<&str> = None; for &(name, ref value) in params.iter() { if name == param_name { replace_with = Some(value); break; } } url = url.replace(find_this, replace_with.expect("to find substitution value in params")); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["reportId"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } let url = url::Url::parse_with_params(&url, params).unwrap(); loop { let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await { Ok(token) => token.clone(), Err(err) => { match dlg.token(&err) { Some(token) => token, None => { dlg.finished(false); return Err(client::Error::MissingToken(err)) } } } 
            };

            let mut req_result = {
                let client = &self.hub.client;
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())
                        .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));

                let request = req_builder
                        .body(hyper::body::Body::empty());

                client.request(request.unwrap()).await
            };

            match req_result {
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    if !res.status().is_success() {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;

                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();

                        if let client::Retry::After(d) = dlg.http_failure(&res, json_server_error, server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string) {
                            Err(_) => Err(client::Error::Failure(res)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    let result_value = {
                        let res_body_string = client::get_body_as_string(res.body_mut()).await;

                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (res, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    /// ID of the report request being polled.
    ///
    /// Sets the *report id* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn report_id(mut self, new_value: &str) -> ReportGetCall<'a> {
        self._report_id = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ReportGetCall<'a> {
        self._delegate = Some(new_value);
        self
    }

    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ReportGetCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }

    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::Full`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, though a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ReportGetCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}

/// Downloads a report file encoded in UTF-8.
///
/// This method supports **media download**. To enable it, adjust the builder like this:
/// `.param("alt", "media")`.
///
/// A builder for the *getFile* method supported by a *report* resource.
/// It is not used directly, but through a `ReportMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_doubleclicksearch2 as doubleclicksearch2;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use doubleclicksearch2::Doubleclicksearch;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #         secret,
/// #         yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// #     ).build().await.unwrap();
/// # let mut hub = Doubleclicksearch::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.reports().get_file("reportId", -55)
///              .doit().await;
/// # }
/// ```
pub struct ReportGetFileCall<'a>
    where {

    hub: &'a Doubleclicksearch<>,
    _report_id: String,
    _report_fragment: i32,
    _delegate: Option<&'a mut dyn client::Delegate>,
    _additional_params: HashMap<String, String>,
    _scopes: BTreeMap<String, ()>
}

impl<'a> client::CallBuilder for ReportGetFileCall<'a> {}

impl<'a> ReportGetFileCall<'a> {

    /// Perform the operation you have built so far.
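    ///
    /// A rough sketch of downloading one fragment; collecting the body with
    /// `hyper::body::to_bytes` is one possible approach, not the only one:
    ///
    /// ```ignore
    /// let response = hub.reports().get_file("reportId", 0).doit().await?;
    /// let bytes = hyper::body::to_bytes(response.into_body()).await?;
    /// // `bytes` holds the UTF-8 encoded report fragment.
    /// ```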
pub async fn doit(mut self) -> client::Result<hyper::Response<hyper::body::Body>> { use std::io::{Read, Seek}; use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION}; use client::ToParts; let mut dd = client::DefaultDelegate; let mut dlg: &mut dyn client::Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(client::MethodInfo { id: "doubleclicksearch.reports.getFile", http_method: hyper::Method::GET }); let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len()); params.push(("reportId", self._report_id.to_string())); params.push(("reportFragment", self._report_fragment.to_string())); for &field in ["reportId", "reportFragment"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(client::Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } let mut url = self.hub._base_url.clone() + "doubleclicksearch/v2/reports/{reportId}/files/{reportFragment}"; if self._scopes.len() == 0 { self._scopes.insert(Scope::Full.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{reportId}", "reportId"), ("{reportFragment}", "reportFragment")].iter() { let mut replace_with: Option<&str> = None; for &(name, ref value) in params.iter() { if name == param_name { replace_with = Some(value); break; } } url = url.replace(find_this, replace_with.expect("to find substitution value in params")); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(2); for param_name in ["reportFragment", "reportId"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } let url = url::Url::parse_with_params(&url, params).unwrap(); loop { let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await { Ok(token) => token.clone(), Err(err) => { match dlg.token(&err) { Some(token) => token, None => { dlg.finished(false); return Err(client::Error::MissingToken(err)) } } } }; let mut req_result = { let client = &self.hub.client; dlg.pre_request(); let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string()) .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str())); let request = req_builder .body(hyper::body::Body::empty()); client.request(request.unwrap()).await }; match req_result { Err(err) => { if let client::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(client::Error::HttpError(err)) } Ok(mut res) => { if !res.status().is_success() { let res_body_string = client::get_body_as_string(res.body_mut()).await; let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok(); let server_error = json::from_str::<client::ServerError>(&res_body_string) .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error)) .ok(); if let client::Retry::After(d) = dlg.http_failure(&res, json_server_error, server_error) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<client::ErrorResponse>(&res_body_string){ Err(_) => Err(client::Error::Failure(res)), Ok(serr) => Err(client::Error::BadRequest(serr)) } } let result_value = res; dlg.finished(true); return Ok(result_value) } } } } /// ID of the report. /// /// Sets the *report id* path property to the given value. 
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn report_id(mut self, new_value: &str) -> ReportGetFileCall<'a> {
        self._report_id = new_value.to_string();
        self
    }
    /// The index of the report fragment to download.
    ///
    /// Sets the *report fragment* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn report_fragment(mut self, new_value: i32) -> ReportGetFileCall<'a> {
        self._report_fragment = new_value;
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ReportGetFileCall<'a> {
        self._delegate = Some(new_value);
        self
    }

    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ReportGetFileCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }

    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::Full`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, though a read-write scope will do as well.
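    ///
    /// For example (scope URL assumed for illustration):
    /// `call.add_scope("https://www.googleapis.com/auth/doubleclicksearch")`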
    pub fn add_scope<T, S>(mut self, scope: T) -> ReportGetFileCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}

/// Inserts a report request into the reporting system.
///
/// A builder for the *request* method supported by a *report* resource.
/// It is not used directly, but through a `ReportMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_doubleclicksearch2 as doubleclicksearch2;
/// use doubleclicksearch2::api::ReportRequest;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use doubleclicksearch2::Doubleclicksearch;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #         secret,
/// #         yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// #     ).build().await.unwrap();
/// # let mut hub = Doubleclicksearch::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable!
/// // Values shown here are possibly random and not representative!
/// let mut req = ReportRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.reports().request(req)
///              .doit().await;
/// # }
/// ```
pub struct ReportRequestCall<'a>
    where {

    hub: &'a Doubleclicksearch<>,
    _request: ReportRequest,
    _delegate: Option<&'a mut dyn client::Delegate>,
    _additional_params: HashMap<String, String>,
    _scopes: BTreeMap<String, ()>
}

impl<'a> client::CallBuilder for ReportRequestCall<'a> {}

impl<'a> ReportRequestCall<'a> {

    /// Perform the operation you have built so far.
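    ///
    /// A rough sketch (binding names are illustrative only):
    ///
    /// ```ignore
    /// let (response, report) = hub.reports().request(req).doit().await?;
    /// // The returned `Report` carries the id used to poll later via `hub.reports().get(...)`.
    /// ```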
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Report)> { use std::io::{Read, Seek}; use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION}; use client::ToParts; let mut dd = client::DefaultDelegate; let mut dlg: &mut dyn client::Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(client::MethodInfo { id: "doubleclicksearch.reports.request", http_method: hyper::Method::POST }); let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len()); for &field in ["alt"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(client::Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "doubleclicksearch/v2/reports"; if self._scopes.len() == 0 { self._scopes.insert(Scope::Full.as_ref().to_string(), ()); } let url = url::Url::parse_with_params(&url, params).unwrap(); let mut json_mime_type: mime::Mime = "application/json".parse().unwrap(); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); client::remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await { Ok(token) => token.clone(), Err(err) => { match dlg.token(&err) { Some(token) => token, None => { dlg.finished(false); return Err(client::Error::MissingToken(err)) } } } }; request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let client = &self.hub.client; dlg.pre_request(); let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string()) .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str())); let request = req_builder .header(CONTENT_TYPE, format!("{}", json_mime_type.to_string())) .header(CONTENT_LENGTH, request_size as u64) .body(hyper::body::Body::from(request_value_reader.get_ref().clone())); client.request(request.unwrap()).await }; match req_result { Err(err) => { if let client::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(client::Error::HttpError(err)) } Ok(mut res) => { if !res.status().is_success() { let res_body_string = client::get_body_as_string(res.body_mut()).await; let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok(); let server_error = json::from_str::<client::ServerError>(&res_body_string) .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error)) .ok(); if let client::Retry::After(d) = dlg.http_failure(&res, json_server_error, server_error) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<client::ErrorResponse>(&res_body_string){ Err(_) => Err(client::Error::Failure(res)), Ok(serr) => Err(client::Error::BadRequest(serr)) } } let result_value = { let res_body_string = client::get_body_as_string(res.body_mut()).await; match json::from_str(&res_body_string) { Ok(decoded) => (res, decoded), Err(err) => { dlg.response_json_decode_error(&res_body_string, &err); return 
                                Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: ReportRequest) -> ReportRequestCall<'a> {
        self._request = new_value;
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ReportRequestCall<'a> {
        self._delegate = Some(new_value);
        self
    }

    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ReportRequestCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }

    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::Full`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, though a read-write scope will do as well.
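    ///
    /// For example (scope URL assumed for illustration):
    /// `call.add_scope("https://www.googleapis.com/auth/doubleclicksearch")`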
    pub fn add_scope<T, S>(mut self, scope: T) -> ReportRequestCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}


/// Retrieve the list of saved columns for a specified advertiser.
///
/// A builder for the *list* method supported by a *savedColumn* resource.
/// It is not used directly, but through a `SavedColumnMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_doubleclicksearch2 as doubleclicksearch2;
/// # async fn dox() {
/// # use std::default::Default;
/// # use oauth2;
/// # use doubleclicksearch2::Doubleclicksearch;
///
/// # let secret: oauth2::ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #         secret,
/// #         yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// #     ).build().await.unwrap();
/// # let mut hub = Doubleclicksearch::new(hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative!
/// let result = hub.saved_columns().list("agencyId", "advertiserId")
///              .doit().await;
/// # }
/// ```
pub struct SavedColumnListCall<'a>
    where  {

    hub: &'a Doubleclicksearch<>,
    _agency_id: String,
    _advertiser_id: String,
    _delegate: Option<&'a mut dyn client::Delegate>,
    _additional_params: HashMap<String, String>,
    _scopes: BTreeMap<String, ()>
}

impl<'a> client::CallBuilder for SavedColumnListCall<'a> {}

impl<'a> SavedColumnListCall<'a> {

    /// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, SavedColumnList)> { use std::io::{Read, Seek}; use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION}; use client::ToParts; let mut dd = client::DefaultDelegate; let mut dlg: &mut dyn client::Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(client::MethodInfo { id: "doubleclicksearch.savedColumns.list", http_method: hyper::Method::GET }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("agencyId", self._agency_id.to_string())); params.push(("advertiserId", self._advertiser_id.to_string())); for &field in ["alt", "agencyId", "advertiserId"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(client::Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "doubleclicksearch/v2/agency/{agencyId}/advertiser/{advertiserId}/savedcolumns"; if self._scopes.len() == 0 { self._scopes.insert(Scope::Full.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{agencyId}", "agencyId"), ("{advertiserId}", "advertiserId")].iter() { let mut replace_with: Option<&str> = None; for &(name, ref value) in params.iter() { if name == param_name { replace_with = Some(value); break; } } url = url.replace(find_this, replace_with.expect("to find substitution value in params")); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(2); for param_name in ["advertiserId", "agencyId"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } let url = url::Url::parse_with_params(&url, params).unwrap(); loop { let token = match self.hub.auth.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await { Ok(token) => token.clone(), Err(err) => { match dlg.token(&err) { Some(token) => token, None => { dlg.finished(false); return Err(client::Error::MissingToken(err)) } } } }; let mut req_result = { let client = &self.hub.client; dlg.pre_request(); let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string()) .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str())); let request = req_builder .body(hyper::body::Body::empty()); client.request(request.unwrap()).await }; match req_result { Err(err) => { if let client::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(client::Error::HttpError(err)) } Ok(mut res) => { if !res.status().is_success() { let res_body_string = client::get_body_as_string(res.body_mut()).await; let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok(); let server_error = json::from_str::<client::ServerError>(&res_body_string) .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error)) .ok(); if let client::Retry::After(d) = dlg.http_failure(&res, json_server_error, server_error) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<client::ErrorResponse>(&res_body_string){ Err(_) => Err(client::Error::Failure(res)), Ok(serr) => Err(client::Error::BadRequest(serr)) } } let result_value = { let res_body_string = client::get_body_as_string(res.body_mut()).await; 
                    match json::from_str(&res_body_string) {
                        Ok(decoded) => (res, decoded),
                        Err(err) => {
                            dlg.response_json_decode_error(&res_body_string, &err);
                            return Err(client::Error::JsonDecodeError(res_body_string, err));
                        }
                    }
                };

                dlg.finished(true);
                return Ok(result_value)
            }
        }
    }

    /// DS ID of the agency.
    ///
    /// Sets the *agency id* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn agency_id(mut self, new_value: &str) -> SavedColumnListCall<'a> {
        self._agency_id = new_value.to_string();
        self
    }
    /// DS ID of the advertiser.
    ///
    /// Sets the *advertiser id* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn advertiser_id(mut self, new_value: &str) -> SavedColumnListCall<'a> {
        self._advertiser_id = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> SavedColumnListCall<'a> {
        self._delegate = Some(new_value);
        self
    }

    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> SavedColumnListCall<'a>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }

    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::Full`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, though a read-write scope will work as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> SavedColumnListCall<'a>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}
44.449297
429
0.594431
87df00fcce58736421c9d5096bc574777fef5561
657
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// error-pattern:index out of bounds: the len is 1024

fn main() {
    // Modern spelling of the pre-1.0 `vec::from_fn(1024u, |n| n)`:
    let v: Vec<usize> = (0..1024).collect();
    // this should trip a bounds check; `usize` indices cannot be negative,
    // so a wrapped -1 preserves the original out-of-bounds intent
    let idx = (-1isize) as usize;
    println!("{}", v[idx]);
}
38.647059
73
0.70624
91722a4b62a2e86ca5c0f5712cea4a5a6caee99e
7,024
use crate::iter::adapters::{zip::try_get_unchecked, SourceIter, TrustedRandomAccess}; use crate::iter::{FusedIterator, InPlaceIterable, TrustedLen}; use crate::ops::Try; /// An iterator that yields the current count and the element during iteration. /// /// This `struct` is created by the [`enumerate`] method on [`Iterator`]. See its /// documentation for more. /// /// [`enumerate`]: Iterator::enumerate /// [`Iterator`]: trait.Iterator.html #[derive(Clone, Debug)] #[must_use = "iterators are lazy and do nothing unless consumed"] #[stable(feature = "rust1", since = "1.0.0")] pub struct Enumerate<I> { iter: I, count: usize, } impl<I> Enumerate<I> { pub(in crate::iter) fn new(iter: I) -> Enumerate<I> { Enumerate { iter, count: 0 } } } #[stable(feature = "rust1", since = "1.0.0")] impl<I> Iterator for Enumerate<I> where I: Iterator, { type Item = (usize, <I as Iterator>::Item); /// # Overflow Behavior /// /// The method does no guarding against overflows, so enumerating more than /// `usize::MAX` elements either produces the wrong result or panics. If /// debug assertions are enabled, a panic is guaranteed. /// /// # Panics /// /// Might panic if the index of the element overflows a `usize`. #[inline] #[rustc_inherit_overflow_checks] fn next(&mut self) -> Option<(usize, <I as Iterator>::Item)> { let a = self.iter.next()?; let i = self.count; self.count += 1; Some((i, a)) } #[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() } #[inline] #[rustc_inherit_overflow_checks] fn nth(&mut self, n: usize) -> Option<(usize, I::Item)> { let a = self.iter.nth(n)?; let i = self.count + n; self.count = i + 1; Some((i, a)) } #[inline] fn count(self) -> usize { self.iter.count() } #[inline] fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R where Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Output = Acc>, { #[inline] fn enumerate<'a, T, Acc, R>( count: &'a mut usize, mut fold: impl FnMut(Acc, (usize, T)) -> R + 'a, ) -> impl FnMut(Acc, T) -> R + 'a { #[rustc_inherit_overflow_checks] move |acc, item| { let acc = fold(acc, (*count, item)); *count += 1; acc } } self.iter.try_fold(init, enumerate(&mut self.count, fold)) } #[inline] fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc where Fold: FnMut(Acc, Self::Item) -> Acc, { #[inline] fn enumerate<T, Acc>( mut count: usize, mut fold: impl FnMut(Acc, (usize, T)) -> Acc, ) -> impl FnMut(Acc, T) -> Acc { #[rustc_inherit_overflow_checks] move |acc, item| { let acc = fold(acc, (count, item)); count += 1; acc } } self.iter.fold(init, enumerate(self.count, fold)) } #[rustc_inherit_overflow_checks] unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> <Self as Iterator>::Item where Self: TrustedRandomAccess, { // SAFETY: the caller must uphold the contract for // `Iterator::__iterator_get_unchecked`. let value = unsafe { try_get_unchecked(&mut self.iter, idx) }; (self.count + idx, value) } } #[stable(feature = "rust1", since = "1.0.0")] impl<I> DoubleEndedIterator for Enumerate<I> where I: ExactSizeIterator + DoubleEndedIterator, { #[inline] fn next_back(&mut self) -> Option<(usize, <I as Iterator>::Item)> { let a = self.iter.next_back()?; let len = self.iter.len(); // Can safely add, `ExactSizeIterator` promises that the number of // elements fits into a `usize`. 
Some((self.count + len, a)) } #[inline] fn nth_back(&mut self, n: usize) -> Option<(usize, <I as Iterator>::Item)> { let a = self.iter.nth_back(n)?; let len = self.iter.len(); // Can safely add, `ExactSizeIterator` promises that the number of // elements fits into a `usize`. Some((self.count + len, a)) } #[inline] fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R where Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try<Output = Acc>, { // Can safely add and subtract the count, as `ExactSizeIterator` promises // that the number of elements fits into a `usize`. fn enumerate<T, Acc, R>( mut count: usize, mut fold: impl FnMut(Acc, (usize, T)) -> R, ) -> impl FnMut(Acc, T) -> R { move |acc, item| { count -= 1; fold(acc, (count, item)) } } let count = self.count + self.iter.len(); self.iter.try_rfold(init, enumerate(count, fold)) } #[inline] fn rfold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc where Fold: FnMut(Acc, Self::Item) -> Acc, { // Can safely add and subtract the count, as `ExactSizeIterator` promises // that the number of elements fits into a `usize`. fn enumerate<T, Acc>( mut count: usize, mut fold: impl FnMut(Acc, (usize, T)) -> Acc, ) -> impl FnMut(Acc, T) -> Acc { move |acc, item| { count -= 1; fold(acc, (count, item)) } } let count = self.count + self.iter.len(); self.iter.rfold(init, enumerate(count, fold)) } } #[stable(feature = "rust1", since = "1.0.0")] impl<I> ExactSizeIterator for Enumerate<I> where I: ExactSizeIterator, { fn len(&self) -> usize { self.iter.len() } fn is_empty(&self) -> bool { self.iter.is_empty() } } #[doc(hidden)] #[unstable(feature = "trusted_random_access", issue = "none")] unsafe impl<I> TrustedRandomAccess for Enumerate<I> where I: TrustedRandomAccess, { const MAY_HAVE_SIDE_EFFECT: bool = I::MAY_HAVE_SIDE_EFFECT; } #[stable(feature = "fused", since = "1.26.0")] impl<I> FusedIterator for Enumerate<I> where I: FusedIterator {} #[unstable(feature = "trusted_len", issue = "37572")] unsafe impl<I> TrustedLen for Enumerate<I> where I: TrustedLen {} #[unstable(issue = "none", feature = "inplace_iteration")] unsafe impl<S: Iterator, I: Iterator> SourceIter for Enumerate<I> where I: SourceIter<Source = S>, { type Source = S; #[inline] unsafe fn as_inner(&mut self) -> &mut S { // SAFETY: unsafe function forwarding to unsafe function with the same requirements unsafe { SourceIter::as_inner(&mut self.iter) } } } #[unstable(issue = "none", feature = "inplace_iteration")] unsafe impl<I: InPlaceIterable> InPlaceIterable for Enumerate<I> {}
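// A short illustration of the bookkeeping above, via the public
// `Iterator::enumerate` API (a standalone sketch, not part of this module):
//
//     let letters = ['a', 'b', 'c'];
//     let mut iter = letters.iter().enumerate();
//     assert_eq!(iter.next(), Some((0, &'a')));      // count starts at 0
//     // After taking 'c' from the back, one element remains, so the
//     // reported index is count (1) + remaining len (1) = 2 -- the
//     // element's original front-to-back position.
//     assert_eq!(iter.next_back(), Some((2, &'c')));
//     assert_eq!(iter.next(), Some((1, &'b')));
//     assert_eq!(iter.next(), None);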
29.512605
91
0.563497
693a87f552c1fd59752e7074e11e3e43f297bfe5
5,015
use futures::{Stream, StreamExt}; use std::collections::HashMap; use std::sync::{ atomic::{AtomicUsize, Ordering}, Arc, Mutex, }; use tokio::sync::mpsc; use warp::{sse::ServerSentEvent, Filter}; #[tokio::main] async fn main() { pretty_env_logger::init(); // Keep track of all connected users, key is usize, value // is an event stream sender. let users = Arc::new(Mutex::new(HashMap::new())); // Turn our "state" into a new Filter... let users = warp::any().map(move || users.clone()); // POST /chat -> send message let chat_send = warp::path("chat") .and(warp::post()) .and(warp::path::param::<usize>()) .and(warp::body::content_length_limit(500)) .and( warp::body::bytes().and_then(|body: bytes::Bytes| async move { std::str::from_utf8(&body) .map(String::from) .map_err(|_e| warp::reject::custom(NotUtf8)) }), ) .and(users.clone()) .map(|my_id, msg, users| { user_message(my_id, msg, &users); warp::reply() }); // GET /chat -> messages stream let chat_recv = warp::path("chat").and(warp::get()).and(users).map(|users| { // reply using server-sent events let stream = user_connected(users); warp::sse::reply(warp::sse::keep_alive().stream(stream)) }); // GET / -> index html let index = warp::path::end().map(|| { warp::http::Response::builder() .header("content-type", "text/html; charset=utf-8") .body(INDEX_HTML) }); let routes = index.or(chat_recv).or(chat_send); warp::serve(routes).run(([127, 0, 0, 1], 3030)).await; } /// Our global unique user id counter. static NEXT_USER_ID: AtomicUsize = AtomicUsize::new(1); /// Message variants. #[derive(Debug)] enum Message { UserId(usize), Reply(String), } #[derive(Debug)] struct NotUtf8; impl warp::reject::Reject for NotUtf8 {} /// Our state of currently connected users. /// /// - Key is their id /// - Value is a sender of `Message` type Users = Arc<Mutex<HashMap<usize, mpsc::UnboundedSender<Message>>>>; fn user_connected( users: Users, ) -> impl Stream<Item = Result<impl ServerSentEvent + Send + 'static, warp::Error>> + Send + 'static { // Use a counter to assign a new unique ID for this user. let my_id = NEXT_USER_ID.fetch_add(1, Ordering::Relaxed); eprintln!("new chat user: {}", my_id); // Use an unbounded channel to handle buffering and flushing of messages // to the event source... let (tx, rx) = mpsc::unbounded_channel(); tx.send(Message::UserId(my_id)) // rx is right above, so this cannot fail .unwrap(); // Save the sender in our list of connected users. users.lock().unwrap().insert(my_id, tx); // Convert messages into Server-Sent Events and return resulting stream. rx.map(|msg| match msg { Message::UserId(my_id) => Ok((warp::sse::event("user"), warp::sse::data(my_id)).into_a()), Message::Reply(reply) => Ok(warp::sse::data(reply).into_b()), }) } fn user_message(my_id: usize, msg: String, users: &Users) { let new_msg = format!("<User#{}>: {}", my_id, msg); // New message from this user, send it to everyone else (except same uid)... // // We use `retain` instead of a for loop so that we can reap any user that // appears to have disconnected. 
users.lock().unwrap().retain(|uid, tx| { if my_id == *uid { // don't send to same user, but do retain true } else { // If not `is_ok`, the SSE stream is gone, and so don't retain tx.send(Message::Reply(new_msg.clone())).is_ok() } }); } static INDEX_HTML: &str = r#" <!DOCTYPE html> <html> <head> <title>Warp Chat</title> </head> <body> <h1>warp chat</h1> <div id="chat"> <p><em>Connecting...</em></p> </div> <input type="text" id="text" /> <button type="button" id="send">Send</button> <script type="text/javascript"> var uri = 'http://' + location.host + '/chat'; var sse = new EventSource(uri); function message(data) { var line = document.createElement('p'); line.innerText = data; chat.appendChild(line); } sse.onopen = function() { chat.innerHTML = "<p><em>Connected!</em></p>"; } var user_id; sse.addEventListener("user", function(msg) { user_id = msg.data; }); sse.onmessage = function(msg) { message(msg.data); }; send.onclick = function() { var msg = text.value; var xhr = new XMLHttpRequest(); xhr.open("POST", uri + '/' + user_id, true); xhr.send(msg); text.value = ''; message('<You>: ' + msg); }; </script> </body> </html> "#;
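// A minimal standalone variant of the `retain`-based broadcast used in
// `user_message` above (`broadcast_sketch` is illustrative and not called by
// the example): a failed `send` means the receiving half of that user's event
// stream was dropped, so the entry is reaped in the same pass.
#[allow(dead_code)]
fn broadcast_sketch(
    users: &mut HashMap<usize, mpsc::UnboundedSender<Message>>,
    from: usize,
    msg: String,
) {
    users.retain(|&uid, tx| {
        // Keep the sender's own entry; keep others only while sends succeed.
        uid == from || tx.send(Message::Reply(msg.clone())).is_ok()
    });
}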
30.393939
100
0.560718
2658ff103b8928778172e59bc8d0840b0b7a8c15
702
//! Main binary entry point for openapi_v3 implementation.

#![allow(missing_docs)]

use clap::{App, Arg};

mod server;

/// Create custom server, wire it to the autogenerated router,
/// and pass it to the web server.
#[tokio::main]
async fn main() {
    env_logger::init();

    let matches = App::new("server")
        .arg(Arg::with_name("https")
            .long("https")
            .help("Whether to use HTTPS or not"))
        .get_matches();

    let addr = "127.0.0.1:8080";

    server::create(addr, matches.is_present("https")).await;
}
20.647059
70
0.582621
e2c43f91cfce5afe8af4e94b97e976c844de1e4d
1,919
#[macro_use] extern crate log; use std::sync::{Arc, Mutex}; use log::{Level, LevelFilter, Log, Record, Metadata}; use log::MaxLevelFilter; #[cfg(feature = "use_std")] use log::set_boxed_logger; #[cfg(not(feature = "use_std"))] fn set_boxed_logger<M>(make_logger: M) -> Result<(), log::SetLoggerError> where M: FnOnce(MaxLevelFilter) -> Box<Log> { unsafe { log::set_logger(|x| &*Box::into_raw(make_logger(x))) } } struct State { last_log: Mutex<Option<Level>>, filter: MaxLevelFilter, } struct Logger(Arc<State>); impl Log for Logger { fn enabled(&self, _: &Metadata) -> bool { true } fn log(&self, record: &Record) { *self.0.last_log.lock().unwrap() = Some(record.level()); } fn flush(&self) {} } fn main() { let mut a = None; set_boxed_logger(|max| { let me = Arc::new(State { last_log: Mutex::new(None), filter: max, }); a = Some(me.clone()); Box::new(Logger(me)) }).unwrap(); let a = a.unwrap(); test(&a, LevelFilter::Off); test(&a, LevelFilter::Error); test(&a, LevelFilter::Warn); test(&a, LevelFilter::Info); test(&a, LevelFilter::Debug); test(&a, LevelFilter::Trace); } fn test(a: &State, filter: LevelFilter) { a.filter.set(filter); error!(""); last(&a, t(Level::Error, filter)); warn!(""); last(&a, t(Level::Warn, filter)); info!(""); last(&a, t(Level::Info, filter)); debug!(""); if cfg!(debug_assertions) { last(&a, t(Level::Debug, filter)); } else { last(&a, None); } trace!(""); last(&a, None); fn t(lvl: Level, filter: LevelFilter) -> Option<Level> { if lvl <= filter {Some(lvl)} else {None} } } fn last(state: &State, expected: Option<Level>) { let mut lvl = state.last_log.lock().unwrap(); assert_eq!(*lvl, expected); *lvl = None; }
22.313953
73
0.563314
91bddc8506df16d2c7b57b2fe0e1d0ddfec439cd
301
// #![cfg(feature = "alloc")] // #[global_allocator] // static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; // #![feature(result_map_or_else)] #![deny(bare_trait_objects)] #[macro_use] extern crate lazy_static; pub mod expressions; pub mod ffi; mod functions; mod parsing;
20.066667
66
0.69103
f4f410642b5997c00513c349917c08a2a221d5cb
11,882
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hash};
use std::iter::{Extend, FromIterator};
use std::default::Default;
use std::iter::Iterator;
use std::collections::hash_map::IntoIter;

#[derive(Debug, Clone, Copy)]
struct Data {
    pub parent: usize,
    pub rank: u32
}

impl Data {
    pub fn new(id: usize) -> Data {
        Data { parent: id, rank: 0 }
    }
}

/*
struct FieldIter<'a, T> where T: 'a + Eq + Hash {
    field: &'a T
}

impl<'a, T> FieldIter<'a, T> where T: 'a + Eq + Hash {
    pub fn new(field: &'a T) -> Self {
        Self { field }
    }
}
*/

pub struct SetIter<'a, T> where T: 'a + Eq + Hash {
    sets: IntoIter<usize, Vec<&'a T>>
}

impl<'a, T> Iterator for SetIter<'a, T> where T: 'a + Eq + Hash {
    type Item = ::std::vec::IntoIter<&'a T>;

    fn next<'b>(&'b mut self) -> Option<<Self as Iterator>::Item> {
        match self.sets.next() {
            Option::None => None,
            Option::Some((_key, vect)) => Some(vect.into_iter())
        }
    }
}

impl<'a, T> SetIter<'a, T> where T: 'a + Eq + Hash {
    pub fn new(sets: IntoIter<usize, Vec<&'a T>>) -> Self {
        Self { sets }
    }
}

/**
Implementation of the disjoint set data structure with path compression and union by rank.
This data structure is also known as union-find or merge-find set. It tracks a set of elements
partitioned into a number of disjoint (non-overlapping) subsets.

**More:** <https://en.wikipedia.org/wiki/Disjoint-set_data_structure>

# Complexity

- Create new subset complexity: O(1)
- Union complexity: O(α(n)) ≈ O(1)
- Search complexity: O(α(n)) ≈ O(1)
- Memory complexity: O(n)

where α() is a very slowly growing function, with α(n) < 4 for any reasonable n.
Therefore O(α(n)) ≈ O(1).

# Example

```
extern crate algorithm;
use algorithm::collections::DisjointSet;
use std::iter::FromIterator;

fn main() {
    let arr = [1, 2, 3, 4, 5, 6, 7];

    // creates 7 disjoint sets
    let mut ds: DisjointSet<i32> = DisjointSet::from_iter(&arr);

    // you can join existing sets
    ds.union(1, 2);

    // or add elements to existing sets
    ds.union(1, 8);

    // you can check if elements are in the same set
    assert!(ds.in_union(&2, &8));
    assert!(!ds.in_union(&3, &4));

    // or if the element has been previously added to the set
    assert!(ds.contains(&7));
    assert!(!ds.contains(&10));

    // finally, you can access sets and the content of sets using an iterator
    for set in &mut ds {
        println!("A new set:");
        for elem in set.into_iter() {
            print!("{}, ", elem);
        }
        println!("");
    }
}
```
*/
#[derive(Clone, Debug)]
pub struct DisjointSet<T, S = RandomState> where T: Eq + Hash, S: BuildHasher {
    ids: HashMap<T, usize, S>,
    data_by_id: Vec<Data>
}

impl<T, S> DisjointSet<T, S> where T: Eq + Hash, S: BuildHasher {
    /// Creates a new, empty `DisjointSet`.
    pub fn new() -> Self where S: Default {
        Default::default()
    }

    /**
    Creates an empty DisjointSet with the specified capacity.

    The DisjointSet will be able to hold at least `capacity` elements without reallocating.
    If `capacity` is 0, the DisjointSet will not allocate.
    */
    pub fn with_capacity(capacity: usize) -> Self where S: Default {
        Self {
            ids: HashMap::with_capacity_and_hasher(capacity, Default::default()),
            data_by_id: Vec::with_capacity(capacity)
        }
    }

    /**
    Creates an empty DisjointSet which will use the given hash builder to hash keys.

    The created set has the default initial capacity.
    */
    pub fn with_hasher(hash_builder: S) -> Self {
        Self {
            ids: HashMap::with_hasher(hash_builder),
            data_by_id: Vec::new()
        }
    }

    /**
    Creates an empty DisjointSet with the specified capacity, using `hash_builder` to hash the keys.

    The DisjointSet will be able to hold at least `capacity` elements without reallocating.
    If `capacity` is 0, the DisjointSet will not allocate.
    */
    pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self {
        Self {
            ids: HashMap::with_capacity_and_hasher(capacity, hash_builder),
            data_by_id: Vec::with_capacity(capacity)
        }
    }

    /**
    Creates a subset with the provided element.
    If the given element already exists, nothing happens.

    **Complexity:** O(1)
    */
    pub fn make_set(&mut self, val: T) {
        self.make_or_get_set(val);
    }

    /**
    Joins two subsets using one element from both subsets.
    If the provided elements do not exist in the collection when this function is called,
    a new subset with one element gets created prior to joining.

    **Complexity:** O(α(n)) ≈ O(1)
    */
    pub fn union(&mut self, a: T, b: T) {
        let a = self.make_or_get_set(a);
        let b = self.make_or_get_set(b);

        let mut a_root = Self::find_with_path_compression(&mut self.data_by_id, a);
        let mut b_root = Self::find_with_path_compression(&mut self.data_by_id, b);

        if a_root == b_root {
            return;
        }

        // union by rank: attach the tree of smaller rank under the larger one
        if self.data_by_id[a_root].rank < self.data_by_id[b_root].rank {
            let tmp = a_root;
            a_root = b_root;
            b_root = tmp;
        }

        self.data_by_id[b_root].parent = a_root;

        if self.data_by_id[a_root].rank == self.data_by_id[b_root].rank {
            self.data_by_id[a_root].rank += 1;
        }
    }

    /**
    Checks if the given element has been added to this collection.

    **Complexity:** O(α(n)) ≈ O(1)
    */
    pub fn contains(&self, val: &T) -> bool {
        self.ids.contains_key(val)
    }

    /**
    Checks if the given two elements are in the same subset.

    **Complexity:** O(α(n)) ≈ O(1)
    */
    pub fn in_union(&mut self, a: &T, b: &T) -> bool {
        let a = match self.ids.get(a) {
            Option::None => return false,
            Option::Some(id) => *id
        };
        let b = match self.ids.get(b) {
            Option::None => return false,
            Option::Some(id) => *id
        };
        Self::find_with_path_compression(&mut self.data_by_id, a)
            == Self::find_with_path_compression(&mut self.data_by_id, b)
    }

    pub fn is_empty(&self) -> bool {
        self.ids.is_empty()
    }

    pub fn len(&self) -> usize {
        self.ids.len()
    }

    pub fn clear(&mut self) {
        self.ids.clear();
        self.data_by_id.clear()
    }

    pub fn reserve(&mut self, additional: usize) {
        self.data_by_id.reserve(additional);
        self.ids.reserve(additional);
    }

    fn make_or_get_set(&mut self, val: T) -> usize {
        let next_id = self.ids.len();

        // insert but do not override existing one
        match self.ids.entry(val) {
            Entry::Vacant(entry) => {
                entry.insert(next_id);
                // make element its own parent
                self.data_by_id.push(Data::new(next_id));
                next_id
            },
            Entry::Occupied(entry) => *entry.get()
        }
    }

    fn find_with_path_compression(data_by_id: &mut Vec<Data>, id: usize) -> usize {
        let mut parent = data_by_id[id].parent;
        if parent != id {
            parent = Self::find_with_path_compression(data_by_id, parent);
            data_by_id[id].parent = parent;
        }
        parent
    }

    fn build_sets<'a>(&'a mut self) -> HashMap<usize, Vec<&'a T>> {
        let mut map: HashMap<usize, Vec<&'a T>> = HashMap::new();
        for (ref key, ref val) in self.ids.iter() {
            let root = Self::find_with_path_compression(&mut self.data_by_id, **val);
            map.entry(root).or_insert_with(|| Vec::new()).push(key);
        }
        map
    }
}

impl<T, S> Default for DisjointSet<T, S> where T: Eq + Hash, S: BuildHasher + Default {
    fn default() -> Self {
        Self {
            ids: HashMap::default(),
            data_by_id: Vec::default()
        }
    }
}

impl<T, S> FromIterator<T> for DisjointSet<T, S>
where
    T: Hash + Eq,
    S: BuildHasher + Default,
{
    /**
    Creates a DisjointSet from the provided iterator.

    Each element becomes a new subset with just one element
    (equivalent to calling make_set() multiple times).
    */
    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
        let iter = iter.into_iter();
        let mut ds = Self::with_capacity(iter.size_hint().0);

        for val in iter {
            ds.make_set(val);
        }
        ds
    }
}

impl<'a, T, S> FromIterator<&'a T> for DisjointSet<T, S>
where
    T: Hash + Eq + Clone,
    S: BuildHasher + Default,
{
    /**
    Creates a DisjointSet from the provided iterator.

    Each element becomes a new subset with just one element
    (equivalent to calling make_set() multiple times).
    */
    fn from_iter<I: IntoIterator<Item = &'a T>>(iter: I) -> Self {
        let iter = iter.into_iter();
        let mut ds = Self::with_capacity(iter.size_hint().0);

        for val in iter.into_iter().map(|ref val| (*val).clone()) {
            ds.make_set(val)
        }
        ds
    }
}

impl<T, S> Extend<T> for DisjointSet<T, S>
where
    T: Hash + Eq,
    S: BuildHasher,
{
    /**
    Extends the collection using the provided iterator.

    Each element becomes a new subset with just one element
    (equivalent to calling make_set() multiple times).
    */
    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
        let iter = iter.into_iter();
        self.reserve(iter.size_hint().0);

        for val in iter {
            self.make_set(val)
        }
    }
}

impl<'a, T, S> Extend<&'a T> for DisjointSet<T, S>
where
    T: Hash + Eq + Copy,
    S: BuildHasher,
{
    /**
    Extends the collection using the provided iterator.

    Each element becomes a new subset with just one element
    (equivalent to calling make_set() multiple times).
    */
    fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
        let iter = iter.into_iter();
        self.reserve(iter.size_hint().0);

        for val in iter.map(|&val| val.clone()) {
            self.make_set(val);
        }
    }
}

impl<'a, T, S> IntoIterator for &'a mut DisjointSet<T, S>
where
    T: Hash + Eq,
    S: BuildHasher {
    type Item = ::std::vec::IntoIter<&'a T>;
    type IntoIter = SetIter<'a, T>;

    fn into_iter(self) -> <Self as IntoIterator>::IntoIter {
        SetIter::new(self.build_sets().into_iter())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn create() {
        let arr = [1, 2, 3];
        let ds: DisjointSet<i32> = DisjointSet::from_iter(&arr);
        assert_eq!(ds.len(), 3);
    }

    #[test]
    fn contains() {
        let arr = [1, 2, 3];
        let ds: DisjointSet<i32> = DisjointSet::from_iter(&arr);
        assert!(ds.contains(&1));
        assert!(ds.contains(&2));
        assert!(ds.contains(&3));

        assert!(!ds.contains(&4));
        assert!(!ds.contains(&0));
    }

    #[test]
    fn make_set() {
        let mut ds: DisjointSet<i32> = DisjointSet::new();
        ds.make_set(3);
        ds.make_set(4);

        assert!(ds.contains(&3));
        assert!(ds.contains(&4));
        assert!(!ds.contains(&0));

        ds.make_set(4);
        assert!(ds.contains(&4));
    }

    #[test]
    fn union() {
        let mut ds: DisjointSet<i32> = DisjointSet::new();
        ds.union(3, 4);
        ds.union(5, 6);

        // union() should create sets:
        assert!(ds.contains(&3));
        assert!(ds.contains(&4));
        assert!(ds.contains(&5));
        assert!(ds.contains(&6));

        // with valid relations:
        assert!(ds.in_union(&3, &4));
        assert!(ds.in_union(&5, &6));
        assert!(!ds.in_union(&4, &5));

        ds.union(4, 5);
        assert!(ds.in_union(&4, &5));
        assert!(ds.in_union(&3, &6));
    }
}
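// An additional end-to-end sketch in the same style as the tests above
// (module name `usage_sketch` is illustrative): union is transitive through
// the shared root found by path compression.
#[cfg(test)]
mod usage_sketch {
    use super::*;

    #[test]
    fn transitive_union() {
        let mut ds: DisjointSet<i32> = DisjointSet::from_iter(&[1, 2, 3, 4]);
        ds.union(1, 2); // {1,2} {3} {4}
        ds.union(2, 3); // {1,2,3} {4}
        assert!(ds.in_union(&1, &3));
        assert!(!ds.in_union(&1, &4));
        assert_eq!(ds.len(), 4); // number of elements, not subsets
    }
}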
26.522321
126
0.573304
26c7885ad797770e0b147d2f5ce1e9b682e0a546
677
extern crate timely; extern crate timely_communication; use timely::dataflow::operators::*; use timely_communication::Configuration; use timely::progress::timestamp::RootTimestamp; fn main() { timely::execute(Configuration::Thread, |worker| { let (mut input, mut cap) = worker.dataflow(|scope| { let (input, stream) = scope.new_unordered_input(); stream.inspect_batch(|t, x| println!("{:?} -> {:?}", t, x)); input }); for round in 0..10 { input.session(cap.clone()).give(round); cap = cap.delayed(&RootTimestamp::new(round + 1)); worker.step(); } }).unwrap(); }
29.434783
72
0.583456
ff1732e64edfd1ef4f2110ef0d002d5839c977df
6,983
use std::sync::Arc; use cursive::align::HAlign; use cursive::event::{Event, EventResult, MouseButton, MouseEvent}; use cursive::theme::{ColorStyle, ColorType, PaletteColor}; use cursive::traits::View; use cursive::vec::Vec2; use cursive::Printer; use unicode_width::UnicodeWidthStr; use library::Library; use queue::{Queue, RepeatSetting}; use spotify::{PlayerEvent, Spotify}; pub struct StatusBar { queue: Arc<Queue>, spotify: Arc<Spotify>, library: Arc<Library>, last_size: Vec2, use_nerdfont: bool, } impl StatusBar { pub fn new(queue: Arc<Queue>, library: Arc<Library>, use_nerdfont: bool) -> StatusBar { let spotify = queue.get_spotify(); StatusBar { queue, spotify, library, last_size: Vec2::new(0, 0), use_nerdfont, } } } impl View for StatusBar { fn draw(&self, printer: &Printer<'_, '_>) { if printer.size.x == 0 { return; } let style_bar = ColorStyle::new( ColorType::Color(*printer.theme.palette.custom("statusbar_progress").unwrap()), ColorType::Palette(PaletteColor::Background), ); let style_bar_bg = ColorStyle::new( ColorType::Color( *printer .theme .palette .custom("statusbar_progress_bg") .unwrap(), ), ColorType::Palette(PaletteColor::Background), ); let style = ColorStyle::new( ColorType::Color(*printer.theme.palette.custom("statusbar").unwrap()), ColorType::Color(*printer.theme.palette.custom("statusbar_bg").unwrap()), ); printer.print( (0, 0), &vec![' '; printer.size.x].into_iter().collect::<String>(), ); printer.with_color(style, |printer| { printer.print( (0, 1), &vec![' '; printer.size.x].into_iter().collect::<String>(), ); }); let state_icon = if self.use_nerdfont { match self.spotify.get_current_status() { PlayerEvent::Playing => "\u{f909} ", PlayerEvent::Paused => "\u{f8e3} ", PlayerEvent::Stopped | PlayerEvent::FinishedTrack => "\u{f9da} ", } } else { match self.spotify.get_current_status() { PlayerEvent::Playing => "▶ ", PlayerEvent::Paused => "▮▮", PlayerEvent::Stopped | PlayerEvent::FinishedTrack => "◼ ", } } .to_string(); printer.with_color(style, |printer| { printer.print((1, 1), &state_icon); }); let updating = if !*self.library.is_done.read().unwrap() { if self.use_nerdfont { "\u{f9e5} " } else { "[U] " } } else { "" }; let repeat = if self.use_nerdfont { match self.queue.get_repeat() { RepeatSetting::None => "", RepeatSetting::RepeatPlaylist => "\u{f955} ", RepeatSetting::RepeatTrack => "\u{f957} ", } } else { match self.queue.get_repeat() { RepeatSetting::None => "", RepeatSetting::RepeatPlaylist => "[R] ", RepeatSetting::RepeatTrack => "[R1] ", } }; let shuffle = if self.queue.get_shuffle() { if self.use_nerdfont { "\u{f99c} " } else { "[Z] " } } else { "" }; let volume = format!( " [{}%]", (self.spotify.volume() as f64 / 0xffff as f64 * 100.0) as u16 ); printer.with_color(style_bar_bg, |printer| { printer.print((0, 0), &"┉".repeat(printer.size.x)); }); if let Some(ref t) = self.queue.get_current() { let elapsed = self.spotify.get_current_progress(); let elapsed_ms = elapsed.as_millis() as u32; let formatted_elapsed = format!( "{:02}:{:02}", elapsed.as_secs() / 60, elapsed.as_secs() % 60 ); let saved = if self.library.is_saved_track(t) { if self.use_nerdfont { "\u{f62b} " } else { "✓ " } } else { "" }; let right = updating.to_string() + repeat + shuffle + saved + &format!("{} / {}", formatted_elapsed, t.duration_str()) + &volume; let offset = HAlign::Right.get_offset(right.width(), printer.size.x); printer.with_color(style, |printer| { printer.print((4, 1), &t.to_string()); printer.print((offset, 1), &right); }); printer.with_color(style_bar, |printer| { let 
duration_width = (((printer.size.x as u32) * elapsed_ms) / t.duration) as usize; printer.print((0, 0), &"━".repeat(duration_width + 1)); }); } else { let right = updating.to_string() + repeat + shuffle + &volume; let offset = HAlign::Right.get_offset(right.width(), printer.size.x); printer.with_color(style, |printer| { printer.print((offset, 1), &right); }); } } fn layout(&mut self, size: Vec2) { self.last_size = size; } fn required_size(&mut self, constraint: Vec2) -> Vec2 { Vec2::new(constraint.x, 2) } fn on_event(&mut self, event: Event) -> EventResult { if let Event::Mouse { offset, position, event, } = event { let position = position - offset; if position.y == 0 { if event == MouseEvent::WheelUp { self.spotify.seek_relative(-500); } if event == MouseEvent::WheelDown { self.spotify.seek_relative(500); } if event == MouseEvent::Press(MouseButton::Left) || event == MouseEvent::Hold(MouseButton::Left) { if let Some(ref t) = self.queue.get_current() { let f: f32 = position.x as f32 / self.last_size.x as f32; let new = t.duration as f32 * f; self.spotify.seek(new as u32); } } } else if event == MouseEvent::Press(MouseButton::Left) { self.queue.toggleplayback(); } EventResult::Consumed(None) } else { EventResult::Ignored } } }
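// The progress width drawn above is a plain proportion of elapsed time to
// track duration; a standalone check of that arithmetic (the helper name is
// illustrative, not part of the view code):
#[cfg(test)]
mod progress_math {
    fn progress_cells(total: u32, elapsed_ms: u32, duration_ms: u32) -> u32 {
        (total * elapsed_ms) / duration_ms
    }

    #[test]
    fn quarter_elapsed_fills_quarter_of_bar() {
        // 30s of a 120s track on an 80-cell bar fills 20 cells.
        assert_eq!(progress_cells(80, 30_000, 120_000), 20);
    }
}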
30.762115
100
0.472576
6a555e1b9ea66168f4e67c3ff9f31676ae58bd71
5,030
use crate::{ runtime_error, unexpected_type_error_with_slice, BinaryOp, Value, ValueMap, ValueNumber, }; pub fn make_module() -> ValueMap { use Value::*; let mut result = ValueMap::new(); result.add_fn("assert", |vm, args| { for value in vm.get_args(args).iter() { match value { Bool(b) => { if !b { return runtime_error!("Assertion failed"); } } unexpected => { return unexpected_type_error_with_slice( "test.assert", "Bool as argument", &[unexpected.clone()], ) } } } Ok(Empty) }); result.add_fn("assert_eq", |vm, args| match vm.get_args(args) { [a, b] => { let a = a.clone(); let b = b.clone(); let result = vm.run_binary_op(BinaryOp::Equal, a.clone(), b.clone()); match result { Ok(Bool(true)) => Ok(Empty), Ok(Bool(false)) => { runtime_error!("Assertion failed, '{}' is not equal to '{}'", a, b) } Ok(unexpected) => unexpected_type_error_with_slice( "test.assert_eq", "Bool from equality comparison", &[unexpected], ), Err(e) => Err(e.with_prefix("assert_eq")), } } unexpected => unexpected_type_error_with_slice("test.assert_eq", "two Values", unexpected), }); result.add_fn("assert_ne", |vm, args| match vm.get_args(args) { [a, b] => { let a = a.clone(); let b = b.clone(); let result = vm.run_binary_op(BinaryOp::NotEqual, a.clone(), b.clone()); match result { Ok(Bool(true)) => Ok(Empty), Ok(Bool(false)) => { runtime_error!("Assertion failed, '{}' should not be equal to '{}'", a, b) } Ok(unexpected) => unexpected_type_error_with_slice( "test.assert_ne", "Bool from equality comparison", &[unexpected], ), Err(e) => Err(e.with_prefix("assert_ne")), } } unexpected => unexpected_type_error_with_slice("test.assert_ne", "two Values", unexpected), }); result.add_fn("assert_near", |vm, args| match vm.get_args(args) { [Number(a), Number(b), Number(allowed_diff)] => { if number_near(*a, *b, *allowed_diff) { Ok(Empty) } else { runtime_error!( "Assertion failed, '{}' and '{}' are not within {} of each other", a, b, allowed_diff, ) } } [Num2(a), Num2(b), Number(allowed_diff)] => { let allowed_diff: f64 = allowed_diff.into(); if f64_near(a.0, b.0, allowed_diff) && f64_near(a.1, b.1, allowed_diff) { Ok(Empty) } else { runtime_error!( "Assertion failed, '{}' and '{}' are not within {} of each other", a, b, allowed_diff, ) } } [Num4(a), Num4(b), Number(allowed_diff)] => { let allowed_diff: f32 = allowed_diff.into(); if f32_near(a.0, b.0, allowed_diff) && f32_near(a.1, b.1, allowed_diff) && f32_near(a.2, b.2, allowed_diff) && f32_near(a.3, b.3, allowed_diff) { Ok(Empty) } else { runtime_error!( "Assertion failed, '{}' and '{}' are not within {} of each other", a, b, allowed_diff, ) } } unexpected => unexpected_type_error_with_slice( "test.assert_near", "two Numbers (or Num2s or Num4s) as arguments, \ followed by a Number that specifies the allowed difference", unexpected, ), }); result.add_fn("run_tests", |vm, args| match vm.get_args(args) { [Map(tests)] => { let tests = tests.clone(); let mut vm = vm.spawn_shared_vm(); vm.run_tests(tests) } unexpected => { unexpected_type_error_with_slice("test.run_tests", "a Map as argument", unexpected) } }); result } fn f32_near(a: f32, b: f32, allowed_diff: f32) -> bool { (a - b).abs() <= allowed_diff } fn f64_near(a: f64, b: f64, allowed_diff: f64) -> bool { (a - b).abs() <= allowed_diff } fn number_near(a: ValueNumber, b: ValueNumber, allowed_diff: ValueNumber) -> bool { (a - b).abs() <= allowed_diff }
33.986486
99
0.454871
eda691b3b5c0470c21360a93b33331dd89da6dc8
615
use std::env;
use std::process::Command;

const RUSTFLAGS: &str = "RUSTFLAGS";
const IGNORED_LINTS: &[&str] = &["dead_code"];

pub fn make_vec() -> Vec<&'static str> {
    let mut rustflags = Vec::new();

    for &lint in IGNORED_LINTS {
        rustflags.push("-A");
        rustflags.push(lint);
    }

    rustflags
}

pub fn set_env(cmd: &mut Command) {
    // Only extend RUSTFLAGS if the caller's environment already sets it;
    // otherwise the child's environment is left untouched.
    let mut rustflags = match env::var_os(RUSTFLAGS) {
        Some(rustflags) => rustflags,
        None => return,
    };

    for flag in make_vec() {
        rustflags.push(" ");
        rustflags.push(flag);
    }

    cmd.env(RUSTFLAGS, rustflags);
}
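// A hypothetical caller, to show the intended flow (the `cargo build`
// invocation is illustrative only):
//
//     let mut cmd = std::process::Command::new("cargo");
//     cmd.arg("build");
//     set_env(&mut cmd); // appends " -A dead_code" to any inherited RUSTFLAGS
//     assert!(cmd.status().expect("failed to spawn cargo").success());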
19.83871
54
0.58374
eded4525834df100432ad5d0edfd16448f03fe13
7,768
// Copyright (c) 2019, Ben Boeckel // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of this project nor the names of its contributors // may be used to endorse or promote products derived from this software // without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON // ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use crate::keytypes::User; use super::utils; #[test] fn invalid_target_key() { let mut invalid_keyring = utils::invalid_keyring(); let mut keyring = utils::new_test_keyring(); let payload = &b"payload"[..]; let key = keyring .add_key::<User, _, _>("invalid_target_key", payload) .unwrap(); let err = invalid_keyring.unlink_key(&key).unwrap_err(); assert_eq!(err, errno::Errno(libc::EINVAL)); } #[test] fn invalid_target_keyring() { let mut invalid_keyring = utils::invalid_keyring(); let keyring = utils::new_test_keyring(); let err = invalid_keyring.unlink_keyring(&keyring).unwrap_err(); assert_eq!(err, errno::Errno(libc::EINVAL)); } #[test] fn invalid_source_key() { let mut keyring = utils::new_test_keyring(); let invalid_key = utils::invalid_key(); let err = keyring.unlink_key(&invalid_key).unwrap_err(); assert_eq!(err, errno::Errno(libc::EINVAL)); } #[test] fn invalid_source_keyring() { let mut keyring = utils::new_test_keyring(); let invalid_keyring = utils::invalid_keyring(); let err = keyring.unlink_keyring(&invalid_keyring).unwrap_err(); assert_eq!(err, errno::Errno(libc::EINVAL)); } #[test] fn unlink_key_from_non_keyring() { let mut keyring = utils::new_test_keyring(); let payload = &b"payload"[..]; let key = keyring .add_key::<User, _, _>("unlink_key_from_non_keyring", payload) .unwrap(); let mut not_a_keyring = utils::key_as_keyring(&key); let err = not_a_keyring.unlink_key(&key).unwrap_err(); assert_eq!(err, errno::Errno(libc::ENOTDIR)); } #[test] fn unlink_keyring_from_non_keyring() { let mut keyring = utils::new_test_keyring(); let payload = &b"payload"[..]; let key = keyring .add_key::<User, _, _>("unlink_keyring_from_non_keyring", payload) .unwrap(); let mut not_a_keyring = utils::key_as_keyring(&key); let err = not_a_keyring.unlink_keyring(&keyring).unwrap_err(); assert_eq!(err, errno::Errno(libc::ENOTDIR)); } #[test] fn unlink_key_as_keyring() { let mut keyring = utils::new_test_keyring(); let payload = &b"payload"[..]; let key = keyring .add_key::<User, _, 
_>("unlink_keyring_from_non_keyring", payload) .unwrap(); let not_a_keyring = utils::key_as_keyring(&key); // This is OK because the kernel doesn't have the type knowledge that our API does. keyring.unlink_keyring(&not_a_keyring).unwrap(); } #[test] fn unlink_keyring_as_key() { let mut keyring = utils::new_test_keyring(); let new_keyring = keyring.add_keyring("unlink_keyring_as_key").unwrap(); let not_a_key = utils::keyring_as_key(&new_keyring); // This is OK because the kernel doesn't have the type knowledge that our API does. keyring.unlink_key(&not_a_key).unwrap(); } #[test] fn unlink_unlinked_key() { let mut keyring = utils::new_test_keyring(); let payload = &b"payload"[..]; let key = keyring .add_key::<User, _, _>("unlink_unlinked_key", payload) .unwrap(); keyring.unlink_key(&key).unwrap(); utils::wait_for_key_gc(&key); let err = keyring.unlink_key(&key).unwrap_err(); assert_eq!(err, errno::Errno(libc::ENOKEY)); } #[test] fn unlink_unlinked_keyring() { let mut keyring = utils::new_test_keyring(); let new_keyring = keyring.add_keyring("unlink_unlinked_keyring").unwrap(); keyring.unlink_keyring(&new_keyring).unwrap(); utils::wait_for_keyring_gc(&new_keyring); let err = keyring.unlink_keyring(&new_keyring).unwrap_err(); assert_eq!(err, errno::Errno(libc::ENOKEY)); } #[test] fn unlink_key_from_unlinked_keyring() { let mut keyring = utils::new_test_keyring_manual(); let mut keyring_observer = keyring.clone(); let payload = &b"payload"[..]; let key = keyring .add_key::<User, _, _>("unlink_key_from_unlinked_keyring", payload) .unwrap(); keyring.invalidate().unwrap(); utils::wait_for_keyring_gc(&keyring_observer); let err = keyring_observer.unlink_key(&key).unwrap_err(); assert_eq!(err, errno::Errno(libc::ENOKEY)); } #[test] fn unlink_keyring_from_unlinked_keyring() { let mut keyring = utils::new_test_keyring_manual(); let mut keyring_observer = keyring.clone(); let new_keyring = keyring.add_keyring("unlink_from_unlinked_keyring").unwrap(); keyring.invalidate().unwrap(); utils::wait_for_keyring_gc(&keyring_observer); let err = keyring_observer.unlink_keyring(&new_keyring).unwrap_err(); assert_eq!(err, errno::Errno(libc::ENOKEY)); } #[test] fn unlink_unassociated_key() { let mut keyring = utils::new_test_keyring(); let mut new_keyring = keyring.add_keyring("unlink_unassociated_key").unwrap(); let payload = &b"payload"[..]; let key = keyring .add_key::<User, _, _>("unlink_unassociated_key", payload) .unwrap(); let err = new_keyring.unlink_key(&key).unwrap_err(); assert_eq!(err, errno::Errno(libc::ENOENT)); } #[test] fn unlink_unassociated_keyring() { let mut keyring = utils::new_test_keyring(); let mut new_keyring = keyring.add_keyring("unlink_unassociated_keyring").unwrap(); let inner_keyring = keyring .add_keyring("unlink_unassociated_keyring_keyring") .unwrap(); let err = new_keyring.unlink_keyring(&inner_keyring).unwrap_err(); assert_eq!(err, errno::Errno(libc::ENOENT)); } #[test] fn unlink_key() { let mut keyring = utils::new_test_keyring(); let payload = &b"payload"[..]; let key = keyring .add_key::<User, _, _>("unlink_unlinked_key", payload) .unwrap(); keyring.unlink_key(&key).unwrap(); utils::wait_for_key_gc(&key); let (keys, keyrings) = keyring.read().unwrap(); assert!(keys.is_empty()); assert!(keyrings.is_empty()); } #[test] fn unlink_keyring() { let mut keyring = utils::new_test_keyring(); let new_keyring = keyring.add_keyring("unlink_keyring").unwrap(); keyring.unlink_keyring(&new_keyring).unwrap(); utils::wait_for_keyring_gc(&new_keyring); let (keys, keyrings) = 
keyring.read().unwrap(); assert!(keys.is_empty()); assert!(keyrings.is_empty()); }
33.921397
87
0.694258
14dc03949a0d7f77c2d63b7694bcc7079528f4cc
2,220
use std::collections::HashMap; use std::env; use std::ffi::OsStr; use std::fs::{read_to_string, File}; use std::io::{LineWriter, Write}; use std::path::Path; use itertools::Itertools; fn build_raw_line<S: AsRef<OsStr> + ?Sized>(p: &S, const_name: &str) -> String { let json_file_path = Path::new(p); let json_file_str = read_to_string(json_file_path).expect("file not found"); let icon_map: HashMap<String, char> = serde_json::from_str(&json_file_str).expect("error while reading json"); let sorted_icon_tuples = icon_map .keys() .sorted() .map(|k| format!("(\"{}\", '{}')", k, icon_map[k])) .join(","); format!( "pub const {}: &[(&str, char)] = &[{}];", const_name, sorted_icon_tuples ) } fn main() { let current_dir = std::env::current_dir().unwrap(); let file_under_current_dir = |filename: &str| { let mut icon_path = current_dir.clone(); icon_path.push(filename); icon_path }; let out_dir = env::var_os("OUT_DIR").unwrap(); let dest_path = Path::new(&out_dir).join("constants.rs"); let file = File::create(dest_path).expect("can not create file"); let mut file = LineWriter::new(file); let build_line = |filename: &str, const_name: &str| { build_raw_line(&file_under_current_dir(filename), const_name) }; let line = build_line("exactmatch_map.json", "EXACTMATCH_ICON_TABLE"); file.write_all(format!("{}\n", line).as_bytes()).unwrap(); let line = build_line("extension_map.json", "EXTENSION_ICON_TABLE"); file.write_all(format!("\n{}\n", line).as_bytes()).unwrap(); let line = build_line("tagkind_map.json", "TAGKIND_ICON_TABLE"); file.write_all(format!("\n{}\n", line).as_bytes()).unwrap(); file.write_all( " pub fn bsearch_icon_table(c: &str, table: &[(&str, char)]) ->Option<usize> { table.binary_search_by(|&(key, _)| key.cmp(&c)).ok() } \n" .as_bytes(), ) .unwrap(); println!("cargo:rerun-if-changed=build.rs"); println!("cargo:rerun-if-changed=exactmatch_map.json"); println!("cargo:rerun-if-changed=extension_map.json"); println!("cargo:rerun-if-changed=tagkind_map.json"); }
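// How the generated `constants.rs` would typically be consumed at runtime
// (a sketch: the `include!` path follows the usual OUT_DIR convention, and
// the table/function names come from this build script):
//
//     include!(concat!(env!("OUT_DIR"), "/constants.rs"));
//
//     fn icon_for_extension(ext: &str) -> Option<char> {
//         bsearch_icon_table(ext, EXTENSION_ICON_TABLE)
//             .map(|idx| EXTENSION_ICON_TABLE[idx].1)
//     }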
31.714286
80
0.629279
db772facd03a4854d9777811c28886011f33290e
2,186
// Copyright 2020-2021 IOTA Stiftung // SPDX-License-Identifier: Apache-2.0 use std::borrow::Cow; use identity_core::crypto::Signature; use crate::did::CoreDID; use crate::did::CoreDIDUrl; use crate::did::DIDUrl; use crate::did::RelativeDIDUrl; use crate::did::DID; /// Specifies the conditions of a DID document method resolution query. /// /// See `Document::resolve`. #[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] #[repr(transparent)] pub struct MethodQuery<'query>(Cow<'query, str>); impl<'query> MethodQuery<'query> { pub(crate) fn matches(&self, did: &CoreDIDUrl) -> bool { match self.fragment().zip(did.fragment()) { Some((a, b)) => a == b, None => false, } } fn fragment(&self) -> Option<&str> { let query = self.0.as_ref(); if query.starts_with(CoreDID::SCHEME) && !query.ends_with('#') { // Extract the fragment from a full DID-like string query.rfind('#').map(|index| &query[index + 1..]) } else if let Some(stripped) = query.strip_prefix('#') { // Remove the leading `#` if it was in the query Some(stripped) } else { Some(query) } } } impl<'query> From<&'query str> for MethodQuery<'query> { fn from(other: &'query str) -> Self { Self(Cow::Borrowed(other)) } } impl<'query> From<&'query String> for MethodQuery<'query> { fn from(other: &'query String) -> Self { Self(Cow::Borrowed(&**other)) } } impl<'query, T> From<&'query DIDUrl<T>> for MethodQuery<'query> where T: DID, { fn from(other: &'query DIDUrl<T>) -> Self { Self(Cow::Owned(other.to_string())) } } impl<'query, T> From<DIDUrl<T>> for MethodQuery<'query> where T: DID, { fn from(other: DIDUrl<T>) -> Self { Self(Cow::Owned(other.to_string())) } } impl<'query> From<&'query RelativeDIDUrl> for MethodQuery<'query> { fn from(other: &'query RelativeDIDUrl) -> Self { // TODO: improve RelativeDIDUrl performance - internal string segments representation Self(Cow::Owned(other.to_string())) } } impl<'query> From<&'query Signature> for MethodQuery<'query> { fn from(other: &'query Signature) -> Self { Self(Cow::Borrowed(other.verification_method())) } }
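// A standalone sketch of the fragment-normalization rule implemented by the
// private `fragment` helper above (the "did" literal stands in for
// `CoreDID::SCHEME`, which is an assumption here): three query shapes all
// resolve to the same fragment.
#[cfg(test)]
mod fragment_sketch {
  fn fragment(query: &str) -> Option<&str> {
    if query.starts_with("did") && !query.ends_with('#') {
      // Full DID URL: take everything after the last '#'.
      query.rfind('#').map(|index| &query[index + 1..])
    } else if let Some(stripped) = query.strip_prefix('#') {
      // Relative query: drop the leading '#'.
      Some(stripped)
    } else {
      // Bare fragment name.
      Some(query)
    }
  }

  #[test]
  fn three_shapes_one_fragment() {
    assert_eq!(fragment("did:example:123#key-1"), Some("key-1"));
    assert_eq!(fragment("#key-1"), Some("key-1"));
    assert_eq!(fragment("key-1"), Some("key-1"));
  }
}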
25.717647
89
0.642726
11def8b1566d9897602ae6a1e7f8cfe6b9b70a16
53,583
//! //! //! Take an AST and transform it into bytecode //! //! Inspirational code: //! https://github.com/python/cpython/blob/master/Python/compile.c //! https://github.com/micropython/micropython/blob/master/py/compile.c use super::bytecode::{self, CallType, CodeObject, Instruction}; use super::pyobject::{PyObject, PyObjectPayload, PyResult}; use super::vm::VirtualMachine; use num_complex::Complex64; use rustpython_parser::{ast, parser}; struct Compiler { code_object_stack: Vec<CodeObject>, nxt_label: usize, source_path: Option<String>, current_source_location: ast::Location, } /// Compile a given sourcecode into a bytecode object. pub fn compile( vm: &mut VirtualMachine, source: &str, mode: &Mode, source_path: Option<String>, ) -> PyResult { let mut compiler = Compiler::new(); compiler.source_path = source_path.clone(); compiler.push_new_code_object(source_path, "<module>".to_string()); let syntax_error = vm.context().exceptions.syntax_error.clone(); let result = match mode { Mode::Exec => match parser::parse_program(source) { Ok(ast) => compiler.compile_program(&ast), Err(msg) => Err(msg), }, Mode::Eval => match parser::parse_statement(source) { Ok(statement) => compiler.compile_statement_eval(&statement), Err(msg) => Err(msg), }, Mode::Single => match parser::parse_program(source) { Ok(ast) => compiler.compile_program_single(&ast), Err(msg) => Err(msg), }, }; match result { Err(msg) => return Err(vm.new_exception(syntax_error.clone(), msg)), _ => {} } let code = compiler.pop_code_object(); trace!("Compilation completed: {:?}", code); Ok(PyObject::new( PyObjectPayload::Code { code }, vm.ctx.code_type(), )) } pub enum Mode { Exec, Eval, Single, } #[derive(Clone, Copy)] enum EvalContext { Statement, Expression, } type Label = usize; impl Compiler { fn new() -> Self { Compiler { code_object_stack: Vec::new(), nxt_label: 0, source_path: None, current_source_location: ast::Location::default(), } } fn push_new_code_object(&mut self, source_path: Option<String>, obj_name: String) { self.code_object_stack.push(CodeObject::new( Vec::new(), None, Vec::new(), None, source_path.clone(), obj_name, )); } fn pop_code_object(&mut self) -> CodeObject { self.code_object_stack.pop().unwrap() } fn compile_program(&mut self, program: &ast::Program) -> Result<(), String> { let size_before = self.code_object_stack.len(); self.compile_statements(&program.statements)?; assert!(self.code_object_stack.len() == size_before); // Emit None at end: self.emit(Instruction::LoadConst { value: bytecode::Constant::None, }); self.emit(Instruction::ReturnValue); Ok(()) } fn compile_program_single(&mut self, program: &ast::Program) -> Result<(), String> { for statement in &program.statements { if let ast::Statement::Expression { ref expression } = statement.node { self.compile_expression(expression)?; self.emit(Instruction::PrintExpr); } else { self.compile_statement(&statement)?; } } self.emit(Instruction::LoadConst { value: bytecode::Constant::None, }); self.emit(Instruction::ReturnValue); Ok(()) } // Compile statement in eval mode: fn compile_statement_eval(&mut self, statement: &ast::LocatedStatement) -> Result<(), String> { if let ast::Statement::Expression { ref expression } = statement.node { self.compile_expression(expression)?; self.emit(Instruction::ReturnValue); Ok(()) } else { Err("Expecting expression, got statement".to_string()) } } fn compile_statements(&mut self, statements: &[ast::LocatedStatement]) -> Result<(), String> { for statement in statements { self.compile_statement(statement)? 
} Ok(()) } fn compile_statement(&mut self, statement: &ast::LocatedStatement) -> Result<(), String> { trace!("Compiling {:?}", statement); self.set_source_location(&statement.location); match &statement.node { ast::Statement::Import { import_parts } => { for ast::SingleImport { module, symbol, alias, } in import_parts { match symbol { Some(name) if name == "*" => { self.emit(Instruction::ImportStar { name: module.clone(), }); } _ => { self.emit(Instruction::Import { name: module.clone(), symbol: symbol.clone().map(|s| s.clone()), }); self.emit(Instruction::StoreName { name: match alias { Some(alias) => alias.clone(), None => match symbol { Some(symbol) => symbol.clone(), None => module.clone(), }, }, }); } } } } ast::Statement::Expression { expression } => { self.compile_expression(expression)?; // Pop result of stack, since we not use it: self.emit(Instruction::Pop); } ast::Statement::Global { names } => { unimplemented!("global {:?}", names); } ast::Statement::Nonlocal { names } => { unimplemented!("nonlocal {:?}", names); } ast::Statement::If { test, body, orelse } => { let end_label = self.new_label(); match orelse { None => { // Only if: self.compile_test(test, None, Some(end_label), EvalContext::Statement)?; self.compile_statements(body)?; self.set_label(end_label); } Some(statements) => { // if - else: let else_label = self.new_label(); self.compile_test(test, None, Some(else_label), EvalContext::Statement)?; self.compile_statements(body)?; self.emit(Instruction::Jump { target: end_label }); // else: self.set_label(else_label); self.compile_statements(statements)?; } } self.set_label(end_label); } ast::Statement::While { test, body, orelse } => { let start_label = self.new_label(); let else_label = self.new_label(); let end_label = self.new_label(); self.emit(Instruction::SetupLoop { start: start_label, end: end_label, }); self.set_label(start_label); self.compile_test(test, None, Some(else_label), EvalContext::Statement)?; self.compile_statements(body)?; self.emit(Instruction::Jump { target: start_label, }); self.set_label(else_label); if let Some(orelse) = orelse { self.compile_statements(orelse)?; } self.set_label(end_label); self.emit(Instruction::PopBlock); } ast::Statement::With { items, body } => { let end_label = self.new_label(); for item in items { self.compile_expression(&item.context_expr)?; self.emit(Instruction::SetupWith { end: end_label }); match &item.optional_vars { Some(var) => { self.compile_store(var)?; } None => { self.emit(Instruction::Pop); } } } self.compile_statements(body)?; for _ in 0..items.len() { self.emit(Instruction::CleanupWith { end: end_label }); } self.set_label(end_label); } ast::Statement::For { target, iter, body, orelse, } => { // The thing iterated: for i in iter { self.compile_expression(i)?; } // Retrieve iterator self.emit(Instruction::GetIter); // Start loop let start_label = self.new_label(); let else_label = self.new_label(); let end_label = self.new_label(); self.emit(Instruction::SetupLoop { start: start_label, end: end_label, }); self.set_label(start_label); self.emit(Instruction::ForIter { target: else_label }); // Start of loop iteration, set targets: self.compile_store(target)?; // Body of loop: self.compile_statements(body)?; self.emit(Instruction::Jump { target: start_label, }); self.set_label(else_label); if let Some(orelse) = orelse { self.compile_statements(orelse)?; } self.set_label(end_label); self.emit(Instruction::PopBlock); } ast::Statement::Raise { exception, cause } => match exception { Some(value) => { 
self.compile_expression(value)?; match cause { Some(cause) => { self.compile_expression(cause)?; self.emit(Instruction::Raise { argc: 2 }); } None => { self.emit(Instruction::Raise { argc: 1 }); } } } None => { self.emit(Instruction::Raise { argc: 0 }); } }, ast::Statement::Try { body, handlers, orelse, finalbody, } => { let mut handler_label = self.new_label(); let finally_label = self.new_label(); let else_label = self.new_label(); // try: self.emit(Instruction::SetupExcept { handler: handler_label, }); self.compile_statements(body)?; self.emit(Instruction::PopBlock); self.emit(Instruction::Jump { target: else_label }); // except handlers: self.set_label(handler_label); // Exception is on top of stack now handler_label = self.new_label(); for handler in handlers { // If we gave a typ, // check if this handler can handle the exception: if let Some(exc_type) = &handler.typ { // Duplicate exception for test: self.emit(Instruction::Duplicate); // Check exception type: self.emit(Instruction::LoadName { name: String::from("isinstance"), }); self.emit(Instruction::Rotate { amount: 2 }); self.compile_expression(exc_type)?; self.emit(Instruction::CallFunction { typ: CallType::Positional(2), }); // We cannot handle this exception type: self.emit(Instruction::JumpIfFalse { target: handler_label, }); // We have a match, store in name (except x as y) if let Some(alias) = &handler.name { self.emit(Instruction::StoreName { name: alias.clone(), }); } else { // Drop exception from top of stack: self.emit(Instruction::Pop); } } else { // Catch all! // Drop exception from top of stack: self.emit(Instruction::Pop); } // Handler code: self.compile_statements(&handler.body)?; self.emit(Instruction::Jump { target: finally_label, }); // Emit a new label for the next handler self.set_label(handler_label); handler_label = self.new_label(); } self.emit(Instruction::Jump { target: handler_label, }); self.set_label(handler_label); // If code flows here, we have an unhandled exception, // emit finally code and raise again! // Duplicate finally code here: // TODO: this bytecode is now duplicate, could this be // improved? 
if let Some(statements) = finalbody { self.compile_statements(statements)?; } self.emit(Instruction::Raise { argc: 1 }); // We successfully ran the try block: // else: self.set_label(else_label); if let Some(statements) = orelse { self.compile_statements(statements)?; } // finally: self.set_label(finally_label); if let Some(statements) = finalbody { self.compile_statements(statements)?; } // unimplemented!(); } ast::Statement::FunctionDef { name, args, body, decorator_list, } => { // Create bytecode for this function: let flags = self.enter_function(name, args)?; self.compile_statements(body)?; // Emit None at end: self.emit(Instruction::LoadConst { value: bytecode::Constant::None, }); self.emit(Instruction::ReturnValue); let code = self.pop_code_object(); self.prepare_decorators(decorator_list)?; self.emit(Instruction::LoadConst { value: bytecode::Constant::Code { code }, }); self.emit(Instruction::LoadConst { value: bytecode::Constant::String { value: name.clone(), }, }); // Turn code object into function object: self.emit(Instruction::MakeFunction { flags }); self.apply_decorators(decorator_list); self.emit(Instruction::StoreName { name: name.to_string(), }); } ast::Statement::ClassDef { name, body, bases, keywords, decorator_list, } => { self.prepare_decorators(decorator_list)?; self.emit(Instruction::LoadBuildClass); self.code_object_stack.push(CodeObject::new( vec![String::from("__locals__")], None, vec![], None, self.source_path.clone(), name.clone(), )); self.emit(Instruction::LoadName { name: String::from("__locals__"), }); self.emit(Instruction::StoreLocals); self.compile_statements(body)?; self.emit(Instruction::LoadConst { value: bytecode::Constant::None, }); self.emit(Instruction::ReturnValue); let code = self.pop_code_object(); self.emit(Instruction::LoadConst { value: bytecode::Constant::Code { code }, }); self.emit(Instruction::LoadConst { value: bytecode::Constant::String { value: name.clone(), }, }); // Turn code object into function object: self.emit(Instruction::MakeFunction { flags: bytecode::FunctionOpArg::empty(), }); self.emit(Instruction::LoadConst { value: bytecode::Constant::String { value: name.clone(), }, }); for base in bases { self.compile_expression(base)?; } if !keywords.is_empty() { let mut kwarg_names = vec![]; for keyword in keywords { if let Some(name) = &keyword.name { kwarg_names.push(bytecode::Constant::String { value: name.to_string(), }); } else { // This means **kwargs! panic!("name must be set"); } self.compile_expression(&keyword.value)?; } self.emit(Instruction::LoadConst { value: bytecode::Constant::Tuple { elements: kwarg_names, }, }); self.emit(Instruction::CallFunction { typ: CallType::Keyword(2 + keywords.len() + bases.len()), }); } else { self.emit(Instruction::CallFunction { typ: CallType::Positional(2 + bases.len()), }); } self.apply_decorators(decorator_list); self.emit(Instruction::StoreName { name: name.to_string(), }); } ast::Statement::Assert { test, msg } => { // TODO: if some flag, ignore all assert statements! 
let end_label = self.new_label(); self.compile_test(test, Some(end_label), None, EvalContext::Statement)?; self.emit(Instruction::LoadName { name: String::from("AssertionError"), }); match msg { Some(e) => { self.compile_expression(e)?; self.emit(Instruction::CallFunction { typ: CallType::Positional(1), }); } None => { self.emit(Instruction::CallFunction { typ: CallType::Positional(0), }); } } self.emit(Instruction::Raise { argc: 1 }); self.set_label(end_label); } ast::Statement::Break => { self.emit(Instruction::Break); } ast::Statement::Continue => { self.emit(Instruction::Continue); } ast::Statement::Return { value } => { match value { Some(e) => { let size = e.len(); for v in e { self.compile_expression(v)?; } // If we have more than 1 return value, make it a tuple: if size > 1 { self.emit(Instruction::BuildTuple { size, unpack: false, }); } } None => { self.emit(Instruction::LoadConst { value: bytecode::Constant::None, }); } } self.emit(Instruction::ReturnValue); } ast::Statement::Assign { targets, value } => { self.compile_expression(value)?; for (i, target) in targets.into_iter().enumerate() { if i + 1 != targets.len() { self.emit(Instruction::Duplicate); } self.compile_store(target)?; } } ast::Statement::AugAssign { target, op, value } => { self.compile_expression(target)?; self.compile_expression(value)?; // Perform operation: self.compile_op(op); self.compile_store(target)?; } ast::Statement::Delete { targets } => { for target in targets { match target { ast::Expression::Identifier { name } => { self.emit(Instruction::DeleteName { name: name.to_string(), }); } ast::Expression::Attribute { value, name } => { self.compile_expression(value)?; self.emit(Instruction::DeleteAttr { name: name.to_string(), }); } ast::Expression::Subscript { a, b } => { self.compile_expression(a)?; self.compile_expression(b)?; self.emit(Instruction::DeleteSubscript); } _ => { return Err("Invalid delete statement".to_string()); } } } } ast::Statement::Pass => { self.emit(Instruction::Pass); } } Ok(()) } fn enter_function( &mut self, name: &str, args: &ast::Parameters, ) -> Result<bytecode::FunctionOpArg, String> { let have_kwargs = !args.defaults.is_empty(); if have_kwargs { // Construct a tuple: let size = args.defaults.len(); for element in &args.defaults { self.compile_expression(element)?; } self.emit(Instruction::BuildTuple { size, unpack: false, }); } self.code_object_stack.push(CodeObject::new( args.args.clone(), args.vararg.clone(), args.kwonlyargs.clone(), args.kwarg.clone(), self.source_path.clone(), name.to_string(), )); let mut flags = bytecode::FunctionOpArg::empty(); if have_kwargs { flags = flags | bytecode::FunctionOpArg::HAS_DEFAULTS; } Ok(flags) } fn prepare_decorators(&mut self, decorator_list: &[ast::Expression]) -> Result<(), String> { for decorator in decorator_list { self.compile_expression(decorator)?; } Ok(()) } fn apply_decorators(&mut self, decorator_list: &[ast::Expression]) { // Apply decorators: for _ in decorator_list { self.emit(Instruction::CallFunction { typ: CallType::Positional(1), }); } } fn compile_store(&mut self, target: &ast::Expression) -> Result<(), String> { match target { ast::Expression::Identifier { name } => { self.emit(Instruction::StoreName { name: name.to_string(), }); } ast::Expression::Subscript { a, b } => { self.compile_expression(a)?; self.compile_expression(b)?; self.emit(Instruction::StoreSubscript); } ast::Expression::Attribute { value, name } => { self.compile_expression(value)?; self.emit(Instruction::StoreAttr { name: name.to_string(), }); } 
ast::Expression::Tuple { elements } => { let mut seen_star = false; // Scan for star args: for (i, element) in elements.iter().enumerate() { if let ast::Expression::Starred { .. } = element { if seen_star { return Err("two starred expressions in assignment".to_string()); } else { seen_star = true; self.emit(Instruction::UnpackEx { before: i, after: elements.len() - i - 1, }); } } } if !seen_star { self.emit(Instruction::UnpackSequence { size: elements.len(), }); } for element in elements { if let ast::Expression::Starred { value } = element { self.compile_store(value)?; } else { self.compile_store(element)?; } } } _ => { return Err(format!("Cannot store value into: {:?}", target)); } } Ok(()) } fn compile_op(&mut self, op: &ast::Operator) { let i = match op { ast::Operator::Add => bytecode::BinaryOperator::Add, ast::Operator::Sub => bytecode::BinaryOperator::Subtract, ast::Operator::Mult => bytecode::BinaryOperator::Multiply, ast::Operator::MatMult => bytecode::BinaryOperator::MatrixMultiply, ast::Operator::Div => bytecode::BinaryOperator::Divide, ast::Operator::FloorDiv => bytecode::BinaryOperator::FloorDivide, ast::Operator::Mod => bytecode::BinaryOperator::Modulo, ast::Operator::Pow => bytecode::BinaryOperator::Power, ast::Operator::LShift => bytecode::BinaryOperator::Lshift, ast::Operator::RShift => bytecode::BinaryOperator::Rshift, ast::Operator::BitOr => bytecode::BinaryOperator::Or, ast::Operator::BitXor => bytecode::BinaryOperator::Xor, ast::Operator::BitAnd => bytecode::BinaryOperator::And, }; self.emit(Instruction::BinaryOperation { op: i }); } fn compile_test( &mut self, expression: &ast::Expression, true_label: Option<Label>, false_label: Option<Label>, context: EvalContext, ) -> Result<(), String> { // Compile expression for test, and jump to label if false match expression { ast::Expression::BoolOp { a, op, b } => match op { ast::BooleanOperator::And => { let f = false_label.unwrap_or_else(|| self.new_label()); self.compile_test(a, None, Some(f), context)?; self.compile_test(b, true_label, false_label, context)?; if false_label.is_none() { self.set_label(f); } } ast::BooleanOperator::Or => { let t = true_label.unwrap_or_else(|| self.new_label()); self.compile_test(a, Some(t), None, context)?; self.compile_test(b, true_label, false_label, context)?; if true_label.is_none() { self.set_label(t); } } }, _ => { self.compile_expression(expression)?; match context { EvalContext::Statement => { if let Some(true_label) = true_label { self.emit(Instruction::JumpIf { target: true_label }); } if let Some(false_label) = false_label { self.emit(Instruction::JumpIfFalse { target: false_label, }); } } EvalContext::Expression => { if let Some(true_label) = true_label { self.emit(Instruction::Duplicate); self.emit(Instruction::JumpIf { target: true_label }); self.emit(Instruction::Pop); } if let Some(false_label) = false_label { self.emit(Instruction::Duplicate); self.emit(Instruction::JumpIfFalse { target: false_label, }); self.emit(Instruction::Pop); } } } } } Ok(()) } fn compile_expression(&mut self, expression: &ast::Expression) -> Result<(), String> { trace!("Compiling {:?}", expression); match expression { ast::Expression::Call { function, args, keywords, } => self.compile_call(function, args, keywords)?, ast::Expression::BoolOp { .. } => { self.compile_test(expression, None, None, EvalContext::Expression)? 
} ast::Expression::Binop { a, op, b } => { self.compile_expression(a)?; self.compile_expression(b)?; // Perform operation: self.compile_op(op); } ast::Expression::Subscript { a, b } => { self.compile_expression(a)?; self.compile_expression(b)?; self.emit(Instruction::BinaryOperation { op: bytecode::BinaryOperator::Subscript, }); } ast::Expression::Unop { op, a } => { self.compile_expression(a)?; // Perform operation: let i = match op { ast::UnaryOperator::Pos => bytecode::UnaryOperator::Plus, ast::UnaryOperator::Neg => bytecode::UnaryOperator::Minus, ast::UnaryOperator::Not => bytecode::UnaryOperator::Not, ast::UnaryOperator::Inv => bytecode::UnaryOperator::Invert, }; let i = Instruction::UnaryOperation { op: i }; self.emit(i); } ast::Expression::Attribute { value, name } => { self.compile_expression(value)?; self.emit(Instruction::LoadAttr { name: name.to_string(), }); } ast::Expression::Compare { a, op, b } => { self.compile_expression(a)?; self.compile_expression(b)?; let i = match op { ast::Comparison::Equal => bytecode::ComparisonOperator::Equal, ast::Comparison::NotEqual => bytecode::ComparisonOperator::NotEqual, ast::Comparison::Less => bytecode::ComparisonOperator::Less, ast::Comparison::LessOrEqual => bytecode::ComparisonOperator::LessOrEqual, ast::Comparison::Greater => bytecode::ComparisonOperator::Greater, ast::Comparison::GreaterOrEqual => bytecode::ComparisonOperator::GreaterOrEqual, ast::Comparison::In => bytecode::ComparisonOperator::In, ast::Comparison::NotIn => bytecode::ComparisonOperator::NotIn, ast::Comparison::Is => bytecode::ComparisonOperator::Is, ast::Comparison::IsNot => bytecode::ComparisonOperator::IsNot, }; let i = Instruction::CompareOperation { op: i }; self.emit(i); } ast::Expression::Number { value } => { let const_value = match value { ast::Number::Integer { value } => bytecode::Constant::Integer { value: value.clone(), }, ast::Number::Float { value } => bytecode::Constant::Float { value: *value }, ast::Number::Complex { real, imag } => bytecode::Constant::Complex { value: Complex64::new(*real, *imag), }, }; self.emit(Instruction::LoadConst { value: const_value }); } ast::Expression::List { elements } => { let size = elements.len(); let must_unpack = self.gather_elements(elements)?; self.emit(Instruction::BuildList { size, unpack: must_unpack, }); } ast::Expression::Tuple { elements } => { let size = elements.len(); let must_unpack = self.gather_elements(elements)?; self.emit(Instruction::BuildTuple { size, unpack: must_unpack, }); } ast::Expression::Set { elements } => { let size = elements.len(); let must_unpack = self.gather_elements(elements)?; self.emit(Instruction::BuildSet { size, unpack: must_unpack, }); } ast::Expression::Dict { elements } => { let size = elements.len(); for (key, value) in elements { self.compile_expression(key)?; self.compile_expression(value)?; } self.emit(Instruction::BuildMap { size, unpack: false, }); } ast::Expression::Slice { elements } => { let size = elements.len(); for element in elements { self.compile_expression(element)?; } self.emit(Instruction::BuildSlice { size }); } ast::Expression::Yield { value } => { self.mark_generator(); match value { Some(expression) => self.compile_expression(expression)?, None => self.emit(Instruction::LoadConst { value: bytecode::Constant::None, }), }; self.emit(Instruction::YieldValue); } ast::Expression::YieldFrom { value } => { self.mark_generator(); self.compile_expression(value)?; self.emit(Instruction::GetIter); self.emit(Instruction::LoadConst { value: bytecode::Constant::None, 
}); self.emit(Instruction::YieldFrom); } ast::Expression::True => { self.emit(Instruction::LoadConst { value: bytecode::Constant::Boolean { value: true }, }); } ast::Expression::False => { self.emit(Instruction::LoadConst { value: bytecode::Constant::Boolean { value: false }, }); } ast::Expression::None => { self.emit(Instruction::LoadConst { value: bytecode::Constant::None, }); } ast::Expression::String { value } => { self.emit(Instruction::LoadConst { value: bytecode::Constant::String { value: value.to_string(), }, }); } ast::Expression::Bytes { value } => { self.emit(Instruction::LoadConst { value: bytecode::Constant::Bytes { value: value.clone(), }, }); } ast::Expression::Identifier { name } => { self.emit(Instruction::LoadName { name: name.to_string(), }); } ast::Expression::Lambda { args, body } => { let name = "<lambda>".to_string(); let flags = self.enter_function(&name, args)?; self.compile_expression(body)?; self.emit(Instruction::ReturnValue); let code = self.pop_code_object(); self.emit(Instruction::LoadConst { value: bytecode::Constant::Code { code }, }); self.emit(Instruction::LoadConst { value: bytecode::Constant::String { value: name }, }); // Turn code object into function object: self.emit(Instruction::MakeFunction { flags }); } ast::Expression::Comprehension { kind, generators } => { self.compile_comprehension(kind, generators)?; } ast::Expression::Starred { value } => { self.compile_expression(value)?; self.emit(Instruction::Unpack); panic!("We should not just unpack a starred args, since the size is unknown."); } ast::Expression::IfExpression { test, body, orelse } => { let no_label = self.new_label(); let end_label = self.new_label(); self.compile_test(test, None, Some(no_label), EvalContext::Expression)?; self.compile_expression(body)?; self.emit(Instruction::Jump { target: end_label }); self.set_label(no_label); self.compile_expression(orelse)?; self.set_label(end_label); } } Ok(()) } fn compile_call( &mut self, function: &ast::Expression, args: &[ast::Expression], keywords: &[ast::Keyword], ) -> Result<(), String> { self.compile_expression(function)?; let count = args.len() + keywords.len(); // Normal arguments: let must_unpack = self.gather_elements(args)?; let has_double_star = keywords.iter().any(|k| k.name.is_none()); if must_unpack || has_double_star { // Create a tuple with positional args: self.emit(Instruction::BuildTuple { size: args.len(), unpack: must_unpack, }); // Create an optional map with kw-args: if !keywords.is_empty() { for keyword in keywords { if let Some(name) = &keyword.name { self.emit(Instruction::LoadConst { value: bytecode::Constant::String { value: name.to_string(), }, }); self.compile_expression(&keyword.value)?; if has_double_star { self.emit(Instruction::BuildMap { size: 1, unpack: false, }); } } else { // This means **kwargs! self.compile_expression(&keyword.value)?; } } self.emit(Instruction::BuildMap { size: keywords.len(), unpack: has_double_star, }); self.emit(Instruction::CallFunction { typ: CallType::Ex(true), }); } else { self.emit(Instruction::CallFunction { typ: CallType::Ex(false), }); } } else { // Keyword arguments: if !keywords.is_empty() { let mut kwarg_names = vec![]; for keyword in keywords { if let Some(name) = &keyword.name { kwarg_names.push(bytecode::Constant::String { value: name.to_string(), }); } else { // This means **kwargs! 
panic!("name must be set"); } self.compile_expression(&keyword.value)?; } self.emit(Instruction::LoadConst { value: bytecode::Constant::Tuple { elements: kwarg_names, }, }); self.emit(Instruction::CallFunction { typ: CallType::Keyword(count), }); } else { self.emit(Instruction::CallFunction { typ: CallType::Positional(count), }); } } Ok(()) } // Given a vector of expr / star expr generate code which gives either // a list of expressions on the stack, or a list of tuples. fn gather_elements(&mut self, elements: &[ast::Expression]) -> Result<bool, String> { // First determine if we have starred elements: let has_stars = elements.iter().any(|e| { if let ast::Expression::Starred { .. } = e { true } else { false } }); for element in elements { if let ast::Expression::Starred { value } = element { self.compile_expression(value)?; } else { self.compile_expression(element)?; if has_stars { self.emit(Instruction::BuildTuple { size: 1, unpack: false, }); } } } Ok(has_stars) } fn compile_comprehension( &mut self, kind: &ast::ComprehensionKind, generators: &[ast::Comprehension], ) -> Result<(), String> { // We must have at least one generator: assert!(!generators.is_empty()); let name = match kind { ast::ComprehensionKind::GeneratorExpression { .. } => "<genexpr>", ast::ComprehensionKind::List { .. } => "<listcomp>", ast::ComprehensionKind::Set { .. } => "<setcomp>", ast::ComprehensionKind::Dict { .. } => "<dictcomp>", } .to_string(); // Create magnificent function <listcomp>: self.code_object_stack.push(CodeObject::new( vec![".0".to_string()], None, vec![], None, self.source_path.clone(), name.clone(), )); // Create empty object of proper type: match kind { ast::ComprehensionKind::GeneratorExpression { .. } => {} ast::ComprehensionKind::List { .. } => { self.emit(Instruction::BuildList { size: 0, unpack: false, }); } ast::ComprehensionKind::Set { .. } => { self.emit(Instruction::BuildSet { size: 0, unpack: false, }); } ast::ComprehensionKind::Dict { .. } => { self.emit(Instruction::BuildMap { size: 0, unpack: false, }); } } let mut loop_labels = vec![]; for generator in generators { if loop_labels.is_empty() { // Load iterator onto stack (passed as first argument): self.emit(Instruction::LoadName { name: String::from(".0"), }); } else { // Evaluate iterated item: self.compile_expression(&generator.iter)?; // Get iterator / turn item into an iterator self.emit(Instruction::GetIter); } // Setup for loop: let start_label = self.new_label(); let end_label = self.new_label(); loop_labels.push((start_label, end_label)); self.emit(Instruction::SetupLoop { start: start_label, end: end_label, }); self.set_label(start_label); self.emit(Instruction::ForIter { target: end_label }); self.compile_store(&generator.target)?; // Now evaluate the ifs: for if_condition in &generator.ifs { self.compile_test( if_condition, None, Some(start_label), EvalContext::Statement, )? 
} } match kind { ast::ComprehensionKind::GeneratorExpression { element } => { self.compile_expression(element)?; self.mark_generator(); self.emit(Instruction::YieldValue); self.emit(Instruction::Pop); } ast::ComprehensionKind::List { element } => { self.compile_expression(element)?; self.emit(Instruction::ListAppend { i: 1 + generators.len(), }); } ast::ComprehensionKind::Set { element } => { self.compile_expression(element)?; self.emit(Instruction::SetAdd { i: 1 + generators.len(), }); } ast::ComprehensionKind::Dict { key, value } => { self.compile_expression(value)?; self.compile_expression(key)?; self.emit(Instruction::MapAdd { i: 1 + generators.len(), }); } } for (start_label, end_label) in loop_labels.iter().rev() { // Repeat: self.emit(Instruction::Jump { target: *start_label, }); // End of for loop: self.set_label(*end_label); self.emit(Instruction::PopBlock); } // Return freshly filled list: self.emit(Instruction::ReturnValue); // Fetch code for listcomp function: let code = self.pop_code_object(); // List comprehension code: self.emit(Instruction::LoadConst { value: bytecode::Constant::Code { code }, }); // List comprehension function name: self.emit(Instruction::LoadConst { value: bytecode::Constant::String { value: name }, }); // Turn code object into function object: self.emit(Instruction::MakeFunction { flags: bytecode::FunctionOpArg::empty(), }); // Evaluate iterated item: self.compile_expression(&generators[0].iter)?; // Get iterator / turn item into an iterator self.emit(Instruction::GetIter); // Call just created <listcomp> function: self.emit(Instruction::CallFunction { typ: CallType::Positional(1), }); Ok(()) } // Low level helper functions: fn emit(&mut self, instruction: Instruction) { self.current_code_object().instructions.push(instruction); // TODO: insert source filename let location = self.current_source_location.clone(); self.current_code_object().locations.push(location); } fn current_code_object(&mut self) -> &mut CodeObject { self.code_object_stack.last_mut().unwrap() } // Generate a new label fn new_label(&mut self) -> Label { let l = self.nxt_label; self.nxt_label += 1; l } // Assign current position the given label fn set_label(&mut self, label: Label) { let position = self.current_code_object().instructions.len(); // assert!(label not in self.label_map) self.current_code_object().label_map.insert(label, position); } fn set_source_location(&mut self, location: &ast::Location) { self.current_source_location = location.clone(); } fn mark_generator(&mut self) { self.current_code_object().is_generator = true; } } #[cfg(test)] mod tests { use super::bytecode::CodeObject; use super::bytecode::Constant::*; use super::bytecode::Instruction::*; use super::Compiler; use rustpython_parser::parser; fn compile_exec(source: &str) -> CodeObject { let mut compiler = Compiler::new(); compiler.push_new_code_object(Option::None, "<module>".to_string()); let ast = parser::parse_program(&source.to_string()).unwrap(); compiler.compile_program(&ast).unwrap(); compiler.pop_code_object() } #[test] fn test_if_ors() { let code = compile_exec("if True or False or False:\n pass\n"); assert_eq!( vec![ LoadConst { value: Boolean { value: true } }, JumpIf { target: 1 }, LoadConst { value: Boolean { value: false } }, JumpIf { target: 1 }, LoadConst { value: Boolean { value: false } }, JumpIfFalse { target: 0 }, Pass, LoadConst { value: None }, ReturnValue ], code.instructions ); } #[test] fn test_if_ands() { let code = compile_exec("if True and False and False:\n pass\n"); assert_eq!( 
vec![ LoadConst { value: Boolean { value: true } }, JumpIfFalse { target: 0 }, LoadConst { value: Boolean { value: false } }, JumpIfFalse { target: 0 }, LoadConst { value: Boolean { value: false } }, JumpIfFalse { target: 0 }, Pass, LoadConst { value: None }, ReturnValue ], code.instructions ); } #[test] fn test_if_mixed() { let code = compile_exec("if (True and False) or (False and True):\n pass\n"); assert_eq!( vec![ LoadConst { value: Boolean { value: true } }, JumpIfFalse { target: 2 }, LoadConst { value: Boolean { value: false } }, JumpIf { target: 1 }, LoadConst { value: Boolean { value: false } }, JumpIfFalse { target: 0 }, LoadConst { value: Boolean { value: true } }, JumpIfFalse { target: 0 }, Pass, LoadConst { value: None }, ReturnValue ], code.instructions ); } }
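The public surface of the compiler above is just `compile` plus the `Mode` enum: pick a mode, hand over source text, and receive a code object wrapped in a `PyObject`. A minimal driver sketch against the signatures defined in this file (the `VirtualMachine` setup is left to the host, and the source string is purely illustrative):

```rust
// Sketch only -- relies on the `compile`, `Mode`, `PyResult` and
// `VirtualMachine` items defined/imported at the top of this module.
fn compile_example(vm: &mut VirtualMachine) -> PyResult {
    let source = "x = 1 + 2\nprint(x)";
    // Mode::Exec parses a whole program; compile_program then appends the
    // implicit `LoadConst None` / `ReturnValue` epilogue seen in the tests.
    compile(vm, source, &Mode::Exec, Some("<example>".to_string()))
}
```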
37.107341
100
0.444917
e52eb03df49e57dd6377aff711818254df8d3ad8
6872
//! # Honey Badger BFT //! //! An implementation of [The Honey Badger of BFT Protocols](https://eprint.iacr.org/2016/199.pdf), //! an asynchronous, Byzantine fault tolerant consensus algorithm. //! //! //! ## Consensus //! //! A consensus algorithm is a protocol that helps a number of nodes agree on some data value. //! Byzantine fault tolerant systems can tolerate a number of faulty nodes _f_ (broken, or even //! controlled by an attacker), as long as the total number of nodes _N_ is greater than _3 f_. //! Asynchronous protocols do not make assumptions about timing: Even if an adversary controls //! network scheduling and can delay message delivery, consensus will still be reached as long as //! all messages are _eventually_ delivered. //! //! The Honey Badger consensus algorithm is both Byzantine fault tolerant and asynchronous. It is //! also modular, and the subalgorithms it is composed of are exposed in this crate as well, and //! usable separately. //! //! Consensus algorithms are fundamental to resilient, distributed systems such as decentralized //! databases and blockchains. //! //! //! ## Usage //! //! `hbbft` is meant to solve the consensus problem in a distributed application. Participating //! nodes provide input to the algorithm and are guaranteed to eventually produce the same output, //! after passing several messages back and forth. //! //! The crate only implements the abstract protocols, it is the application's responsibility to //! serialize, sign and send the messages. The application is required to call `handle_message` for //! every correctly signed message from a peer. Methods return a [Step](struct.Step.html) data //! structure, which contain messages that need to be sent, fault logs indicating misbehaving //! peers, and outputs. //! //! The network must contain a number of nodes that are known to each other by some unique //! identifiers (IDs), which is a generic type argument to the algorithms. Where applicable, the //! type of the input and output is also generic. //! //! //! ## Algorithms //! //! Honey Badger is modular, and composed of several algorithms that can also be used independently. //! //! [**Honey Badger**](honey_badger/index.html) //! //! The nodes repeatedly input _contributions_ (any user-defined type) and output a sequence of //! _batches_. The batches have sequential numbers (_epochs_) and contain one contribution //! from at least _N - f_ nodes. The sequence and contents of the batches will be the same in all //! nodes. //! //! [**Dynamic Honey Badger**](dynamic_honey_badger/index.html) //! //! A modified Honey Badger where validators can dynamically add and remove others to/from the //! network. In addition to the transactions, they can input `Add` and `Remove` requests. The //! output batches contain information about validator changes. //! //! [**Queueing Honey Badger**](queueing_honey_badger/index.html) //! //! A modified Dynamic Honey Badger that has a built-in transaction queue. The nodes input any //! number of _transactions_, and output a sequence of batches. Each batch contains a set of //! transactions that were input by the nodes, and usually multiple transactions from each node. //! //! [**Subset**](subset/index.html) //! //! Each node inputs one item. The output is a set of at least _N - f_ nodes' IDs, together with //! their items, and will be the same in every correct node. //! //! This is the main building block of Honey Badger: In each epoch, every node proposes a number of //! transactions. 
Using the Subset protocol, they agree on at least _N - f_ of those //! proposals. The batch contains the union of these sets of transactions. //! //! [**Broadcast**](broadcast/index.html) //! //! One node, the _proposer_, inputs an item, and every node receives that item as an output. Even //! if the proposer is faulty it is guaranteed that either none of the correct nodes output //! anything, or all of them have the same output. //! //! This is used in Subset to send each node's proposal to the other nodes. //! //! [**Binary Agreement**](binary_agreement/index.html) //! //! Each node inputs a binary value: `true` or `false`. As output, either all correct nodes receive //! `true` or all correct nodes receive `false`. The output is guaranteed to be a value that was //! input by at least one _correct_ node. //! //! This is used in Subset to decide whether each node's proposal should be included in the subset //! or not. //! //! [**Threshold Sign**](threshold_sign/index.html) //! //! Each node inputs `()` to broadcast signature shares. Once _f + 1_ nodes have input, all nodes //! receive a valid signature. The outcome cannot be known by the adversary before at least one //! correct node has provided input, and can be used as a source of pseudorandomness. //! //! [**Threshold Decrypt**](threshold_decrypt/index.html) //! //! Each node inputs the same ciphertext, encrypted to the public master key. Once _f + 1_ //! validators have received input, all nodes output the decrypted data. //! //! [**Synchronous Key Generation**](sync_key_gen/index.html) //! //! The participating nodes collaboratively generate a key set for threshold cryptography, such //! that each node learns its own secret key share, as well as everyone's public key share and the //! public master key. No single trusted dealer is involved and no node ever learns the secret //! master key or another node's secret key share. //! //! Unlike the other algorithms, this one is _not_ asynchronous: All nodes must handle the same //! messages, in the same order. //! //! ## Serialization //! //! `hbbft` supports [serde](https://serde.rs/): All message types implement the `Serialize` and //! `Deserialize` traits so they can be easily serialized or included as part of other serializable //! types. // We put algorithm structs in `src/algorithm/algorithm.rs`. // Some of our constructors return results. #![allow(clippy::module_inception, clippy::new_ret_no_self)] #![warn(missing_docs)] pub use threshold_crypto as crypto; mod fault_log; mod messaging; mod network_info; mod traits; pub mod binary_agreement; pub mod broadcast; pub mod dynamic_honey_badger; pub mod honey_badger; pub mod queueing_honey_badger; pub mod sender_queue; pub mod subset; pub mod sync_key_gen; pub mod threshold_decrypt; pub mod threshold_sign; pub mod transaction_queue; pub mod util; pub use crate::crypto::pairing; pub use crate::fault_log::{Fault, FaultLog}; pub use crate::messaging::{SourcedMessage, Target, TargetedMessage}; pub use crate::network_info::{NetworkInfo, ValidatorSet}; pub use crate::sync_key_gen::{to_pub_keys, PubKeyMap}; pub use crate::traits::{ ConsensusProtocol, Contribution, CpStep, Epoched, Message, NodeIdT, SessionIdT, Step, }; pub use rand::thread_rng as thread_rng_hbbft_compat; pub use rand::prelude::ThreadRng as HbThreadRng; pub use rand::OsRng;
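The Usage section above deliberately leaves transport to the application: verify and deserialize each signed peer message, feed it to `handle_message`, then act on everything in the returned `Step`. A rough sketch of that driving loop follows; the `Transport` trait here is hypothetical (hbbft ships no networking), and the exact `handle_message` signature, including the `Rng` parameter, should be checked against the crate version in use:

```rust
use hbbft::{ConsensusProtocol, TargetedMessage};

/// Hypothetical transport layer -- NOT part of hbbft. The application must
/// serialize, sign and send outgoing messages, and verify incoming ones.
trait Transport<C: ConsensusProtocol> {
    fn recv(&mut self) -> (C::NodeId, C::Message);
    fn send(&mut self, msg: TargetedMessage<C::Message, C::NodeId>);
}

/// Drive one consensus instance until it yields an output. All correct
/// nodes running this loop are guaranteed to produce the same value.
fn drive<C, T>(node: &mut C, transport: &mut T) -> Result<C::Output, C::Error>
where
    C: ConsensusProtocol,
    T: Transport<C>,
{
    let mut rng = rand::thread_rng(); // re-exported above as thread_rng_hbbft_compat
    loop {
        let (sender, message) = transport.recv();
        let step = node.handle_message(&sender, message, &mut rng)?;
        for msg in step.messages {
            transport.send(msg); // outgoing messages the protocol wants sent
        }
        for fault in step.fault_log.0 {
            eprintln!("misbehaving peer: {:?}", fault); // candidate for peer scoring
        }
        if let Some(output) = step.output.into_iter().next() {
            return Ok(output);
        }
    }
}
```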
44.915033
100
0.744034
de2c27bdcbcca690fc1c03337780db68b29566ce
24243
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // aux-build:lint_stability.rs // aux-build:inherited_stability.rs // aux-build:stability_cfg1.rs // aux-build:stability_cfg2.rs // ignore-tidy-linelength #![warn(deprecated)] #![allow(dead_code, unused_extern_crates)] #![feature(staged_api, unstable_test_feature, rustc_attrs)] #![stable(feature = "rust1", since = "1.0.0")] #[macro_use] extern crate lint_stability; mod cross_crate { extern crate stability_cfg1; extern crate stability_cfg2; use lint_stability::*; fn test() { type Foo = MethodTester; let foo = MethodTester; deprecated(); //~ WARN use of deprecated item 'lint_stability::deprecated' foo.method_deprecated(); //~ WARN use of deprecated item 'lint_stability::MethodTester::method_deprecated' Foo::method_deprecated(&foo); //~ WARN use of deprecated item 'lint_stability::MethodTester::method_deprecated' <Foo>::method_deprecated(&foo); //~ WARN use of deprecated item 'lint_stability::MethodTester::method_deprecated' foo.trait_deprecated(); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated' Trait::trait_deprecated(&foo); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated' <Foo>::trait_deprecated(&foo); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated' <Foo as Trait>::trait_deprecated(&foo); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated' deprecated_text(); //~ WARN use of deprecated item 'lint_stability::deprecated_text': text foo.method_deprecated_text(); //~ WARN use of deprecated item 'lint_stability::MethodTester::method_deprecated_text': text Foo::method_deprecated_text(&foo); //~ WARN use of deprecated item 'lint_stability::MethodTester::method_deprecated_text': text <Foo>::method_deprecated_text(&foo); //~ WARN use of deprecated item 'lint_stability::MethodTester::method_deprecated_text': text foo.trait_deprecated_text(); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_text': text Trait::trait_deprecated_text(&foo); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_text': text <Foo>::trait_deprecated_text(&foo); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_text': text <Foo as Trait>::trait_deprecated_text(&foo); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_text': text deprecated_unstable(); //~ WARN use of deprecated item 'lint_stability::deprecated_unstable' foo.method_deprecated_unstable(); //~ WARN use of deprecated item 'lint_stability::MethodTester::method_deprecated_unstable' Foo::method_deprecated_unstable(&foo); //~ WARN use of deprecated item 'lint_stability::MethodTester::method_deprecated_unstable' <Foo>::method_deprecated_unstable(&foo); //~ WARN use of deprecated item 'lint_stability::MethodTester::method_deprecated_unstable' foo.trait_deprecated_unstable(); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_unstable' Trait::trait_deprecated_unstable(&foo); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_unstable' <Foo>::trait_deprecated_unstable(&foo); //~ WARN 
use of deprecated item 'lint_stability::Trait::trait_deprecated_unstable' <Foo as Trait>::trait_deprecated_unstable(&foo); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_unstable' deprecated_unstable_text(); //~ WARN use of deprecated item 'lint_stability::deprecated_unstable_text': text foo.method_deprecated_unstable_text(); //~ WARN use of deprecated item 'lint_stability::MethodTester::method_deprecated_unstable_text': text Foo::method_deprecated_unstable_text(&foo); //~ WARN use of deprecated item 'lint_stability::MethodTester::method_deprecated_unstable_text': text <Foo>::method_deprecated_unstable_text(&foo); //~ WARN use of deprecated item 'lint_stability::MethodTester::method_deprecated_unstable_text': text foo.trait_deprecated_unstable_text(); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_unstable_text': text Trait::trait_deprecated_unstable_text(&foo); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_unstable_text': text <Foo>::trait_deprecated_unstable_text(&foo); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_unstable_text': text <Foo as Trait>::trait_deprecated_unstable_text(&foo); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_unstable_text': text unstable(); foo.method_unstable(); Foo::method_unstable(&foo); <Foo>::method_unstable(&foo); foo.trait_unstable(); Trait::trait_unstable(&foo); <Foo>::trait_unstable(&foo); <Foo as Trait>::trait_unstable(&foo); unstable_text(); foo.method_unstable_text(); Foo::method_unstable_text(&foo); <Foo>::method_unstable_text(&foo); foo.trait_unstable_text(); Trait::trait_unstable_text(&foo); <Foo>::trait_unstable_text(&foo); <Foo as Trait>::trait_unstable_text(&foo); stable(); foo.method_stable(); Foo::method_stable(&foo); <Foo>::method_stable(&foo); foo.trait_stable(); Trait::trait_stable(&foo); <Foo>::trait_stable(&foo); <Foo as Trait>::trait_stable(&foo); stable_text(); foo.method_stable_text(); Foo::method_stable_text(&foo); <Foo>::method_stable_text(&foo); foo.trait_stable_text(); Trait::trait_stable_text(&foo); <Foo>::trait_stable_text(&foo); <Foo as Trait>::trait_stable_text(&foo); struct S1<T: TraitWithAssociatedTypes>(T::TypeUnstable); struct S2<T: TraitWithAssociatedTypes>(T::TypeDeprecated); //~^ WARN use of deprecated item 'lint_stability::TraitWithAssociatedTypes::TypeDeprecated': text type A = TraitWithAssociatedTypes< TypeUnstable = u8, TypeDeprecated = u16, //~^ WARN use of deprecated item 'lint_stability::TraitWithAssociatedTypes::TypeDeprecated' >; let _ = DeprecatedStruct { //~ WARN use of deprecated item 'lint_stability::DeprecatedStruct' i: 0 //~ WARN use of deprecated item 'lint_stability::DeprecatedStruct::i' }; let _ = DeprecatedUnstableStruct { //~^ WARN use of deprecated item 'lint_stability::DeprecatedUnstableStruct' i: 0 //~ WARN use of deprecated item 'lint_stability::DeprecatedUnstableStruct::i' }; let _ = UnstableStruct { i: 0 }; let _ = StableStruct { i: 0 }; let _ = DeprecatedUnitStruct; //~ WARN use of deprecated item 'lint_stability::DeprecatedUnitStruct' let _ = DeprecatedUnstableUnitStruct; //~ WARN use of deprecated item 'lint_stability::DeprecatedUnstableUnitStruct' let _ = UnstableUnitStruct; let _ = StableUnitStruct; let _ = Enum::DeprecatedVariant; //~ WARN use of deprecated item 'lint_stability::Enum::DeprecatedVariant' let _ = Enum::DeprecatedUnstableVariant; //~ WARN use of deprecated item 'lint_stability::Enum::DeprecatedUnstableVariant' let _ = Enum::UnstableVariant; let 
_ = Enum::StableVariant; let _ = DeprecatedTupleStruct (1); //~ WARN use of deprecated item 'lint_stability::DeprecatedTupleStruct' let _ = DeprecatedUnstableTupleStruct (1); //~ WARN use of deprecated item 'lint_stability::DeprecatedUnstableTupleStruct' let _ = UnstableTupleStruct (1); let _ = StableTupleStruct (1); // At the moment, the lint checker only checks stability in // in the arguments of macros. // Eventually, we will want to lint the contents of the // macro in the module *defining* it. Also, stability levels // on macros themselves are not yet linted. macro_test_arg!(deprecated_text()); //~ WARN use of deprecated item 'lint_stability::deprecated_text': text macro_test_arg!(deprecated_unstable_text()); //~ WARN use of deprecated item 'lint_stability::deprecated_unstable_text': text macro_test_arg!(macro_test_arg!(deprecated_text())); //~ WARN use of deprecated item 'lint_stability::deprecated_text': text } fn test_method_param<Foo: Trait>(foo: Foo) { foo.trait_deprecated(); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated' Trait::trait_deprecated(&foo); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated' <Foo>::trait_deprecated(&foo); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated' <Foo as Trait>::trait_deprecated(&foo); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated' foo.trait_deprecated_text(); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_text': text Trait::trait_deprecated_text(&foo); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_text': text <Foo>::trait_deprecated_text(&foo); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_text': text <Foo as Trait>::trait_deprecated_text(&foo); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_text': text foo.trait_deprecated_unstable(); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_unstable' Trait::trait_deprecated_unstable(&foo); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_unstable' <Foo>::trait_deprecated_unstable(&foo); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_unstable' <Foo as Trait>::trait_deprecated_unstable(&foo); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_unstable' foo.trait_deprecated_unstable_text(); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_unstable_text': text Trait::trait_deprecated_unstable_text(&foo); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_unstable_text': text <Foo>::trait_deprecated_unstable_text(&foo); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_unstable_text': text <Foo as Trait>::trait_deprecated_unstable_text(&foo); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_unstable_text': text foo.trait_unstable(); Trait::trait_unstable(&foo); <Foo>::trait_unstable(&foo); <Foo as Trait>::trait_unstable(&foo); foo.trait_unstable_text(); Trait::trait_unstable_text(&foo); <Foo>::trait_unstable_text(&foo); <Foo as Trait>::trait_unstable_text(&foo); foo.trait_stable(); Trait::trait_stable(&foo); <Foo>::trait_stable(&foo); <Foo as Trait>::trait_stable(&foo); } fn test_method_object(foo: &Trait) { foo.trait_deprecated(); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated' foo.trait_deprecated_text(); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_text': text 
foo.trait_deprecated_unstable(); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_unstable' foo.trait_deprecated_unstable_text(); //~ WARN use of deprecated item 'lint_stability::Trait::trait_deprecated_unstable_text': text foo.trait_unstable(); foo.trait_unstable_text(); foo.trait_stable(); } struct S; impl UnstableTrait for S { } impl DeprecatedTrait for S {} //~ WARN use of deprecated item 'lint_stability::DeprecatedTrait': text trait LocalTrait : UnstableTrait { } trait LocalTrait2 : DeprecatedTrait { } //~ WARN use of deprecated item 'lint_stability::DeprecatedTrait': text impl Trait for S { fn trait_stable(&self) {} fn trait_unstable(&self) {} } } mod inheritance { extern crate inherited_stability; use self::inherited_stability::*; fn test_inheritance() { unstable(); stable(); stable_mod::unstable(); stable_mod::stable(); unstable_mod::deprecated(); //~ WARN use of deprecated item 'inheritance::inherited_stability::unstable_mod::deprecated': text unstable_mod::unstable(); let _ = Unstable::UnstableVariant; let _ = Unstable::StableVariant; let x: usize = 0; x.unstable(); x.stable(); } } mod this_crate { #[unstable(feature = "unstable_test_feature", issue = "0")] #[rustc_deprecated(since = "1.0.0", reason = "text")] pub fn deprecated() {} #[unstable(feature = "unstable_test_feature", issue = "0")] #[rustc_deprecated(since = "1.0.0", reason = "text")] pub fn deprecated_text() {} #[unstable(feature = "unstable_test_feature", issue = "0")] pub fn unstable() {} #[unstable(feature = "unstable_test_feature", reason = "text", issue = "0")] pub fn unstable_text() {} #[stable(feature = "rust1", since = "1.0.0")] pub fn stable() {} #[stable(feature = "rust1", since = "1.0.0")] pub fn stable_text() {} #[stable(feature = "rust1", since = "1.0.0")] pub struct MethodTester; impl MethodTester { #[unstable(feature = "unstable_test_feature", issue = "0")] #[rustc_deprecated(since = "1.0.0", reason = "text")] pub fn method_deprecated(&self) {} #[unstable(feature = "unstable_test_feature", issue = "0")] #[rustc_deprecated(since = "1.0.0", reason = "text")] pub fn method_deprecated_text(&self) {} #[unstable(feature = "unstable_test_feature", issue = "0")] pub fn method_unstable(&self) {} #[unstable(feature = "unstable_test_feature", reason = "text", issue = "0")] pub fn method_unstable_text(&self) {} #[stable(feature = "rust1", since = "1.0.0")] pub fn method_stable(&self) {} #[stable(feature = "rust1", since = "1.0.0")] pub fn method_stable_text(&self) {} } pub trait Trait { #[unstable(feature = "unstable_test_feature", issue = "0")] #[rustc_deprecated(since = "1.0.0", reason = "text")] fn trait_deprecated(&self) {} #[unstable(feature = "unstable_test_feature", issue = "0")] #[rustc_deprecated(since = "1.0.0", reason = "text")] fn trait_deprecated_text(&self) {} #[unstable(feature = "unstable_test_feature", issue = "0")] fn trait_unstable(&self) {} #[unstable(feature = "unstable_test_feature", reason = "text", issue = "0")] fn trait_unstable_text(&self) {} #[stable(feature = "rust1", since = "1.0.0")] fn trait_stable(&self) {} #[stable(feature = "rust1", since = "1.0.0")] fn trait_stable_text(&self) {} } impl Trait for MethodTester {} #[unstable(feature = "unstable_test_feature", issue = "0")] #[rustc_deprecated(since = "1.0.0", reason = "text")] pub struct DeprecatedStruct { #[stable(feature = "stable_test_feature", since = "1.0.0")] i: isize } #[unstable(feature = "unstable_test_feature", issue = "0")] pub struct UnstableStruct { #[stable(feature = "stable_test_feature", since = 
"1.0.0")] i: isize } #[stable(feature = "rust1", since = "1.0.0")] pub struct StableStruct { #[stable(feature = "stable_test_feature", since = "1.0.0")] i: isize } #[unstable(feature = "unstable_test_feature", issue = "0")] #[rustc_deprecated(since = "1.0.0", reason = "text")] pub struct DeprecatedUnitStruct; #[unstable(feature = "unstable_test_feature", issue = "0")] pub struct UnstableUnitStruct; #[stable(feature = "rust1", since = "1.0.0")] pub struct StableUnitStruct; pub enum Enum { #[unstable(feature = "unstable_test_feature", issue = "0")] #[rustc_deprecated(since = "1.0.0", reason = "text")] DeprecatedVariant, #[unstable(feature = "unstable_test_feature", issue = "0")] UnstableVariant, #[stable(feature = "rust1", since = "1.0.0")] StableVariant, } #[unstable(feature = "unstable_test_feature", issue = "0")] #[rustc_deprecated(since = "1.0.0", reason = "text")] pub struct DeprecatedTupleStruct(isize); #[unstable(feature = "unstable_test_feature", issue = "0")] pub struct UnstableTupleStruct(isize); #[stable(feature = "rust1", since = "1.0.0")] pub struct StableTupleStruct(isize); fn test() { // Only the deprecated cases of the following should generate // errors, because other stability attributes now have meaning // only *across* crates, not within a single crate. type Foo = MethodTester; let foo = MethodTester; deprecated(); //~ WARN use of deprecated item 'this_crate::deprecated' foo.method_deprecated(); //~ WARN use of deprecated item 'this_crate::MethodTester::method_deprecated' Foo::method_deprecated(&foo); //~ WARN use of deprecated item 'this_crate::MethodTester::method_deprecated' <Foo>::method_deprecated(&foo); //~ WARN use of deprecated item 'this_crate::MethodTester::method_deprecated' foo.trait_deprecated(); //~ WARN use of deprecated item 'this_crate::Trait::trait_deprecated' Trait::trait_deprecated(&foo); //~ WARN use of deprecated item 'this_crate::Trait::trait_deprecated' <Foo>::trait_deprecated(&foo); //~ WARN use of deprecated item 'this_crate::Trait::trait_deprecated' <Foo as Trait>::trait_deprecated(&foo); //~ WARN use of deprecated item 'this_crate::Trait::trait_deprecated' deprecated_text(); //~ WARN use of deprecated item 'this_crate::deprecated_text': text foo.method_deprecated_text(); //~ WARN use of deprecated item 'this_crate::MethodTester::method_deprecated_text': text Foo::method_deprecated_text(&foo); //~ WARN use of deprecated item 'this_crate::MethodTester::method_deprecated_text': text <Foo>::method_deprecated_text(&foo); //~ WARN use of deprecated item 'this_crate::MethodTester::method_deprecated_text': text foo.trait_deprecated_text(); //~ WARN use of deprecated item 'this_crate::Trait::trait_deprecated_text': text Trait::trait_deprecated_text(&foo); //~ WARN use of deprecated item 'this_crate::Trait::trait_deprecated_text': text <Foo>::trait_deprecated_text(&foo); //~ WARN use of deprecated item 'this_crate::Trait::trait_deprecated_text': text <Foo as Trait>::trait_deprecated_text(&foo); //~ WARN use of deprecated item 'this_crate::Trait::trait_deprecated_text': text unstable(); foo.method_unstable(); Foo::method_unstable(&foo); <Foo>::method_unstable(&foo); foo.trait_unstable(); Trait::trait_unstable(&foo); <Foo>::trait_unstable(&foo); <Foo as Trait>::trait_unstable(&foo); unstable_text(); foo.method_unstable_text(); Foo::method_unstable_text(&foo); <Foo>::method_unstable_text(&foo); foo.trait_unstable_text(); Trait::trait_unstable_text(&foo); <Foo>::trait_unstable_text(&foo); <Foo as Trait>::trait_unstable_text(&foo); stable(); 
foo.method_stable(); Foo::method_stable(&foo); <Foo>::method_stable(&foo); foo.trait_stable(); Trait::trait_stable(&foo); <Foo>::trait_stable(&foo); <Foo as Trait>::trait_stable(&foo); stable_text(); foo.method_stable_text(); Foo::method_stable_text(&foo); <Foo>::method_stable_text(&foo); foo.trait_stable_text(); Trait::trait_stable_text(&foo); <Foo>::trait_stable_text(&foo); <Foo as Trait>::trait_stable_text(&foo); let _ = DeprecatedStruct { //~^ WARN use of deprecated item 'this_crate::DeprecatedStruct' i: 0 //~ WARN use of deprecated item 'this_crate::DeprecatedStruct::i' }; let _ = UnstableStruct { i: 0 }; let _ = StableStruct { i: 0 }; let _ = DeprecatedUnitStruct; //~ WARN use of deprecated item 'this_crate::DeprecatedUnitStruct' let _ = UnstableUnitStruct; let _ = StableUnitStruct; let _ = Enum::DeprecatedVariant; //~ WARN use of deprecated item 'this_crate::Enum::DeprecatedVariant' let _ = Enum::UnstableVariant; let _ = Enum::StableVariant; let _ = DeprecatedTupleStruct (1); //~ WARN use of deprecated item 'this_crate::DeprecatedTupleStruct' let _ = UnstableTupleStruct (1); let _ = StableTupleStruct (1); } fn test_method_param<Foo: Trait>(foo: Foo) { foo.trait_deprecated(); //~ WARN use of deprecated item 'this_crate::Trait::trait_deprecated' Trait::trait_deprecated(&foo); //~ WARN use of deprecated item 'this_crate::Trait::trait_deprecated' <Foo>::trait_deprecated(&foo); //~ WARN use of deprecated item 'this_crate::Trait::trait_deprecated' <Foo as Trait>::trait_deprecated(&foo); //~ WARN use of deprecated item 'this_crate::Trait::trait_deprecated' foo.trait_deprecated_text(); //~ WARN use of deprecated item 'this_crate::Trait::trait_deprecated_text': text Trait::trait_deprecated_text(&foo); //~ WARN use of deprecated item 'this_crate::Trait::trait_deprecated_text': text <Foo>::trait_deprecated_text(&foo); //~ WARN use of deprecated item 'this_crate::Trait::trait_deprecated_text': text <Foo as Trait>::trait_deprecated_text(&foo); //~ WARN use of deprecated item 'this_crate::Trait::trait_deprecated_text': text foo.trait_unstable(); Trait::trait_unstable(&foo); <Foo>::trait_unstable(&foo); <Foo as Trait>::trait_unstable(&foo); foo.trait_unstable_text(); Trait::trait_unstable_text(&foo); <Foo>::trait_unstable_text(&foo); <Foo as Trait>::trait_unstable_text(&foo); foo.trait_stable(); Trait::trait_stable(&foo); <Foo>::trait_stable(&foo); <Foo as Trait>::trait_stable(&foo); } fn test_method_object(foo: &Trait) { foo.trait_deprecated(); //~ WARN use of deprecated item 'this_crate::Trait::trait_deprecated' foo.trait_deprecated_text(); //~ WARN use of deprecated item 'this_crate::Trait::trait_deprecated_text': text foo.trait_unstable(); foo.trait_unstable_text(); foo.trait_stable(); } #[unstable(feature = "unstable_test_feature", issue = "0")] #[rustc_deprecated(since = "1.0.0", reason = "text")] fn test_fn_body() { fn fn_in_body() {} fn_in_body(); //~ WARN use of deprecated item 'this_crate::test_fn_body::fn_in_body': text } impl MethodTester { #[unstable(feature = "unstable_test_feature", issue = "0")] #[rustc_deprecated(since = "1.0.0", reason = "text")] fn test_method_body(&self) { fn fn_in_body() {} fn_in_body(); //~ WARN use of deprecated item 'this_crate::MethodTester::test_method_body::fn_in_body': text } } #[unstable(feature = "unstable_test_feature", issue = "0")] #[rustc_deprecated(since = "1.0.0", reason = "text")] pub trait DeprecatedTrait { fn dummy(&self) { } } struct S; impl DeprecatedTrait for S { } //~ WARN use of deprecated item 'this_crate::DeprecatedTrait' trait 
LocalTrait : DeprecatedTrait { } //~ WARN use of deprecated item 'this_crate::DeprecatedTrait' } #[rustc_error] fn main() {} //~ ERROR: compilation successful
51.14557
155
0.67075
8a0899b7e08ee14618740cd360aeabeaaa53ac0f
6,047
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. /// Service config. /// /// /// Service configuration allows for customization of endpoints, region, credentials providers, /// and retry configuration. Generally, it is constructed automatically for you from a shared /// configuration loaded by the `aws-config` crate. For example: /// /// ```ignore /// // Load a shared config from the environment /// let shared_config = aws_config::from_env().load().await; /// // The client constructor automatically converts the shared config into the service config /// let client = Client::new(&shared_config); /// ``` /// /// The service config can also be constructed manually using its builder. /// pub struct Config { pub(crate) retry_config: Option<aws_smithy_types::retry::RetryConfig>, pub(crate) endpoint_resolver: ::std::sync::Arc<dyn aws_endpoint::ResolveAwsEndpoint>, pub(crate) region: Option<aws_types::region::Region>, pub(crate) credentials_provider: aws_types::credentials::SharedCredentialsProvider, } impl std::fmt::Debug for Config { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut config = f.debug_struct("Config"); config.finish() } } impl Config { /// Constructs a config builder. pub fn builder() -> Builder { Builder::default() } /// Creates a new [service config](crate::Config) from a [shared `config`](aws_types::config::Config). pub fn new(config: &aws_types::config::Config) -> Self { Builder::from(config).build() } /// The signature version 4 service signing name to use in the credential scope when signing requests. /// /// The signing service may be overridden by the `Endpoint`, or by specifying a custom /// [`SigningService`](aws_types::SigningService) during operation construction pub fn signing_service(&self) -> &'static str { "lex" } } /// Builder for creating a `Config`. #[derive(Default)] pub struct Builder { retry_config: Option<aws_smithy_types::retry::RetryConfig>, endpoint_resolver: Option<::std::sync::Arc<dyn aws_endpoint::ResolveAwsEndpoint>>, region: Option<aws_types::region::Region>, credentials_provider: Option<aws_types::credentials::SharedCredentialsProvider>, } impl Builder { /// Constructs a config builder. pub fn new() -> Self { Self::default() } /// Set the retry_config for the builder /// /// # Examples /// ```rust /// use aws_sdk_lexmodelsv2::config::Config; /// use aws_smithy_types::retry::RetryConfig; /// /// let retry_config = RetryConfig::new().with_max_attempts(5); /// let config = Config::builder().retry_config(retry_config).build(); /// ``` pub fn retry_config(mut self, retry_config: aws_smithy_types::retry::RetryConfig) -> Self { self.set_retry_config(Some(retry_config)); self } /// Set the retry_config for the builder /// /// # Examples /// ```rust /// use aws_sdk_lexmodelsv2::config::{Builder, Config}; /// use aws_smithy_types::retry::RetryConfig; /// /// fn disable_retries(builder: &mut Builder) { /// let retry_config = RetryConfig::new().with_max_attempts(1); /// builder.set_retry_config(Some(retry_config)); /// } /// /// let mut builder = Config::builder(); /// disable_retries(&mut builder); /// let config = builder.build(); /// ``` pub fn set_retry_config( &mut self, retry_config: Option<aws_smithy_types::retry::RetryConfig>, ) -> &mut Self { self.retry_config = retry_config; self } // TODO(docs): include an example of using a static endpoint /// Sets the endpoint resolver to use when making requests. 
pub fn endpoint_resolver( mut self, endpoint_resolver: impl aws_endpoint::ResolveAwsEndpoint + 'static, ) -> Self { self.endpoint_resolver = Some(::std::sync::Arc::new(endpoint_resolver)); self } /// Sets the AWS region to use when making requests. pub fn region(mut self, region: impl Into<Option<aws_types::region::Region>>) -> Self { self.region = region.into(); self } /// Sets the credentials provider for this service pub fn credentials_provider( mut self, credentials_provider: impl aws_types::credentials::ProvideCredentials + 'static, ) -> Self { self.credentials_provider = Some(aws_types::credentials::SharedCredentialsProvider::new( credentials_provider, )); self } /// Sets the credentials provider for this service pub fn set_credentials_provider( &mut self, credentials_provider: Option<aws_types::credentials::SharedCredentialsProvider>, ) -> &mut Self { self.credentials_provider = credentials_provider; self } /// Builds a [`Config`]. pub fn build(self) -> Config { Config { retry_config: self.retry_config, endpoint_resolver: self .endpoint_resolver .unwrap_or_else(|| ::std::sync::Arc::new(crate::aws_endpoint::endpoint_resolver())), region: self.region, credentials_provider: self.credentials_provider.unwrap_or_else(|| { aws_types::credentials::SharedCredentialsProvider::new( crate::no_credentials::NoCredentials, ) }), } } } impl From<&aws_types::config::Config> for Builder { fn from(input: &aws_types::config::Config) -> Self { let mut builder = Builder::default(); builder = builder.region(input.region().cloned()); builder.set_retry_config(input.retry_config().cloned()); builder.set_credentials_provider(input.credentials_provider().cloned()); builder } } impl From<&aws_types::config::Config> for Config { fn from(config: &aws_types::config::Config) -> Self { Builder::from(config).build() } }
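// A hedged sketch for the TODO above (static endpoint). It assumes this
// generated crate re-exports `Endpoint` (from `aws-smithy-http`), which
// implements `ResolveAwsEndpoint`; the localhost URI is illustrative only.
//
// ```rust
// use aws_sdk_lexmodelsv2::{Config, Endpoint};
// use http::Uri;
//
// let config = Config::builder()
//     .endpoint_resolver(Endpoint::immutable(Uri::from_static("http://localhost:4566")))
//     .build();
// ```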
37.09816
106
0.64594
7ae3c30548e6ef4b8c574d7e42e8d9fca889faa5
7,381
/* * Copyright 2018-2021 Cargill Incorporated * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ----------------------------------------------------------------------------- */ //! This module is based on the Sawtooth Sabre CLI. use std::fs::File; use std::io::{BufReader, Read}; use std::path::Path; use cylinder::{secp256k1::Secp256k1Context, Context, PrivateKey, Signer}; use futures::future::{self, Future}; use futures::stream::Stream; use hyper::{Body, Client, Request, StatusCode}; use sabre_sdk::protocol::{ compute_contract_address, payload::{ CreateContractActionBuilder, CreateContractRegistryActionBuilder, CreateNamespaceRegistryActionBuilder, CreateNamespaceRegistryPermissionActionBuilder, }, }; use scabbard::protocol; use transact::{ protocol::{batch::BatchBuilder, transaction::Transaction}, protos::IntoBytes, }; use super::AppAuthHandlerError; const XO_NAME: &str = "xo"; const XO_VERSION: &str = "0.3.3"; pub const XO_PREFIX: &str = "5b7349"; const XO_CONTRACT_PATH: &str = "/var/lib/gameroomd/xo-tp-rust.wasm"; /// Create and submit the Sabre transactions to setup the XO smart contract. pub fn setup_xo( private_key: &str, scabbard_admin_keys: Vec<String>, splinterd_url: &str, authorization: &str, circuit_id: &str, service_id: &str, ) -> Result<Box<dyn Future<Item = (), Error = ()> + Send + 'static>, AppAuthHandlerError> { let signer = new_signer(private_key)?; // The node with the first key in the list of scabbard admins is responsible for setting up xo let public_key = signer.public_key()?.as_hex(); let is_submitter = match scabbard_admin_keys.get(0) { Some(submitting_key) => &public_key == submitting_key, None => false, }; if !is_submitter { return Ok(Box::new(future::ok(()))); } // Create the transactions and batch them let txns = vec![ create_contract_registry_txn(scabbard_admin_keys.clone(), &*signer)?, upload_contract_txn(&*signer)?, create_xo_namespace_registry_txn(scabbard_admin_keys, &*signer)?, xo_namespace_permissions_txn(&*signer)?, ]; let batch = BatchBuilder::new() .with_transactions(txns) .build(&*signer)?; let payload = vec![batch].into_bytes()?; // Submit the batch to the scabbard service let body_stream = futures::stream::once::<_, std::io::Error>(Ok(payload)); let req = Request::builder() .uri(format!( "{}/scabbard/{}/{}/batches", splinterd_url, circuit_id, service_id )) .method("POST") .header("Authorization", authorization) .header( "SplinterProtocolVersion", protocol::SCABBARD_PROTOCOL_VERSION.to_string(), ) .body(Body::wrap_stream(body_stream)) .map_err(|err| AppAuthHandlerError::BatchSubmit(format!("{}", err)))?; let client = Client::new(); Ok(Box::new( client .request(req) .then(|response| match response { Ok(res) => { let status = res.status(); let body = res .into_body() .concat2() .wait() .map_err(|err| { AppAuthHandlerError::BatchSubmit(format!( "The client encountered an error {}", err )) })? .to_vec(); match status { StatusCode::ACCEPTED => Ok(()), _ => Err(AppAuthHandlerError::BatchSubmit(format!( "The server returned an error. 
Status: {}, {}", status, String::from_utf8(body)? ))), } } Err(err) => Err(AppAuthHandlerError::BatchSubmit(format!( "The client encountered an error {}", err ))), }) .map_err(|_| ()), )) } fn new_signer(private_key: &str) -> Result<Box<dyn Signer>, AppAuthHandlerError> { let context = Secp256k1Context::new(); let private_key = PrivateKey::new_from_hex(private_key)?; Ok(context.new_signer(private_key)) } fn create_contract_registry_txn( owners: Vec<String>, signer: &dyn Signer, ) -> Result<Transaction, AppAuthHandlerError> { Ok(CreateContractRegistryActionBuilder::new() .with_name(XO_NAME.into()) .with_owners(owners) .into_payload_builder()? .into_transaction_builder()? .build(signer)?) } fn upload_contract_txn(signer: &dyn Signer) -> Result<Transaction, AppAuthHandlerError> { let contract_path = Path::new(XO_CONTRACT_PATH); let contract_file = File::open(contract_path) .map_err(|err| AppAuthHandlerError::Sabre(format!("Failed to load contract: {}", err)))?; let mut buf_reader = BufReader::new(contract_file); let mut contract = Vec::new(); buf_reader.read_to_end(&mut contract).map_err(|err| { AppAuthHandlerError::Sabre(format!("IoError while reading contract: {}", err)) })?; let action_addresses = vec![XO_PREFIX.into()]; Ok(CreateContractActionBuilder::new() .with_name(XO_NAME.into()) .with_version(XO_VERSION.into()) .with_inputs(action_addresses.clone()) .with_outputs(action_addresses) .with_contract(contract) .into_payload_builder()? .into_transaction_builder()? .build(signer)?) } fn create_xo_namespace_registry_txn( owners: Vec<String>, signer: &dyn Signer, ) -> Result<Transaction, AppAuthHandlerError> { Ok(CreateNamespaceRegistryActionBuilder::new() .with_namespace(XO_PREFIX.into()) .with_owners(owners) .into_payload_builder()? .into_transaction_builder()? .build(signer)?) } fn xo_namespace_permissions_txn(signer: &dyn Signer) -> Result<Transaction, AppAuthHandlerError> { Ok(CreateNamespaceRegistryPermissionActionBuilder::new() .with_namespace(XO_PREFIX.into()) .with_contract_name(XO_NAME.into()) .with_read(true) .with_write(true) .into_payload_builder()? .into_transaction_builder()? .build(signer)?) } pub fn get_xo_contract_address() -> Result<String, AppAuthHandlerError> { Ok(bytes_to_hex_str(&compute_contract_address( XO_NAME, XO_VERSION, )?)) } /// Returns a hex string representation of the supplied bytes /// /// # Arguments /// /// * `b` - input bytes fn bytes_to_hex_str(b: &[u8]) -> String { b.iter() .map(|b| format!("{:02x}", b)) .collect::<Vec<_>>() .join("") }
34.013825
98
0.602764
feefefdbceee85788428fed7ad334294a34637bb
1,722
#[doc = "Register `PRO_INTR_STATUS_0` reader"] pub struct R(crate::R<PRO_INTR_STATUS_0_SPEC>); impl core::ops::Deref for R { type Target = crate::R<PRO_INTR_STATUS_0_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<PRO_INTR_STATUS_0_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<PRO_INTR_STATUS_0_SPEC>) -> Self { R(reader) } } #[doc = "Field `PRO_INTR_STATUS_0` reader - "] pub struct PRO_INTR_STATUS_0_R(crate::FieldReader<u32, u32>); impl PRO_INTR_STATUS_0_R { #[inline(always)] pub(crate) fn new(bits: u32) -> Self { PRO_INTR_STATUS_0_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PRO_INTR_STATUS_0_R { type Target = crate::FieldReader<u32, u32>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl R { #[doc = "Bits 0:31"] #[inline(always)] pub fn pro_intr_status_0(&self) -> PRO_INTR_STATUS_0_R { PRO_INTR_STATUS_0_R::new(self.bits as u32) } } #[doc = "\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [pro_intr_status_0](index.html) module"] pub struct PRO_INTR_STATUS_0_SPEC; impl crate::RegisterSpec for PRO_INTR_STATUS_0_SPEC { type Ux = u32; } #[doc = "`read()` method returns [pro_intr_status_0::R](R) reader structure"] impl crate::Readable for PRO_INTR_STATUS_0_SPEC { type Reader = R; } #[doc = "`reset()` method sets PRO_INTR_STATUS_0 to value 0"] impl crate::Resettable for PRO_INTR_STATUS_0_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
31.888889
220
0.662021
1192a50286c10d850f20572e20c7a8a45dc7003e
4,848
use crossbeam_utils::thread; use itertools::Itertools; use std::{ io::{Read, Write}, net::TcpListener, path::Path, process::{Command, Stdio}, sync::mpsc, }; type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>; fn main() -> Result<()> { let listener = TcpListener::bind("0.0.0.0:0")?; println!("server listener: {}", listener.local_addr()?); let sig_listener = TcpListener::bind("0.0.0.0:0")?; println!("signal listener: {}", sig_listener.local_addr()?); std::thread::spawn(move || -> std::io::Result<()> { for sig in sig_listener.incoming() { let mut sig = sig?; loop { let n = sig.read(&mut [0; 1])?; if n == 0 { break; } Command::new("fish") .arg("-c") .arg("pkill -2 fish") .spawn()? .wait()?; } } Ok(()) }); let mut input = String::new(); // accept connections and process them serially let listener = listener.incoming().chunks(2); let mut listener = listener.into_iter(); loop { let mut stream = match listener.next() { Some(s) => s, None => break Ok(()), }; let mut stream_write = stream.next().ok_or("client should send this stream")??; let mut stream_read = stream.next().ok_or("client should send this stream")??; stream_write.read_to_string(&mut input)?; dbg!(&input); if input.starts_with("cd") { let dir = input.split_whitespace().nth(1); if let Some(dir) = dir { std::env::set_current_dir(dir)?; } } else if input.starts_with("vim") { let file = Path::new(input.strip_prefix("vim").expect("already checked").trim()); let file_name = file .file_name() .ok_or("Could not read filename")? .to_str() .ok_or("Could not read filename")?; let data = std::fs::read_to_string(&file).unwrap_or_default(); stream_read.write_all(b"?vim")?; stream_read.flush()?; stream_read.write_all(file_name.as_bytes())?; stream_read.write_all(b"???")?; stream_read.write_all(data.as_bytes())?; drop(stream_read); // get result let mut stream = listener.next().ok_or("client should send this stream")?; let mut stream_write = stream.next().ok_or("client should send this stream")??; let _stream_read = stream.next().ok_or("client should send this stream")??; let mut client_data = String::new(); stream_write.read_to_string(&mut client_data)?; std::fs::write(&file, client_data)?; } else { let mut process = Command::new("fish") .arg("-c") .arg(&input) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn()?; let mut stdout = process.stdout.take().expect("stdout is piped"); let mut stderr = process.stderr.take().expect("stderr is piped"); let (tx, rx) = mpsc::channel(); let read_process_and_write_to_stream = |out: &mut dyn Read, tx: mpsc::Sender<Vec<u8>>| -> std::io::Result<bool> { let mut out_buf = [0; 512]; let out_n = out.read(&mut out_buf)?; if out_n != 0 { tx.send(out_buf[..out_n].to_vec()).map_err(|_| { std::io::Error::new( std::io::ErrorKind::Other, "failed to send data to write thread", ) })?; } Ok(out_n == 0) }; thread::scope(move |s| { let tx_c = tx.clone(); s.spawn(move |_| { while let Ok(false) = read_process_and_write_to_stream(&mut stdout, tx_c.clone()) { } }); s.spawn(move |_| { while let Ok(false) = read_process_and_write_to_stream(&mut stderr, tx.clone()) { } }); s.spawn(move |_| -> std::io::Result<()> { while let Ok(data) = rx.recv() { stream_read.write_all(&data)?; } Ok(()) }); }) .map_err(|e| format!("read/write threads failed {:?}", e))?; } input.clear(); } }
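// A hedged sketch of the client side this server implies: each request is a
// pair of connections accepted in order (the first carries the command, the
// second carries the output), and closing the write half signals end-of-input.
#[allow(dead_code)]
fn send_command(addr: &str, cmd: &str) -> std::io::Result<String> {
    let mut write_half = std::net::TcpStream::connect(addr)?; // server's `stream_write`
    let mut read_half = std::net::TcpStream::connect(addr)?; // server's `stream_read`
    write_half.write_all(cmd.as_bytes())?;
    drop(write_half); // EOF lets the server's `read_to_string` return
    let mut output = String::new();
    read_half.read_to_string(&mut output)?;
    Ok(output)
}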
36.179104
99
0.459158
38556c03905f3f965cc7101ba7f6df884e46964a
3,171
use alloc::vec::Vec;
use std::fmt;

use super::lazy_buffer::LazyBuffer;

/// An iterator to iterate through all the `n`-length combinations in an iterator, with replacement.
///
/// See [`.combinations_with_replacement()`](../trait.Itertools.html#method.combinations_with_replacement) for more information.
#[derive(Clone)]
pub struct CombinationsWithReplacement<I>
where
    I: Iterator,
    I::Item: Clone,
{
    k: usize,
    indices: Vec<usize>,
    // The current known max index value. This increases as pool grows.
    max_index: usize,
    pool: LazyBuffer<I>,
    first: bool,
}

impl<I> fmt::Debug for CombinationsWithReplacement<I>
where
    I: Iterator + fmt::Debug,
    I::Item: fmt::Debug + Clone,
{
    debug_fmt_fields!(CombinationsWithReplacement, k, indices, max_index, pool, first);
}

impl<I> CombinationsWithReplacement<I>
where
    I: Iterator,
    I::Item: Clone,
{
    /// Map the current mask over the pool to get an output combination
    fn current(&self) -> Vec<I::Item> {
        self.indices.iter().map(|i| self.pool[*i].clone()).collect()
    }
}

/// Create a new `CombinationsWithReplacement` from a clonable iterator.
pub fn combinations_with_replacement<I>(iter: I, k: usize) -> CombinationsWithReplacement<I>
where
    I: Iterator,
    I::Item: Clone,
{
    let indices: Vec<usize> = alloc::vec![0; k];
    let pool: LazyBuffer<I> = LazyBuffer::new(iter);

    CombinationsWithReplacement {
        k,
        indices,
        max_index: 0,
        pool,
        first: true,
    }
}

impl<I> Iterator for CombinationsWithReplacement<I>
where
    I: Iterator,
    I::Item: Clone,
{
    type Item = Vec<I::Item>;
    fn next(&mut self) -> Option<Self::Item> {
        // If this is the first iteration, return early
        if self.first {
            // In empty edge cases, stop iterating immediately
            return if self.k != 0 && !self.pool.get_next() {
                None
            // Otherwise, yield the initial state
            } else {
                self.first = false;
                Some(self.current())
            };
        }

        // Check if we need to consume more from the iterator
        // This will run while we increment our first index digit
        if self.pool.get_next() {
            self.max_index = self.pool.len() - 1;
        }

        // Work out where we need to update our indices
        let mut increment: Option<(usize, usize)> = None;
        for (i, indices_int) in self.indices.iter().enumerate().rev() {
            if indices_int < &self.max_index {
                increment = Some((i, indices_int + 1));
                break;
            }
        }

        match increment {
            // If we can update the indices further
            Some((increment_from, increment_value)) => {
                // We need to update the rightmost non-max value
                // and all those to the right
                for indices_index in increment_from..self.indices.len() {
                    self.indices[indices_index] = increment_value
                }
                Some(self.current())
            }
            // Otherwise, we're done
            None => None,
        }
    }
}
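// A minimal sketch of the iteration order produced above, via the free
// constructor in this module:
//
// ```rust
// let combos: Vec<Vec<u32>> = combinations_with_replacement(1..=3u32, 2).collect();
// assert_eq!(
//     combos,
//     vec![vec![1, 1], vec![1, 2], vec![1, 3], vec![2, 2], vec![2, 3], vec![3, 3]],
// );
// ```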
29.091743
128
0.585304
f5fbe08b9988dbc2792c5a985755dfe85c98c2e9
4,344
use core::result::Result;
use reqwest::Error;
use serde_derive::Deserialize;

#[derive(Debug)]
pub struct Blockchain {}

#[derive(Deserialize, Debug)]
pub struct BlockchainInfo {
    chain: String,
    blocks: u32,
    headers: u32,
    bestblockhash: String,
    difficulty: f64,
    mediantime: u32,
    verificationprogress: f32,
    chainwork: String,
    size_on_disk: u64,
    pruned: bool,
    warnings: String,
    softforks: Vec<Softfork>,
}

#[derive(Deserialize, Debug)]
pub struct Softfork {
    id: String,
    version: u8,
}

#[derive(Deserialize, Debug)]
pub struct BlockHeader {
    hash: String,
    confirmations: u32,
    height: u32,
    version: u32,
    versionHex: String,
    merkleroot: String,
    time: u32,
    mediantime: u32,
    nonce: u32,
    bits: String,
    difficulty: f32,
    chainwork: String,
    previousblockhash: String,
    nextblockhash: String,
}

#[derive(Deserialize, Debug)]
pub struct ChainTips {
    chaintips: Vec<ChainTip>,
}

#[derive(Deserialize, Debug)]
pub struct ChainTip {
    height: u32,
    hash: String,
    branchlen: u32,
    status: String,
}

#[derive(Deserialize, Debug)]
pub struct QueryError {
    error: String,
}

#[derive(Deserialize, Debug)]
pub struct MempoolInfo {
    size: u32,
    bytes: u32,
    usage: u32,
    maxmempool: u32,
    mempoolminfee: u32,
}

#[derive(Deserialize, Debug)]
pub struct RawMempool {}

impl Blockchain {
    pub fn get_best_block_hash() -> Result<String, Error> {
        let url: String = format!("{}blockchain/getBestBlockHash", crate::MAINNET_BASE_URL);
        let s_slice: &str = &url[..];
        let block_hash: String = reqwest::get(s_slice)?.json()?;
        Ok(block_hash)
    }

    pub fn get_blockchain_info() -> Result<BlockchainInfo, Error> {
        let url: String = format!("{}blockchain/getBlockchainInfo", crate::MAINNET_BASE_URL);
        let s_slice: &str = &url[..];
        let blockchain_info: BlockchainInfo = reqwest::get(s_slice)?.json()?;
        Ok(blockchain_info)
    }

    pub fn get_block_count() -> Result<u32, Error> {
        let url: String = format!("{}blockchain/getBlockCount", crate::MAINNET_BASE_URL);
        let s_slice: &str = &url[..];
        let block_count: u32 = reqwest::get(s_slice)?.json()?;
        Ok(block_count)
    }

    pub fn get_block_header(block_hash: &str) -> Result<String, Error> {
        // TODO: Add query string params
        let url: String = format!(
            "{}blockchain/getBlockHeader/{}",
            crate::MAINNET_BASE_URL,
            block_hash
        );
        let s_slice: &str = &url[..];
        let block_header: String = reqwest::get(s_slice)?.json()?;
        Ok(block_header)
    }

    pub fn get_chain_tips() -> Result<ChainTips, Error> {
        // TODO: Get this working properly
        let url: String = format!("{}blockchain/getChainTips", crate::MAINNET_BASE_URL);
        let s_slice: &str = &url[..];
        let chain_tips: ChainTips = reqwest::get(s_slice)?.json()?;
        Ok(chain_tips)
    }

    pub fn get_difficulty() -> Result<f32, Error> {
        let url: String = format!("{}blockchain/getDifficulty", crate::MAINNET_BASE_URL);
        let s_slice: &str = &url[..];
        let difficulty: f32 = reqwest::get(s_slice)?.json()?;
        Ok(difficulty)
    }

    pub fn get_mempool_entry(txid: &str) -> Result<QueryError, Error> {
        // TODO: Add query string and match for Ok/Err
        let url: String = format!(
            "{}blockchain/getMempoolEntry/{}",
            crate::MAINNET_BASE_URL,
            txid
        );
        let s_slice: &str = &url[..];
        let mempool_entry: QueryError = reqwest::get(s_slice)?.json()?;
        Ok(mempool_entry)
    }

    pub fn get_mempool_info() -> Result<MempoolInfo, Error> {
        let url: String = format!("{}blockchain/getMempoolInfo", crate::MAINNET_BASE_URL);
        let s_slice: &str = &url[..];
        let mempool_info: MempoolInfo = reqwest::get(s_slice)?.json()?;
        Ok(mempool_info)
    }

    pub fn get_raw_mempool() -> Result<RawMempool, Error> {
        // TODO: Add query string and match for Ok/Err
        let url: String = format!("{}blockchain/getRawMempool", crate::MAINNET_BASE_URL);
        let s_slice: &str = &url[..];
        let raw_mempool: RawMempool = reqwest::get(s_slice)?.json()?;
        Ok(raw_mempool)
    }
}
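// A hedged usage sketch; it assumes `crate::MAINNET_BASE_URL` points at a REST
// endpoint exposing the route names used above, and that the network is up.
#[allow(dead_code)]
fn print_tip() -> Result<(), Error> {
    let height = Blockchain::get_block_count()?;
    let best_hash = Blockchain::get_best_block_hash()?;
    println!("chain height {} at tip {}", height, best_hash);
    Ok(())
}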
28.207792
93
0.615331
ed76b780df0bd000816599543f3bc408934448ab
3,886
mod semary; mod shared_mem; pub use self::semary::*; pub use self::shared_mem::*; use crate::memory::GlobalFrameAlloc; use crate::sync::SpinLock as Mutex; use alloc::collections::BTreeMap; use alloc::sync::Arc; use rcore_memory::memory_set::handler::{Shared, SharedGuard}; use rcore_memory::{PhysAddr, VirtAddr, PAGE_SIZE}; /// Semaphore table in a process #[derive(Default)] pub struct SemProc { /// Semaphore arrays arrays: BTreeMap<SemId, Arc<SemArray>>, /// Undo operations when process terminates undos: BTreeMap<(SemId, SemNum), SemOp>, } // TODO: Remove hack #[derive(Default)] pub struct ShmProc { shm_identifiers: BTreeMap<ShmId, ShmIdentifier>, } /// Semaphore set identifier (in a process) type SemId = usize; type ShmId = usize; /// Semaphore number (in an array) type SemNum = u16; /// Semaphore operation value type SemOp = i16; impl SemProc { /// Insert the `array` and return its ID pub fn add(&mut self, array: Arc<SemArray>) -> SemId { let id = self.get_free_id(); self.arrays.insert(id, array); id } /// Remove an `array` by ID pub fn remove(&mut self, id: SemId) { self.arrays.remove(&id); } /// Get a free ID fn get_free_id(&self) -> SemId { (0..).find(|i| self.arrays.get(i).is_none()).unwrap() } /// Get an semaphore set by `id` pub fn get(&self, id: SemId) -> Option<Arc<SemArray>> { self.arrays.get(&id).map(|a| a.clone()) } /// Add an undo operation pub fn add_undo(&mut self, id: SemId, num: SemNum, op: SemOp) { let old_val = *self.undos.get(&(id, num)).unwrap_or(&0); let new_val = old_val - op; self.undos.insert((id, num), new_val); } } /// Fork the semaphore table. Clear undo info. impl Clone for SemProc { fn clone(&self) -> Self { SemProc { arrays: self.arrays.clone(), undos: BTreeMap::default(), } } } /// Auto perform semaphores undo on drop impl Drop for SemProc { fn drop(&mut self) { for (&(id, num), &op) in self.undos.iter() { debug!("semundo: id: {}, num: {}, op: {}", id, num, op); let sem_array = self.arrays[&id].clone(); let sem = &sem_array[num as usize]; match op { 1 => sem.release(), 0 => {} _ => unimplemented!("Semaphore: semundo.(Not 1)"), } } } } impl ShmProc { /// Insert the `SharedGuard` and return its ID pub fn add(&mut self, shared_guard: Arc<spin::Mutex<SharedGuard<GlobalFrameAlloc>>>) -> ShmId { let id = self.get_free_id(); let shm_identifier = ShmIdentifier { addr: 0, shared_guard: shared_guard, }; self.shm_identifiers.insert(id, shm_identifier); id } /// Get a free ID fn get_free_id(&self) -> ShmId { (0..) .find(|i| self.shm_identifiers.get(i).is_none()) .unwrap() } /// Get an semaphore set by `id` pub fn get(&self, id: ShmId) -> Option<ShmIdentifier> { self.shm_identifiers.get(&id).map(|a| a.clone()) } /// Used to set Virtual Addr pub fn set(&mut self, id: ShmId, shm_id: ShmIdentifier) { self.shm_identifiers.insert(id, shm_id); } /// get id from virtaddr pub fn get_id(&self, addr: VirtAddr) -> Option<ShmId> { for (key, value) in &self.shm_identifiers { if value.addr == addr { return Some(*key); } } None } /// Pop Shared Area pub fn pop(&mut self, id: ShmId) { self.shm_identifiers.remove(&id); } } /// Fork the semaphore table. Clear undo info. impl Clone for ShmProc { fn clone(&self) -> Self { ShmProc { shm_identifiers: self.shm_identifiers.clone(), } } }
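// A sketch of the lowest-free-ID allocation shared by `SemProc::get_free_id`
// and `ShmProc::get_free_id`: removing an entry makes its ID immediately
// reusable. `make_array()` is a hypothetical stand-in for however a
// `SemArray` is constructed elsewhere in the crate.
//
// ```rust
// let mut sems = SemProc::default();
// let a = sems.add(make_array()); // ID 0
// let b = sems.add(make_array()); // ID 1
// sems.remove(a);
// assert_eq!(sems.add(make_array()), 0); // the freed slot is reused
// assert_eq!(b, 1);
// ```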
26.256757
99
0.572568
1e78b93e7b96c651fc5ae73ac37a7e4a7e4659c6
6,673
use std::mem::MaybeUninit; use std::os::unix::io::RawFd; use std::{io, ptr}; use log::error; use mio::unix::SourceFd; use mio::{event, Interest, Registry, Token}; use crate::{Signal, SignalSet}; use super::{from_raw_signal, raw_signal}; /// Signaler backed that uses `kqueue(2)`'s `EVFILT_SIGNAL`. /// /// # Implementation notes /// /// We crate a new `kqueue` which we register with the `kqueue` in `Poll`, so we /// can received signals by calling `receive` instead of returning them as /// `Event`s when calling `Poll::poll` to match the API provided by the /// `signalfd` implementation. /// /// We set the signal handler to ignore the signal (not blocking them like in /// the signalfd implementation) to ensure the signal doesn't grow endlessly. #[derive(Debug)] pub struct Signals { /// `kqueue(2)` file descriptor. kq: RawFd, /// All signals this is listening for, used in resetting the signal handlers. signals: SignalSet, } impl Signals { pub fn new(signals: SignalSet) -> io::Result<Signals> { new_kqueue() .map(|kq| Signals { kq, signals }) .and_then(|kq| register_signals(kq.kq, signals).map(|()| kq)) .and_then(|kq| ignore_signals(signals).map(|()| kq)) } pub fn receive(&mut self) -> io::Result<Option<Signal>> { let mut kevent: MaybeUninit<libc::kevent> = MaybeUninit::uninit(); // No blocking. let timeout = libc::timespec { tv_sec: 0, tv_nsec: 0, }; let n_events = unsafe { libc::kevent(self.kq, ptr::null(), 0, kevent.as_mut_ptr(), 1, &timeout) }; match n_events { -1 => Err(io::Error::last_os_error()), 0 => Ok(None), // No signals. 1 => { // This is safe because `kevent` ensures that the event is // initialised. let kevent = unsafe { kevent.assume_init() }; // Should never happen, but just in case. let filter = kevent.filter; // Can't create ref to packed struct. debug_assert_eq!(filter, libc::EVFILT_SIGNAL); // This should never return `None` as we control the signals we // register for, which is always defined in terms of `Signal`. Ok(from_raw_signal(kevent.ident as libc::c_int)) } _ => unreachable!("unexpected number of events"), } } } fn new_kqueue() -> io::Result<RawFd> { let kq = unsafe { libc::kqueue() }; if kq == -1 { Err(io::Error::last_os_error()) } else { Ok(kq) } } fn register_signals(kq: RawFd, signals: SignalSet) -> io::Result<()> { // For each signal create an kevent to indicate we want events for // those signals. let mut changes: [MaybeUninit<libc::kevent>; SignalSet::all().len()] = [MaybeUninit::uninit(); SignalSet::all().len()]; let mut n_changes = 0; for signal in signals { changes[n_changes] = MaybeUninit::new(libc::kevent { ident: raw_signal(signal) as libc::uintptr_t, filter: libc::EVFILT_SIGNAL, flags: libc::EV_ADD, fflags: 0, data: 0, udata: 0 as _, }); n_changes += 1; } let ok = unsafe { libc::kevent( kq, changes[0].as_ptr(), n_changes as _, ptr::null_mut(), 0, ptr::null(), ) }; if ok == -1 { // EINTR is the only error that we can handle, but according to // the man page of FreeBSD: "When kevent() call fails with EINTR // error, all changes in the changelist have been applied", so // we're done. // // EOPNOTSUPP (NetBSD only), // EACCESS, EFAULT, ENOMEM: can't handle. // // EBADF, EINVAL, // ENOENT, and ESRCH: all have to do with invalid arguments, // which shouldn't happen. let err = io::Error::last_os_error(); match err.raw_os_error() { Some(libc::EINTR) => Ok(()), _ => Err(err), } } else { Ok(()) } } /// Ignore all signals in the `signals` set. 
fn ignore_signals(signals: SignalSet) -> io::Result<()> { sigaction(signals, libc::SIG_IGN) } /// Inverse of `ignore_signals`, resetting all signal handlers to the default. fn unignore_signals(signals: SignalSet) -> io::Result<()> { sigaction(signals, libc::SIG_DFL) } /// Call `sigaction` for each signal in `signals`, using `action` as signal /// handler. fn sigaction(signals: SignalSet, action: libc::sighandler_t) -> io::Result<()> { let action = libc::sigaction { sa_sigaction: action, sa_mask: empty_sigset()?, sa_flags: 0, }; for signal in signals { if unsafe { libc::sigaction(raw_signal(signal), &action, ptr::null_mut()) } == -1 { return Err(io::Error::last_os_error()); } } Ok(()) } /// Create an empty `sigset_t`. fn empty_sigset() -> io::Result<libc::sigset_t> { let mut set: MaybeUninit<libc::sigset_t> = MaybeUninit::uninit(); if unsafe { libc::sigemptyset(set.as_mut_ptr()) } == -1 { Err(io::Error::last_os_error()) } else { // This is safe because `sigemptyset` ensures `set` is initialised. Ok(unsafe { set.assume_init() }) } } impl event::Source for Signals { fn register( &mut self, registry: &Registry, token: Token, interests: Interest, ) -> io::Result<()> { SourceFd(&self.kq).register(registry, token, interests) } fn reregister( &mut self, registry: &Registry, token: Token, interests: Interest, ) -> io::Result<()> { SourceFd(&self.kq).reregister(registry, token, interests) } fn deregister(&mut self, registry: &Registry) -> io::Result<()> { SourceFd(&self.kq).deregister(registry) } } impl Drop for Signals { fn drop(&mut self) { // Reverse the ignoring of signals. if let Err(err) = unignore_signals(self.signals) { error!("error resetting signal action: {}", err); } if unsafe { libc::close(self.kq) } == -1 { // Possible errors: // - EBADF, EIO: can't recover. // - EINTR: could try again but we're can't be sure if the file // descriptor was closed or not, so to be safe we don't // close it again. let err = io::Error::last_os_error(); error!("error closing Signals: {}", err); } } }
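// A hedged polling sketch in the mio 0.7 style matched by the `event::Source`
// impl above; the signal set and error handling are illustrative.
#[allow(dead_code)]
fn poll_signals() -> std::io::Result<()> {
    use mio::{Events, Poll};

    const SIGNALS: Token = Token(0);
    let mut poll = Poll::new()?;
    let mut signals = Signals::new(SignalSet::all())?;
    poll.registry()
        .register(&mut signals, SIGNALS, Interest::READABLE)?;

    let mut events = Events::with_capacity(8);
    poll.poll(&mut events, None)?;
    for event in events.iter() {
        if event.token() == SIGNALS {
            // Drain everything that is pending; `receive` never blocks.
            while let Some(signal) = signals.receive()? {
                let _ = signal; // handle the signal here
            }
        }
    }
    Ok(())
}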
32.236715
95
0.562116
acbfe027fc1c714adfa7e3f1421f85e7b9526ae0
8,142
use std::collections::hash_map::DefaultHasher; use std::hash::{Hash, Hasher}; use std::path::{Path, PathBuf}; use std::process::{exit, Command, Stdio}; use structopt::StructOpt; use toml::Value; use log::{error, info, warn}; const PROGRESS_FLAG: &str = "--info=progress2"; #[derive(StructOpt, Debug)] #[structopt(name = "cargo-remote", bin_name = "cargo")] enum Opts { #[structopt(name = "remote")] Remote { #[structopt(short = "r", long = "remote", help = "Remote ssh build server")] remote: Option<String>, #[structopt( short = "b", long = "build-env", help = "Set remote environment variables. RUST_BACKTRACE, CC, LIB, etc. ", default_value = "RUST_BACKTRACE=1" )] build_env: String, #[structopt( short = "d", long = "rustup-default", help = "Rustup default (stable|beta|nightly)", default_value = "stable" )] rustup_default: String, #[structopt( short = "e", long = "env", help = "Environment profile. default_value = /etc/profile", default_value = "/etc/profile" )] env: String, #[structopt( short = "c", long = "copy-back", help = "Transfer the target folder or specific file from that folder back to the local machine" )] copy_back: Option<Option<String>>, #[structopt( long = "no-copy-lock", help = "don't transfer the Cargo.lock file back to the local machine" )] no_copy_lock: bool, #[structopt( long = "manifest-path", help = "Path to the manifest to execute", default_value = "Cargo.toml", parse(from_os_str) )] manifest_path: PathBuf, #[structopt( short = "h", long = "transfer-hidden", help = "Transfer hidden files and directories to the build server" )] hidden: bool, #[structopt(help = "cargo command that will be executed remotely")] command: String, #[structopt( help = "cargo options and flags that will be applied remotely", name = "remote options" )] options: Vec<String>, }, } /// Tries to parse the file [`config_path`]. Logs warnings and returns [`None`] if errors occur /// during reading or parsing, [`Some(Value)`] otherwise. 
fn config_from_file(config_path: &Path) -> Option<Value> { let config_file = std::fs::read_to_string(config_path) .map_err(|e| { warn!( "Can't parse config file '{}' (error: {})", config_path.to_string_lossy(), e ); }) .ok()?; let value = config_file .parse::<Value>() .map_err(|e| { warn!( "Can't parse config file '{}' (error: {})", config_path.to_string_lossy(), e ); }) .ok()?; Some(value) } fn main() { simple_logger::init().unwrap(); let Opts::Remote { remote, build_env, rustup_default, env, copy_back, no_copy_lock, manifest_path, hidden, command, options, } = Opts::from_args(); let mut metadata_cmd = cargo_metadata::MetadataCommand::new(); metadata_cmd.manifest_path(manifest_path).no_deps(); let project_metadata = metadata_cmd.exec().unwrap(); let project_dir = project_metadata.workspace_root; info!("Project dir: {:?}", project_dir); let configs = vec![ config_from_file(&project_dir.join(".cargo-remote.toml")), xdg::BaseDirectories::with_prefix("cargo-remote") .ok() .and_then(|base| base.find_config_file("cargo-remote.toml")) .and_then(|p: PathBuf| config_from_file(&p)), ]; // TODO: move Opts::Remote fields into own type and implement complete_from_config(&mut self, config: &Value) let build_server = remote .or_else(|| { configs .into_iter() .flat_map(|config| config.and_then(|c| c["remote"].as_str().map(String::from))) .next() }) .unwrap_or_else(|| { error!("No remote build server was defined (use config file or --remote flag)"); exit(-3); }); // generate a unique build path by using the hashed project dir as folder on the remote machine let mut hasher = DefaultHasher::new(); project_dir.hash(&mut hasher); let build_path = format!("~/remote-builds/{}/", hasher.finish()); info!("Transferring sources to build server."); // transfer project to build server let mut rsync_to = Command::new("rsync"); rsync_to .arg("-a".to_owned()) .arg("--delete") .arg("-zz") .arg("--info=progress2") .arg("--exclude") .arg("target"); if !hidden { rsync_to.arg("--exclude").arg(".*"); } rsync_to .arg("--rsync-path") .arg("mkdir -p remote-builds && rsync") .arg(format!("{}/", project_dir.to_string_lossy())) .arg(format!("{}:{}", build_server, build_path)) .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .stdin(Stdio::inherit()) .output() .unwrap_or_else(|e| { error!("Failed to transfer project to build server (error: {})", e); exit(-4); }); info!("Build ENV: {:?}", build_env); info!("Environment profile: {:?}", env); info!("Build path: {:?}", build_path); let build_command = format!( "source {}; rustup default {}; cd {}; {} cargo {} {}", env, rustup_default, build_path, build_env, command, options.join(" ") ); info!("Starting build process."); let output = Command::new("ssh") .arg("-t") .arg(&build_server) .arg(build_command) .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .stdin(Stdio::inherit()) .output() .unwrap_or_else(|e| { error!("Failed to run cargo command remotely (error: {})", e); exit(-5); }); if let Some(file_name) = copy_back { info!("Transferring artifacts back to client."); let file_name = file_name.unwrap_or_else(String::new); Command::new("rsync") .arg("-a") .arg("--delete") .arg("-zz") .arg("--info=progress2") .arg(format!( "{}:{}/target/{}", build_server, build_path, file_name )) .arg(format!( "{}/target/{}", project_dir.to_string_lossy(), file_name )) .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .stdin(Stdio::inherit()) .output() .unwrap_or_else(|e| { error!( "Failed to transfer target back to local machine (error: {})", e ); exit(-6); }); } if !no_copy_lock { 
info!("Transferring Cargo.lock file back to client."); Command::new("rsync") .arg("-a") .arg("--delete") .arg("-zz") .arg("--info=progress2") .arg(format!("{}:{}/Cargo.lock", build_server, build_path)) .arg(format!("{}/Cargo.lock", project_dir.to_string_lossy())) .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .stdin(Stdio::inherit()) .output() .unwrap_or_else(|e| { error!( "Failed to transfer Cargo.lock back to local machine (error: {})", e ); exit(-7); }); } if !output.status.success() { exit(output.status.code().unwrap_or(1)) } }
30.267658
113
0.509948
fb70d51c75f011ee3d4b5a3f79788f03cde78d86
4,623
use volatile::Volatile;
use core::fmt;

#[macro_export]
macro_rules! print {
    ($($arg:tt)*) => ($crate::vga_buffer::print(format_args!($($arg)*)));
}

#[macro_export]
macro_rules! println {
    () => (print!("\n"));
    ($fmt:expr) => (print!(concat!($fmt, "\n")));
    ($fmt:expr, $($arg:tt)*) => (print!(concat!($fmt, "\n"), $($arg)*));
}

/// Prints the given formatted string to the VGA text buffer
/// through the global `WRITER` instance.
pub fn print(args: fmt::Arguments) {
    use core::fmt::Write;
    use x86_64::instructions::interrupts;

    interrupts::without_interrupts(|| {
        WRITER.lock().write_fmt(args).unwrap();
    });
}

#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum Color {
    Black = 0,
    Blue = 1,
    Green = 2,
    Cyan = 3,
    Red = 4,
    Magenta = 5,
    Brown = 6,
    LightGray = 7,
    DarkGray = 8,
    LightBlue = 9,
    LightGreen = 10,
    LightCyan = 11,
    LightRed = 12,
    Pink = 13,
    Yellow = 14,
    White = 15,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ColorCode(u8);

impl ColorCode {
    fn new(foreground: Color, background: Color) -> ColorCode {
        ColorCode((background as u8) << 4 | (foreground as u8))
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(C)]
struct ScreenChar {
    ascii_character: u8,
    color_code: ColorCode,
}

const BUFFER_HEIGHT: usize = 25;
const BUFFER_WIDTH: usize = 80;

struct Buffer {
    chars: [[Volatile<ScreenChar>; BUFFER_WIDTH]; BUFFER_HEIGHT],
}

use spin::Mutex;

lazy_static! {
    pub static ref WRITER: Mutex<Writer> = Mutex::new(Writer {
        column_position: 0,
        color_code: ColorCode::new(Color::Yellow, Color::Black),
        buffer: unsafe { &mut *(0xb8000 as *mut Buffer) },
    });
}

pub struct Writer {
    column_position: usize,
    color_code: ColorCode,
    buffer: &'static mut Buffer,
}

impl Writer {
    pub fn write_string(&mut self, s: &str) {
        for byte in s.bytes() {
            match byte {
                // printable ASCII byte or newline
                0x20..=0x7e | b'\n' => self.write_byte(byte),
                // not part of printable ASCII range
                _ => self.write_byte(0xfe),
            }
        }
    }

    pub fn write_byte(&mut self, byte: u8) {
        match byte {
            b'\n' => self.new_line(),
            byte => {
                if self.column_position >= BUFFER_WIDTH {
                    self.new_line();
                }

                let row = BUFFER_HEIGHT - 1;
                let col = self.column_position;

                let color_code = self.color_code;
                self.buffer.chars[row][col].write(ScreenChar {
                    ascii_character: byte,
                    color_code: color_code,
                });
                self.column_position += 1;
            }
        }
    }

    fn new_line(&mut self) {
        for row in 1..BUFFER_HEIGHT {
            for col in 0..BUFFER_WIDTH {
                let character = self.buffer.chars[row][col].read();
                self.buffer.chars[row - 1][col].write(character);
            }
        }
        self.clear_row(BUFFER_HEIGHT - 1);
        self.column_position = 0;
    }

    fn clear_row(&mut self, row: usize) {
        let blank = ScreenChar {
            ascii_character: b' ',
            color_code: self.color_code,
        };
        for col in 0..BUFFER_WIDTH {
            self.buffer.chars[row][col].write(blank);
        }
    }
}

impl fmt::Write for Writer {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        self.write_string(s);
        Ok(())
    }
}

#[cfg(test)]
mod test {
    use super::*;

    fn construct_writer() -> Writer {
        use std::boxed::Box;

        let buffer = construct_buffer();
        Writer {
            column_position: 0,
            color_code: ColorCode::new(Color::Blue, Color::Magenta),
            buffer: Box::leak(Box::new(buffer)),
        }
    }

    fn construct_buffer() -> Buffer {
        use array_init::array_init;

        Buffer {
            chars: array_init(|_| array_init(|_| Volatile::new(empty_char()))),
        }
    }

    fn empty_char() -> ScreenChar {
        ScreenChar {
            ascii_character: b' ',
            color_code: ColorCode::new(Color::Green, Color::Brown),
        }
    }

    #[test]
    fn write_byte() {
        let mut writer = construct_writer();
        writer.write_byte(b'X');
        writer.write_byte(b'Y');

        for (i, row) in writer.buffer.chars.iter().enumerate() {
            for (j, screen_char) in row.iter().enumerate() {
                let screen_char = screen_char.read();
                if i == BUFFER_HEIGHT - 1 && j == 0 {
                    assert_eq!(screen_char.ascii_character, b'X');
                    assert_eq!(screen_char.color_code, writer.color_code);
                } else if i == BUFFER_HEIGHT - 1 && j == 1 {
                    assert_eq!(screen_char.ascii_character, b'Y');
                    assert_eq!(screen_char.color_code, writer.color_code);
                } else {
                    assert_eq!(screen_char, empty_char());
                }
            }
        }
    }
}
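// A minimal usage sketch: once the macros above are exported from the crate
// root, kernel code can print through the global `WRITER`:
//
// ```rust
// print!("column ");
// println!("{} of {}", 17, BUFFER_WIDTH);
// ```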
22.886139
73
0.59615
1acb60e817298bc2860e790592b76edfd92deb64
3,277
use cm_fidl_translator; use failure::Error; use fidl_fuchsia_data as fd; use fidl_fuchsia_sys2::{ ChildDecl, ChildRef, CollectionDecl, CollectionRef, ComponentDecl, Durability, ExposeDecl, ExposeDirectoryDecl, OfferDecl, OfferServiceDecl, RealmRef, Ref, SelfRef, StartupMode, UseDecl, UseServiceDecl, }; use std::fs::File; use std::io::Read; use std::path::PathBuf; fn main() { let cm_content = read_cm("/pkg/meta/example.cm").expect("could not open example.cm"); let golden_cm = read_cm("/pkg/data/golden.cm").expect("could not open golden.cm"); assert_eq!(&cm_content, &golden_cm); let cm_decl = cm_fidl_translator::translate(&cm_content).expect("could not translate cm"); let expected_decl = { let program = fd::Dictionary { entries: vec![fd::Entry { key: "binary".to_string(), value: Some(Box::new(fd::Value::Str("bin/example".to_string()))), }], }; let uses = vec![UseDecl::Service(UseServiceDecl { source: Some(Ref::Realm(RealmRef {})), source_path: Some("/fonts/CoolFonts".to_string()), target_path: Some("/svc/fuchsia.fonts.Provider".to_string()), })]; let exposes = vec![ExposeDecl::Directory(ExposeDirectoryDecl { source: Some(Ref::Self_(SelfRef {})), source_path: Some("/volumes/blobfs".to_string()), target_path: Some("/volumes/blobfs".to_string()), })]; let offers = vec![OfferDecl::Service(OfferServiceDecl { source: Some(Ref::Child(ChildRef { name: "logger".to_string(), collection: None, })), source_path: Some("/svc/fuchsia.logger.Log".to_string()), target: Some(Ref::Collection(CollectionRef { name: "modular".to_string() })), target_path: Some("/svc/fuchsia.logger.Log".to_string()), })]; let children = vec![ChildDecl { name: Some("logger".to_string()), url: Some("fuchsia-pkg://fuchsia.com/logger/stable#meta/logger.cm".to_string()), startup: Some(StartupMode::Lazy), }]; let collections = vec![CollectionDecl { name: Some("modular".to_string()), durability: Some(Durability::Persistent), }]; let facets = fd::Dictionary { entries: vec![ fd::Entry { key: "author".to_string(), value: Some(Box::new(fd::Value::Str("Fuchsia".to_string()))), }, fd::Entry { key: "year".to_string(), value: Some(Box::new(fd::Value::Inum(2018))) }, ], }; // TODO: test storage ComponentDecl { program: Some(program), uses: Some(uses), exposes: Some(exposes), offers: Some(offers), children: Some(children), collections: Some(collections), facets: Some(facets), storage: None, } }; assert_eq!(cm_decl, expected_decl); } fn read_cm(file: &str) -> Result<String, Error> { let mut buffer = String::new(); let path = PathBuf::from(file); File::open(&path)?.read_to_string(&mut buffer)?; Ok(buffer) }
39.011905
100
0.569423
f869102406c55ee683a6ac149b45c6cc6e4f01ec
2,011
use crate::elementwise::elementwise_single::{UnaryElementwise, UnaryFunc}; use alumina_core::{ base_ops::OpSpecification, errors::{GradientError, OpBuildError}, grad::GradientContext, graph::{Node, NodeID}, }; /// Returns the sign of the input. /// /// The output node has the same shape as the input. pub fn sign<I>(input: I) -> Result<Node, OpBuildError> where I: Into<Node>, { let input = input.into(); let output = input .graph() .new_node(input.shape()) .set_name_unique(&format!("sign({})", input)); let _op = Sign::new_default(input, output.clone()).build()?; Ok(output) } pub type Sign = UnaryElementwise<SignFunc>; #[derive(Clone, Debug, Default)] pub struct SignFunc {} impl UnaryFunc for SignFunc { #[inline] fn calc(&self, input: f32) -> f32 { if input.is_nan() || input == 0.0 { 0.0 } else { input.signum() } } fn type_name(&self) -> &'static str { "Sign" } fn grad(&self, _ctx: &mut GradientContext, _input: &NodeID, _output: &NodeID) -> Result<(), GradientError> { Ok(()) } } #[cfg(test)] mod tests { use super::sign; use alumina_core::graph::Node; use alumina_test::{grad_numeric_test::GradNumericTest, relatively_close::RelClose}; use indexmap::indexset; use ndarray::arr0; #[test] fn forward_test() { let input = Node::new(&[13, 33]).set_name("input"); let output = sign(&input).unwrap(); input.set_value(arr0(1.25)); assert!(output .calc() .unwrap() .all_relatively_close(&arr0(1.0), ::std::f32::EPSILON)); input.set_value(arr0(-0.8)); assert!(output .calc() .unwrap() .all_relatively_close(&arr0(-1.0), ::std::f32::EPSILON)); input.set_value(arr0(-0.0)); assert!(output .calc() .unwrap() .all_relatively_close(&arr0(0.0), ::std::f32::EPSILON)); } #[test] fn grad_numeric_test() { let input = Node::new(&[13, 33]).set_name("input"); let output = sign(&input).unwrap(); GradNumericTest::new(&output, &indexset![&input]) .expect_zero(&input, ::std::f32::EPSILON) .run(); } }
21.623656
109
0.647936
e2c43a527e7a6e0b6f0c9d3f0e541d8202a10a10
8,479
// //! Copyright 2020 Alibaba Group Holding Limited. //! //! Licensed under the Apache License, Version 2.0 (the "License"); //! you may not use this file except in compliance with the License. //! You may obtain a copy of the License at //! //! http://www.apache.org/licenses/LICENSE-2.0 //! //! Unless required by applicable law or agreed to in writing, software //! distributed under the License is distributed on an "AS IS" BASIS, //! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //! See the License for the specific language governing permissions and //! limitations under the License. use std::any::TypeId; use std::fmt::Debug; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Instant; use pegasus_executor::{Task, TaskState}; use crate::api::primitive::source::Source; use crate::channel_id::ChannelId; use crate::communication::output::{OutputBuilder, OutputBuilderImpl}; use crate::dataflow::{Dataflow, DataflowBuilder}; use crate::errors::{BuildJobError, JobExecError}; use crate::event::emitter::EventEmitter; use crate::event::Event; use crate::graph::Port; use crate::progress::{EndSignal, Weight}; use crate::resource::{KeyedResources, ResourceMap}; use crate::result::ResultSink; use crate::schedule::Schedule; use crate::{Data, JobConf, Tag, WorkerId}; pub struct Worker<D: Data, T: Debug + Send + 'static> { pub conf: Arc<JobConf>, pub id: WorkerId, task: WorkerTask, peer_guard: Arc<AtomicUsize>, start: Instant, sink: ResultSink<T>, resources: ResourceMap, keyed_resources: KeyedResources, _ph: std::marker::PhantomData<D>, } impl<D: Data, T: Debug + Send + 'static> Worker<D, T> { pub(crate) fn new( conf: &Arc<JobConf>, id: WorkerId, peer_guard: &Arc<AtomicUsize>, sink: ResultSink<T>, ) -> Self { if peer_guard.fetch_add(1, Ordering::SeqCst) == 0 { pegasus_memory::alloc::new_task(conf.job_id as usize); } Worker { conf: conf.clone(), id, task: WorkerTask::Empty, peer_guard: peer_guard.clone(), start: Instant::now(), sink, resources: ResourceMap::default(), keyed_resources: KeyedResources::default(), _ph: std::marker::PhantomData, } } pub fn dataflow<F>(&mut self, func: F) -> Result<(), BuildJobError> where F: FnOnce(&mut Source<D>, ResultSink<T>) -> Result<(), BuildJobError>, { // set current worker's id into tls variable to make it accessible at anywhere; let _g = crate::worker_id::guard(self.id); let resource = crate::communication::build_channel::<Event>(ChannelId::new(self.id.job_id, 0), &self.conf)?; assert_eq!(resource.ch_id.index, 0); let (tx, rx) = resource.take(); let event_emitter = EventEmitter::new(tx); let dfb = DataflowBuilder::new(self.id, event_emitter.clone(), &self.conf); let root_builder = OutputBuilderImpl::new( Port::new(0, 0), 0, self.conf.batch_size as usize, self.conf.batch_capacity, self.conf.scope_capacity, ); let mut input = Source::new(root_builder.copy_data(), &dfb); let output = self.sink.clone(); func(&mut input, output)?; let mut sch = Schedule::new(event_emitter, rx); let df = dfb.build(&mut sch)?; self.task = WorkerTask::Dataflow(df, sch); let root = Box::new(root_builder) .build() .expect("no output;"); let end = EndSignal::new(Tag::Root, Weight::all()); root.notify_end(end).ok(); root.close().ok(); Ok(()) } pub fn add_resource<R: Send + Sync + 'static>(&mut self, resource: R) { let type_id = TypeId::of::<R>(); self.resources .insert(type_id, Box::new(resource)); } pub fn add_resource_with_key<R: Send + Sync + 'static>(&mut self, key: String, resource: R) { self.keyed_resources .insert(key, 
Box::new(resource)); } fn check_cancel(&self) -> bool { // TODO: check cancel impl; false } #[cfg(not(feature = "mem"))] fn release(&mut self) { self.peer_guard.fetch_sub(1, Ordering::SeqCst); } } enum WorkerTask { Empty, Dataflow(Dataflow, Schedule), } impl WorkerTask { pub fn execute(&mut self) -> Result<TaskState, JobExecError> { match self { WorkerTask::Empty => Ok(TaskState::Finished), WorkerTask::Dataflow(df, sch) => { sch.step(df)?; if df.check_finish() { sch.close()?; Ok(TaskState::Finished) } else if df.is_idle()? { Ok(TaskState::NotReady) } else { Ok(TaskState::Ready) } } } } pub fn check_ready(&mut self) -> Result<TaskState, JobExecError> { match self { WorkerTask::Empty => Ok(TaskState::Finished), WorkerTask::Dataflow(df, sch) => { sch.try_notify()?; if df.is_idle()? { Ok(TaskState::NotReady) } else { Ok(TaskState::Ready) } } } } } struct WorkerContext<'a> { resource: Option<&'a mut ResourceMap>, keyed_resources: Option<&'a mut KeyedResources>, } impl<'a> WorkerContext<'a> { fn new(res: &'a mut ResourceMap, key_res: &'a mut KeyedResources) -> Self { let resource = if !res.is_empty() { let reset = std::mem::replace(res, Default::default()); let pre = crate::resource::replace_resource(reset); assert!(pre.is_empty()); Some(res) } else { None }; let keyed_resources = if !key_res.is_empty() { let reset = std::mem::replace(key_res, Default::default()); let pre = crate::resource::replace_keyed_resources(reset); assert!(pre.is_empty()); Some(key_res) } else { None }; WorkerContext { resource, keyed_resources } } } impl<'a> Drop for WorkerContext<'a> { fn drop(&mut self) { if let Some(res) = self.resource.take() { let my_res = crate::resource::replace_resource(Default::default()); let _r = std::mem::replace(res, my_res); } if let Some(res) = self.keyed_resources.take() { let my_res = crate::resource::replace_keyed_resources(Default::default()); let _r = std::mem::replace(res, my_res); } } } impl<D: Data, T: Debug + Send + 'static> Task for Worker<D, T> { fn execute(&mut self) -> TaskState { let _g = crate::worker_id::guard(self.id); if self.check_cancel() { return TaskState::Finished; } let _ctx = WorkerContext::new(&mut self.resources, &mut self.keyed_resources); match self.task.execute() { Ok(state) => { if TaskState::Finished == state { info_worker!( "job({}) '{}' finished, used {:?};", self.id.job_id, self.conf.job_name, self.start.elapsed() ) } state } Err(e) => { self.sink.on_error(e); TaskState::Finished } } } fn check_ready(&mut self) -> TaskState { let _g = crate::worker_id::guard(self.id); if self.check_cancel() { return TaskState::Finished; } match self.task.check_ready() { Ok(state) => { if TaskState::Finished == state { info_worker!( "job({}) '{}' finished, used {:?};", self.id.job_id, self.conf.job_name, self.start.elapsed() ); } state } Err(e) => { self.sink.on_error(e); TaskState::Finished } } } } impl<D: Data, T: Debug + Send + 'static> Drop for Worker<D, T> { fn drop(&mut self) { self.release(); } }
32.117424
105
0.541573
760b371ec7aed2bb170c157a9b0e4cd4e3aaa82d
801
use std::rc::Rc;

use wasm_bindgen::prelude::*;
use wasm_bindgen::JsCast;
use web_sys::*;
use web_sys::WebGl2RenderingContext as GL;

pub fn create_canvas(parent: HtmlElement, width: u32, height: u32) -> Result<HtmlCanvasElement, JsValue> {
    let window = window().unwrap();
    let document = window.document().unwrap();

    let canvas: HtmlCanvasElement = document.create_element("canvas")?.dyn_into()?;
    canvas.set_width(width);
    canvas.set_height(height);
    parent.append_child(&canvas)?;

    Ok(canvas)
}

pub fn create_webgl_context(canvas: &HtmlCanvasElement) -> Result<WebGl2RenderingContext, JsValue> {
    let gl: WebGl2RenderingContext = canvas.get_context("webgl2")?.unwrap().dyn_into()?;

    gl.clear_color(0.0, 0.0, 0.0, 1.0);
    gl.enable(GL::DEPTH_TEST);

    Ok(gl)
}
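// A hedged usage sketch from a wasm entry point; it assumes the page has a
// <body> element to attach the canvas to.
#[allow(dead_code)]
fn demo() -> Result<(), JsValue> {
    let body = window().unwrap().document().unwrap().body().unwrap();
    let canvas = create_canvas(body, 640, 480)?;
    let gl = create_webgl_context(&canvas)?;
    gl.clear(GL::COLOR_BUFFER_BIT | GL::DEPTH_BUFFER_BIT);
    Ok(())
}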
26.7
106
0.700375
dd0f1dcb4c5e288c9c45e60e8aa085c4c519eb04
613
//! The Undead Graphics Library.
//!
//! `ugly` is a work-in-progress pile of abstractions over, primarily, creating proportional pixel
//! font based user interfaces. It doesn't do much yet, but we're working on that.
#![warn(clippy::all, clippy::pedantic)]

pub mod backends;
pub mod colour;
pub mod error;
pub mod font;
pub mod metrics;
pub mod render;
pub mod resource;
pub mod text;

// Generally, we re-export anything where the name would stutter; these tend to be the most
// important type in the respective module anyway.
pub use error::{Error, Result};
pub use font::Font;
pub use render::Renderer;
26.652174
98
0.735726
01cf6b61efef644a891de77343614c1316e09fa1
3,093
use super::GetChannelMessagesConfigured;
use crate::request::prelude::*;
use std::{
    error::Error,
    fmt::{Display, Formatter, Result as FmtResult},
};
use twilight_model::{
    channel::Message,
    id::{ChannelId, MessageId},
};

#[derive(Clone, Debug)]
pub enum GetChannelMessagesError {
    /// The maximum number of messages to retrieve is either 0 or more than 100.
    LimitInvalid,
}

impl Display for GetChannelMessagesError {
    fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
        match self {
            Self::LimitInvalid => f.write_str("the limit is invalid"),
        }
    }
}

impl Error for GetChannelMessagesError {}

#[derive(Default)]
struct GetChannelMessagesFields {
    limit: Option<u64>,
}

pub struct GetChannelMessages<'a> {
    channel_id: ChannelId,
    fields: GetChannelMessagesFields,
    fut: Option<Pending<'a, Vec<Message>>>,
    http: &'a Client,
}

impl<'a> GetChannelMessages<'a> {
    pub(crate) fn new(http: &'a Client, channel_id: ChannelId) -> Self {
        Self {
            channel_id,
            fields: GetChannelMessagesFields::default(),
            fut: None,
            http,
        }
    }

    pub fn after(self, message_id: MessageId) -> GetChannelMessagesConfigured<'a> {
        GetChannelMessagesConfigured::new(
            self.http,
            self.channel_id,
            Some(message_id),
            None,
            None,
            self.fields.limit,
        )
    }

    pub fn around(self, message_id: MessageId) -> GetChannelMessagesConfigured<'a> {
        GetChannelMessagesConfigured::new(
            self.http,
            self.channel_id,
            None,
            Some(message_id),
            None,
            self.fields.limit,
        )
    }

    pub fn before(self, message_id: MessageId) -> GetChannelMessagesConfigured<'a> {
        GetChannelMessagesConfigured::new(
            self.http,
            self.channel_id,
            None,
            None,
            Some(message_id),
            self.fields.limit,
        )
    }

    /// Set the maximum number of messages to retrieve.
    ///
    /// The minimum is 1 and the maximum is 100.
    ///
    /// # Errors
    ///
    /// Returns [`GetChannelMessagesError::LimitInvalid`] if the
    /// amount is less than 1 or greater than 100.
    ///
    /// [`GetChannelMessagesError::LimitInvalid`]: enum.GetChannelMessagesError.html#variant.LimitInvalid
    pub fn limit(mut self, limit: u64) -> Result<Self, GetChannelMessagesError> {
        if !validate::get_channel_messages_limit(limit) {
            return Err(GetChannelMessagesError::LimitInvalid);
        }

        self.fields.limit.replace(limit);

        Ok(self)
    }

    fn start(&mut self) -> Result<()> {
        self.fut.replace(Box::pin(self.http.request(Request::from(
            Route::GetMessages {
                after: None,
                around: None,
                before: None,
                channel_id: self.channel_id.0,
                limit: self.fields.limit,
            },
        ))));

        Ok(())
    }
}

poll_req!(GetChannelMessages<'_>, Vec<Message>);
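// A hedged usage sketch; it assumes the crate's usual entry point
// `Client::channel_messages(ChannelId)` constructs this request, and that
// `client` and `channel_id` already exist.
//
// ```rust
// let messages = client
//     .channel_messages(channel_id)
//     .limit(50)?
//     .await?;
// ```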
25.991597
95
0.576786
e5c8fee4cd59dbfeda06c4ebff29efaa0b212407
21,444
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use { crate::input_device::{self, InputDeviceBinding, InputEvent}, crate::utils::{Position, Size}, anyhow::{format_err, Error}, async_trait::async_trait, fidl_fuchsia_input_report as fidl, fidl_fuchsia_input_report::{InputDeviceProxy, InputReport}, fidl_fuchsia_ui_input as fidl_ui_input, fuchsia_syslog::fx_log_err, futures::channel::mpsc::Sender, maplit::hashmap, std::collections::HashMap, std::iter::FromIterator, }; /// A [`TouchEvent`] represents a set of contacts and the phase those contacts are in. /// /// For example, when a user touches a touch screen with two fingers, there will be two /// [`TouchContact`]s. When a user removes one finger, there will still be two contacts /// but one will be reported as removed. /// /// The expected sequence for any given contact is: /// 1. [`fidl_fuchsia_ui_input::PointerEventPhase::Add`] /// 2. [`fidl_fuchsia_ui_input::PointerEventPhase::Down`] /// 3. 0 or more [`fidl_fuchsia_ui_input::PointerEventPhase::Move`] /// 4. [`fidl_fuchsia_ui_input::PointerEventPhase::Up`] /// 5. [`fidl_fuchsia_ui_input::PointerEventPhase::Remove`] /// /// Additionally, a [`fidl_fuchsia_ui_input::PointerEventPhase::Cancel`] may be sent at any time /// signalling that the event is no longer directed towards the receiver. #[derive(Clone, Debug, PartialEq)] pub struct TouchEvent { /// The contacts associated with the touch event. For example, a two-finger touch would result /// in one touch event with two [`TouchContact`]s. /// /// Contacts are grouped based on their current phase (e.g., down, move). pub contacts: HashMap<fidl_ui_input::PointerEventPhase, Vec<TouchContact>>, } /// A [`TouchContact`] represents a single contact (e.g., one touch of a multi-touch gesture) related /// to a touch event. #[derive(Clone, Copy, Debug, PartialEq)] pub struct TouchContact { /// The identifier of the contact. Unique per touch device. pub id: u32, /// The position of the touch event, in the units of the associated /// [`ContactDeviceDescriptor`]'s `range`. pub position: Position, /// The pressure associated with the contact, in the units of the associated /// [`ContactDeviceDescriptor`]'s `pressure_range`. pub pressure: Option<i64>, /// The size of the touch event, in the units of the associated /// [`ContactDeviceDescriptor`]'s `range`. 
pub contact_size: Option<Size>, } impl TouchContact { pub fn new( id: u32, position: Position, pressure: Option<i64>, contact_size: Option<Size>, ) -> Self { Self { id, position, pressure, contact_size } } pub fn position(&self) -> Position { self.position } pub fn contact_size(&self) -> Option<Size> { self.contact_size } } impl Eq for TouchContact {} impl From<&fidl_fuchsia_input_report::ContactInputReport> for TouchContact { fn from(fidl_contact: &fidl_fuchsia_input_report::ContactInputReport) -> TouchContact { let contact_size = if fidl_contact.contact_width.is_some() && fidl_contact.contact_height.is_some() { Some(Size { width: fidl_contact.contact_width.unwrap() as f32, height: fidl_contact.contact_height.unwrap() as f32, }) } else { None }; TouchContact { id: fidl_contact.contact_id.unwrap_or_default(), position: Position { x: fidl_contact.position_x.unwrap_or_default() as f32, y: fidl_contact.position_y.unwrap_or_default() as f32, }, pressure: fidl_contact.pressure, contact_size, } } } #[derive(Clone, Debug, PartialEq)] pub struct TouchDeviceDescriptor { /// The id of the connected touch input device. pub device_id: u32, /// The descriptors for the possible contacts associated with the device. pub contacts: Vec<ContactDeviceDescriptor>, } /// A [`ContactDeviceDescriptor`] describes the possible values touch contact properties can take on. /// /// This descriptor can be used, for example, to determine where on a screen a touch made contact. /// /// # Example /// /// ``` /// // Determine the scaling factor between the display and the touch device's x range. /// let scaling_factor = /// display_width / (contact_descriptor._x_range.end - contact_descriptor._x_range.start); /// // Use the scaling factor to scale the contact report's x position. /// let hit_location = scaling_factor * contact_report.position_x; /// ``` #[derive(Clone, Debug, PartialEq)] pub struct ContactDeviceDescriptor { /// The range of possible x values for this touch contact. pub x_range: fidl::Range, /// The range of possible y values for this touch contact. pub y_range: fidl::Range, /// The range of possible pressure values for this touch contact. pub pressure_range: Option<fidl::Range>, /// The range of possible widths for this touch contact. pub width_range: Option<fidl::Range>, /// The range of possible heights for this touch contact. pub height_range: Option<fidl::Range>, } /// A [`TouchBinding`] represents a connection to a touch input device. /// /// The [`TouchBinding`] parses and exposes touch descriptor properties (e.g., the range of /// possible x values for touch contacts) for the device it is associated with. /// It also parses [`InputReport`]s from the device, and sends them to the device binding owner over /// `event_sender`. pub struct TouchBinding { /// The channel to stream InputEvents to. event_sender: Sender<InputEvent>, /// Holds information about this device. device_descriptor: TouchDeviceDescriptor, } #[async_trait] impl input_device::InputDeviceBinding for TouchBinding { fn input_event_sender(&self) -> Sender<InputEvent> { self.event_sender.clone() } fn get_device_descriptor(&self) -> input_device::InputDeviceDescriptor { input_device::InputDeviceDescriptor::Touch(self.device_descriptor.clone()) } } impl TouchBinding { /// Creates a new [`InputDeviceBinding`] from the `device_proxy`. /// /// The binding will start listening for input reports immediately and send new InputEvents /// to the device binding owner over `input_event_sender`. 
/// /// # Parameters /// - `device_proxy`: The proxy to bind the new [`InputDeviceBinding`] to. /// - `input_event_sender`: The channel to send new InputEvents to. /// /// # Errors /// If there was an error binding to the proxy. pub async fn new( device_proxy: InputDeviceProxy, input_event_sender: Sender<input_device::InputEvent>, ) -> Result<Self, Error> { let device_binding = Self::bind_device(&device_proxy, input_event_sender).await?; input_device::initialize_report_stream( device_proxy, device_binding.get_device_descriptor(), device_binding.input_event_sender(), Self::process_reports, ); Ok(device_binding) } /// Binds the provided input device to a new instance of `Self`. /// /// # Parameters /// - `device`: The device to use to initialize the binding. /// - `input_event_sender`: The channel to send new InputEvents to. /// /// # Errors /// If the device descriptor could not be retrieved, or the descriptor could not be parsed /// correctly. async fn bind_device( device: &InputDeviceProxy, input_event_sender: Sender<input_device::InputEvent>, ) -> Result<Self, Error> { let device_descriptor: fidl_fuchsia_input_report::DeviceDescriptor = device.get_descriptor().await?; match device_descriptor.touch { Some(fidl_fuchsia_input_report::TouchDescriptor { input: Some(fidl_fuchsia_input_report::TouchInputDescriptor { contacts: Some(contact_descriptors), max_contacts: _, touch_type: _, buttons: _, }), }) => Ok(TouchBinding { event_sender: input_event_sender, device_descriptor: TouchDeviceDescriptor { device_id: 0, contacts: contact_descriptors .iter() .map(TouchBinding::parse_contact_descriptor) .filter_map(Result::ok) .collect(), }, }), descriptor => Err(format_err!("Touch Descriptor failed to parse: \n {:?}", descriptor)), } } /// Parses an [`InputReport`] into one or more [`InputEvent`]s. /// /// The [`InputEvent`]s are sent to the device binding owner via [`input_event_sender`]. /// /// # Parameters /// - `report`: The incoming [`InputReport`]. /// - `previous_report`: The previous [`InputReport`] seen for the same device. This can be /// used to determine, for example, which keys are no longer present in /// a keyboard report to generate key released events. If `None`, no /// previous report was found. /// - `device_descriptor`: The descriptor for the input device generating the input reports. /// - `input_event_sender`: The sender for the device binding's input event stream. /// /// # Returns /// An [`InputReport`] which will be passed to the next call to [`process_reports`], as /// [`previous_report`]. If `None`, the next call's [`previous_report`] will be `None`. fn process_reports( report: InputReport, previous_report: Option<InputReport>, device_descriptor: &input_device::InputDeviceDescriptor, input_event_sender: &mut Sender<InputEvent>, ) -> Option<InputReport> { // Input devices can have multiple types so ensure `report` is a TouchInputReport. let touch_report: &fidl_fuchsia_input_report::TouchInputReport = match &report.touch { Some(touch) => touch, None => { return previous_report; } }; let previous_contacts: HashMap<u32, TouchContact> = previous_report .as_ref() .and_then(|unwrapped_report| unwrapped_report.touch.as_ref()) .map(touch_contacts_from_touch_report) .unwrap_or_default(); let current_contacts: HashMap<u32, TouchContact> = touch_contacts_from_touch_report(touch_report); // Contacts which exist only in current. 
let added_contacts: Vec<TouchContact> = Vec::from_iter( current_contacts .values() .cloned() .filter(|contact| !previous_contacts.contains_key(&contact.id)), ); // Contacts which exist in both previous and current. let moved_contacts: Vec<TouchContact> = Vec::from_iter( current_contacts .values() .cloned() .filter(|contact| previous_contacts.contains_key(&contact.id)), ); // Contacts which exist only in previous. let removed_contacts: Vec<TouchContact> = Vec::from_iter( previous_contacts .values() .cloned() .filter(|contact| !current_contacts.contains_key(&contact.id)), ); let event_time: input_device::EventTime = input_device::event_time_or_now(report.event_time); send_event( hashmap! { fidl_ui_input::PointerEventPhase::Add => added_contacts.clone(), fidl_ui_input::PointerEventPhase::Down => added_contacts, fidl_ui_input::PointerEventPhase::Move => moved_contacts, fidl_ui_input::PointerEventPhase::Up => removed_contacts.clone(), fidl_ui_input::PointerEventPhase::Remove => removed_contacts, }, device_descriptor, event_time, input_event_sender, ); Some(report) } /// Parses a FIDL contact descriptor into a [`ContactDeviceDescriptor`] /// /// # Parameters /// - `contact_device_descriptor`: The contact descriptor to parse. /// /// # Errors /// If the contact description fails to parse because required fields aren't present. fn parse_contact_descriptor( contact_device_descriptor: &fidl::ContactInputDescriptor, ) -> Result<ContactDeviceDescriptor, Error> { match contact_device_descriptor { fidl::ContactInputDescriptor { position_x: Some(x_axis), position_y: Some(y_axis), pressure: pressure_axis, contact_width: width_axis, contact_height: height_axis, } => Ok(ContactDeviceDescriptor { x_range: x_axis.range, y_range: y_axis.range, pressure_range: pressure_axis.map(|axis| axis.range), width_range: width_axis.map(|axis| axis.range), height_range: height_axis.map(|axis| axis.range), }), descriptor => { Err(format_err!("Touch Contact Descriptor failed to parse: \n {:?}", descriptor)) } } } } fn touch_contacts_from_touch_report( touch_report: &fidl_fuchsia_input_report::TouchInputReport, ) -> HashMap<u32, TouchContact> { // First unwrap all the optionals in the input report to get to the contacts. let contacts: Vec<TouchContact> = touch_report .contacts .as_ref() .and_then(|unwrapped_contacts| { // Once the contacts are found, convert them into `TouchContact`s. Some(unwrapped_contacts.iter().map(TouchContact::from).collect()) }) .unwrap_or_default(); contacts.into_iter().map(|contact| (contact.id, contact)).collect() } /// Sends a TouchEvent over `input_event_sender`. /// /// # Parameters /// - `contacts`: The contact points relevant to the new TouchEvent. /// - `device_descriptor`: The descriptor for the input device generating the input reports. /// - `event_time`: The time in nanoseconds when the event was first recorded. /// - `input_event_sender`: The sender for the device binding's input event stream. 
fn send_event( contacts: HashMap<fidl_ui_input::PointerEventPhase, Vec<TouchContact>>, device_descriptor: &input_device::InputDeviceDescriptor, event_time: input_device::EventTime, input_event_sender: &mut Sender<input_device::InputEvent>, ) { match input_event_sender.try_send(input_device::InputEvent { device_event: input_device::InputDeviceEvent::Touch(TouchEvent { contacts }), device_descriptor: device_descriptor.clone(), event_time, }) { Err(e) => fx_log_err!("Failed to send TouchEvent with error: {:?}", e), _ => {} } } #[cfg(test)] mod tests { use { super::*, crate::testing_utilities::{ self, create_touch_contact, create_touch_event, create_touch_input_report, }, crate::utils::Position, fuchsia_async as fasync, futures::StreamExt, }; // Tests that a input report with a new contact generates an event with an add and a down. #[fasync::run_singlethreaded(test)] async fn add_and_down() { const TOUCH_ID: u32 = 2; let descriptor = input_device::InputDeviceDescriptor::Touch(TouchDeviceDescriptor { device_id: 1, contacts: vec![], }); let (event_time_i64, event_time_u64) = testing_utilities::event_times(); let contact = fidl_fuchsia_input_report::ContactInputReport { contact_id: Some(TOUCH_ID), position_x: Some(0), position_y: Some(0), pressure: None, contact_width: None, contact_height: None, }; let reports = vec![create_touch_input_report(vec![contact], event_time_i64)]; let expected_events = vec![create_touch_event( hashmap! { fidl_ui_input::PointerEventPhase::Add => vec![create_touch_contact(TOUCH_ID, Position { x: 0.0, y: 0.0 })], fidl_ui_input::PointerEventPhase::Down => vec![create_touch_contact(TOUCH_ID, Position { x: 0.0, y: 0.0 })], }, event_time_u64, &descriptor, )]; assert_input_report_sequence_generates_events!( input_reports: reports, expected_events: expected_events, device_descriptor: descriptor, device_type: TouchBinding, ); } // Tests that up and remove events are sent when a touch is released. #[fasync::run_singlethreaded(test)] async fn up_and_remove() { const TOUCH_ID: u32 = 2; let descriptor = input_device::InputDeviceDescriptor::Touch(TouchDeviceDescriptor { device_id: 1, contacts: vec![], }); let (event_time_i64, event_time_u64) = testing_utilities::event_times(); let contact = fidl_fuchsia_input_report::ContactInputReport { contact_id: Some(TOUCH_ID), position_x: Some(0), position_y: Some(0), pressure: None, contact_width: None, contact_height: None, }; let reports = vec![ create_touch_input_report(vec![contact], event_time_i64), create_touch_input_report(vec![], event_time_i64), ]; let expected_events = vec![ create_touch_event( hashmap! { fidl_ui_input::PointerEventPhase::Add => vec![create_touch_contact(TOUCH_ID, Position { x: 0.0, y: 0.0 })], fidl_ui_input::PointerEventPhase::Down => vec![create_touch_contact(TOUCH_ID, Position { x: 0.0, y: 0.0 })], }, event_time_u64, &descriptor, ), create_touch_event( hashmap! { fidl_ui_input::PointerEventPhase::Up => vec![create_touch_contact(TOUCH_ID, Position { x: 0.0, y: 0.0 })], fidl_ui_input::PointerEventPhase::Remove => vec![create_touch_contact(TOUCH_ID, Position { x: 0.0, y: 0.0 })], }, event_time_u64, &descriptor, ), ]; assert_input_report_sequence_generates_events!( input_reports: reports, expected_events: expected_events, device_descriptor: descriptor, device_type: TouchBinding, ); } // Tests that a move generates the correct event. 
#[fasync::run_singlethreaded(test)] async fn add_down_move() { const TOUCH_ID: u32 = 2; let first = Position { x: 10.0, y: 30.0 }; let second = Position { x: first.x * 2.0, y: first.y * 2.0 }; let descriptor = input_device::InputDeviceDescriptor::Touch(TouchDeviceDescriptor { device_id: 1, contacts: vec![], }); let (event_time_i64, event_time_u64) = testing_utilities::event_times(); let first_contact = fidl_fuchsia_input_report::ContactInputReport { contact_id: Some(TOUCH_ID), position_x: Some(first.x as i64), position_y: Some(first.y as i64), pressure: None, contact_width: None, contact_height: None, }; let second_contact = fidl_fuchsia_input_report::ContactInputReport { contact_id: Some(TOUCH_ID), position_x: Some(first.x as i64 * 2), position_y: Some(first.y as i64 * 2), pressure: None, contact_width: None, contact_height: None, }; let reports = vec![ create_touch_input_report(vec![first_contact], event_time_i64), create_touch_input_report(vec![second_contact], event_time_i64), ]; let expected_events = vec![ create_touch_event( hashmap! { fidl_ui_input::PointerEventPhase::Add => vec![create_touch_contact(TOUCH_ID, first)], fidl_ui_input::PointerEventPhase::Down => vec![create_touch_contact(TOUCH_ID, first)], }, event_time_u64, &descriptor, ), create_touch_event( hashmap! { fidl_ui_input::PointerEventPhase::Move => vec![create_touch_contact(TOUCH_ID, second)], }, event_time_u64, &descriptor, ), ]; assert_input_report_sequence_generates_events!( input_reports: reports, expected_events: expected_events, device_descriptor: descriptor, device_type: TouchBinding, ); } }
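// Illustrative sketch (added, not part of the source): the contact
// classification in `process_reports` above is a plain key diff between the
// previous and current contact maps, shown here with bare contact IDs.
#[cfg(test)]
mod diff_illustration {
    use std::collections::HashSet;

    #[test]
    fn classify_contacts_by_id() {
        let previous: HashSet<u32> = [1, 2].into_iter().collect();
        let current: HashSet<u32> = [2, 3].into_iter().collect();

        // Only in current -> Add/Down; in both -> Move; only in previous -> Up/Remove.
        let added: Vec<_> = current.difference(&previous).collect();
        let moved: Vec<_> = current.intersection(&previous).collect();
        let removed: Vec<_> = previous.difference(&current).collect();

        assert_eq!(added, vec![&3]);
        assert_eq!(moved, vec![&2]);
        assert_eq!(removed, vec![&1]);
    }
}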
38.224599
101
0.612992
ffd630478a7059a8bfe90cd52daf3e3fc71bbd2e
2,823
//! An iterator to enumerate every permutation of a given array.
//! Based on [Heap's algorithm](https://en.wikipedia.org/wiki/Heap%27s_algorithm).

#![deny(missing_docs)]

/// The iterator to enumerate every permutation of an array's items.
///
/// One can create such an iterator in two ways, consuming the original array in the process:
/// ```
/// # use kombini::Kombini;
/// // From a fixed size array
/// let komb = Kombini::from([1, 2, 3]);
///
/// // From a Vec
/// let komb = Kombini::<i32, 3>::try_from(vec![1, 2, 3]);
/// ```
///
/// ### Notes
/// Creating an iterator from a [Vec] requires the [Vec] to have exactly `N` elements.
/// On error, the original [Vec] will be returned.
/// ```
/// # use kombini::Kombini;
/// let komb = Kombini::<i32, 3>::try_from(vec![1, 2, 3]);
/// assert!(komb.is_ok());
///
/// let komb = Kombini::<i32, 5>::try_from(vec![1, 2, 3]);
/// assert!(komb.is_err());
/// ```
///
/// [Vec]: https://doc.rust-lang.org/std/vec/struct.Vec.html
pub struct Kombini<T: Clone, const N: usize> {
    values: [T; N],
    counter: [usize; N],
    n_iter: usize,
}

impl<T: Clone, const N: usize> From<[T; N]> for Kombini<T, N> {
    fn from(values: [T; N]) -> Self {
        Self {
            values,
            counter: [0; N],
            n_iter: 0,
        }
    }
}

impl<T: Clone, const N: usize> TryFrom<Vec<T>> for Kombini<T, N> {
    type Error = Vec<T>;

    fn try_from(values: Vec<T>) -> Result<Self, Self::Error> {
        let values = values.try_into()?;

        Ok(Self {
            values,
            counter: [0; N],
            n_iter: 0,
        })
    }
}

impl<T: Clone, const N: usize> Iterator for Kombini<T, N> {
    type Item = [T; N];

    fn next(&mut self) -> Option<Self::Item> {
        if self.n_iter > N {
            return None;
        }

        while self.n_iter < N && self.counter[self.n_iter] >= self.n_iter {
            self.counter[self.n_iter] = 0;
            self.n_iter += 1;
        }

        if self.n_iter == N {
            self.n_iter += 1;
            return Some(self.values.clone());
        }

        let ret = Some(self.values.clone());
        let swap_index = if self.n_iter % 2 == 0 {
            0
        } else {
            self.counter[self.n_iter]
        };

        self.values.swap(swap_index, self.n_iter);
        self.counter[self.n_iter] += 1;
        self.n_iter = 0;

        ret
    }
}

impl<T: Clone, const N: usize> ExactSizeIterator for Kombini<T, N> {
    fn len(&self) -> usize {
        const_factorial::<N>()
    }
}

/* ---------- */

/// Compute the factorial of a given N. Used to return the length
/// of the iterator.
const fn const_factorial<const N: usize>() -> usize {
    let mut i = 1;
    let mut ret = 1;

    while i <= N {
        ret *= i;
        i += 1;
    }

    ret
}
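// A small sanity test (added as an illustration, not part of the original
// crate): three elements yield 3! = 6 permutations, matching `len()`.
#[cfg(test)]
mod illustration_tests {
    use super::*;

    #[test]
    fn three_elements_yield_six_permutations() {
        let komb = Kombini::from([1, 2, 3]);
        assert_eq!(komb.len(), 6);

        let all: Vec<[i32; 3]> = komb.collect();
        assert_eq!(all.len(), 6);
        // Every yielded array is some ordering of the original input.
        for perm in &all {
            let mut sorted = *perm;
            sorted.sort_unstable();
            assert_eq!(sorted, [1, 2, 3]);
        }
    }
}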
24.763158
93
0.533829
38fdb6a0cfdc20dac35584fc9544082dbc235f49
4,356
use phf::phf_map; pub static CHARSET: phf::Map<&'static str, [u8; 11]> = phf_map! { "A" => [0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00], "B" => [0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x66, 0xfc, 0x00], "C" => [0x00, 0x7c, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0, 0xc6, 0xc6, 0x7c, 0x00], "D" => [0x00, 0xfc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0xfc, 0x00], "E" => [0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x62, 0x66, 0xfe, 0x00], "F" => [0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x60, 0xf0, 0x00], "G" => [0x00, 0x7c, 0xc6, 0xc6, 0xc0, 0xc0, 0xce, 0xc6, 0xc6, 0x7e, 0x00], "H" => [0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00], "I" => [0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00], "J" => [0x00, 0x1e, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0x78, 0x00], "K" => [0x00, 0xe6, 0x66, 0x6c, 0x6c, 0x78, 0x6c, 0x6c, 0x66, 0xe6, 0x00], "L" => [0x00, 0xf0, 0x60, 0x60, 0x60, 0x60, 0x60, 0x62, 0x66, 0xfe, 0x00], "M" => [0x00, 0x82, 0xc6, 0xee, 0xfe, 0xd6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00], "N" => [0x00, 0x86, 0xc6, 0xe6, 0xf6, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0x00], "O" => [0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00], "P" => [0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0xf0, 0x00], "Q" => [0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xde, 0x7c, 0x06], "R" => [0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x6c, 0x66, 0x66, 0xe6, 0x00], "S" => [0x00, 0x7c, 0xc6, 0xc6, 0x60, 0x38, 0x0c, 0xc6, 0xc6, 0x7c, 0x00], "T" => [0x00, 0x7e, 0x7e, 0x5a, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00], "U" => [0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00], "V" => [0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x6c, 0x38, 0x10, 0x00], "W" => [0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xfe, 0xee, 0xc6, 0x82, 0x00], "X" => [0x00, 0xc6, 0xc6, 0x6c, 0x7c, 0x38, 0x7c, 0x6c, 0xc6, 0xc6, 0x00], "Y" => [0x00, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x3c, 0x00], "Z" => [0x00, 0xfe, 0xc6, 0x86, 0x0c, 0x18, 0x30, 0x62, 0xc6, 0xfe, 0x00], "a" => [0x00, 0x00, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0x76, 0x00], "b" => [0x00, 0xe0, 0x60, 0x60, 0x7c, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x00], "c" => [0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc0, 0xc0, 0xc6, 0x7c, 0x00], "d" => [0x00, 0x1c, 0x0c, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0xcc, 0x76, 0x00], "e" => [0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc6, 0x7c, 0x00], "f" => [0x00, 0x1c, 0x36, 0x30, 0x78, 0x30, 0x30, 0x30, 0x30, 0x78, 0x00], "g" => [0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc, 0x7c, 0x0c, 0xcc, 0x78], "h" => [0x00, 0xe0, 0x60, 0x60, 0x6c, 0x76, 0x66, 0x66, 0x66, 0xe6, 0x00], "i" => [0x00, 0x18, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00], "j" => [0x0c, 0x0c, 0x00, 0x1c, 0x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0x78], "k" => [0x00, 0xe0, 0x60, 0x60, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0xe6, 0x00], "l" => [0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00], "m" => [0x00, 0x00, 0x00, 0x00, 0xec, 0xfe, 0xd6, 0xd6, 0xd6, 0xc6, 0x00], "n" => [0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00], "o" => [0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00], "p" => [0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xf0], "q" => [0x00, 0x00, 0x00, 0x00, 0x7c, 0xcc, 0xcc, 0x7c, 0x0c, 0x0c, 0x1e], "r" => [0x00, 0x00, 0x00, 0x00, 0xde, 0x76, 0x60, 0x60, 0x60, 0xf0, 0x00], "s" => [0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x70, 0x1c, 0xc6, 0x7c, 0x00], "t" => [0x00, 0x10, 0x30, 0x30, 0xfc, 0x30, 0x30, 0x30, 0x34, 0x18, 0x00], "u" => [0x00, 0x00, 
0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76, 0x00], "v" => [0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0x6c, 0x38, 0x10, 0x00], "w" => [0x00, 0x00, 0x00, 0x00, 0xc6, 0xd6, 0xd6, 0xd6, 0xfe, 0x6c, 0x00], "x" => [0x00, 0x00, 0x00, 0x00, 0xc6, 0x6c, 0x38, 0x38, 0x6c, 0xc6, 0x00], "y" => [0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0xf8], "z" => [0x00, 0x00, 0x00, 0x00, 0xfe, 0x8c, 0x18, 0x30, 0x62, 0xfe, 0x00], " " => [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00], "!" => [0x00, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x18, 0x00, 0x18, 0x18, 0x00], };
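// Sketch (added, not from the source): each glyph above is 11 rows of 8-bit
// bitmaps with the most significant bit leftmost, so a glyph renders like this.
//
//     if let Some(glyph) = CHARSET.get("A") {
//         for row in glyph.iter() {
//             let line: String = (0..8)
//                 .rev()
//                 .map(|bit| if row >> bit & 1 == 1 { '#' } else { '.' })
//                 .collect();
//             println!("{}", line);   // e.g. 0x38 -> "..###..."
//         }
//     }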
71.409836
78
0.570018
567633107c79c1a8acd3eea697ebd65bd09b3145
34,428
#![allow(non_snake_case, non_upper_case_globals)] #![allow(non_camel_case_types)] //! General-purpose-timers //! //! Used by: stm32f0x0, stm32f0x1, stm32f0x2, stm32f0x8 use crate::{RWRegister, WORegister}; #[cfg(not(feature = "nosync"))] use core::marker::PhantomData; /// control register 1 pub mod CR1 { /// Clock division pub mod CKD { /// Offset (8 bits) pub const offset: u32 = 8; /// Mask (2 bits: 0b11 << 8) pub const mask: u32 = 0b11 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b00: t_DTS = t_CK_INT pub const Div1: u32 = 0b00; /// 0b01: t_DTS = 2 × t_CK_INT pub const Div2: u32 = 0b01; /// 0b10: t_DTS = 4 × t_CK_INT pub const Div4: u32 = 0b10; } } /// Auto-reload preload enable pub mod ARPE { /// Offset (7 bits) pub const offset: u32 = 7; /// Mask (1 bit: 1 << 7) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: TIMx_APRR register is not buffered pub const Disabled: u32 = 0b0; /// 0b1: TIMx_APRR register is buffered pub const Enabled: u32 = 0b1; } } /// One-pulse mode pub mod OPM { /// Offset (3 bits) pub const offset: u32 = 3; /// Mask (1 bit: 1 << 3) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Update request source pub mod URS { /// Offset (2 bits) pub const offset: u32 = 2; /// Mask (1 bit: 1 << 2) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: Any of counter overflow/underflow, setting UG, or update through slave mode, generates an update interrupt or DMA request pub const AnyEvent: u32 = 0b0; /// 0b1: Only counter overflow/underflow generates an update interrupt or DMA request pub const CounterOnly: u32 = 0b1; } } /// Update disable pub mod UDIS { /// Offset (1 bits) pub const offset: u32 = 1; /// Mask (1 bit: 1 << 1) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: Update event enabled pub const Enabled: u32 = 0b0; /// 0b1: Update event disabled pub const Disabled: u32 = 0b1; } } /// Counter enable pub mod CEN { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (1 bit: 1 << 0) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: Counter disabled pub const Disabled: u32 = 0b0; /// 0b1: Counter enabled pub const Enabled: u32 = 0b1; } } } /// control register 2 pub mod CR2 { /// Output Idle state 2 pub mod OIS2 { /// Offset (10 bits) pub const offset: u32 = 10; /// Mask (1 bit: 1 << 10) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Output Idle state 1 pub mod OIS1N { /// Offset (9 bits) pub const offset: u32 = 9; /// Mask (1 bit: 1 << 9) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Output Idle state 1 pub mod OIS1 { /// Offset (8 bits) pub const offset: u32 = 8; /// Mask (1 bit: 1 << 8) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// 
Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Master mode selection pub mod MMS { /// Offset (4 bits) pub const offset: u32 = 4; /// Mask (3 bits: 0b111 << 4) pub const mask: u32 = 0b111 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Capture/compare DMA selection pub mod CCDS { /// Offset (3 bits) pub const offset: u32 = 3; /// Mask (1 bit: 1 << 3) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Capture/compare control update selection pub mod CCUS { /// Offset (2 bits) pub const offset: u32 = 2; /// Mask (1 bit: 1 << 2) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Capture/compare preloaded control pub mod CCPC { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (1 bit: 1 << 0) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// slave mode control register pub mod SMCR { /// Master/Slave mode pub mod MSM { /// Offset (7 bits) pub const offset: u32 = 7; /// Mask (1 bit: 1 << 7) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Trigger selection pub mod TS { /// Offset (4 bits) pub const offset: u32 = 4; /// Mask (3 bits: 0b111 << 4) pub const mask: u32 = 0b111 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Slave mode selection pub mod SMS { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (3 bits: 0b111 << 0) pub const mask: u32 = 0b111 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// DMA/Interrupt enable register pub mod DIER { /// Trigger DMA request enable pub mod TDE { /// Offset (14 bits) pub const offset: u32 = 14; /// Mask (1 bit: 1 << 14) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Capture/Compare 2 DMA request enable pub mod CC2DE { /// Offset (10 bits) pub const offset: u32 = 10; /// Mask (1 bit: 1 << 10) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Capture/Compare 1 DMA request enable pub mod CC1DE { /// Offset (9 bits) pub const offset: u32 = 9; /// Mask (1 bit: 1 << 9) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Update DMA request enable pub mod UDE { /// Offset (8 bits) pub const offset: u32 = 8; /// Mask (1 bit: 1 << 8) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Break interrupt enable pub mod BIE { /// Offset (7 bits) pub const offset: u32 = 7; /// Mask (1 bit: 1 << 7) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// 
Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Trigger interrupt enable pub mod TIE { /// Offset (6 bits) pub const offset: u32 = 6; /// Mask (1 bit: 1 << 6) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// COM interrupt enable pub mod COMIE { /// Offset (5 bits) pub const offset: u32 = 5; /// Mask (1 bit: 1 << 5) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Capture/Compare 2 interrupt enable pub mod CC2IE { /// Offset (2 bits) pub const offset: u32 = 2; /// Mask (1 bit: 1 << 2) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Capture/Compare 1 interrupt enable pub mod CC1IE { /// Offset (1 bits) pub const offset: u32 = 1; /// Mask (1 bit: 1 << 1) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Update interrupt enable pub mod UIE { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (1 bit: 1 << 0) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: Update interrupt disabled pub const Disabled: u32 = 0b0; /// 0b1: Update interrupt enabled pub const Enabled: u32 = 0b1; } } } /// status register pub mod SR { /// Capture/compare 2 overcapture flag pub mod CC2OF { /// Offset (10 bits) pub const offset: u32 = 10; /// Mask (1 bit: 1 << 10) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Capture/Compare 1 overcapture flag pub mod CC1OF { /// Offset (9 bits) pub const offset: u32 = 9; /// Mask (1 bit: 1 << 9) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Break interrupt flag pub mod BIF { /// Offset (7 bits) pub const offset: u32 = 7; /// Mask (1 bit: 1 << 7) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Trigger interrupt flag pub mod TIF { /// Offset (6 bits) pub const offset: u32 = 6; /// Mask (1 bit: 1 << 6) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// COM interrupt flag pub mod COMIF { /// Offset (5 bits) pub const offset: u32 = 5; /// Mask (1 bit: 1 << 5) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Capture/Compare 2 interrupt flag pub mod CC2IF { /// Offset (2 bits) pub const offset: u32 = 2; /// Mask (1 bit: 1 << 2) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Capture/compare 1 interrupt flag pub mod CC1IF { /// Offset (1 bits) pub const offset: u32 = 1; /// Mask (1 bit: 1 << 1) pub const mask: u32 = 1 << offset; /// 
Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Update interrupt flag pub mod UIF { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (1 bit: 1 << 0) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b0: No update occurred pub const Clear: u32 = 0b0; /// 0b1: Update interrupt pending. pub const UpdatePending: u32 = 0b1; } } } /// event generation register pub mod EGR { /// Break generation pub mod BG { /// Offset (7 bits) pub const offset: u32 = 7; /// Mask (1 bit: 1 << 7) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Trigger generation pub mod TG { /// Offset (6 bits) pub const offset: u32 = 6; /// Mask (1 bit: 1 << 6) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Capture/Compare control update generation pub mod COMG { /// Offset (5 bits) pub const offset: u32 = 5; /// Mask (1 bit: 1 << 5) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Capture/compare 2 generation pub mod CC2G { /// Offset (2 bits) pub const offset: u32 = 2; /// Mask (1 bit: 1 << 2) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Capture/compare 1 generation pub mod CC1G { /// Offset (1 bits) pub const offset: u32 = 1; /// Mask (1 bit: 1 << 1) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Update generation pub mod UG { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (1 bit: 1 << 0) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b1: Re-initializes the timer counter and generates an update of the registers. pub const Update: u32 = 0b1; } } } /// CCMR1_Output and CCMR1_Input /// CCMR1_Output: capture/compare mode register (output mode) /// CCMR1_Input: capture/compare mode register 1 (input mode) pub mod CCMR1 { /// Output Compare 2 mode pub mod OC2M { /// Offset (12 bits) pub const offset: u32 = 12; /// Mask (3 bits: 0b111 << 12) pub const mask: u32 = 0b111 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values pub mod RW { /// 0b000: The comparison between the output compare register TIMx_CCRy and the counter TIMx_CNT has no effect on the outputs pub const Frozen: u32 = 0b000; /// 0b001: Set channel to active level on match. OCyREF signal is forced high when the counter matches the capture/compare register pub const ActiveOnMatch: u32 = 0b001; /// 0b010: Set channel to inactive level on match. 
OCyREF signal is forced low when the counter matches the capture/compare register pub const InactiveOnMatch: u32 = 0b010; /// 0b011: OCyREF toggles when TIMx_CNT=TIMx_CCRy pub const Toggle: u32 = 0b011; /// 0b100: OCyREF is forced low pub const ForceInactive: u32 = 0b100; /// 0b101: OCyREF is forced high pub const ForceActive: u32 = 0b101; /// 0b110: In upcounting, channel is active as long as TIMx_CNT<TIMx_CCRy else inactive. In downcounting, channel is inactive as long as TIMx_CNT>TIMx_CCRy else active pub const PwmMode1: u32 = 0b110; /// 0b111: Inversely to PwmMode1 pub const PwmMode2: u32 = 0b111; } } /// Output Compare 2 preload enable pub mod OC2PE { /// Offset (11 bits) pub const offset: u32 = 11; /// Mask (1 bit: 1 << 11) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Output Compare 2 fast enable pub mod OC2FE { /// Offset (10 bits) pub const offset: u32 = 10; /// Mask (1 bit: 1 << 10) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Capture/Compare 2 selection pub mod CC2S { /// Offset (8 bits) pub const offset: u32 = 8; /// Mask (2 bits: 0b11 << 8) pub const mask: u32 = 0b11 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Output Compare 1 mode pub mod OC1M { /// Offset (4 bits) pub const offset: u32 = 4; /// Mask (3 bits: 0b111 << 4) pub const mask: u32 = 0b111 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} pub use super::OC2M::RW; } /// Output Compare 1 preload enable pub mod OC1PE { /// Offset (3 bits) pub const offset: u32 = 3; /// Mask (1 bit: 1 << 3) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Output Compare 1 fast enable pub mod OC1FE { /// Offset (2 bits) pub const offset: u32 = 2; /// Mask (1 bit: 1 << 2) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Capture/Compare 1 selection pub mod CC1S { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (2 bits: 0b11 << 0) pub const mask: u32 = 0b11 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Input capture 2 filter pub mod IC2F { /// Offset (12 bits) pub const offset: u32 = 12; /// Mask (4 bits: 0b1111 << 12) pub const mask: u32 = 0b1111 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Input capture 2 prescaler pub mod IC2PSC { /// Offset (10 bits) pub const offset: u32 = 10; /// Mask (2 bits: 0b11 << 10) pub const mask: u32 = 0b11 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Input capture 1 filter pub mod IC1F { /// Offset (4 bits) pub const offset: u32 = 4; /// Mask (4 bits: 0b1111 << 4) pub const mask: u32 = 0b1111 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Input capture 1 prescaler pub mod IC1PSC { 
/// Offset (2 bits) pub const offset: u32 = 2; /// Mask (2 bits: 0b11 << 2) pub const mask: u32 = 0b11 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// capture/compare enable register pub mod CCER { /// Capture/Compare 2 output Polarity pub mod CC2NP { /// Offset (7 bits) pub const offset: u32 = 7; /// Mask (1 bit: 1 << 7) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Capture/Compare 2 output Polarity pub mod CC2P { /// Offset (5 bits) pub const offset: u32 = 5; /// Mask (1 bit: 1 << 5) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Capture/Compare 2 output enable pub mod CC2E { /// Offset (4 bits) pub const offset: u32 = 4; /// Mask (1 bit: 1 << 4) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Capture/Compare 1 output Polarity pub mod CC1NP { /// Offset (3 bits) pub const offset: u32 = 3; /// Mask (1 bit: 1 << 3) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Capture/Compare 1 complementary output enable pub mod CC1NE { /// Offset (2 bits) pub const offset: u32 = 2; /// Mask (1 bit: 1 << 2) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Capture/Compare 1 output Polarity pub mod CC1P { /// Offset (1 bits) pub const offset: u32 = 1; /// Mask (1 bit: 1 << 1) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Capture/Compare 1 output enable pub mod CC1E { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (1 bit: 1 << 0) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// counter pub mod CNT { /// counter value pub mod CNT { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (16 bits: 0xffff << 0) pub const mask: u32 = 0xffff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// prescaler pub mod PSC { /// Prescaler value pub mod PSC { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (16 bits: 0xffff << 0) pub const mask: u32 = 0xffff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// auto-reload register pub mod ARR { /// Auto-reload value pub mod ARR { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (16 bits: 0xffff << 0) pub const mask: u32 = 0xffff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// repetition counter register pub mod RCR { /// Repetition counter value pub mod REP { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (8 bits: 0xff << 0) pub const mask: u32 = 0xff << offset; /// Read-only values (empty) pub mod R {} /// 
Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// capture/compare register pub mod CCR1 { /// Capture/Compare value pub mod CCR { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (16 bits: 0xffff << 0) pub const mask: u32 = 0xffff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// capture/compare register pub mod CCR2 { pub use super::CCR1::CCR; } /// break and dead-time register pub mod BDTR { /// Main output enable pub mod MOE { /// Offset (15 bits) pub const offset: u32 = 15; /// Mask (1 bit: 1 << 15) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Automatic output enable pub mod AOE { /// Offset (14 bits) pub const offset: u32 = 14; /// Mask (1 bit: 1 << 14) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Break polarity pub mod BKP { /// Offset (13 bits) pub const offset: u32 = 13; /// Mask (1 bit: 1 << 13) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Break enable pub mod BKE { /// Offset (12 bits) pub const offset: u32 = 12; /// Mask (1 bit: 1 << 12) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Off-state selection for Run mode pub mod OSSR { /// Offset (11 bits) pub const offset: u32 = 11; /// Mask (1 bit: 1 << 11) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Off-state selection for Idle mode pub mod OSSI { /// Offset (10 bits) pub const offset: u32 = 10; /// Mask (1 bit: 1 << 10) pub const mask: u32 = 1 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Lock configuration pub mod LOCK { /// Offset (8 bits) pub const offset: u32 = 8; /// Mask (2 bits: 0b11 << 8) pub const mask: u32 = 0b11 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// Dead-time generator setup pub mod DTG { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (8 bits: 0xff << 0) pub const mask: u32 = 0xff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// DMA control register pub mod DCR { /// DMA burst length pub mod DBL { /// Offset (8 bits) pub const offset: u32 = 8; /// Mask (5 bits: 0b11111 << 8) pub const mask: u32 = 0b11111 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } /// DMA base address pub mod DBA { /// Offset (0 bits) pub const offset: u32 = 0; /// Mask (5 bits: 0b11111 << 0) pub const mask: u32 = 0b11111 << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } /// DMA address for full transfer pub mod DMAR { /// DMA register for burst accesses pub mod DMAB { /// Offset (0 bits) pub const 
offset: u32 = 0; /// Mask (16 bits: 0xffff << 0) pub const mask: u32 = 0xffff << offset; /// Read-only values (empty) pub mod R {} /// Write-only values (empty) pub mod W {} /// Read-write values (empty) pub mod RW {} } } #[repr(C)] pub struct RegisterBlock { /// control register 1 pub CR1: RWRegister<u32>, /// control register 2 pub CR2: RWRegister<u32>, /// slave mode control register pub SMCR: RWRegister<u32>, /// DMA/Interrupt enable register pub DIER: RWRegister<u32>, /// status register pub SR: RWRegister<u32>, /// event generation register pub EGR: WORegister<u32>, /// CCMR1_Output and CCMR1_Input /// CCMR1_Output: capture/compare mode register (output mode) /// CCMR1_Input: capture/compare mode register 1 (input mode) pub CCMR1: RWRegister<u32>, _reserved1: [u32; 1], /// capture/compare enable register pub CCER: RWRegister<u32>, /// counter pub CNT: RWRegister<u32>, /// prescaler pub PSC: RWRegister<u32>, /// auto-reload register pub ARR: RWRegister<u32>, /// repetition counter register pub RCR: RWRegister<u32>, /// capture/compare register pub CCR1: RWRegister<u32>, /// capture/compare register pub CCR2: RWRegister<u32>, _reserved2: [u32; 2], /// break and dead-time register pub BDTR: RWRegister<u32>, /// DMA control register pub DCR: RWRegister<u32>, /// DMA address for full transfer pub DMAR: RWRegister<u32>, } pub struct ResetValues { pub CR1: u32, pub CR2: u32, pub SMCR: u32, pub DIER: u32, pub SR: u32, pub EGR: u32, pub CCMR1: u32, pub CCER: u32, pub CNT: u32, pub PSC: u32, pub ARR: u32, pub RCR: u32, pub CCR1: u32, pub CCR2: u32, pub BDTR: u32, pub DCR: u32, pub DMAR: u32, } #[cfg(not(feature = "nosync"))] pub struct Instance { pub(crate) addr: u32, pub(crate) _marker: PhantomData<*const RegisterBlock>, } #[cfg(not(feature = "nosync"))] impl ::core::ops::Deref for Instance { type Target = RegisterBlock; #[inline(always)] fn deref(&self) -> &RegisterBlock { unsafe { &*(self.addr as *const _) } } } #[cfg(feature = "rtic")] unsafe impl Send for Instance {}
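// Sketch (added for illustration, not part of the generated file): the
// offset/mask constants defined above compose into read-modify-write field
// updates in the usual shift-and-mask style, here for the CR1.CKD field.
#[allow(dead_code)]
fn cr1_with_ckd_div2(cr1: u32) -> u32 {
    // Clear the field, then OR in the new value shifted into place.
    (cr1 & !CR1::CKD::mask) | ((CR1::CKD::RW::Div2 << CR1::CKD::offset) & CR1::CKD::mask)
}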
26.40184
179
0.492099
abb2e93757ed395777406a1ed94805c49b6ec5de
1,703
#![feature(box_syntax, plugin, plugin_registrar, rustc_private)]
#![crate_type = "dylib"]

#[macro_use]
extern crate rustc;
#[macro_use]
extern crate rustc_session;
extern crate rustc_driver;
extern crate rustc_hir;
extern crate rustc_span;
extern crate syntax;

use rustc_hir::intravisit;
use rustc_hir as hir;
use rustc_hir::Node;
use rustc::lint::{LateContext, LintPass, LintArray, LateLintPass, LintContext};
use rustc_driver::plugin::Registry;
use rustc_span::source_map;
use syntax::print::pprust;

#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
    reg.lint_store.register_lints(&[&MISSING_WHITELISTED_ATTR]);
    reg.lint_store.register_late_pass(|| box MissingWhitelistedAttrPass);
}

declare_lint! {
    MISSING_WHITELISTED_ATTR,
    Deny,
    "Checks for missing `whitelisted_attr` attribute"
}

declare_lint_pass!(MissingWhitelistedAttrPass => [MISSING_WHITELISTED_ATTR]);

impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MissingWhitelistedAttrPass {
    fn check_fn(&mut self,
                cx: &LateContext<'a, 'tcx>,
                _: intravisit::FnKind<'tcx>,
                _: &'tcx hir::FnDecl,
                _: &'tcx hir::Body,
                span: source_map::Span,
                id: hir::HirId) {
        let item = match cx.tcx.hir().get(id) {
            Node::Item(item) => item,
            _ => cx.tcx.hir().expect_item(cx.tcx.hir().get_parent_item(id)),
        };

        let whitelisted = |attr| pprust::attribute_to_string(attr).contains("whitelisted_attr");
        if !item.attrs.iter().any(whitelisted) {
            cx.span_lint(MISSING_WHITELISTED_ATTR, span,
                         "Missing 'whitelisted_attr' attribute");
        }
    }
}
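// Illustration (added, not from the source): code this lint would flag vs. accept.
//
//     fn plain() {}                  // error: Missing 'whitelisted_attr' attribute
//
//     #[whitelisted_attr]
//     fn allowed() {}                // passes the lint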
31.537037
96
0.658837
cc98ef9922ddf29a92d7dc23637f316161ef7d38
2,062
use super::{fmt, hasher, CodeBlock, Digest};

// SPLIT BLOCK
// ================================================================================================

/// A code block used to describe conditional execution.
///
/// When the VM executes a Split block, either the true branch or the false branch of the block is
/// executed. Specifically, if the top of the stack is `1`, the true branch is executed, and if
/// the top of the stack is `0`, the false branch is executed. If the top of the stack is neither
/// `0` nor `1`, the program fails.
///
/// Hash of a Split block is computed by hashing a concatenation of the true and the false branch
/// hashes.
#[derive(Clone, Debug)]
pub struct Split {
    branches: Box<[CodeBlock; 2]>,
    hash: Digest,
}

impl Split {
    // CONSTRUCTOR
    // --------------------------------------------------------------------------------------------
    /// Returns a new [Split] block instantiated with the specified true and false branches.
    pub fn new(t_branch: CodeBlock, f_branch: CodeBlock) -> Self {
        let hash = hasher::merge(&[t_branch.hash(), f_branch.hash()]);
        Self {
            branches: Box::new([t_branch, f_branch]),
            hash,
        }
    }

    // PUBLIC ACCESSORS
    // --------------------------------------------------------------------------------------------

    /// Returns a hash of this code block.
    pub fn hash(&self) -> Digest {
        self.hash
    }

    /// Returns a reference to the code block which is to be executed when the top of the stack
    /// is `1`.
    pub fn on_true(&self) -> &CodeBlock {
        &self.branches[0]
    }

    /// Returns a reference to the code block which is to be executed when the top of the stack
    /// is `0`.
    pub fn on_false(&self) -> &CodeBlock {
        &self.branches[1]
    }
}

impl fmt::Display for Split {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "if.true {} else {} end",
            self.branches[0], self.branches[1]
        )
    }
}
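// Illustrative sketch (added; assumes two existing `CodeBlock`s named
// `t_branch` and `f_branch`): the Split hash commits to both branches, so it
// can be recomputed from the branch hashes alone.
//
//     let split = Split::new(t_branch.clone(), f_branch.clone());
//     assert_eq!(split.hash(), hasher::merge(&[t_branch.hash(), f_branch.hash()]));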
33.258065
99
0.519399
61cb7d2101b01f4d2309409891f5e590e3261f97
2,396
use std::io;
use std::io::prelude::*;
use std::collections::HashMap;

fn main() {
    let stdin = io::stdin();
    // Wires whose signal is already known.
    let mut wires: HashMap<String, u16> = HashMap::new();
    // Pending instructions as (token0, token1, token2, target wire); unused
    // tokens are stored as empty strings.
    let mut circuit_lines: Vec<(String, String, String, String)> = vec![];

    for l in stdin.lock().lines() {
        let line = l.unwrap();
        let v = line.split(" -> ").collect::<Vec<&str>>();
        let action: Vec<&str> = v[0].split(' ').collect();
        let wire = v[1];

        // `123 -> x`: a literal signal resolves immediately.
        if action.len() == 1 {
            match action[0].parse::<u16>() {
                Ok(v) => {
                    wires.insert(wire.to_string(), v);
                    continue;
                }
                Err(_) => {}
            }
        }

        circuit_lines.push((
            action[0].to_string(),
            if action.len() >= 2 { action[1].to_string() } else { "".to_string() },
            if action.len() == 3 { action[2].to_string() } else { "".to_string() },
            wire.to_string(),
        ));
    }

    // Sweep the pending instructions repeatedly, resolving any whose inputs
    // are already known, until every wire has settled.
    while circuit_lines.len() != 0 {
        let mut i: i32 = 0;
        for line in circuit_lines.clone() {
            let wire = line.3;
            if wires.clone().get(&wire) != None {
                circuit_lines.remove(i as usize);
                i -= 1;
                continue;
            }

            if line.1 == "" {
                // `y -> x`: a plain wire-to-wire connection.
                match wires.clone().get(&line.0) {
                    Some(v) => {
                        circuit_lines.remove(i as usize);
                        i -= 1;
                        wires.insert(wire, *v);
                    }
                    None => {}
                }
            } else if line.2 == "" {
                // `NOT y -> x`: bitwise complement.
                match wires.clone().get(&line.1) {
                    Some(v) => {
                        circuit_lines.remove(i as usize);
                        i -= 1;
                        wires.insert(wire, !*v);
                    }
                    None => {}
                }
            } else {
                // `a OP b -> x`: a binary gate; the left operand may be a wire
                // name or a literal.
                let op = line.1.as_str();
                let mut c = false;
                let x = match wires.clone().get(&line.0) {
                    Some(x) => *x,
                    None => match line.0.parse::<u16>() {
                        Ok(x) => x,
                        Err(_) => {
                            c = true;
                            0
                        }
                    },
                };

                if !c {
                    if op == "AND" || op == "OR" {
                        match wires.clone().get(&line.2) {
                            Some(y) => {
                                let v = match op {
                                    "AND" => x & *y,
                                    "OR" => x | *y,
                                    _ => panic!("Wait what?"),
                                };
                                circuit_lines.remove(i as usize);
                                i -= 1;
                                wires.insert(wire, v);
                            }
                            None => {}
                        }
                    } else {
                        // LSHIFT/RSHIFT always take a literal shift amount.
                        let y = line.2.parse::<u32>().unwrap();
                        let v = match op {
                            "RSHIFT" => x >> y,
                            "LSHIFT" => x << y,
                            _ => panic!("Wait what?"),
                        };
                        circuit_lines.remove(i as usize);
                        i -= 1;
                        wires.insert(wire, v);
                    }
                }
            }

            i += 1;
        }
    }

    let c = wires.get("a").unwrap();
    println!("{}", c);
}
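// Example of the instruction format this solver parses (Advent of Code 2015
// day 7 style; one instruction per line on stdin, and the resolved signal of
// wire "a" is printed at the end):
//
//     123 -> x
//     456 -> y
//     x AND y -> d
//     x OR y -> e
//     x LSHIFT 2 -> f
//     NOT x -> h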
23.262136
74
0.469115
8f13fdd15d7f19e5d7440cea7ee64afad42d1165
12,975
// Copyright (c) The Dijets Core Contributors
// SPDX-License-Identifier: Apache-2.0

use super::*;
use crate::{pruner, DijetsDB};
use dijets_jellyfish_merkle::restore::JellyfishMerkleRestore;
use dijets_temppath::TempPath;
use dijets_types::{
    account_address::{AccountAddress, HashAccountAddress},
    account_state_blob::AccountStateBlob,
};
use proptest::{collection::hash_map, prelude::*};

fn put_account_state_set(
    store: &StateStore,
    account_state_set: Vec<(AccountAddress, AccountStateBlob)>,
    version: Version,
    expected_new_nodes: usize,
    expected_stale_nodes: usize,
    expected_stale_leaves: usize,
) -> HashValue {
    let mut cs = ChangeSet::new();
    let expected_new_leaves = account_state_set.len();
    let root = store
        .put_account_state_sets(
            vec![account_state_set.into_iter().collect::<HashMap<_, _>>()],
            None,
            version,
            &mut cs,
        )
        .unwrap()[0];
    let bumps = cs.counter_bumps(version);
    assert_eq!(bumps.get(LedgerCounter::NewStateNodes), expected_new_nodes);
    assert_eq!(
        bumps.get(LedgerCounter::StaleStateNodes),
        expected_stale_nodes
    );
    assert_eq!(
        bumps.get(LedgerCounter::NewStateLeaves),
        expected_new_leaves
    );
    assert_eq!(
        bumps.get(LedgerCounter::StaleStateLeaves),
        expected_stale_leaves
    );
    store.db.write_schemas(cs.batch).unwrap();

    root
}

fn prune_stale_indices(
    store: &StateStore,
    least_readable_version: Version,
    target_least_readable_version: Version,
    limit: usize,
) {
    pruner::prune_state(
        Arc::clone(&store.db),
        least_readable_version,
        target_least_readable_version,
        limit,
    )
    .unwrap();
}

fn verify_state_in_store(
    store: &StateStore,
    address: AccountAddress,
    expected_value: Option<&AccountStateBlob>,
    version: Version,
    root: HashValue,
) {
    let (value, proof) = store
        .get_account_state_with_proof_by_version(address, version)
        .unwrap();
    assert_eq!(value.as_ref(), expected_value);
    proof.verify(root, address.hash(), value.as_ref()).unwrap();
}

#[test]
fn test_empty_store() {
    let tmp_dir = TempPath::new();
    let db = DijetsDB::new_for_test(&tmp_dir);
    let store = &db.state_store;
    let address = AccountAddress::new([1u8; AccountAddress::LENGTH]);
    assert!(store
        .get_account_state_with_proof_by_version(address, 0)
        .is_err());
}

#[test]
fn test_state_store_reader_writer() {
    let tmp_dir = TempPath::new();
    let db = DijetsDB::new_for_test(&tmp_dir);
    let store = &db.state_store;
    let address1 = AccountAddress::new([1u8; AccountAddress::LENGTH]);
    let address2 = AccountAddress::new([2u8; AccountAddress::LENGTH]);
    let address3 = AccountAddress::new([3u8; AccountAddress::LENGTH]);
    let value1 = AccountStateBlob::from(vec![0x01]);
    let value1_update = AccountStateBlob::from(vec![0x00]);
    let value2 = AccountStateBlob::from(vec![0x02]);
    let value3 = AccountStateBlob::from(vec![0x03]);

    // Insert address1 with value1 and verify new states.
    let mut root = put_account_state_set(
        store,
        vec![(address1, value1.clone())],
        0, /* version */
        1, /* expected_nodes_created */
        0, /* expected_nodes_retired */
        0, /* expected_blobs_retired */
    );
    verify_state_in_store(store, address1, Some(&value1), 0, root);
    verify_state_in_store(store, address2, None, 0, root);
    verify_state_in_store(store, address3, None, 0, root);

    // Insert address1 with updated value1, address2 with value2 and address3 with value3, and
    // verify new states.
    root = put_account_state_set(
        store,
        vec![
            (address1, value1_update.clone()),
            (address2, value2.clone()),
            (address3, value3.clone()),
        ],
        1, /* version */
        4, /* expected_nodes_created */
        1, /* expected_nodes_retired */
        1, /* expected_blobs_retired */
    );
    verify_state_in_store(store, address1, Some(&value1_update), 1, root);
    verify_state_in_store(store, address2, Some(&value2), 1, root);
    verify_state_in_store(store, address3, Some(&value3), 1, root);
}

#[test]
fn test_retired_records() {
    let address1 = AccountAddress::new([1u8; AccountAddress::LENGTH]);
    let address2 = AccountAddress::new([2u8; AccountAddress::LENGTH]);
    let address3 = AccountAddress::new([3u8; AccountAddress::LENGTH]);
    let value1 = AccountStateBlob::from(vec![0x01]);
    let value2 = AccountStateBlob::from(vec![0x02]);
    let value2_update = AccountStateBlob::from(vec![0x12]);
    let value3 = AccountStateBlob::from(vec![0x03]);
    let value3_update = AccountStateBlob::from(vec![0x13]);

    let tmp_dir = TempPath::new();
    let db = DijetsDB::new_for_test(&tmp_dir);
    let store = &db.state_store;

    // Update.
    // ```text
    // | batch    | 0      | 1             | 2             |
    // | address1 | value1 |               |               |
    // | address2 | value2 | value2_update |               |
    // | address3 |        | value3        | value3_update |
    // ```
    let root0 = put_account_state_set(
        store,
        vec![(address1, value1.clone()), (address2, value2)],
        0, /* version */
        3, /* expected_nodes_created */
        0, /* expected_nodes_retired */
        0, /* expected_blobs_retired */
    );
    let root1 = put_account_state_set(
        store,
        vec![
            (address2, value2_update.clone()),
            (address3, value3.clone()),
        ],
        1, /* version */
        3, /* expected_nodes_created */
        2, /* expected_nodes_retired */
        1, /* expected_blobs_retired */
    );
    let root2 = put_account_state_set(
        store,
        vec![(address3, value3_update.clone())],
        2, /* version */
        2, /* expected_nodes_created */
        2, /* expected_nodes_retired */
        1, /* expected_blobs_retired */
    );

    // Verify.
    // Prune with limit=0, nothing is gone.
    {
        prune_stale_indices(
            store,
            0, /* least_readable_version */
            1, /* target_least_readable_version */
            0, /* limit */
        );
        verify_state_in_store(store, address1, Some(&value1), 0, root0);
    }
    // Prune till version=1.
    {
        prune_stale_indices(
            store,
            0,   /* least_readable_version */
            1,   /* target_least_readable_version */
            100, /* limit */
        );
        // root0 is gone.
        assert!(store
            .get_account_state_with_proof_by_version(address2, 0)
            .is_err());
        // root1 is still there.
        verify_state_in_store(store, address1, Some(&value1), 1, root1);
        verify_state_in_store(store, address2, Some(&value2_update), 1, root1);
        verify_state_in_store(store, address3, Some(&value3), 1, root1);
    }
    // Prune till version=2.
    {
        prune_stale_indices(
            store,
            1,   /* least_readable_version */
            2,   /* target_least_readable_version */
            100, /* limit */
        );
        // root1 is gone.
        assert!(store
            .get_account_state_with_proof_by_version(address2, 1)
            .is_err());
        // root2 is still there.
        verify_state_in_store(store, address1, Some(&value1), 2, root2);
        verify_state_in_store(store, address2, Some(&value2_update), 2, root2);
        verify_state_in_store(store, address3, Some(&value3_update), 2, root2);
    }
}

proptest! {
    #![proptest_config(ProptestConfig::with_cases(10))]

    #[test]
    fn test_get_account_iter(
        input in hash_map(any::<AccountAddress>(), any::<AccountStateBlob>(), 1..200)
    ) {
        // Convert to a vector so iteration order becomes deterministic.
        let kvs: Vec<_> = input.into_iter().collect();

        let tmp_dir = TempPath::new();
        let db = DijetsDB::new_for_test(&tmp_dir);
        let store = &db.state_store;
        init_store(store, kvs.clone().into_iter());

        // Test iterator at each version.
        for i in 0..kvs.len() {
            let actual_values = db
                .get_backup_handler()
                .get_account_iter(i as Version)
                .unwrap()
                .collect::<Result<Vec<_>>>()
                .unwrap();
            let mut expected_values: Vec<_> = kvs[..=i]
                .iter()
                .map(|(addr, account)| (addr.hash(), account.clone()))
                .collect();
            expected_values.sort_unstable_by_key(|item| item.0);
            prop_assert_eq!(actual_values, expected_values);
        }
    }

    #[test]
    fn test_restore(
        (input, batch1_size) in hash_map(any::<AccountAddress>(), any::<AccountStateBlob>(), 2..1000)
            .prop_flat_map(|input| {
                let len = input.len();
                (Just(input), 1..len)
            })
    ) {
        let tmp_dir1 = TempPath::new();
        let db1 = DijetsDB::new_for_test(&tmp_dir1);
        let store1 = &db1.state_store;
        init_store(store1, input.clone().into_iter());

        let version = (input.len() - 1) as Version;
        let expected_root_hash = store1.get_root_hash(version).unwrap();

        let tmp_dir2 = TempPath::new();
        let db2 = DijetsDB::new_for_test(&tmp_dir2);
        let store2 = &db2.state_store;

        let mut restore =
            JellyfishMerkleRestore::new(Arc::clone(store2), version, expected_root_hash).unwrap();

        let mut ordered_input: Vec<_> = input
            .into_iter()
            .map(|(addr, value)| (addr.hash(), value))
            .collect();
        ordered_input.sort_unstable_by_key(|(key, _value)| *key);

        let batch1: Vec<_> = ordered_input
            .clone()
            .into_iter()
            .take(batch1_size)
            .collect();
        let rightmost_of_batch1 = batch1.last().map(|(key, _value)| *key).unwrap();
        let proof_of_batch1 = store1
            .get_account_state_range_proof(rightmost_of_batch1, version)
            .unwrap();

        restore.add_chunk(batch1, proof_of_batch1).unwrap();

        let batch2: Vec<_> = ordered_input.into_iter().skip(batch1_size).collect();
        let rightmost_of_batch2 = batch2.last().map(|(key, _value)| *key).unwrap();
        let proof_of_batch2 = store1
            .get_account_state_range_proof(rightmost_of_batch2, version)
            .unwrap();

        restore.add_chunk(batch2, proof_of_batch2).unwrap();

        restore.finish().unwrap();

        let actual_root_hash = store2.get_root_hash(version).unwrap();
        prop_assert_eq!(actual_root_hash, expected_root_hash);
    }

    #[test]
    fn test_get_rightmost_leaf(
        (input, batch1_size) in hash_map(any::<AccountAddress>(), any::<AccountStateBlob>(), 2..1000)
            .prop_flat_map(|input| {
                let len = input.len();
                (Just(input), 1..len)
            })
    ) {
        let tmp_dir1 = TempPath::new();
        let db1 = DijetsDB::new_for_test(&tmp_dir1);
        let store1 = &db1.state_store;
        init_store(store1, input.clone().into_iter());

        let version = (input.len() - 1) as Version;
        let expected_root_hash = store1.get_root_hash(version).unwrap();

        let tmp_dir2 = TempPath::new();
        let db2 = DijetsDB::new_for_test(&tmp_dir2);
        let store2 = &db2.state_store;

        let mut restore =
            JellyfishMerkleRestore::new(Arc::clone(store2), version, expected_root_hash).unwrap();

        let mut ordered_input: Vec<_> = input
            .into_iter()
            .map(|(addr, value)| (addr.hash(), value))
            .collect();
        ordered_input.sort_unstable_by_key(|(key, _value)| *key);

        let batch1: Vec<_> = ordered_input.into_iter().take(batch1_size).collect();
        let rightmost_of_batch1 = batch1.last().map(|(key, _value)| *key).unwrap();
        let proof_of_batch1 = store1
            .get_account_state_range_proof(rightmost_of_batch1, version)
            .unwrap();

        restore.add_chunk(batch1, proof_of_batch1).unwrap();

        let expected = store2.get_rightmost_leaf_naive().unwrap();
        let actual = store2.get_rightmost_leaf().unwrap();
        prop_assert_eq!(actual, expected);
    }
}

// Initializes the state store by inserting one key at each version.
fn init_store(store: &StateStore, input: impl Iterator<Item = (AccountAddress, AccountStateBlob)>) {
    for (i, (key, value)) in input.enumerate() {
        let mut cs = ChangeSet::new();
        let account_state_set: HashMap<_, _> = std::iter::once((key, value)).collect();
        store
            .put_account_state_sets(vec![account_state_set], None, i as Version, &mut cs)
            .unwrap();
        store.db.write_schemas(cs.batch).unwrap();
    }
}
34.416446
101
0.599769
38f8d48683c1ae0a90ef5040b0013f49131e9a88
1,235
use actix::prelude::*;
use diesel::prelude::*;
use uuid::Uuid;

use actors::DbExecutor;
use models::Namespace;
use schema::namespace;

#[derive(Debug)]
pub enum Find {
    Any(Uuid),
    Active(Uuid),
    ByLabel(String),
}

impl Message for Find {
    type Result = QueryResult<Namespace>;
}

impl Handler<Find> for DbExecutor {
    type Result = QueryResult<Namespace>;

    fn handle(&mut self, msg: Find, _ctx: &mut Self::Context) -> Self::Result {
        let conn = &self.0.get().unwrap();
        match msg {
            Find::Any(id) => find_any(conn, id),
            Find::Active(id) => find_active(conn, id),
            Find::ByLabel(ref label) => find_by_label(conn, label),
        }
    }
}

fn find_any(conn: &PgConnection, id: Uuid) -> QueryResult<Namespace> {
    namespace::table.find(id).get_result(conn)
}

fn find_active(conn: &PgConnection, id: Uuid) -> QueryResult<Namespace> {
    namespace::table
        .filter(namespace::deleted_at.is_null())
        .find(id)
        .get_result(conn)
}

fn find_by_label(conn: &PgConnection, label: &str) -> QueryResult<Namespace> {
    namespace::table
        .filter(namespace::deleted_at.is_null())
        .filter(namespace::label.eq(label))
        .first(conn)
}
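// A minimal usage sketch (hypothetical: assumes `DbExecutor` wraps an r2d2
// connection pool named `pool`, as suggested by `self.0.get()` above; the
// actor setup is illustrative and not part of this module):
//
//     let addr = SyncArbiter::start(3, move || DbExecutor(pool.clone()));
//     // The returned future resolves to Result<QueryResult<Namespace>, MailboxError>.
//     let fut = addr.send(Find::ByLabel("default".to_string()));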
24.7
79
0.62753
9bbb1be74f8aa0af937ef31718d1072108d36e69
10,400
// #[cfg(test)]
// mod tests {
//     use drocoin::schema::*;
//     use serde_test::{assert_tokens, Token};
//     use chrono::NaiveDate;
//
//     #[test]
//     fn claims_serialize_correctly() {
//         let claims = Claims {
//             tha: "hashed_string".to_owned(),
//             iat: 0,
//             exp: 100,
//         };
//
//         assert_tokens(
//             &claims,
//             &[
//                 Token::Struct{name: "Claims", len: 3},
//                 Token::String("tha"),
//                 Token::String("hashed_string"),
//                 Token::String("iat"),
//                 Token::U64(0),
//                 Token::String("exp"),
//                 Token::U64(100),
//                 Token::StructEnd,
//             ]
//         )
//     }
//
//     #[test]
//     fn claims_deserialize_correctly() {
//         let data = r#"{"tha":"hashed_string","iat":0,"exp":100}"#;
//         let claims: Claims = serde_json::from_str(data).unwrap();
//         let expected_claims = Claims {
//             tha: "hashed_string".to_owned(),
//             iat: 0,
//             exp: 100,
//         };
//         assert_eq!(claims, expected_claims);
//     }
//
//     #[test]
//     fn transaction_serialize_correctly() {
//         let transaction = Transaction {
//             by: "source".to_owned(),
//             source: "source".to_owned(),
//             target: "target".to_owned(),
//             amount: 0,
//             timestamp: NaiveDate::from_ymd(2021, 4, 2).and_hms(4, 2, 42),
//         };
//
//         assert_tokens(
//             &transaction,
//             &[
//                 Token::Struct{name: "Transaction", len: 5},
//                 Token::String("by"),
//                 Token::String("source"),
//                 Token::String("source"),
//                 Token::String("source"),
//                 Token::String("target"),
//                 Token::String("target"),
//                 Token::String("amount"),
//                 Token::I32(0),
//                 Token::String("timestamp"),
//                 Token::String("2021-04-02T04:02:42"),
//                 Token::StructEnd,
//             ]
//         )
//     }
//
//     #[test]
//     fn transaction_deserialize_correctly() {
//         let data = r#"{"by":"source","source":"source","target":"target","amount":0,"timestamp":"2021-04-02T04:02:42"}"#;
//         let transaction: Transaction = serde_json::from_str(data).unwrap();
//         let expected_transaction = Transaction {
//             by: "source".to_owned(),
//             source: "source".to_owned(),
//             target: "target".to_owned(),
//             amount: 0,
//             timestamp: NaiveDate::from_ymd(2021, 4, 2).and_hms(4, 2, 42),
//         };
//         assert_eq!(transaction, expected_transaction);
//     }
//
//     #[test]
//     fn block_serialize_correctly() {
//         let block = Block {
//             transaction_list: vec!["transaction1".to_owned()],
//             nonce: 0,
//             timestamp: NaiveDate::from_ymd(2021, 4, 2).and_hms(4, 2, 42),
//             hash: "hash".to_owned()
//         };
//
//         assert_tokens(
//             &block,
//             &[
//                 Token::Struct{name: "Block", len: 4},
//                 Token::String("transaction_list"),
//                 Token::Seq {len: Some(1)},
//                 Token::String("transaction1"),
//                 Token::SeqEnd,
//                 Token::String("nonce"),
//                 Token::U32(0),
//                 Token::String("timestamp"),
//                 Token::String("2021-04-02T04:02:42"),
//                 Token::String("hash"),
//                 Token::String("hash"),
//                 Token::StructEnd,
//             ]
//         )
//     }
//
//     #[test]
//     fn block_deserialize_correctly() {
//         let expected_block = Block {
//             transaction_list: vec!["transaction1".to_owned()],
//             nonce: 0,
//             timestamp: NaiveDate::from_ymd(2021, 4, 2).and_hms(4, 2, 42),
//             hash: "hash".to_owned()
//         };
//         let data = r#"{"transaction_list":["transaction1"],"nonce":0,"timestamp":"2021-04-02T04:02:42","hash":"hash"}"#;
//         let block: Block = serde_json::from_str(data).unwrap();
//         assert_eq!(block, expected_block);
//     }
//
//     #[test]
//     fn block_serialize_when_vec_empty() {
//         let block = Block {
//             transaction_list: vec![],
//             nonce: 0,
//             timestamp: NaiveDate::from_ymd(2021, 4, 2).and_hms(4, 2, 42),
//             hash: "hash".to_owned()
//         };
//         let json = serde_json::to_string(&block).unwrap();
//         assert_eq!(json, r#"{"nonce":0,"timestamp":"2021-04-02T04:02:42","hash":"hash"}"#)
//     }
//
//     #[test]
//     fn naked_block_serialize_correctly() {
//         let naked_block = NakedBlock {
//             transaction_list: vec!["transaction1".to_owned()],
//             nonce: 0,
//             timestamp: NaiveDate::from_ymd(2021, 4, 2).and_hms(4, 2, 42),
//         };
//
//         assert_tokens(
//             &naked_block,
//             &[
//                 Token::Struct{name: "NakedBlock", len: 3},
//                 Token::String("transaction_list"),
//                 Token::Seq {len: Some(1)},
//                 Token::String("transaction1"),
//                 Token::SeqEnd,
//                 Token::String("nonce"),
//                 Token::U32(0),
//                 Token::String("timestamp"),
//                 Token::String("2021-04-02T04:02:42"),
//                 Token::StructEnd,
//             ]
//         )
//     }
//
//     #[test]
//     fn naked_block_deserialize_correctly() {
//         let expected_naked_block = NakedBlock {
//             transaction_list: vec!["transaction1".to_owned()],
//             nonce: 0,
//             timestamp: NaiveDate::from_ymd(2021, 4, 2).and_hms(4, 2, 42),
//         };
//         let data = r#"{"transaction_list":["transaction1"],"nonce":0,"timestamp":"2021-04-02T04:02:42"}"#;
//         let naked_block: NakedBlock = serde_json::from_str(data).unwrap();
//         assert_eq!(naked_block, expected_naked_block);
//     }
//
//     #[test]
//     fn naked_block_serialize_when_vec_empty() {
//         let naked_block = NakedBlock {
//             transaction_list: vec![],
//             nonce: 0,
//             timestamp: NaiveDate::from_ymd(2021, 4, 2).and_hms(4, 2, 42),
//         };
//         let json = serde_json::to_string(&naked_block).unwrap();
//         assert_eq!(json, r#"{"nonce":0,"timestamp":"2021-04-02T04:02:42"}"#)
//     }
//
//     #[test]
//     fn user_serialize_correctly() {
//         let user = User {
//             user_id: MetuId::new("e254275".to_owned(), "DtNX1qk4YF4saRH".to_owned()).unwrap(),
//             public_key: "public_key".to_owned(),
//             balance: 0
//         };
//
//         assert_tokens(
//             &user,
//             &[
//                 Token::Struct{name: "User", len: 3},
//                 Token::String("user_id"),
//                 Token::Struct {name: "MetuId", len: 2},
//                 Token::String("id"),
//                 Token::String("e254275"),
//                 Token::String("passwd"),
//                 Token::String("DtNX1qk4YF4saRH"),
//                 Token::StructEnd,
//                 Token::String("public_key"),
//                 Token::String("public_key"),
//                 Token::String("balance"),
//                 Token::I32(0),
//                 Token::StructEnd,
//             ]
//         )
//     }
//
//     #[test]
//     fn user_deserialize_correctly() {
//         let expected_user = User {
//             user_id: MetuId::new("e254275".to_owned(), "DtNX1qk4YF4saRH".to_owned()).unwrap(),
//             public_key: "public_key".to_owned(),
//             balance: 0
//         };
//         let data = r#"{"user_id":{"id":"e254275","passwd":"DtNX1qk4YF4saRH"},"public_key":"public_key","balance":0}"#;
//         let user: User = serde_json::from_str(data).unwrap();
//         assert_eq!(user, expected_user);
//     }
//
//     #[test]
//     fn metu_id_serialize_correctly() {
//         let metu_id = MetuId::new("e254275".to_owned(), "DtNX1qk4YF4saRH".to_owned()).unwrap();
//
//         assert_tokens(
//             &metu_id,
//             &[
//                 Token::Struct{name: "MetuId", len: 2},
//                 Token::String("id"),
//                 Token::String("e254275"),
//                 Token::String("passwd"),
//                 Token::String("DtNX1qk4YF4saRH"),
//                 Token::StructEnd,
//             ]
//         )
//     }
//
//     #[test]
//     fn metu_id_deserialize_correctly() {
//         let expected_metu_id = MetuId::new("e254275".to_owned(), "DtNX1qk4YF4saRH".to_owned()).unwrap();
//         let data = r#"{"id":"e254275","passwd":"DtNX1qk4YF4saRH"}"#;
//         let metu_id: MetuId = serde_json::from_str(data).unwrap();
//         assert_eq!(metu_id, expected_metu_id);
//     }
//
//     #[test]
//     fn auth_request_serialize_correctly() {
//         let auth_request = AuthRequest {
//             student_id: "e254275".to_owned(),
//             passwd: "DtNX1qk4YF4saRH".to_owned(),
//             public_key: "public_key".to_owned()
//         };
//
//         assert_tokens(
//             &auth_request,
//             &[
//                 Token::Struct{name: "AuthRequest", len: 3},
//                 Token::String("student_id"),
//                 Token::String("e254275"),
//                 Token::String("passwd"),
//                 Token::String("DtNX1qk4YF4saRH"),
//                 Token::String("public_key"),
//                 Token::String("public_key"),
//                 Token::StructEnd,
//             ]
//         )
//     }
//
//     #[test]
//     fn auth_request_deserialize_correctly() {
//         let expected_auth_request = AuthRequest {
//             student_id: "e254275".to_owned(),
//             passwd: "DtNX1qk4YF4saRH".to_owned(),
//             public_key: "public_key".to_owned()
//         };
//         let data = r#"{"student_id":"e254275","passwd":"DtNX1qk4YF4saRH","public_key":"public_key"}"#;
//         let auth_request: AuthRequest = serde_json::from_str(data).unwrap();
//         assert_eq!(auth_request, expected_auth_request);
//     }
// }
33.440514
124
0.46625
c12c6a466c79df874e57e4445f1ea52f6e4feff9
1,500
use test::{self, Bencher};
use na::{DMatrix, Hessenberg, Matrix4};

#[path = "../common/macros.rs"]
mod macros;

// Without unpack.
#[bench]
fn hessenberg_decompose_4x4(bh: &mut Bencher) {
    let m = Matrix4::<f64>::new_random();
    bh.iter(|| test::black_box(Hessenberg::new(m.clone())))
}

#[bench]
fn hessenberg_decompose_100x100(bh: &mut Bencher) {
    let m = DMatrix::<f64>::new_random(100, 100);
    bh.iter(|| test::black_box(Hessenberg::new(m.clone())))
}

#[bench]
fn hessenberg_decompose_200x200(bh: &mut Bencher) {
    let m = DMatrix::<f64>::new_random(200, 200);
    bh.iter(|| test::black_box(Hessenberg::new(m.clone())))
}

#[bench]
fn hessenberg_decompose_500x500(bh: &mut Bencher) {
    let m = DMatrix::<f64>::new_random(500, 500);
    bh.iter(|| test::black_box(Hessenberg::new(m.clone())))
}

// With unpack.
#[bench]
fn hessenberg_decompose_unpack_100x100(bh: &mut Bencher) {
    let m = DMatrix::<f64>::new_random(100, 100);
    bh.iter(|| {
        let hess = Hessenberg::new(m.clone());
        let _ = hess.unpack();
    })
}

#[bench]
fn hessenberg_decompose_unpack_200x200(bh: &mut Bencher) {
    let m = DMatrix::<f64>::new_random(200, 200);
    bh.iter(|| {
        let hess = Hessenberg::new(m.clone());
        let _ = hess.unpack();
    })
}

#[bench]
fn hessenberg_decompose_unpack_500x500(bh: &mut Bencher) {
    let m = DMatrix::<f64>::new_random(500, 500);
    bh.iter(|| {
        let hess = Hessenberg::new(m.clone());
        let _ = hess.unpack();
    })
}
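// Running these benchmarks (a sketch; the exact bench target name depends on
// how it is registered in Cargo.toml): the built-in `test` crate is unstable,
// so a nightly toolchain and `#![feature(test)]` plus `extern crate test;` in
// the benchmark crate root are required. Something like:
//
//     cargo +nightly bench hessenberg_decompose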
25.423729
59
0.624667
7921a9cd87c71b16286c546dceded9ca47448008
1,251
// Copyright 2022 The Engula Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use engula_api::server::v1::ShardPutRequest;

use crate::{
    node::engine::{GroupEngine, WriteBatch},
    serverpb::v1::{EvalResult, WriteBatchRep},
    Error, Result,
};

pub async fn put(group_engine: &GroupEngine, req: &ShardPutRequest) -> Result<EvalResult> {
    let put = req
        .put
        .as_ref()
        .ok_or_else(|| Error::InvalidArgument("ShardPutRequest::put is None".into()))?;
    let mut wb = WriteBatch::default();
    group_engine.put(&mut wb, req.shard_id, &put.key, &put.value)?;
    Ok(EvalResult {
        batch: Some(WriteBatchRep {
            data: wb.data().to_owned(),
        }),
        ..Default::default()
    })
}
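// Note (a reading of this file, not an API reference): the write is staged in
// a WriteBatch and returned serialized inside EvalResult rather than applied
// to the engine directly, which leaves the caller free to decide when and
// where the batch is actually committed. A hypothetical call site, with field
// names inferred from the accessors above:
//
//     // let eval_result = put(&group_engine, &req).await?;
//     // eval_result.batch.unwrap().data holds the raw batch bytes.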
32.921053
91
0.676259