Dataset columns (name: type, min–max over the split):

hexsha: string, length 40–40
size: int64, 4–1.05M
content: string, length 4–1.05M
avg_line_length: float64, 1.33–100
max_line_length: int64, 1–1k
alphanum_fraction: float64, 0.25–1
hexsha: 72a6b4a9e525d4f1240b96eacfd52c7edae91840
size: 2,098
content:
```rust
// Copyright (c) The Dijets Core Contributors
// SPDX-License-Identifier: Apache-2.0

use crate::experimental;
use thiserror::Error;

#[derive(Debug, Error)]
#[error(transparent)]
pub struct DbError {
    #[from]
    inner: anyhow::Error,
}

#[derive(Debug, Error)]
#[error(transparent)]
pub struct StateSyncError {
    #[from]
    inner: anyhow::Error,
}

impl From<experimental::errors::Error> for StateSyncError {
    fn from(e: experimental::errors::Error) -> Self {
        StateSyncError { inner: e.into() }
    }
}

impl From<executor_types::Error> for StateSyncError {
    fn from(e: executor_types::Error) -> Self {
        StateSyncError { inner: e.into() }
    }
}

#[derive(Debug, Error)]
#[error(transparent)]
pub struct MempoolError {
    #[from]
    inner: anyhow::Error,
}

#[derive(Debug, Error)]
#[error(transparent)]
pub struct VerifyError {
    #[from]
    inner: anyhow::Error,
}

pub fn error_kind(e: &anyhow::Error) -> &'static str {
    if e.downcast_ref::<executor_types::Error>().is_some() {
        return "Execution";
    }
    if let Some(e) = e.downcast_ref::<StateSyncError>() {
        if e.inner.downcast_ref::<executor_types::Error>().is_some() {
            return "Execution";
        }
        return "StateSync";
    }
    if e.downcast_ref::<MempoolError>().is_some() {
        return "Mempool";
    }
    if e.downcast_ref::<DbError>().is_some() {
        return "ConsensusDb";
    }
    if e.downcast_ref::<safety_rules::Error>().is_some() {
        return "SafetyRules";
    }
    if e.downcast_ref::<VerifyError>().is_some() {
        return "VerifyError";
    }
    "InternalError"
}

#[cfg(test)]
mod tests {
    use crate::error::{error_kind, StateSyncError};
    use anyhow::Context;

    #[test]
    fn conversion_and_downcast() {
        let error = executor_types::Error::InternalError {
            error: "lalala".to_string(),
        };
        let typed_error: StateSyncError = error.into();
        let upper: anyhow::Result<()> = Err(typed_error).context("Context!");
        assert_eq!(error_kind(&upper.unwrap_err()), "Execution");
    }
}
```
avg_line_length: 24.114943
max_line_length: 77
alphanum_fraction: 0.617255

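The `error_kind` classifier above relies on `anyhow::Error::downcast_ref` still finding the original error through added context layers. A minimal self-contained sketch of that pattern, using a hypothetical stand-in error type (`DemoError` is not from the source crate):

```rust
use anyhow::Context;
use thiserror::Error;

// Hypothetical stand-in for the crate-specific error types above.
#[derive(Debug, Error)]
#[error("demo failure")]
struct DemoError;

fn kind(e: &anyhow::Error) -> &'static str {
    if e.downcast_ref::<DemoError>().is_some() {
        return "Demo";
    }
    "InternalError"
}

fn main() {
    let err = anyhow::Error::from(DemoError).context("wrapped with context");
    // downcast_ref walks the context chain, so the classification survives
    // wrapping; this is exactly what conversion_and_downcast exercises.
    assert_eq!(kind(&err), "Demo");
}
```
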
hexsha: 621cf2b403dd7f6205a2fee395f0bcc8ccc02a03
size: 10,189
content:
```rust
use cgmath::*;
use winit::window::Window;

use crate::{texture::Texture, Options};

use super::{app_state::AppContext, game_state, gpu_state};

// ---------------------------------------------------------------------------------------------------------------------

#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct LcdUniformData {
    camera_position: Point2<f32>,
    viewport_size: Vector2<f32>,
    pixels_per_unit: Vector2<f32>,
    pixel_effect_alpha: f32,
    shadow_effect_alpha: f32,
}

unsafe impl bytemuck::Pod for LcdUniformData {}
unsafe impl bytemuck::Zeroable for LcdUniformData {}

impl Default for LcdUniformData {
    fn default() -> Self {
        Self {
            camera_position: point2(0.0, 0.0),
            viewport_size: vec2(1.0, 1.0),
            pixels_per_unit: vec2(1.0, 1.0),
            pixel_effect_alpha: 1.0,
            shadow_effect_alpha: 1.0,
        }
    }
}

impl LcdUniformData {
    pub fn set_pixel_effect_alpha(&mut self, pixel_effect_alpha: f32) -> &mut Self {
        self.pixel_effect_alpha = pixel_effect_alpha;
        self
    }

    pub fn set_shadow_effect_alpha(&mut self, shadow_effect_alpha: f32) -> &mut Self {
        self.shadow_effect_alpha = shadow_effect_alpha;
        self
    }

    pub fn set_camera_position(&mut self, camera_position: Point2<f32>) -> &mut Self {
        self.camera_position = camera_position;
        self
    }

    pub fn set_viewport_size(&mut self, viewport_size: Vector2<f32>) -> &mut Self {
        self.viewport_size = viewport_size;
        self
    }

    pub fn set_pixels_per_unit(&mut self, pixels_per_unit: Vector2<f32>) -> &mut Self {
        self.pixels_per_unit = pixels_per_unit;
        self
    }
}

pub type LcdUniforms = crate::util::UniformWrapper<LcdUniformData>;

// ---------------------------------------------------------------------------------------------------------------------

pub struct LcdFilter {
    textures_bind_group_layout: wgpu::BindGroupLayout,
    textures_bind_group: wgpu::BindGroup,
    pipeline: wgpu::RenderPipeline,
    tonemap: Texture,
    uniforms: LcdUniforms,
}

impl LcdFilter {
    pub fn new(gpu: &mut gpu_state::GpuState, _options: &Options, tonemap: Texture) -> Self {
        let textures_bind_group_layout =
            gpu.device
                .create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                    label: Some("LcdFilter Bind Group Layout"),
                    entries: &[
                        // Color attachment
                        wgpu::BindGroupLayoutEntry {
                            binding: 0,
                            visibility: wgpu::ShaderStage::FRAGMENT,
                            ty: wgpu::BindingType::Texture {
                                multisampled: false,
                                sample_type: wgpu::TextureSampleType::Float { filterable: false },
                                view_dimension: wgpu::TextureViewDimension::D2,
                            },
                            count: None,
                        },
                        // Tonemap
                        wgpu::BindGroupLayoutEntry {
                            binding: 1,
                            visibility: wgpu::ShaderStage::FRAGMENT,
                            ty: wgpu::BindingType::Texture {
                                multisampled: false,
                                sample_type: wgpu::TextureSampleType::Float { filterable: false },
                                view_dimension: wgpu::TextureViewDimension::D2,
                            },
                            count: None,
                        },
                        // Sampler
                        wgpu::BindGroupLayoutEntry {
                            binding: 2,
                            visibility: wgpu::ShaderStage::FRAGMENT,
                            ty: wgpu::BindingType::Sampler {
                                comparison: false,
                                filtering: false,
                            },
                            count: None,
                        },
                    ],
                });

        let textures_bind_group =
            Self::create_textures_bind_group(&gpu, &textures_bind_group_layout, &tonemap.view);

        let uniforms = LcdUniforms::new(&gpu.device);

        let pipeline = Self::create_render_pipeline(
            &gpu.device,
            gpu.sc_desc.format,
            &textures_bind_group_layout,
            &uniforms.bind_group_layout,
        );

        Self {
            textures_bind_group_layout,
            textures_bind_group,
            pipeline,
            tonemap,
            uniforms,
        }
    }

    fn create_textures_bind_group(
        gpu: &gpu_state::GpuState,
        layout: &wgpu::BindGroupLayout,
        tonemap: &wgpu::TextureView,
    ) -> wgpu::BindGroup {
        gpu.device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: wgpu::BindingResource::TextureView(&gpu.color_attachment.view),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: wgpu::BindingResource::TextureView(&tonemap),
                },
                wgpu::BindGroupEntry {
                    binding: 2,
                    resource: wgpu::BindingResource::Sampler(&gpu.color_attachment.sampler),
                },
            ],
            label: Some("LcdFilter Bind Group"),
        })
    }

    fn create_render_pipeline(
        device: &wgpu::Device,
        color_format: wgpu::TextureFormat,
        textures_bind_group_layout: &wgpu::BindGroupLayout,
        uniforms_bind_group_layout: &wgpu::BindGroupLayout,
    ) -> wgpu::RenderPipeline {
        let vs_src = wgpu::include_spirv!("../shaders/lcd.vs.spv");
        let fs_src = wgpu::include_spirv!("../shaders/lcd.fs.spv");
        let vs_module = device.create_shader_module(&vs_src);
        let fs_module = device.create_shader_module(&fs_src);

        let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: Some("LcdFilter Render Pipeline Layout"),
            bind_group_layouts: &[&textures_bind_group_layout, &uniforms_bind_group_layout],
            push_constant_ranges: &[],
        });

        device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: Some("LcdFilter Render Pipeline"),
            layout: Some(&layout),
            vertex: wgpu::VertexState {
                module: &vs_module,
                entry_point: "main",
                buffers: &[],
            },
            fragment: Some(wgpu::FragmentState {
                module: &fs_module,
                entry_point: "main",
                targets: &[wgpu::ColorTargetState {
                    format: color_format,
                    alpha_blend: wgpu::BlendState::REPLACE,
                    color_blend: wgpu::BlendState::REPLACE,
                    write_mask: wgpu::ColorWrite::ALL,
                }],
            }),
            primitive: wgpu::PrimitiveState {
                topology: wgpu::PrimitiveTopology::TriangleStrip,
                strip_index_format: None,
                front_face: wgpu::FrontFace::Cw,
                cull_mode: wgpu::CullMode::None,
                polygon_mode: wgpu::PolygonMode::Fill,
            },
            depth_stencil: None,
            multisample: wgpu::MultisampleState {
                count: 1,
                mask: !0,
                alpha_to_coverage_enabled: false,
            },
        })
    }

    pub fn resize(
        &mut self,
        _window: &Window,
        _new_size: winit::dpi::PhysicalSize<u32>,
        gpu: &gpu_state::GpuState,
    ) {
        self.textures_bind_group = Self::create_textures_bind_group(
            gpu,
            &self.textures_bind_group_layout,
            &self.tonemap.view,
        );
    }

    pub fn update(
        &mut self,
        _dt: std::time::Duration,
        ctx: &mut AppContext,
        game: &game_state::GameState,
    ) {
        // Determine an appropriate alpha for pixel effects - as the window gets
        // smaller the effect needs to fade out, since it looks busy on small windows.
        // NOTE: min_high_freq and max_high_freq were determined via experimentation
        let pixel_effect_alpha = {
            let frequency = (game.camera_controller.projection.scale()
                * game.pixels_per_unit.x)
                / ctx.gpu.size.width as f32;
            let min_high_freq = 0.2;
            let max_high_freq = 0.5;
            let falloff =
                ((frequency - min_high_freq) / (max_high_freq - min_high_freq)).clamp(0.0, 1.0);
            1.0 - (falloff * falloff)
        };

        self.uniforms
            .data
            .set_pixel_effect_alpha(pixel_effect_alpha)
            .set_camera_position(game.camera_controller.camera.position().xy())
            .set_pixels_per_unit(game.pixels_per_unit)
            .set_viewport_size(game.camera_controller.projection.viewport_size());

        self.uniforms.write(&mut ctx.gpu.queue);
    }

    pub fn render(
        &mut self,
        _window: &Window,
        _gpu: &mut gpu_state::GpuState,
        frame: &wgpu::SwapChainFrame,
        encoder: &mut wgpu::CommandEncoder,
    ) {
        let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
            label: Some("LcdFilter Render Pass"),
            color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
                attachment: &frame.output.view,
                resolve_target: None,
                ops: wgpu::Operations {
                    load: wgpu::LoadOp::Load,
                    store: true,
                },
            }],
            depth_stencil_attachment: None,
        });
        render_pass.set_pipeline(&self.pipeline);
        render_pass.set_bind_group(0, &self.textures_bind_group, &[]);
        render_pass.set_bind_group(1, &self.uniforms.bind_group, &[]);
        render_pass.draw(0..3, 0..1);
    }
}
```
avg_line_length: 35.501742
max_line_length: 120
alphanum_fraction: 0.526352

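The setters on `LcdUniformData` return `&mut Self` so `update()` can chain several of them before a single uniform write. A minimal sketch of that builder-style pattern on a simplified stand-in struct (`Uniforms` here is hypothetical, not the source type):

```rust
#[derive(Debug, Default)]
struct Uniforms {
    pixel_effect_alpha: f32,
    camera_position: (f32, f32),
}

impl Uniforms {
    fn set_pixel_effect_alpha(&mut self, a: f32) -> &mut Self {
        self.pixel_effect_alpha = a;
        self // returning &mut Self is what lets the calls chain
    }
    fn set_camera_position(&mut self, p: (f32, f32)) -> &mut Self {
        self.camera_position = p;
        self
    }
}

fn main() {
    let mut u = Uniforms::default();
    // Mirrors the chained calls in LcdFilter::update before uniforms.write().
    u.set_pixel_effect_alpha(0.75).set_camera_position((4.0, 2.0));
    assert_eq!(u.pixel_effect_alpha, 0.75);
}
```
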
hexsha: de60303c894a4ce77a399bbbf3dcf53890f5bc75
size: 10,034
content:
```rust
//! Set of all the operations supported by the zkSync network.

use super::ZkSyncTx;
use crate::ZkSyncPriorityOp;
use anyhow::format_err;
use serde::{Deserialize, Serialize};
use zksync_crypto::params::CHUNK_BYTES;

mod change_pubkey_op;
mod close_op;
mod deposit_op;
mod forced_exit;
mod full_exit_op;
mod noop_op;
mod transfer_op;
mod transfer_to_new_op;
mod withdraw_op;

#[doc(hidden)]
pub use self::close_op::CloseOp;
pub use self::{
    change_pubkey_op::ChangePubKeyOp, deposit_op::DepositOp, forced_exit::ForcedExitOp,
    full_exit_op::FullExitOp, noop_op::NoopOp, transfer_op::TransferOp,
    transfer_to_new_op::TransferToNewOp, withdraw_op::WithdrawOp,
};
use zksync_basic_types::AccountId;

/// zkSync network operation.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum ZkSyncOp {
    Deposit(Box<DepositOp>),
    Transfer(Box<TransferOp>),
    /// Transfer to new operation is represented by a `Transfer` transaction,
    /// same as the `Transfer` operation. The difference is that for `TransferToNew` the
    /// recipient account doesn't exist and has to be created.
    TransferToNew(Box<TransferToNewOp>),
    Withdraw(Box<WithdrawOp>),
    #[doc(hidden)]
    Close(Box<CloseOp>),
    FullExit(Box<FullExitOp>),
    ChangePubKeyOffchain(Box<ChangePubKeyOp>),
    ForcedExit(Box<ForcedExitOp>),
    /// `NoOp` operation cannot be directly created, but it's used to fill the block capacity.
    Noop(NoopOp),
}

impl ZkSyncOp {
    /// Returns the number of block chunks required for the operation.
    pub fn chunks(&self) -> usize {
        match self {
            ZkSyncOp::Noop(_) => NoopOp::CHUNKS,
            ZkSyncOp::Deposit(_) => DepositOp::CHUNKS,
            ZkSyncOp::TransferToNew(_) => TransferToNewOp::CHUNKS,
            ZkSyncOp::Withdraw(_) => WithdrawOp::CHUNKS,
            ZkSyncOp::Close(_) => CloseOp::CHUNKS,
            ZkSyncOp::Transfer(_) => TransferOp::CHUNKS,
            ZkSyncOp::FullExit(_) => FullExitOp::CHUNKS,
            ZkSyncOp::ChangePubKeyOffchain(_) => ChangePubKeyOp::CHUNKS,
            ZkSyncOp::ForcedExit(_) => ForcedExitOp::CHUNKS,
        }
    }

    /// Returns the public data required for the Ethereum smart contract to commit the operation.
    pub fn public_data(&self) -> Vec<u8> {
        match self {
            ZkSyncOp::Noop(op) => op.get_public_data(),
            ZkSyncOp::Deposit(op) => op.get_public_data(),
            ZkSyncOp::TransferToNew(op) => op.get_public_data(),
            ZkSyncOp::Withdraw(op) => op.get_public_data(),
            ZkSyncOp::Close(op) => op.get_public_data(),
            ZkSyncOp::Transfer(op) => op.get_public_data(),
            ZkSyncOp::FullExit(op) => op.get_public_data(),
            ZkSyncOp::ChangePubKeyOffchain(op) => op.get_public_data(),
            ZkSyncOp::ForcedExit(op) => op.get_public_data(),
        }
    }

    /// Gets the witness required for the Ethereum smart contract.
    /// Unlike public data, some operations may not have a witness.
    ///
    /// Operations that have witness data:
    ///
    /// - `ChangePubKey`;
    pub fn eth_witness(&self) -> Option<Vec<u8>> {
        match self {
            ZkSyncOp::ChangePubKeyOffchain(op) => Some(op.get_eth_witness()),
            _ => None,
        }
    }

    /// Returns the withdrawal data for the operation, if any.
    ///
    /// Operations that have withdrawal data:
    ///
    /// - `Withdraw`;
    /// - `FullExit`;
    /// - `ForcedExit`.
    pub fn withdrawal_data(&self) -> Option<Vec<u8>> {
        match self {
            ZkSyncOp::Withdraw(op) => Some(op.get_withdrawal_data()),
            ZkSyncOp::FullExit(op) => Some(op.get_withdrawal_data()),
            ZkSyncOp::ForcedExit(op) => Some(op.get_withdrawal_data()),
            _ => None,
        }
    }

    /// Attempts to restore the operation from the public data committed on the Ethereum smart contract.
    pub fn from_public_data(bytes: &[u8]) -> Result<Self, anyhow::Error> {
        let op_type: u8 = *bytes.first().ok_or_else(|| format_err!("Empty pubdata"))?;
        match op_type {
            NoopOp::OP_CODE => Ok(ZkSyncOp::Noop(NoopOp::from_public_data(&bytes)?)),
            DepositOp::OP_CODE => Ok(ZkSyncOp::Deposit(Box::new(DepositOp::from_public_data(
                &bytes,
            )?))),
            TransferToNewOp::OP_CODE => Ok(ZkSyncOp::TransferToNew(Box::new(
                TransferToNewOp::from_public_data(&bytes)?,
            ))),
            WithdrawOp::OP_CODE => Ok(ZkSyncOp::Withdraw(Box::new(WithdrawOp::from_public_data(
                &bytes,
            )?))),
            CloseOp::OP_CODE => Ok(ZkSyncOp::Close(Box::new(CloseOp::from_public_data(
                &bytes,
            )?))),
            TransferOp::OP_CODE => Ok(ZkSyncOp::Transfer(Box::new(TransferOp::from_public_data(
                &bytes,
            )?))),
            FullExitOp::OP_CODE => Ok(ZkSyncOp::FullExit(Box::new(FullExitOp::from_public_data(
                &bytes,
            )?))),
            ChangePubKeyOp::OP_CODE => Ok(ZkSyncOp::ChangePubKeyOffchain(Box::new(
                ChangePubKeyOp::from_public_data(&bytes)?,
            ))),
            ForcedExitOp::OP_CODE => Ok(ZkSyncOp::ForcedExit(Box::new(
                ForcedExitOp::from_public_data(&bytes)?,
            ))),
            _ => Err(format_err!("Wrong operation type: {}", &op_type)),
        }
    }

    /// Returns the expected public data length (in bytes) for a certain type of operation.
    pub fn public_data_length(op_type: u8) -> Result<usize, anyhow::Error> {
        match op_type {
            NoopOp::OP_CODE => Ok(NoopOp::CHUNKS),
            DepositOp::OP_CODE => Ok(DepositOp::CHUNKS),
            TransferToNewOp::OP_CODE => Ok(TransferToNewOp::CHUNKS),
            WithdrawOp::OP_CODE => Ok(WithdrawOp::CHUNKS),
            CloseOp::OP_CODE => Ok(CloseOp::CHUNKS),
            TransferOp::OP_CODE => Ok(TransferOp::CHUNKS),
            FullExitOp::OP_CODE => Ok(FullExitOp::CHUNKS),
            ChangePubKeyOp::OP_CODE => Ok(ChangePubKeyOp::CHUNKS),
            ForcedExitOp::OP_CODE => Ok(ForcedExitOp::CHUNKS),
            _ => Err(format_err!("Wrong operation type: {}", &op_type)),
        }
        .map(|chunks| chunks * CHUNK_BYTES)
    }

    /// Attempts to interpret the operation as the L2 transaction.
    pub fn try_get_tx(&self) -> Result<ZkSyncTx, anyhow::Error> {
        match self {
            ZkSyncOp::Transfer(op) => Ok(ZkSyncTx::Transfer(Box::new(op.tx.clone()))),
            ZkSyncOp::TransferToNew(op) => Ok(ZkSyncTx::Transfer(Box::new(op.tx.clone()))),
            ZkSyncOp::Withdraw(op) => Ok(ZkSyncTx::Withdraw(Box::new(op.tx.clone()))),
            ZkSyncOp::Close(op) => Ok(ZkSyncTx::Close(Box::new(op.tx.clone()))),
            ZkSyncOp::ChangePubKeyOffchain(op) => {
                Ok(ZkSyncTx::ChangePubKey(Box::new(op.tx.clone())))
            }
            ZkSyncOp::ForcedExit(op) => Ok(ZkSyncTx::ForcedExit(Box::new(op.tx.clone()))),
            _ => Err(format_err!("Wrong tx type")),
        }
    }

    /// Attempts to interpret the operation as the L1 priority operation.
    pub fn try_get_priority_op(&self) -> Result<ZkSyncPriorityOp, anyhow::Error> {
        match self {
            ZkSyncOp::Deposit(op) => Ok(ZkSyncPriorityOp::Deposit(op.priority_op.clone())),
            ZkSyncOp::FullExit(op) => Ok(ZkSyncPriorityOp::FullExit(op.priority_op.clone())),
            _ => Err(format_err!("Wrong operation type")),
        }
    }

    /// Returns the list of account IDs affected by this operation.
    pub fn get_updated_account_ids(&self) -> Vec<AccountId> {
        match self {
            ZkSyncOp::Noop(op) => op.get_updated_account_ids(),
            ZkSyncOp::Deposit(op) => op.get_updated_account_ids(),
            ZkSyncOp::TransferToNew(op) => op.get_updated_account_ids(),
            ZkSyncOp::Withdraw(op) => op.get_updated_account_ids(),
            ZkSyncOp::Close(op) => op.get_updated_account_ids(),
            ZkSyncOp::Transfer(op) => op.get_updated_account_ids(),
            ZkSyncOp::FullExit(op) => op.get_updated_account_ids(),
            ZkSyncOp::ChangePubKeyOffchain(op) => op.get_updated_account_ids(),
            ZkSyncOp::ForcedExit(op) => op.get_updated_account_ids(),
        }
    }

    pub fn is_onchain_operation(&self) -> bool {
        matches!(
            self,
            &ZkSyncOp::Deposit(_)
                | &ZkSyncOp::Withdraw(_)
                | &ZkSyncOp::FullExit(_)
                | &ZkSyncOp::ChangePubKeyOffchain(_)
                | &ZkSyncOp::ForcedExit(_)
        )
    }

    pub fn is_processable_onchain_operation(&self) -> bool {
        matches!(
            self,
            &ZkSyncOp::Withdraw(_) | &ZkSyncOp::FullExit(_) | &ZkSyncOp::ForcedExit(_)
        )
    }

    pub fn is_priority_op(&self) -> bool {
        matches!(self, &ZkSyncOp::Deposit(_) | &ZkSyncOp::FullExit(_))
    }
}

impl From<NoopOp> for ZkSyncOp {
    fn from(op: NoopOp) -> Self {
        Self::Noop(op)
    }
}

impl From<DepositOp> for ZkSyncOp {
    fn from(op: DepositOp) -> Self {
        Self::Deposit(Box::new(op))
    }
}

impl From<TransferToNewOp> for ZkSyncOp {
    fn from(op: TransferToNewOp) -> Self {
        Self::TransferToNew(Box::new(op))
    }
}

impl From<WithdrawOp> for ZkSyncOp {
    fn from(op: WithdrawOp) -> Self {
        Self::Withdraw(Box::new(op))
    }
}

impl From<CloseOp> for ZkSyncOp {
    fn from(op: CloseOp) -> Self {
        Self::Close(Box::new(op))
    }
}

impl From<TransferOp> for ZkSyncOp {
    fn from(op: TransferOp) -> Self {
        Self::Transfer(Box::new(op))
    }
}

impl From<FullExitOp> for ZkSyncOp {
    fn from(op: FullExitOp) -> Self {
        Self::FullExit(Box::new(op))
    }
}

impl From<ChangePubKeyOp> for ZkSyncOp {
    fn from(op: ChangePubKeyOp) -> Self {
        Self::ChangePubKeyOffchain(Box::new(op))
    }
}

impl From<ForcedExitOp> for ZkSyncOp {
    fn from(op: ForcedExitOp) -> Self {
        Self::ForcedExit(Box::new(op))
    }
}
```
avg_line_length: 36.754579
max_line_length: 104
alphanum_fraction: 0.603249

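`from_public_data` above dispatches on the first byte of the committed pubdata. A self-contained sketch of that opcode-dispatch shape (the opcodes and variants here are illustrative placeholders, not the real zkSync values):

```rust
#[derive(Debug, PartialEq)]
enum Op {
    Noop,
    Deposit,
}

fn from_public_data(bytes: &[u8]) -> Result<Op, String> {
    // The first byte selects the operation type, mirroring ZkSyncOp::from_public_data.
    let op_type = *bytes.first().ok_or_else(|| "Empty pubdata".to_string())?;
    match op_type {
        0x00 => Ok(Op::Noop),
        0x01 => Ok(Op::Deposit),
        other => Err(format!("Wrong operation type: {}", other)),
    }
}

fn main() {
    assert_eq!(from_public_data(&[0x01, 0xAA]), Ok(Op::Deposit));
    assert!(from_public_data(&[]).is_err());
}
```
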
hexsha: e81c3bbd304311d66b99ae7e5d082c065bbb7fb0
size: 4,723
content:
```rust
use std::env;

use proc_macro2::Span;
use syn::parse::{Parse, ParseStream};
use syn::punctuated::Punctuated;
use syn::Token;
use syn::{Expr, ExprLit, ExprPath, Lit};
use tokio::fs;
use tokio_sqlx::describe::Describe;
use tokio_sqlx::Connection;

/// Macro input shared by `query!()` and `query_file!()`
pub struct QueryMacroInput {
    pub(super) source: String,
    pub(super) source_span: Span,
    pub(super) args: Vec<Expr>,
}

impl QueryMacroInput {
    fn from_exprs(input: ParseStream, mut args: impl Iterator<Item = Expr>) -> syn::Result<Self> {
        let sql = match args.next() {
            Some(Expr::Lit(ExprLit {
                lit: Lit::Str(sql), ..
            })) => sql,
            Some(other_expr) => {
                return Err(syn::Error::new_spanned(
                    other_expr,
                    "expected string literal",
                ));
            }
            None => return Err(input.error("expected SQL string literal")),
        };

        Ok(Self {
            source: sql.value(),
            source_span: sql.span(),
            args: args.collect(),
        })
    }

    pub async fn expand_file_src(self) -> syn::Result<Self> {
        let source = read_file_src(&self.source, self.source_span).await?;
        Ok(Self { source, ..self })
    }

    /// Run a parse/describe on the query described by this input and validate that it matches the
    /// passed number of args
    pub async fn describe_validate<C: Connection>(
        &self,
        conn: &mut C,
    ) -> crate::Result<Describe<C::Database>> {
        let describe = conn
            .describe(&self.source)
            .await
            .map_err(|e| syn::Error::new(self.source_span, e))?;

        if self.args.len() != describe.param_types.len() {
            return Err(syn::Error::new(
                Span::call_site(),
                format!(
                    "expected {} parameters, got {}",
                    describe.param_types.len(),
                    self.args.len()
                ),
            )
            .into());
        }

        Ok(describe)
    }
}

impl Parse for QueryMacroInput {
    fn parse(input: ParseStream) -> syn::Result<Self> {
        let args = Punctuated::<Expr, Token![,]>::parse_terminated(input)?.into_iter();
        Self::from_exprs(input, args)
    }
}

/// Macro input shared by `query_as!()` and `query_file_as!()`
pub struct QueryAsMacroInput {
    pub(super) as_ty: ExprPath,
    pub(super) query_input: QueryMacroInput,
}

impl QueryAsMacroInput {
    pub async fn expand_file_src(self) -> syn::Result<Self> {
        Ok(Self {
            query_input: self.query_input.expand_file_src().await?,
            ..self
        })
    }
}

impl Parse for QueryAsMacroInput {
    fn parse(input: ParseStream) -> syn::Result<Self> {
        let mut args = Punctuated::<Expr, Token![,]>::parse_terminated(input)?.into_iter();

        let as_ty = match args.next() {
            Some(Expr::Path(path)) => path,
            Some(other_expr) => {
                return Err(syn::Error::new_spanned(
                    other_expr,
                    "expected path to a type",
                ));
            }
            None => return Err(input.error("expected path to SQL file")),
        };

        Ok(QueryAsMacroInput {
            as_ty,
            query_input: QueryMacroInput::from_exprs(input, args)?,
        })
    }
}

async fn read_file_src(source: &str, source_span: Span) -> syn::Result<String> {
    use std::path::Path;

    let path = Path::new(source);

    if path.is_absolute() {
        return Err(syn::Error::new(
            source_span,
            "absolute paths will only work on the current machine",
        ));
    }

    // requires `proc_macro::SourceFile::path()` to be stable
    // https://github.com/rust-lang/rust/issues/54725
    if path.is_relative()
        && !path
            .parent()
            .map_or(false, |parent| !parent.as_os_str().is_empty())
    {
        return Err(syn::Error::new(
            source_span,
            "paths relative to the current file's directory are not currently supported",
        ));
    }

    let base_dir = env::var("CARGO_MANIFEST_DIR").map_err(|_| {
        syn::Error::new(
            source_span,
            "CARGO_MANIFEST_DIR is not set; please use Cargo to build",
        )
    })?;
    let base_dir_path = Path::new(&base_dir);

    let file_path = base_dir_path.join(path);

    fs::read_to_string(&file_path).await.map_err(|e| {
        syn::Error::new(
            source_span,
            format!("failed to read query file at {}: {}", file_path.display(), e),
        )
    })
}
```
avg_line_length: 28.281437
max_line_length: 98
alphanum_fraction: 0.539911

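`read_file_src` above anchors relative query-file paths at the crate root via `CARGO_MANIFEST_DIR`. A minimal synchronous sketch of that resolution step (`resolve_query_file` is a hypothetical helper; the real code returns `syn::Error` and uses `tokio::fs`):

```rust
use std::{env, fs, io, path::Path};

fn resolve_query_file(source: &str) -> io::Result<String> {
    // Cargo sets CARGO_MANIFEST_DIR for every build, so paths passed to
    // query_file!() resolve against the crate root rather than the cwd.
    let base = env::var("CARGO_MANIFEST_DIR")
        .map_err(|_| io::Error::new(io::ErrorKind::NotFound, "CARGO_MANIFEST_DIR is not set"))?;
    fs::read_to_string(Path::new(&base).join(source))
}

fn main() {
    // "queries/users.sql" is a hypothetical path for illustration.
    match resolve_query_file("queries/users.sql") {
        Ok(sql) => println!("read {} bytes of SQL", sql.len()),
        Err(e) => println!("failed: {}", e),
    }
}
```
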
hexsha: eb6544f446c0ace6d05addf3f4333ddad6da52b8
size: 176
content:
```rust
use actix_web::web;
use validator::Validate;

use crate::errors::JkError;

pub fn validate(data: &web::Json<impl Validate>) -> Result<(), JkError> {
    Ok(data.validate()?)
}
```
avg_line_length: 19.555556
max_line_length: 73
alphanum_fraction: 0.676136

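The helper above just forwards to `validator::Validate::validate` and lets the `?` convert the error into `JkError`. A self-contained sketch of the derive-based validation it relies on (`SignupData` and its rules are made up for illustration; assumes the `validator` crate with its derive feature enabled):

```rust
use validator::Validate;

#[derive(Debug, Validate)]
struct SignupData {
    #[validate(email)]
    email: String,
    #[validate(length(min = 8))]
    password: String,
}

fn main() {
    let bad = SignupData {
        email: "not-an-email".into(),
        password: "short".into(),
    };
    // validate() collects every failed rule into ValidationErrors,
    // which the JkError conversion in the helper would then wrap.
    assert!(bad.validate().is_err());
}
```
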
hexsha: 89443d0a802ecff3918ef1840a5f5b6e56a92d4e
size: 3,763
content:
```rust
use crate::{Board, OptSq, Piece, Sq, SqLike, SqStatus, Team, SIZE};

const PATTERN: [[isize; 2]; 4] = [[1, 1], [-1, 1], [-1, -1], [1, -1]];

pub fn get_translations<S: SqLike>(board: &Board, from: Sq, team: Team, piece: Piece) -> Vec<S> {
    let mut vec = Vec::new();
    let team = match board.find(from, Some(&team), Some(piece)) {
        Some(entity) => entity.team,
        None => return vec,
    };
    let mut lambda = |rank: isize, file: isize| -> bool {
        let target = match from.mutate(rank, file) {
            Some(sq) => sq,
            None => return false,
        };
        match board.find(target, None, None) {
            None => {
                vec.push(S::into(target, None));
                true
            }
            Some(entity) => {
                if entity.team != team {
                    vec.push(S::into(target, Some(entity.kind)));
                }
                false
            }
        }
    };
    for [a, b] in &PATTERN {
        for c in 1..SIZE as isize {
            if !lambda(c * a, c * b) {
                break;
            }
        }
    }
    vec
}

pub fn locate(board: &Board, to: Sq, from: OptSq, team: Team, piece: Piece) -> Option<Sq> {
    let lambda = |rank: isize, file: isize| -> SqStatus {
        if let Some(target) = to.mutate(rank, file) {
            let target = target.union(from);
            if let Some(sq) = board.legal_target(target, to, team, piece) {
                return SqStatus::Some(sq);
            }
        }
        SqStatus::None
    };
    for [a, b] in &PATTERN {
        for c in 1..SIZE as isize {
            match lambda(c * a, c * b) {
                SqStatus::Some(sq) => return Some(sq),
                SqStatus::Blocked => break,
                SqStatus::None => (),
            }
        }
    }
    None
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::execute::*;
    use crate::run;
    use crate::Entity;

    #[test]
    fn test_find_bishop() {
        let mut board = Board::new();
        board.clear();
        let sq = Sq::new(4, 4);
        for team in [Team::White, Team::Black].iter() {
            board.place(sq, Entity::new(Piece::Bishop, *team));
            board.turn_order = *team;
            for x in 0..8 {
                if x == 4 {
                    continue;
                }
                let to = Sq::new(x, x);
                assert_eq!(
                    locate(&board, to, OptSq::new(), board.turn_order, Piece::Bishop),
                    Some(sq),
                    "Bishop at {} should be able to move to {}",
                    sq,
                    to
                );
            }
        }
    }

    #[test]
    fn test_no_find_bishop() {
        let mut board = Board::new();
        board.clear();
        board.place(Sq::new(4, 4), Entity::new(Piece::Bishop, Team::White));
        let res = locate(
            &board,
            Sq::new(1, 0),
            OptSq::new(),
            board.turn_order,
            Piece::Bishop,
        );
        assert_eq!(res, None);
    }

    #[test]
    fn test_bishop_move_zero() {
        let mut board = Board::new();
        board.clear();
        board.place(Sq::new(4, 4), Entity::new(Piece::Bishop, Team::White));
        assert_eq!(
            locate(
                &board,
                Sq::new(4, 4),
                OptSq::new(),
                board.turn_order,
                Piece::Bishop
            ),
            None
        );
    }

    #[test]
    fn test_bishop_bd2_bd7() {
        let mut board = Board::new();
        run!(board, "d4", "d5", "Bd2", "Bd7");
    }

    #[test]
    fn test_bishop_bb5_bb4() {
        let mut board = Board::new();
        run!(board, "e4", "e5", "Bb5", "Bb4");
    }
}
```
avg_line_length: 27.467153
max_line_length: 97
alphanum_fraction: 0.433431

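Both functions above walk the four diagonal directions in `PATTERN`, scaling each unit step by `c` until a square is blocked or off the board. A self-contained sketch of how that expansion produces the ray offsets:

```rust
// The same direction table the bishop module uses.
const PATTERN: [[isize; 2]; 4] = [[1, 1], [-1, 1], [-1, -1], [1, -1]];
const SIZE: usize = 8;

fn main() {
    for [a, b] in &PATTERN {
        // c = 1..SIZE scales the unit direction into a full diagonal ray,
        // mirroring the inner loops in get_translations and locate.
        let ray: Vec<(isize, isize)> = (1..SIZE as isize).map(|c| (c * a, c * b)).collect();
        println!("direction ({}, {}): {:?}", a, b, ray);
    }
}
```
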
hexsha: ddb83dedcc0f007bebfbf9f7aaa4d8c1b77b7dea
size: 635
content:
```rust
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// aux-build:xcrate_address_insignificant.rs

extern crate foo = "xcrate_address_insignificant";

pub fn main() {
    assert_eq!(foo::foo::<f64>(), foo::bar());
}
```
avg_line_length: 35.277778
max_line_length: 69
alphanum_fraction: 0.732283

hexsha: 91643e07ed005c1c18a1a85f371afc39bebc4f84
size: 1,342
content:
```rust
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// Test that deaggregate fires more than once per block

enum Foo {
    A(i32),
    B,
}

fn test(x: i32) -> [Foo; 2] {
    [Foo::A(x), Foo::A(x)]
}

fn main() {
    // Make sure the function actually gets instantiated.
    test(0);
}

// END RUST SOURCE
// START rustc.test.Deaggregator.before.mir
// bb0: {
//     ...
//     _3 = _1;
//     ...
//     _2 = Foo::A(_3,);
//     ...
//     _5 = _1;
//     _4 = Foo::A(_5,);
//     ...
//     _0 = [_2, _4];
//     ...
//     return;
// }
// END rustc.test.Deaggregator.before.mir
// START rustc.test.Deaggregator.after.mir
// bb0: {
//     ...
//     _3 = _1;
//     ...
//     ((_2 as A).0: i32) = _3;
//     discriminant(_2) = 0;
//     ...
//     _5 = _1;
//     ((_4 as A).0: i32) = _5;
//     discriminant(_4) = 0;
//     ...
//     _0 = [_2, _4];
//     ...
//     return;
// }
// END rustc.test.Deaggregator.after.mir
```
avg_line_length: 22.366667
max_line_length: 68
alphanum_fraction: 0.553651

hexsha: 0836a5422d12f2f15112af390827b093d061bd5a
size: 1,071
content:
```rust
// Test dropping an AioCb that hasn't yet finished.
// This must happen in its own process, because on OSX this test seems to hose
// the AIO subsystem and causes subsequent tests to fail
#[test]
#[should_panic(expected = "Dropped an in-progress AioCb")]
#[cfg(all(not(target_env = "musl"),
          not(target_env = "uclibc"),
          any(target_os = "linux",
              target_os = "ios",
              target_os = "macos",
              target_os = "freebsd",
              target_os = "netbsd")))]
fn test_drop() {
    use nix::sys::aio::*;
    use nix::sys::signal::*;
    use std::os::unix::io::AsRawFd;
    use tempfile::tempfile;

    const WBUF: &[u8] = b"CDEF";

    let f = tempfile().unwrap();
    f.set_len(6).unwrap();
    let mut aiocb = Box::pin(AioWrite::new(f.as_raw_fd(),
                                           2, // offset
                                           WBUF,
                                           0, // priority
                                           SigevNotify::SigevNone));
    aiocb.as_mut().submit().unwrap();
}
```
avg_line_length: 35.7
max_line_length: 78
alphanum_fraction: 0.500467

hexsha: 0e46042c98f813b5087b81c7498d0191708e79f4
size: 1,682
content:
```rust
pub const BYTES_FOR_LENGTH_ENCODING: usize = 1;

pub const CURVE_TYPE_LENGTH: usize = 1;
pub const BLS12: u8 = 0x01;
pub const BN: u8 = 0x02;
pub const MNT4: u8 = 0x03;
pub const MNT6: u8 = 0x04;

pub const TWIST_TYPE_LENGTH: usize = 1;
pub const TWIST_TYPE_M: u8 = 0x01;
pub const TWIST_TYPE_D: u8 = 0x02;

pub const SIGN_ENCODING_LENGTH: usize = 1;
pub const SIGN_PLUS: u8 = 0x00;
pub const SIGN_MINUS: u8 = 0x01;

pub const BOOLEAN_ENCODING_LENGTH: usize = 1;
pub const BOOLEAN_FALSE: u8 = 0x00;
pub const BOOLEAN_TRUE: u8 = 0x01;

pub const EXTENSION_DEGREE_ENCODING_LENGTH: usize = 1;
pub const EXTENSION_DEGREE_2: u8 = 0x02;
pub const EXTENSION_DEGREE_3: u8 = 0x03;

pub const OPERATION_ENCODING_LENGTH: usize = 1;

pub const OPERATION_G1_ADD: u8 = 0x01;
pub const OPERATION_G1_MUL: u8 = 0x02;
pub const OPERATION_G1_MULTIEXP: u8 = 0x03;

pub const OPERATION_G2_ADD: u8 = 0x04;
pub const OPERATION_G2_MUL: u8 = 0x05;
pub const OPERATION_G2_MULTIEXP: u8 = 0x06;

pub const OPERATION_PAIRING: u8 = 0x07;

pub const NUM_LIMBS_MIN: usize = 4;
pub const NUM_LIMBS_MAX: usize = 16;

pub const NUM_GROUP_LIMBS_MIN: usize = 1;
pub const NUM_GROUP_LIMBS_MAX: usize = 16;

pub const MAX_MODULUS_BYTE_LEN: usize = 128;
pub const MAX_GROUP_BYTE_LEN: usize = 128;

use static_assertions::const_assert;

use crate::integers::*;

const_assert!(MAX_MODULUS_BYTE_LEN == NUM_LIMBS_MAX * 8);
const_assert!(MAX_GROUP_BYTE_LEN == NUM_GROUP_LIMBS_MAX * 8);

const_assert!(std::mem::size_of::<MaxFieldUint>() >= NUM_LIMBS_MAX * 8);
const_assert!(std::mem::size_of::<MaxFieldSquaredUint>() >= NUM_LIMBS_MAX * 8 * 2);
const_assert!(std::mem::size_of::<MaxGroupSizeUint>() >= NUM_GROUP_LIMBS_MAX * 8);
```
avg_line_length: 30.035714
max_line_length: 83
alphanum_fraction: 0.756243

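The `const_assert!` checks at the bottom tie the byte-length limits to the limb counts at compile time. A minimal self-contained sketch of that guard (values copied from the file; `static_assertions` is the same crate it uses):

```rust
use static_assertions::const_assert;

const NUM_LIMBS_MAX: usize = 16;
const MAX_MODULUS_BYTE_LEN: usize = 128;

// Each limb is 8 bytes (u64), so the byte limit must equal limbs * 8.
// If someone edits one constant without the other, compilation fails here.
const_assert!(MAX_MODULUS_BYTE_LEN == NUM_LIMBS_MAX * 8);

fn main() {}
```
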
f5696380e9f2296eb1cdf54c4249bc6c528bdf66
43,230
#![doc = "generated by AutoRust 0.1.0"] #![allow(unused_mut)] #![allow(unused_variables)] #![allow(unused_imports)] use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub mod storage_insights { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn get( operation_config: &crate::OperationConfig, resource_group_name: &str, workspace_name: &str, storage_insight_name: &str, subscription_id: &str, ) -> std::result::Result<StorageInsight, get::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.OperationalInsights/workspaces/{}/storageInsightConfigs/{}", &operation_config.base_path, subscription_id, resource_group_name, workspace_name, storage_insight_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get::BuildRequestError)?; let rsp = client.execute(req).await.context(get::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: StorageInsight = serde_json::from_slice(&body).context(get::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; get::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, resource_group_name: &str, workspace_name: &str, storage_insight_name: &str, parameters: &StorageInsight, subscription_id: &str, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.OperationalInsights/workspaces/{}/storageInsightConfigs/{}", &operation_config.base_path, subscription_id, resource_group_name, workspace_name, storage_insight_name ); let mut req_builder = client.put(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(create_or_update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(create_or_update::BuildRequestError)?; let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?; match rsp.status() { StatusCode::CREATED => { let body: bytes::Bytes = 
rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: StorageInsight = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Created201(rsp_value)) } StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: StorageInsight = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Ok200(rsp_value)) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; create_or_update::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod create_or_update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Created201(StorageInsight), Ok200(StorageInsight), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn delete( operation_config: &crate::OperationConfig, resource_group_name: &str, workspace_name: &str, storage_insight_name: &str, subscription_id: &str, ) -> std::result::Result<delete::Response, delete::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.OperationalInsights/workspaces/{}/storageInsightConfigs/{}", &operation_config.base_path, subscription_id, resource_group_name, workspace_name, storage_insight_name ); let mut req_builder = client.delete(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(delete::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(delete::BuildRequestError)?; let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(delete::Response::Ok200), StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?; delete::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod delete { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list_by_workspace( operation_config: &crate::OperationConfig, resource_group_name: &str, workspace_name: &str, subscription_id: &str, ) -> std::result::Result<StorageInsightListResult, list_by_workspace::Error> { let client = &operation_config.client; let uri_str = &format!( 
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.OperationalInsights/workspaces/{}/storageInsightConfigs", &operation_config.base_path, subscription_id, resource_group_name, workspace_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_by_workspace::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_by_workspace::BuildRequestError)?; let rsp = client.execute(req).await.context(list_by_workspace::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_workspace::ResponseBytesError)?; let rsp_value: StorageInsightListResult = serde_json::from_slice(&body).context(list_by_workspace::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_workspace::ResponseBytesError)?; list_by_workspace::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list_by_workspace { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod workspaces { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn list_link_targets( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<Vec<LinkTarget>, list_link_targets::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.OperationalInsights/linkTargets", &operation_config.base_path, subscription_id ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_link_targets::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_link_targets::BuildRequestError)?; let rsp = client.execute(req).await.context(list_link_targets::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_link_targets::ResponseBytesError)?; let rsp_value: Vec<LinkTarget> = serde_json::from_slice(&body).context(list_link_targets::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_link_targets::ResponseBytesError)?; list_link_targets::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list_link_targets { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, 
ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn get_schema( operation_config: &crate::OperationConfig, resource_group_name: &str, workspace_name: &str, subscription_id: &str, ) -> std::result::Result<SearchGetSchemaResponse, get_schema::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.OperationalInsights/workspaces/{}/schema", &operation_config.base_path, subscription_id, resource_group_name, workspace_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get_schema::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder.build().context(get_schema::BuildRequestError)?; let rsp = client.execute(req).await.context(get_schema::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get_schema::ResponseBytesError)?; let rsp_value: SearchGetSchemaResponse = serde_json::from_slice(&body).context(get_schema::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get_schema::ResponseBytesError)?; get_schema::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get_schema { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn purge( operation_config: &crate::OperationConfig, resource_group_name: &str, subscription_id: &str, workspace_name: &str, body: &WorkspacePurgeBody, ) -> std::result::Result<WorkspacePurgeResponse, purge::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.OperationalInsights/workspaces/{}/purge", &operation_config.base_path, subscription_id, resource_group_name, workspace_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(purge::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(body); let req = req_builder.build().context(purge::BuildRequestError)?; let rsp = client.execute(req).await.context(purge::ExecuteRequestError)?; match rsp.status() { StatusCode::ACCEPTED => { let body: bytes::Bytes = rsp.bytes().await.context(purge::ResponseBytesError)?; let rsp_value: WorkspacePurgeResponse = serde_json::from_slice(&body).context(purge::DeserializeError { body 
})?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(purge::ResponseBytesError)?; purge::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod purge { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn get_purge_status( operation_config: &crate::OperationConfig, resource_group_name: &str, subscription_id: &str, workspace_name: &str, purge_id: &str, ) -> std::result::Result<WorkspacePurgeStatusResponse, get_purge_status::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.OperationalInsights/workspaces/{}/operations/{}", &operation_config.base_path, subscription_id, resource_group_name, workspace_name, purge_id ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get_purge_status::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get_purge_status::BuildRequestError)?; let rsp = client.execute(req).await.context(get_purge_status::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get_purge_status::ResponseBytesError)?; let rsp_value: WorkspacePurgeStatusResponse = serde_json::from_slice(&body).context(get_purge_status::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get_purge_status::ResponseBytesError)?; get_purge_status::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get_purge_status { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list_keys( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, workspace_name: &str, ) -> std::result::Result<SharedKeys, list_keys::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.OperationalInsights/workspaces/{}/listKeys", &operation_config.base_path, subscription_id, resource_group_name, workspace_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_keys::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = 
req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder.build().context(list_keys::BuildRequestError)?; let rsp = client.execute(req).await.context(list_keys::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_keys::ResponseBytesError)?; let rsp_value: SharedKeys = serde_json::from_slice(&body).context(list_keys::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_keys::ResponseBytesError)?; list_keys::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list_keys { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn regenerate_shared_keys( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, workspace_name: &str, ) -> std::result::Result<SharedKeys, regenerate_shared_keys::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.OperationalInsights/workspaces/{}/regenerateSharedKey", &operation_config.base_path, subscription_id, resource_group_name, workspace_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(regenerate_shared_keys::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder.build().context(regenerate_shared_keys::BuildRequestError)?; let rsp = client.execute(req).await.context(regenerate_shared_keys::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(regenerate_shared_keys::ResponseBytesError)?; let rsp_value: SharedKeys = serde_json::from_slice(&body).context(regenerate_shared_keys::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(regenerate_shared_keys::ResponseBytesError)?; regenerate_shared_keys::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod regenerate_shared_keys { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn delete_gateways( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, workspace_name: &str, gateway_id: &str, ) -> std::result::Result<(), 
delete_gateways::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.OperationalInsights/workspaces/{}/gateways/{}", &operation_config.base_path, subscription_id, resource_group_name, workspace_name, gateway_id ); let mut req_builder = client.delete(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(delete_gateways::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(delete_gateways::BuildRequestError)?; let rsp = client.execute(req).await.context(delete_gateways::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(()), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(delete_gateways::ResponseBytesError)?; delete_gateways::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod delete_gateways { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn available_service_tiers( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, workspace_name: &str, ) -> std::result::Result<Vec<AvailableServiceTier>, available_service_tiers::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.OperationalInsights/workspaces/{}/availableServiceTiers", &operation_config.base_path, subscription_id, resource_group_name, workspace_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(available_service_tiers::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(available_service_tiers::BuildRequestError)?; let rsp = client.execute(req).await.context(available_service_tiers::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(available_service_tiers::ResponseBytesError)?; let rsp_value: Vec<AvailableServiceTier> = serde_json::from_slice(&body).context(available_service_tiers::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(available_service_tiers::ResponseBytesError)?; available_service_tiers::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod available_service_tiers { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, 
ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod saved_searches { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, workspace_name: &str, saved_search_id: &str, ) -> std::result::Result<SavedSearch, get::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.OperationalInsights/workspaces/{}/savedSearches/{}", &operation_config.base_path, subscription_id, resource_group_name, workspace_name, saved_search_id ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get::BuildRequestError)?; let rsp = client.execute(req).await.context(get::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: SavedSearch = serde_json::from_slice(&body).context(get::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; get::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, workspace_name: &str, saved_search_id: &str, parameters: &SavedSearch, ) -> std::result::Result<SavedSearch, create_or_update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.OperationalInsights/workspaces/{}/savedSearches/{}", &operation_config.base_path, subscription_id, resource_group_name, workspace_name, saved_search_id ); let mut req_builder = client.put(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(create_or_update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(create_or_update::BuildRequestError)?; let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: SavedSearch = 
serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; create_or_update::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod create_or_update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, workspace_name: &str, saved_search_id: &str, ) -> std::result::Result<(), delete::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.OperationalInsights/workspaces/{}/savedSearches/{}", &operation_config.base_path, subscription_id, resource_group_name, workspace_name, saved_search_id ); let mut req_builder = client.delete(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(delete::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(delete::BuildRequestError)?; let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(()), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?; delete::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod delete { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list_by_workspace( operation_config: &crate::OperationConfig, resource_group_name: &str, workspace_name: &str, subscription_id: &str, ) -> std::result::Result<SavedSearchesListResult, list_by_workspace::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.OperationalInsights/workspaces/{}/savedSearches", &operation_config.base_path, subscription_id, resource_group_name, workspace_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_by_workspace::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_by_workspace::BuildRequestError)?; let rsp = 
client.execute(req).await.context(list_by_workspace::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_workspace::ResponseBytesError)?; let rsp_value: SavedSearchesListResult = serde_json::from_slice(&body).context(list_by_workspace::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_workspace::ResponseBytesError)?; list_by_workspace::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list_by_workspace { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod operations { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<OperationListResult, list::Error> { let client = &operation_config.client; let uri_str = &format!("{}/providers/Microsoft.OperationalInsights/operations", &operation_config.base_path,); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list::BuildRequestError)?; let rsp = client.execute(req).await.context(list::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: OperationListResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; list::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } }
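All of the generated operations above share the same call shape (build request, optionally attach a bearer token, execute, match on status). A minimal, hypothetical call site for `available_service_tiers` follows; the placeholder IDs are assumptions, the `OperationConfig` must be constructed elsewhere in the crate, and `AvailableServiceTier` is assumed to derive `Debug`:

// Hypothetical usage sketch; `operation_config` is assumed to be built elsewhere
// in the crate (HTTP client, base path, API version, optional token credential).
async fn print_service_tiers(
    operation_config: &crate::OperationConfig,
) -> Result<(), Box<dyn std::error::Error>> {
    let tiers = available_service_tiers(
        operation_config,
        "00000000-0000-0000-0000-000000000000", // placeholder subscription id
        "example-rg",                           // placeholder resource group
        "example-workspace",                    // placeholder workspace
    )
    .await?;
    for tier in tiers {
        println!("{:?}", tier); // assumes AvailableServiceTier derives Debug
    }
    Ok(())
}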
49.349315
135
0.620495
690c8127a72737d30e3d65b5a9c9d800a3de5293
5,094
//! HTTP Server
//!
//! A `Server` is created to listen on a port, parse HTTP requests, and hand
//! them off to a `Service`.
//!
//! There are two levels of APIs provided for constructing HTTP servers:
//!
//! - The higher-level [`Server`](Server) type.
//! - The lower-level [`conn`](conn) module.
//!
//! # Server
//!
//! The [`Server`](Server) is the main way to start listening for HTTP requests.
//! It wraps a listener with a [`MakeService`](crate::service), and then should
//! be executed to start serving requests.
//!
//! [`Server`](Server) accepts connections in both HTTP1 and HTTP2 by default.
//!
//! ## Examples
//!
//! ```no_run
//! use std::convert::Infallible;
//! use std::net::SocketAddr;
//! use hyper::{Body, Request, Response, Server};
//! use hyper::service::{make_service_fn, service_fn};
//!
//! async fn handle(_req: Request<Body>) -> Result<Response<Body>, Infallible> {
//!     Ok(Response::new(Body::from("Hello World")))
//! }
//!
//! # #[cfg(feature = "runtime")]
//! #[tokio::main]
//! async fn main() {
//!     // Construct our SocketAddr to listen on...
//!     let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
//!
//!     // And a MakeService to handle each connection...
//!     let make_service = make_service_fn(|_conn| async {
//!         Ok::<_, Infallible>(service_fn(handle))
//!     });
//!
//!     // Then bind and serve...
//!     let server = Server::bind(&addr).serve(make_service);
//!
//!     // And run forever...
//!     if let Err(e) = server.await {
//!         eprintln!("server error: {}", e);
//!     }
//! }
//! # #[cfg(not(feature = "runtime"))]
//! # fn main() {}
//! ```
//!
//! If you don't need the connection and your service implements `Clone` you can use
//! [`tower::make::Shared`] instead of `make_service_fn` which is a bit simpler:
//!
//! ```no_run
//! # use std::convert::Infallible;
//! # use std::net::SocketAddr;
//! # use hyper::{Body, Request, Response, Server};
//! # use hyper::service::{make_service_fn, service_fn};
//! # use tower::make::Shared;
//! # async fn handle(_req: Request<Body>) -> Result<Response<Body>, Infallible> {
//! #     Ok(Response::new(Body::from("Hello World")))
//! # }
//! # #[cfg(feature = "runtime")]
//! #[tokio::main]
//! async fn main() {
//!     // Construct our SocketAddr to listen on...
//!     let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
//!
//!     // Shared is a MakeService that produces services by cloning an inner service...
//!     let make_service = Shared::new(service_fn(handle));
//!
//!     // Then bind and serve...
//!     let server = Server::bind(&addr).serve(make_service);
//!
//!     // And run forever...
//!     if let Err(e) = server.await {
//!         eprintln!("server error: {}", e);
//!     }
//! }
//! # #[cfg(not(feature = "runtime"))]
//! # fn main() {}
//! ```
//!
//! Passing data to your request handler can be done like so:
//!
//! ```no_run
//! use std::convert::Infallible;
//! use std::net::SocketAddr;
//! use hyper::{Body, Request, Response, Server};
//! use hyper::service::{make_service_fn, service_fn};
//! use hyper::server::conn::AddrStream;
//!
//! #[derive(Clone)]
//! struct AppContext {
//!     // Whatever data your application needs can go here
//! }
//!
//! async fn handle(
//!     context: AppContext,
//!     addr: SocketAddr,
//!     req: Request<Body>
//! ) -> Result<Response<Body>, Infallible> {
//!     Ok(Response::new(Body::from("Hello World")))
//! }
//!
//! # #[cfg(feature = "runtime")]
//! #[tokio::main]
//! async fn main() {
//!     let context = AppContext {
//!         // ...
//!     };
//!
//!     // A `MakeService` that produces a `Service` to handle each connection.
//!     let make_service = make_service_fn(move |conn: &AddrStream| {
//!         // We have to clone the context to share it with each invocation of
//!         // `make_service`. If your data doesn't implement `Clone` consider using
//!         // an `std::sync::Arc`.
//!         let context = context.clone();
//!
//!         // You can grab the address of the incoming connection like so.
//!         let addr = conn.remote_addr();
//!
//!         // Create a `Service` for responding to the request.
//!         let service = service_fn(move |req| {
//!             handle(context.clone(), addr, req)
//!         });
//!
//!         // Return the service to hyper.
//!         async move { Ok::<_, Infallible>(service) }
//!     });
//!
//!     // Run the server like above...
//!     let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
//!
//!     let server = Server::bind(&addr).serve(make_service);
//!
//!     if let Err(e) = server.await {
//!         eprintln!("server error: {}", e);
//!     }
//! }
//! # #[cfg(not(feature = "runtime"))]
//! # fn main() {}
//! ```
//!
//! [`tower::make::Shared`]: https://docs.rs/tower/latest/tower/make/struct.Shared.html

pub mod accept;

cfg_feature! {
    #![any(feature = "http1", feature = "http2")]

    pub use self::server::{Builder, Server};

    pub mod conn;
    mod server;
    mod shutdown;

    cfg_feature! {
        #![feature = "tcp"]

        mod tcp;
    }
}
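One pattern the docs above stop short of is graceful shutdown. A short sketch using hyper's real `with_graceful_shutdown` combinator; the ctrl-c future is just one possible shutdown signal and requires tokio's "signal" feature:

// Sketch: stop accepting new connections on ctrl-c, then let in-flight
// requests finish before the server future resolves.
use std::convert::Infallible;
use std::net::SocketAddr;
use hyper::{Body, Request, Response, Server};
use hyper::service::{make_service_fn, service_fn};

async fn handle(_req: Request<Body>) -> Result<Response<Body>, Infallible> {
    Ok(Response::new(Body::from("Hello World")))
}

#[tokio::main]
async fn main() {
    let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
    let make_service =
        make_service_fn(|_conn| async { Ok::<_, Infallible>(service_fn(handle)) });
    let server = Server::bind(&addr).serve(make_service);

    // Drive the server until the given future resolves, then drain.
    let graceful = server.with_graceful_shutdown(async {
        tokio::signal::ctrl_c().await.expect("install ctrl-c handler");
    });

    if let Err(e) = graceful.await {
        eprintln!("server error: {}", e);
    }
}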
30.502994
88
0.568316
e66c7980671d22139818381af73b6e7481abc46a
532
use eyros::{DB,Coord};
use async_std::prelude::*;

type P = (Coord<f32>,Coord<f32>);
type V = u64;
type E = Box<dyn std::error::Error+Sync+Send>;

#[async_std::main]
async fn main() -> Result<(),E> {
  let mut db: DB<_,_,P,V> = eyros::open_from_path2(
    &std::path::PathBuf::from("/tmp/eyros.db")
  ).await?;
  let bbox = ((-180.0,-90.0),(180.0,90.0));
  let mut stream = db.query(&bbox).await?;
  let mut count = 0;
  while let Some(_result) = stream.next().await {
    count += 1;
  }
  println!("count={}", count);
  Ok(())
}
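A query over an empty database always yields zero results, so for the count above to be non-zero, rows have to be batched in first. A fragment sketching that step, assuming eyros's `Row`/`batch` API, which would slot in before the `db.query` call:

// (sketch) insert one point-valued row before querying; Coord::Scalar marks
// an exact coordinate, as opposed to a Coord::Interval range.
use eyros::Row;

let point: P = (Coord::Scalar(13.4), Coord::Scalar(52.5));
db.batch(&vec![Row::Insert(point, 42_u64)]).await?;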
23.130435
51
0.588346
9bb72d576ea592aaebf5347ee39d7463a2e373dc
1,689
use std::borrow::Cow;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SeparatorKind {
    Hard,
    Soft,
}

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum TokenKind {
    Word,
    /// the token is a stop word,
    /// meaning that it can be ignored to optimize size and performance or be indexed as a Word
    StopWord,
    /// the token is a separator,
    /// meaning that it shouldn't be indexed but used to determine word proximity
    Separator(SeparatorKind),
    Unknown,
}

impl Default for TokenKind {
    fn default() -> Self {
        Self::Unknown
    }
}

/// script of a token (https://docs.rs/whatlang/0.10.0/whatlang/enum.Script.html)
#[derive(Debug, Clone, Default)]
pub struct Token<'a> {
    pub kind: TokenKind,
    pub word: Cow<'a, str>,
    /// index of the first character of the word
    pub char_index: usize,
    /// indexes of start and end of the byte slice
    pub byte_start: usize,
    pub byte_end: usize,
}

impl<'a> PartialEq for Token<'a> {
    fn eq(&self, other: &Self) -> bool {
        self.text() == other.text()
    }
}

impl<'a> Eq for Token<'a> {}

impl<'a> Token<'a> {
    pub fn text(&self) -> &str {
        self.word.as_ref()
    }

    pub fn byte_len(&self) -> usize {
        self.byte_end - self.byte_start
    }

    pub fn kind(&self) -> TokenKind {
        self.kind
    }

    pub fn is_word(&self) -> bool {
        self.kind == TokenKind::Word
    }

    pub fn is_separator(&self) -> Option<SeparatorKind> {
        if let TokenKind::Separator(s) = self.kind {
            Some(s)
        } else {
            None
        }
    }

    pub fn is_stopword(&self) -> bool {
        self.kind == TokenKind::StopWord
    }
}
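A small illustrative snippet for the types above; everything it touches is local to this module, so it shows the intended use of the accessors without external assumptions:

// (sketch) build a token by hand and exercise the classification helpers.
fn classify(token: &Token) -> &'static str {
    match token.kind() {
        TokenKind::Word => "word",
        TokenKind::StopWord => "stop word",
        TokenKind::Separator(SeparatorKind::Hard) => "hard separator",
        TokenKind::Separator(SeparatorKind::Soft) => "soft separator",
        TokenKind::Unknown => "unknown",
    }
}

fn demo() {
    let token = Token {
        kind: TokenKind::Word,
        word: Cow::Borrowed("hello"),
        char_index: 0,
        byte_start: 0,
        byte_end: 5,
    };
    assert_eq!(token.byte_len(), 5);
    assert!(token.is_word());
    assert_eq!(classify(&token), "word");
}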
23.136986
95
0.589106
2655563260d012ea00aea50f58e23a48110d8db2
4,745
use sp_core::{Pair, Public, sr25519}; use filecoin_bridge_runtime::{ AccountId, AuraConfig, BalancesConfig, GenesisConfig, GrandpaConfig, SudoConfig, TssConfig, SystemConfig, WASM_BINARY, Signature }; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_finality_grandpa::AuthorityId as GrandpaId; use sp_runtime::traits::{Verify, IdentifyAccount}; use sc_service::ChainType; // The URL for the telemetry server. // const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; /// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. pub type ChainSpec = sc_service::GenericChainSpec<GenesisConfig>; /// Generate a crypto pair from seed. pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public { TPublic::Pair::from_string(&format!("//{}", seed), None) .expect("static values are valid; qed") .public() } type AccountPublic = <Signature as Verify>::Signer; /// Generate an account ID from seed. pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId where AccountPublic: From<<TPublic::Pair as Pair>::Public> { AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account() } /// Generate an Aura authority key. pub fn authority_keys_from_seed(s: &str) -> (AuraId, GrandpaId) { ( get_from_seed::<AuraId>(s), get_from_seed::<GrandpaId>(s), ) } pub fn development_config() -> Result<ChainSpec, String> { let wasm_binary = WASM_BINARY.ok_or("Development wasm binary not available".to_string())?; Ok(ChainSpec::from_genesis( // Name "Development", // ID "dev", ChainType::Development, move || testnet_genesis( wasm_binary, // Initial PoA authorities vec![ authority_keys_from_seed("Alice"), ], // Sudo account get_account_id_from_seed::<sr25519::Public>("Alice"), // Pre-funded accounts vec![ get_account_id_from_seed::<sr25519::Public>("Alice"), get_account_id_from_seed::<sr25519::Public>("Bob"), get_account_id_from_seed::<sr25519::Public>("Alice//stash"), get_account_id_from_seed::<sr25519::Public>("Bob//stash"), ], true, ), // Bootnodes vec![], // Telemetry None, // Protocol ID None, // Properties None, // Extensions None, )) } pub fn local_testnet_config() -> Result<ChainSpec, String> { let wasm_binary = WASM_BINARY.ok_or("Development wasm binary not available".to_string())?; Ok(ChainSpec::from_genesis( // Name "Local Testnet", // ID "local_testnet", ChainType::Local, move || testnet_genesis( wasm_binary, // Initial PoA authorities vec![ authority_keys_from_seed("Alice"), authority_keys_from_seed("Bob"), ], // Sudo account get_account_id_from_seed::<sr25519::Public>("Alice"), // Pre-funded accounts vec![ get_account_id_from_seed::<sr25519::Public>("Alice"), get_account_id_from_seed::<sr25519::Public>("Bob"), get_account_id_from_seed::<sr25519::Public>("Charlie"), get_account_id_from_seed::<sr25519::Public>("Dave"), get_account_id_from_seed::<sr25519::Public>("Eve"), get_account_id_from_seed::<sr25519::Public>("Ferdie"), get_account_id_from_seed::<sr25519::Public>("Alice//stash"), get_account_id_from_seed::<sr25519::Public>("Bob//stash"), get_account_id_from_seed::<sr25519::Public>("Charlie//stash"), get_account_id_from_seed::<sr25519::Public>("Dave//stash"), get_account_id_from_seed::<sr25519::Public>("Eve//stash"), get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"), ], true, ), // Bootnodes vec![], // Telemetry None, // Protocol ID None, // Properties None, // Extensions None, )) } /// Configure initial storage state for FRAME modules. 
fn testnet_genesis( wasm_binary: &[u8], initial_authorities: Vec<(AuraId, GrandpaId)>, root_key: AccountId, endowed_accounts: Vec<AccountId>, _enable_println: bool, ) -> GenesisConfig { let root_key_u8:[u8;32] = root_key.clone().into(); GenesisConfig { frame_system: Some(SystemConfig { // Add Wasm runtime to storage. code: wasm_binary.to_vec(), changes_trie_config: Default::default(), }), pallet_balances: Some(BalancesConfig { // Configure endowed accounts with initial balance of 1 << 60. balances: endowed_accounts.iter().cloned().map(|k|(k, 1 << 60)).collect(), }), pallet_aura: Some(AuraConfig { authorities: initial_authorities.iter().map(|x| (x.0.clone())).collect(), }), pallet_grandpa: Some(GrandpaConfig { authorities: initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect(), }), pallet_sudo: Some(SudoConfig { // Assign network admin rights. key: root_key.clone(), }), pallet_tss: Some(TssConfig { key: root_key_u8.to_vec(), }), } }
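Node CLIs typically dispatch between these constructors by chain id. A sketch of that `load_spec`-style dispatch, built only from the functions defined above (the id strings are placeholder conventions):

// (sketch) map a --chain id onto one of the spec constructors above.
fn load_spec(id: &str) -> Result<ChainSpec, String> {
    match id {
        "dev" => development_config(),
        "" | "local" => local_testnet_config(),
        other => Err(format!("unknown chain id: {}", other)),
    }
}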
29.110429
94
0.698841
dd2cbb2eb407cd4a951d6def06fe6758393bcac0
9,889
use crate::avm1::activation::Activation; use crate::avm1::error::Error; use crate::avm1::object::TObject; use crate::avm1::property_decl::{define_properties_on, Declaration}; use crate::avm1::Object; use crate::avm1::{ScriptObject, Value}; use crate::context_menu; use crate::display_object::TDisplayObject; use gc_arena::MutationContext; const PROTO_DECLS: &[Declaration] = declare_properties! { "copy" => method(copy; DONT_ENUM | DONT_DELETE); "hideBuiltInItems" => method(hide_builtin_items; DONT_ENUM | DONT_DELETE); }; pub fn constructor<'gc>( activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { let callback = args .get(0) .unwrap_or(&Value::Undefined) .coerce_to_object(activation); this.set("onSelect", callback.into(), activation)?; let built_in_items = ScriptObject::object( activation.context.gc_context, Some(activation.context.avm1.prototypes.object), ); built_in_items.set("print", true.into(), activation)?; built_in_items.set("forward_back", true.into(), activation)?; built_in_items.set("rewind", true.into(), activation)?; built_in_items.set("loop", true.into(), activation)?; built_in_items.set("play", true.into(), activation)?; built_in_items.set("quality", true.into(), activation)?; built_in_items.set("zoom", true.into(), activation)?; built_in_items.set("save", true.into(), activation)?; this.set("builtInItems", built_in_items.into(), activation)?; let constructor = activation.context.avm1.prototypes.array_constructor; let custom_items = constructor.construct(activation, &[])?; this.set("customItems", custom_items, activation)?; Ok(this.into()) } pub fn copy<'gc>( activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, _args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { let callback = this .get("onSelect", activation)? .coerce_to_object(activation); let constructor = activation.context.avm1.prototypes.context_menu_constructor; let copy = constructor .construct(activation, &[callback.into()])? .coerce_to_object(activation); let built_in = this .get("builtInItems", activation)? .coerce_to_object(activation); let copy_built_in = copy .get("builtInItems", activation)? .coerce_to_object(activation); let save = built_in .get("save", activation)? .as_bool(activation.swf_version()); let zoom = built_in .get("zoom", activation)? .as_bool(activation.swf_version()); let quality = built_in .get("quality", activation)? .as_bool(activation.swf_version()); let play = built_in .get("play", activation)? .as_bool(activation.swf_version()); let loop_ = built_in .get("loop", activation)? .as_bool(activation.swf_version()); let rewind = built_in .get("rewind", activation)? .as_bool(activation.swf_version()); let forward_back = built_in .get("forward_back", activation)? .as_bool(activation.swf_version()); let print = built_in .get("print", activation)? .as_bool(activation.swf_version()); copy_built_in.set("save", save.into(), activation)?; copy_built_in.set("zoom", zoom.into(), activation)?; copy_built_in.set("quality", quality.into(), activation)?; copy_built_in.set("play", play.into(), activation)?; copy_built_in.set("loop", loop_.into(), activation)?; copy_built_in.set("rewind", rewind.into(), activation)?; copy_built_in.set("forward_back", forward_back.into(), activation)?; copy_built_in.set("print", print.into(), activation)?; let custom_items = this .get("customItems", activation)? .coerce_to_object(activation); let custom_items_copy = copy .get("customItems", activation)? 
.coerce_to_object(activation); for i in 0..custom_items.length() { custom_items_copy.set_array_element( i, custom_items.array_element(i), activation.context.gc_context, ); } Ok(copy.into()) } pub fn hide_builtin_items<'gc>( activation: &mut Activation<'_, 'gc, '_>, this: Object<'gc>, _args: &[Value<'gc>], ) -> Result<Value<'gc>, Error<'gc>> { let built_in_items = this .get("builtInItems", activation)? .coerce_to_object(activation); built_in_items.set("zoom", false.into(), activation)?; built_in_items.set("quality", false.into(), activation)?; built_in_items.set("play", false.into(), activation)?; built_in_items.set("loop", false.into(), activation)?; built_in_items.set("rewind", false.into(), activation)?; built_in_items.set("forward_back", false.into(), activation)?; built_in_items.set("print", false.into(), activation)?; Ok(Value::Undefined) } pub fn create_proto<'gc>( gc_context: MutationContext<'gc, '_>, proto: Object<'gc>, fn_proto: Object<'gc>, ) -> Object<'gc> { let object = ScriptObject::object(gc_context, Some(proto)); define_properties_on(PROTO_DECLS, gc_context, object, fn_proto); object.into() } pub fn make_context_menu_state<'gc>( menu: Option<Object<'gc>>, activation: &mut Activation<'_, 'gc, '_>, ) -> context_menu::ContextMenuState<'gc> { let mut result = context_menu::ContextMenuState::new(); let root_mc = activation.context.stage.root_clip().as_movie_clip(); let builtin_items = { let is_multiframe_movie = root_mc.map(|mc| mc.total_frames() > 1).unwrap_or(false); let mut names = if is_multiframe_movie { vec![ "zoom", "quality", "play", "loop", "rewind", "forward_back", "print", ] } else { vec!["zoom", "quality", "print"] }; if let Some(menu) = menu { if let Ok(Value::Object(builtins)) = menu.get("builtInItems", activation) { names.retain(|name| { !matches!(builtins.get(name, activation), Ok(Value::Bool(false))) }); } } names }; if builtin_items.contains(&"play") { let is_playing_root_movie = root_mc.unwrap().playing(); result.push( context_menu::ContextMenuItem { enabled: true, separator_before: true, caption: "Play".to_string(), checked: is_playing_root_movie, }, context_menu::ContextMenuCallback::Play, ); } if builtin_items.contains(&"rewind") { let is_first_frame = root_mc.unwrap().current_frame() <= 1; result.push( context_menu::ContextMenuItem { enabled: !is_first_frame, separator_before: true, caption: "Rewind".to_string(), checked: false, }, context_menu::ContextMenuCallback::Rewind, ); } if builtin_items.contains(&"forward_back") { let is_first_frame = root_mc.unwrap().current_frame() <= 1; result.push( context_menu::ContextMenuItem { enabled: true, separator_before: false, caption: "Forward".to_string(), checked: false, }, context_menu::ContextMenuCallback::Forward, ); result.push( context_menu::ContextMenuItem { enabled: !is_first_frame, separator_before: false, caption: "Back".to_string(), checked: false, }, context_menu::ContextMenuCallback::Back, ); } if let Some(menu) = menu { if let Ok(Value::Object(custom_items)) = menu.get("customItems", activation) { for (i, item) in custom_items.array().iter().enumerate() { if let Value::Object(item) = item { let caption = if let Ok(Value::String(caption)) = item.get("caption", activation) { caption } else { continue; }; let on_select = if let Ok(Value::Object(on_select)) = item.get("onSelect", activation) { on_select } else { continue; }; // false if `false`, everything else is true let visible = !matches!(item.get("visible", activation), Ok(Value::Bool(false))); // true if `true`, everything else is false let enabled = 
matches!(item.get("enabled", activation), Ok(Value::Bool(true))); let separator_before = matches!( item.get("separatorBefore", activation), Ok(Value::Bool(true)) ); if !visible { continue; } result.push( context_menu::ContextMenuItem { enabled, separator_before: separator_before || i == 0, caption: caption.to_string(), checked: false, }, context_menu::ContextMenuCallback::Avm1 { item: *item, callback: on_select, }, ); } } } } result }
35.317857
99
0.557589
1e92c06d950d4e81f3ee6f81b647823939aa9ed9
2,331
// Copyright 2017 th0rex
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use vulkano::format::{ClearValue, Format};
use vulkano::framebuffer::{LayoutAttachmentDescription, LayoutPassDependencyDescription,
                           LayoutPassDescription, LoadOp, RenderPassDesc,
                           RenderPassDescClearValues, StoreOp};
use vulkano::image::ImageLayout;

pub struct CustomRenderPassDesc {
    pub color: (Format, u32),
}

unsafe impl RenderPassDesc for CustomRenderPassDesc {
    fn num_attachments(&self) -> usize {
        1
    }

    fn attachment_desc(&self, id: usize) -> Option<LayoutAttachmentDescription> {
        if id == 0 {
            Some(LayoutAttachmentDescription {
                format: self.color.0,
                samples: self.color.1,
                load: LoadOp::Clear,
                store: StoreOp::Store,
                stencil_load: LoadOp::Clear,
                stencil_store: StoreOp::Store,
                initial_layout: ImageLayout::Undefined,
                final_layout: ImageLayout::ColorAttachmentOptimal,
            })
        } else {
            unreachable!();
        }
    }

    fn num_subpasses(&self) -> usize {
        1
    }

    fn subpass_desc(&self, id: usize) -> Option<LayoutPassDescription> {
        if id == 0 {
            Some(LayoutPassDescription {
                color_attachments: vec![(0, ImageLayout::ColorAttachmentOptimal)],
                depth_stencil: None,
                input_attachments: vec![],
                resolve_attachments: vec![],
                preserve_attachments: vec![],
            })
        } else {
            unreachable!();
        }
    }

    fn num_dependencies(&self) -> usize {
        0
    }

    fn dependency_desc(&self, _: usize) -> Option<LayoutPassDependencyDescription> {
        unreachable!();
    }
}

unsafe impl RenderPassDescClearValues<Vec<ClearValue>> for CustomRenderPassDesc {
    fn convert_clear_values(&self, values: Vec<ClearValue>) -> Box<dyn Iterator<Item = ClearValue>> {
        Box::new(values.into_iter())
    }
}
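A minimal sketch of instantiating the description above for a common 8-bit BGRA target without multisampling; the concrete format variant is a placeholder choice from vulkano's `Format` enum of that era:

// (sketch) one color attachment, single-sampled.
let desc = CustomRenderPassDesc {
    color: (Format::B8G8R8A8Unorm, 1), // (format, sample count)
};
assert_eq!(desc.num_attachments(), 1);
assert_eq!(desc.num_subpasses(), 1);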
31.931507
97
0.600172
140c682fd33927101826e7f2089879e4ad8658fc
1,998
use std::fmt;

/// An enum of all possible tokens allowed by the
/// [WebIDL grammar](https://heycam.github.io/webidl/#idl-grammar). A token in this case is a
/// terminal, either a static string or regular expression based token. Note that not all possible
/// simplifications are made, such as converting the `True` and `False` tokens to actual booleans.
/// This choice was made to be as consistent as possible with the WebIDL grammar.
#[allow(missing_docs)]
#[derive(Clone, Debug, PartialEq)]
pub enum Token {
    // Keywords
    Any,
    ArrayBuffer,
    Attribute,
    Boolean,
    Byte,
    ByteString,
    Callback,
    Const,
    DataView,
    Deleter,
    Dictionary,
    DOMString,
    Double,
    Enum,
    Error,
    False,
    Float,
    Float32Array,
    Float64Array,
    FrozenArray,
    Getter,
    Implements,
    Includes,
    Inherit,
    Int16Array,
    Int32Array,
    Int8Array,
    Interface,
    Iterable,
    LegacyCaller,
    Long,
    Maplike,
    Mixin,
    Namespace,
    NaN,
    NegativeInfinity,
    Null,
    Object,
    Octet,
    Optional,
    Or,
    Partial,
    PositiveInfinity,
    Promise,
    ReadOnly,
    Record,
    Required,
    Sequence,
    Setlike,
    Setter,
    Short,
    Static,
    Stringifier,
    Symbol,
    True,
    Typedef,
    USVString,
    Uint16Array,
    Uint32Array,
    Uint8Array,
    Uint8ClampedArray,
    Unrestricted,
    Unsigned,
    Void,

    // Regular expressions
    FloatLiteral(f64),
    Identifier(String),
    OtherLiteral(char),
    SignedIntegerLiteral(i64),
    StringLiteral(String),
    UnsignedIntegerLiteral(u64),

    // Symbols
    Colon,
    Comma,
    Ellipsis,
    Equals,
    GreaterThan,
    Hyphen,
    LeftBrace,
    LeftBracket,
    LeftParenthesis,
    LessThan,
    Period,
    QuestionMark,
    RightBrace,
    RightBracket,
    RightParenthesis,
    Semicolon,
}

impl fmt::Display for Token {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{:?}", self)
    }
}
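Since the `Display` impl simply forwards to `Debug`, tokens render as their variant names. A small check using only the enum defined above:

// (sketch) Display output mirrors the Debug form of each variant.
fn demo() {
    assert_eq!(Token::Semicolon.to_string(), "Semicolon");
    assert_eq!(
        Token::Identifier("foo".to_string()).to_string(),
        "Identifier(\"foo\")"
    );
}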
18.330275
98
0.624625
ac001523d3aeb6d32a668e73acdeaa8ce8cc185a
41
pub mod scheduler;
pub use scheduler::*;
13.666667
21
0.731707
e9e30615211667557288c4f53fa5a2210f7877af
2,613
use crate::{
    azure_context::AzureContext,
    query_objects::medicine_levels_in_pregnancy::report::{
        get_reports, get_reports_graph_from_reports_vector, Report, Reports,
    },
};
use anyhow::anyhow;
use async_graphql::{Context, FieldResult, Object};

#[derive(Debug, Eq, Ord, PartialEq, PartialOrd)]
pub struct SubstanceReports {
    name: String,
    reports: Option<Vec<Report>>,
}

impl SubstanceReports {
    pub fn new(name: String, reports: Option<Vec<Report>>) -> Self {
        Self { name, reports }
    }
}

#[Object(desc = "An active ingredient found in medical products")]
impl SubstanceReports {
    #[field(desc = "Name")]
    async fn name(&self) -> &str {
        &self.name
    }

    #[field(desc = "Reports related to active substance")]
    async fn reports(
        &self,
        context: &Context<'_>,
        first: Option<i32>,
        offset: Option<i32>,
    ) -> FieldResult<Reports> {
        let context = context.data::<AzureContext>()?;
        let offset = match offset {
            Some(a) => a,
            None => 0,
        };

        if let Some(reports) = self.reports.clone() {
            let total_count = reports.len() as i32;
            let reports = match first {
                Some(t) => reports.into_iter().take(t as usize).collect(),
                None => reports,
            };

            Ok(get_reports_graph_from_reports_vector(
                reports,
                offset,
                total_count,
            ))
        } else {
            get_reports(&context.bmgf_client, "", first, offset, Some(&self.name))
                .await
                .map(Into::into)
                .map_err(|e| {
                    tracing::error!("Error fetching reports from Azure search service: {:?}", e);
                    anyhow!("Error retrieving results").into()
                })
        }
    }
}

pub async fn get_substance(substance_name: String) -> Result<SubstanceReports, reqwest::Error> {
    Ok(SubstanceReports::new(substance_name, None))
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_sort_substances() {
        let mut substances = Vec::<SubstanceReports>::new();
        substances.push(SubstanceReports::new("Ibuprofen".to_owned(), None));
        substances.push(SubstanceReports::new("Paracetamol".to_owned(), None));
        substances.push(SubstanceReports::new("Aspirin".to_owned(), None));

        substances.sort();

        assert_eq!(substances[0].name, "Aspirin");
        assert_eq!(substances[1].name, "Ibuprofen");
        assert_eq!(substances[2].name, "Paracetamol");
    }
}
29.693182
98
0.574053
1c69cd5c42c96b8a9caa6b04394320c18ee33656
349
use std::io;
use std::process;

use rustventure::Config;

fn main() {
    let config = Config::parse();

    let stdin = io::stdin();
    let mut input = stdin.lock();
    let mut stdout = io::stdout();

    if let Err(err) = rustventure::run(config, &mut input, &mut stdout) {
        eprintln!("Error: {}", err);
        process::exit(1);
    }
}
19.388889
73
0.567335
03153bedc6c35e98bf0373d2e573d421375cb676
5,750
// Test for Macro, exact same as usecase-recursive use mpstthree::binary::struct_trait::{end::End, recv::Recv, send::Send, session::Session}; use mpstthree::functionmpst::close::close_mpst; use mpstthree::functionmpst::fork::fork_mpst; use mpstthree::meshedchannels::MeshedChannels; use mpstthree::role::broadcast::RoleBroadcast; use mpstthree::role::end::RoleEnd; use std::error::Error; use std::marker; use rand::{thread_rng, Rng}; use mpstthree::{ choose_mpst_to_all, create_multiple_normal_role, create_recv_mpst_session_1, create_recv_mpst_session_2, create_send_mpst_session_1, create_send_mpst_session_2, offer_mpst, }; // Create new roles create_multiple_normal_role!( RoleA, RoleADual | RoleB, RoleBDual | RoleC, RoleCDual | ); // Create new send functions create_send_mpst_session_1!(send_mpst_c_to_a, RoleA, RoleC); create_send_mpst_session_2!(send_mpst_a_to_c, RoleC, RoleA); create_send_mpst_session_2!(send_mpst_c_to_b, RoleB, RoleC); create_send_mpst_session_1!(send_mpst_b_to_a, RoleA, RoleB); create_send_mpst_session_1!(send_mpst_a_to_b, RoleB, RoleA); // Create new recv functions and related types create_recv_mpst_session_1!(recv_mpst_c_from_a, RoleA, RoleC); create_recv_mpst_session_2!(recv_mpst_a_from_c, RoleC, RoleA); create_recv_mpst_session_2!(recv_mpst_b_from_c, RoleC, RoleB); create_recv_mpst_session_1!(recv_mpst_b_from_a, RoleA, RoleB); create_recv_mpst_session_1!(recv_mpst_a_from_b, RoleB, RoleA); // Types type AtoBVideo<N> = Send<N, Recv<N, End>>; type AtoCVideo<N> = Recv<N, Send<N, RecursAtoC<N>>>; type InitA<N> = Recv<N, Send<N, RecursAtoC<N>>>; type BtoAVideo<N> = <AtoBVideo<N> as Session>::Dual; type RecursAtoC<N> = Recv<Branches0AtoC<N>, End>; type RecursBtoC<N> = Recv<Branches0BtoC<N>, End>; enum Branches0AtoC<N: marker::Send> { End(MeshedChannels<End, End, RoleEnd, RoleA<RoleEnd>>), Video(MeshedChannels<AtoBVideo<N>, AtoCVideo<N>, StackAVideo, RoleA<RoleEnd>>), } enum Branches0BtoC<N: marker::Send> { End(MeshedChannels<End, End, RoleEnd, RoleB<RoleEnd>>), Video(MeshedChannels<BtoAVideo<N>, RecursBtoC<N>, StackBVideo, RoleB<RoleEnd>>), } type Choose0fromCtoA<N> = Send<Branches0AtoC<N>, End>; type Choose0fromCtoB<N> = Send<Branches0BtoC<N>, End>; type InitC<N> = Send<N, Recv<N, Choose0fromCtoA<N>>>; /// Stacks type StackAVideo = RoleC<RoleB<RoleB<RoleC<RoleC<RoleEnd>>>>>; type StackAInit = RoleC<RoleC<RoleC<RoleEnd>>>; type StackBVideo = RoleA<RoleA<RoleC<RoleEnd>>>; type StackCRecurs = RoleBroadcast; type StackCFull = RoleA<RoleA<StackCRecurs>>; /// Creating the MP sessions /// For C type EndpointCRecurs<N> = MeshedChannels<Choose0fromCtoA<N>, Choose0fromCtoB<N>, StackCRecurs, RoleC<RoleEnd>>; type EndpointCFull<N> = MeshedChannels<InitC<N>, Choose0fromCtoB<N>, StackCFull, RoleC<RoleEnd>>; /// For A type EndpointARecurs<N> = MeshedChannels<End, RecursAtoC<N>, RoleC<RoleEnd>, RoleA<RoleEnd>>; type EndpointAFull<N> = MeshedChannels<End, InitA<N>, StackAInit, RoleA<RoleEnd>>; /// For B type EndpointBFull<N> = MeshedChannels<End, RecursBtoC<N>, RoleC<RoleEnd>, RoleB<RoleEnd>>; /// Functions related to endpoints fn server(s: EndpointBFull<i32>) -> Result<(), Box<dyn Error>> { offer_mpst!(s, recv_mpst_b_from_c, { Branches0BtoC::End(s) => { close_mpst(s) }, Branches0BtoC::Video(s) => { let (request, s) = recv_mpst_b_from_a(s)?; let s = send_mpst_b_to_a(request + 1, s); server(s) }, }) } fn authenticator(s: EndpointAFull<i32>) -> Result<(), Box<dyn Error>> { let (id, s) = recv_mpst_a_from_c(s)?; let s = send_mpst_a_to_c(id + 1, s); authenticator_recurs(s) } fn 
authenticator_recurs(s: EndpointARecurs<i32>) -> Result<(), Box<dyn Error>> { offer_mpst!(s, recv_mpst_a_from_c, { Branches0AtoC::End(s) => { close_mpst(s) }, Branches0AtoC::Video(s) => { let (request, s) = recv_mpst_a_from_c(s)?; let s = send_mpst_a_to_b(request + 1, s); let (video, s) = recv_mpst_a_from_b(s)?; let s = send_mpst_a_to_c(video + 1, s); authenticator_recurs(s) }, }) } fn client(s: EndpointCFull<i32>) -> Result<(), Box<dyn Error>> { let mut rng = thread_rng(); let xs: Vec<i32> = (1..100).map(|_| rng.gen()).collect(); let s = send_mpst_c_to_a(0, s); let (_, s) = recv_mpst_c_from_a(s)?; client_recurs(s, xs, 1) } fn client_recurs( s: EndpointCRecurs<i32>, mut xs: Vec<i32>, index: i32, ) -> Result<(), Box<dyn Error>> { match xs.pop() { Option::Some(_) => { let s = choose_mpst_to_all!( s, Branches0AtoC::Video, Branches0BtoC::Video, => RoleA, RoleB, => RoleC ); let s = send_mpst_c_to_a(1, s); let (_, s) = recv_mpst_c_from_a(s)?; client_recurs(s, xs, index + 1) } Option::None => { let s = choose_mpst_to_all!( s, Branches0AtoC::End, Branches0BtoC::End, => RoleA, RoleB, => RoleC ); assert_eq!(index, 100); close_mpst(s) } } } ///////////////////////////////////////// pub fn run_macro_recursive() { assert!(|| -> Result<(), Box<dyn Error>> { { let (thread_a, thread_b, thread_c) = fork_mpst(authenticator, server, client); assert!(thread_a.join().is_ok()); assert!(thread_b.join().is_ok()); assert!(thread_c.join().is_ok()); } Ok(()) }() .is_ok()); }
31.081081
99
0.633217
e600ccc4d414bbbd6143e283d6ea08c020dea4da
38,080
use syntect::highlighting::Style as SyntectStyle; use unicode_segmentation::UnicodeSegmentation; use crate::config::INLINE_SYMBOL_WIDTH_1; use crate::config::Config; use crate::delta::DiffType; use crate::delta::State; use crate::features::line_numbers::{self, SideBySideLineWidth}; use crate::features::side_by_side::{available_line_width, line_is_too_long, Left, Right}; use crate::minusplus::*; use crate::paint::LineSections; use crate::style::Style; /// See [`wrap_line`] for documentation. #[derive(Clone, Debug)] pub struct WrapConfig { pub left_symbol: String, pub right_symbol: String, pub right_prefix_symbol: String, // In fractions of 1000 so that a >100 wide panel can // still be configured down to a single character. pub use_wrap_right_permille: usize, // This value is --wrap-max-lines + 1, and unlimited is 0, see // adapt_wrap_max_lines_argument() pub max_lines: usize, pub inline_hint_syntect_style: SyntectStyle, } #[derive(PartialEq)] enum Stop { StackEmpty, LineLimit, } /// Wrap the given `line` if it is longer than `line_width`. Wrap to at most /// [Config::WrapConfig::max_lines](WrapConfig::max_lines) lines, /// then truncate again - but never truncate if it is `0`. Place /// [left_symbol](WrapConfig::left_symbol) at the end of wrapped lines. /// If wrapping results in only *one* extra line and if the width of the wrapped /// line is less than [use_wrap_right_permille](WrapConfig::use_wrap_right_permille) /// then right-align the second line and use the symbols /// [right_symbol](WrapConfig::right_symbol) and /// on the next line [right_prefix_symbol](WrapConfig::right_prefix_symbol). /// The inserted characters will follow the /// [inline_hint_syntect_style](WrapConfig::inline_hint_syntect_style). pub fn wrap_line<'a, I, S>( config: &'a Config, line: I, line_width: usize, fill_style: &S, inline_hint_style: &Option<S>, ) -> Vec<LineSections<'a, S>> where I: IntoIterator<Item = (S, &'a str)> + std::fmt::Debug, <I as IntoIterator>::IntoIter: DoubleEndedIterator, S: Copy + Default + std::fmt::Debug, { let mut result = Vec::new(); let wrap_config = &config.wrap_config; // The current line being assembled from the input to fit exactly into the given width. // A somewhat leaky abstraction as the fields are also accessed directly. struct CurrLine<'a, S: Default> { line_segments: LineSections<'a, S>, len: usize, } impl<'a, S: Default> CurrLine<'a, S> { fn reset() -> Self { CurrLine { line_segments: Vec::new(), len: 0, } } fn push_and_set_len(&mut self, text: (S, &'a str), len: usize) { self.line_segments.push(text); self.len = len; } fn has_text(&self) -> bool { self.len > 0 } fn text_len(&self) -> usize { self.len } } let mut curr_line = CurrLine::reset(); // Determine the background (diff) and color (syntax) of an inserted symbol. let symbol_style = match inline_hint_style { Some(style) => *style, None => *fill_style, }; let mut stack = line.into_iter().rev().collect::<Vec<_>>(); // If only the wrap symbol and no extra text fits, then wrapping is not possible. 
let max_lines = if line_width <= INLINE_SYMBOL_WIDTH_1 { 1 } else { wrap_config.max_lines }; let line_limit_reached = |result: &Vec<_>| max_lines > 0 && result.len() + 1 >= max_lines; let stop = loop { if stack.is_empty() { break Stop::StackEmpty; } else if line_limit_reached(&result) { break Stop::LineLimit; } let (style, text, graphemes) = stack .pop() .map(|(style, text)| (style, text, text.grapheme_indices(true).collect::<Vec<_>>())) .unwrap(); let new_len = curr_line.len + graphemes.len(); let must_split = if new_len < line_width { curr_line.push_and_set_len((style, text), new_len); false } else if new_len == line_width { match stack.last() { // Perfect fit, no need to make space for a `wrap_symbol`. None => { curr_line.push_and_set_len((style, text), new_len); false } #[allow(clippy::identity_op)] // A single '\n' left on the stack can be pushed onto the current line. Some((next_style, nl)) if stack.len() == 1 && *nl == "\n" => { curr_line.push_and_set_len((style, text), new_len); // Do not count the '\n': + 0 curr_line.push_and_set_len((*next_style, *nl), new_len + 0); stack.pop(); false } _ => true, } } else if new_len == line_width + 1 && stack.is_empty() { // If the one overhanging char is '\n' then keep it on the current line. if text.ends_with('\n') { // Do not count the included '\n': - 1 curr_line.push_and_set_len((style, text), new_len - 1); false } else { true } } else { true }; // Text must be split, one part (or just `wrap_symbol`) is added to the // current line, the other is pushed onto the stack. if must_split { let grapheme_split_pos = graphemes.len() - (new_len - line_width) - 1; // The length does not matter anymore and `curr_line` will be reset // at the end, so move the line segments out. let mut line_segments = curr_line.line_segments; let next_line = if grapheme_split_pos == 0 { text } else { let byte_split_pos = graphemes[grapheme_split_pos].0; let this_line = &text[..byte_split_pos]; line_segments.push((style, this_line)); &text[byte_split_pos..] }; stack.push((style, next_line)); line_segments.push((symbol_style, &wrap_config.left_symbol)); result.push(line_segments); curr_line = CurrLine::reset(); } }; // Right-align wrapped line: // Done if wrapping adds exactly one line and this line is less than the given // permille wide. Also change the wrap symbol at the end of the previous (first) line. if result.len() == 1 && curr_line.has_text() { let current_permille = (curr_line.text_len() * 1000) / line_width; let pad_len = line_width.saturating_sub(curr_line.text_len()); if wrap_config.use_wrap_right_permille > current_permille && pad_len > 0 { // The inserted spaces, which align a line to the right, point into this string. const SPACES: &str = " "; match result.last_mut() { Some(ref mut vec) if !vec.is_empty() => { vec.last_mut().unwrap().1 = &wrap_config.right_symbol } _ => unreachable!("wrap result must not be empty"), } let mut right_aligned_line = Vec::new(); for _ in 0..(pad_len / SPACES.len()) { right_aligned_line.push((*fill_style, SPACES)); } match pad_len % SPACES.len() { 0 => (), n => right_aligned_line.push((*fill_style, &SPACES[0..n])), } right_aligned_line.push((symbol_style, &wrap_config.right_prefix_symbol)); right_aligned_line.extend(curr_line.line_segments.into_iter()); curr_line.line_segments = right_aligned_line; // curr_line.len not updated, as only 0 / >0 for `has_text()` is required. 
} } if curr_line.has_text() { result.push(curr_line.line_segments); } if stop == Stop::LineLimit && result.len() != max_lines { result.push(Vec::new()); } // Anything that is left will be added to the (last) line. If this is too long it will // be truncated later. if !stack.is_empty() { if result.is_empty() { result.push(Vec::new()); } // unwrap: previous `if` ensures result can not be empty result.last_mut().unwrap().extend(stack.into_iter().rev()); } result } fn wrap_if_too_long<'a, S>( config: &'a Config, wrapped: &mut Vec<LineSections<'a, S>>, input_vec: LineSections<'a, S>, must_wrap: bool, line_width: usize, fill_style: &S, inline_hint_style: &Option<S>, ) -> (usize, usize) where S: Copy + Default + std::fmt::Debug, { let size_prev = wrapped.len(); if must_wrap { wrapped.append(&mut wrap_line( config, input_vec.into_iter(), line_width, fill_style, inline_hint_style, )); } else { wrapped.push(input_vec.to_vec()); } (size_prev, wrapped.len()) } /// Call [`wrap_line`] for the `syntax` and the `diff` lines if `wrapinfo` says /// a specific line was longer than `line_width`. Return an adjusted `alignment` /// with regard to the added wrapped lines. #[allow(clippy::comparison_chain, clippy::type_complexity)] pub fn wrap_minusplus_block<'c: 'a, 'a>( config: &'c Config, syntax: MinusPlus<Vec<LineSections<'a, SyntectStyle>>>, diff: MinusPlus<Vec<LineSections<'a, Style>>>, alignment: &[(Option<usize>, Option<usize>)], line_width: &SideBySideLineWidth, wrapinfo: &'a MinusPlus<Vec<bool>>, ) -> ( Vec<(Option<usize>, Option<usize>)>, MinusPlus<Vec<State>>, MinusPlus<Vec<LineSections<'a, SyntectStyle>>>, MinusPlus<Vec<LineSections<'a, Style>>>, ) { let mut new_alignment = Vec::new(); let mut new_states = MinusPlus::<Vec<State>>::default(); let mut new_wrapped_syntax = MinusPlus::default(); let mut new_wrapped_diff = MinusPlus::default(); // Turn all these into pairs of iterators so they can be advanced according // to the alignment and independently. let mut syntax = MinusPlus::new(syntax.minus.into_iter(), syntax.plus.into_iter()); let mut diff = MinusPlus::new(diff.minus.into_iter(), diff.plus.into_iter()); let mut wrapinfo = MinusPlus::new(wrapinfo[Left].iter(), wrapinfo[Right].iter()); let fill_style = MinusPlus::new(&config.minus_style, &config.plus_style); // Internal helper function to perform wrapping for both the syntax and the // diff highlighting (SyntectStyle and Style). #[allow(clippy::too_many_arguments)] pub fn wrap_syntax_and_diff<'a, ItSyn, ItDiff, ItWrap>( config: &'a Config, wrapped_syntax: &mut Vec<LineSections<'a, SyntectStyle>>, wrapped_diff: &mut Vec<LineSections<'a, Style>>, syntax_iter: &mut ItSyn, diff_iter: &mut ItDiff, wrapinfo_iter: &mut ItWrap, line_width: usize, fill_style: &Style, errhint: &'a str, ) -> (usize, usize) where ItSyn: Iterator<Item = LineSections<'a, SyntectStyle>>, ItDiff: Iterator<Item = LineSections<'a, Style>>, ItWrap: Iterator<Item = &'a bool>, { let must_wrap = *wrapinfo_iter .next() .unwrap_or_else(|| panic!("bad wrap info {}", errhint)); let (start, extended_to) = wrap_if_too_long( config, wrapped_syntax, syntax_iter .next() .unwrap_or_else(|| panic!("bad syntax alignment {}", errhint)), must_wrap, line_width, &config.null_syntect_style, &Some(config.wrap_config.inline_hint_syntect_style), ); // TODO: Why is the background color set to white when // ansi_term_style.background is None? 
let inline_hint_style = if config .inline_hint_style .ansi_term_style .background .is_some() { Some(config.inline_hint_style) } else { None }; let (start2, extended_to2) = wrap_if_too_long( config, wrapped_diff, diff_iter .next() .unwrap_or_else(|| panic!("bad diff alignment {}", errhint)), must_wrap, line_width, fill_style, &inline_hint_style, ); // The underlying text is the same for the style and diff, so // the length of the wrapping should be identical: assert_eq!( (start, extended_to), (start2, extended_to2), "syntax and diff wrapping differs {}", errhint ); (start, extended_to) } // This macro avoids having the same code block 4x in the alignment processing macro_rules! wrap_and_assert { ($side:tt, $errhint:tt, $have:tt, $expected:tt) => {{ assert_eq!(*$have, $expected, "bad alignment index {}", $errhint); $expected += 1; wrap_syntax_and_diff( &config, &mut new_wrapped_syntax[$side], &mut new_wrapped_diff[$side], &mut syntax[$side], &mut diff[$side], &mut wrapinfo[$side], line_width[$side], &fill_style[$side], $errhint, ) }}; } let mut m_expected = 0; let mut p_expected = 0; // Process blocks according to the alignment and build a new alignment. // If lines get added via wrapping these are assigned the state HunkMinusWrapped/HunkPlusWrapped. for (minus, plus) in alignment { let (minus_extended, plus_extended) = match (minus, plus) { (Some(m), None) => { let (minus_start, extended_to) = wrap_and_assert!(Left, "[*l*] (-)", m, m_expected); for i in minus_start..extended_to { new_alignment.push((Some(i), None)); } (extended_to - minus_start, 0) } (None, Some(p)) => { let (plus_start, extended_to) = wrap_and_assert!(Right, "(-) [*r*]", p, p_expected); for i in plus_start..extended_to { new_alignment.push((None, Some(i))); } (0, extended_to - plus_start) } (Some(m), Some(p)) => { let (minus_start, m_extended_to) = wrap_and_assert!(Left, "[*l*] (r)", m, m_expected); let (plus_start, p_extended_to) = wrap_and_assert!(Right, "(l) [*r*]", p, p_expected); for (new_m, new_p) in (minus_start..m_extended_to).zip(plus_start..p_extended_to) { new_alignment.push((Some(new_m), Some(new_p))); } // This Some(m):Some(p) alignment might have become uneven, so fill // up the shorter side with None. 
let minus_extended = m_extended_to - minus_start; let plus_extended = p_extended_to - plus_start; let plus_minus = (minus_extended as isize) - (plus_extended as isize); if plus_minus > 0 { for m in (m_extended_to as isize - plus_minus) as usize..m_extended_to { new_alignment.push((Some(m), None)); } } else if plus_minus < 0 { for p in (p_extended_to as isize + plus_minus) as usize..p_extended_to { new_alignment.push((None, Some(p))); } } (minus_extended, plus_extended) } _ => unreachable!("None-None alignment"), }; if minus_extended > 0 { new_states[Left].push(State::HunkMinus(DiffType::Unified, None)); for _ in 1..minus_extended { new_states[Left].push(State::HunkMinusWrapped); } } if plus_extended > 0 { new_states[Right].push(State::HunkPlus(DiffType::Unified, None)); for _ in 1..plus_extended { new_states[Right].push(State::HunkPlusWrapped); } } } ( new_alignment, new_states, new_wrapped_syntax, new_wrapped_diff, ) } #[allow(clippy::comparison_chain, clippy::type_complexity)] pub fn wrap_zero_block<'c: 'a, 'a>( config: &'c Config, raw_line: &str, mut states: Vec<State>, syntax_style_sections: Vec<LineSections<'a, SyntectStyle>>, diff_style_sections: Vec<LineSections<'a, Style>>, line_numbers_data: &Option<&mut line_numbers::LineNumbersData>, ) -> ( Vec<State>, Vec<LineSections<'a, SyntectStyle>>, Vec<LineSections<'a, Style>>, ) { // The width is the minimum of the left/right side. The panels should be equally sized, // but in rare cases the remaining panel width might differ due to the space the line // numbers take up. let line_width = if let Some(line_numbers_data) = line_numbers_data { let width = available_line_width(config, line_numbers_data); std::cmp::min(width[Left], width[Right]) } else { std::cmp::min( config.side_by_side_data[Left].width, config.side_by_side_data[Right].width, ) }; // Called with a single line, so no need to use the 1-sized bool vector. // If that changes the wrapping logic should be updated as well. debug_assert_eq!(diff_style_sections.len(), 1); let should_wrap = line_is_too_long(raw_line, line_width); if should_wrap { let syntax_style = wrap_line( config, syntax_style_sections.into_iter().flatten(), line_width, &SyntectStyle::default(), &Some(config.wrap_config.inline_hint_syntect_style), ); // TODO: Why is the background color set to white when // ansi_term_style.background is None? let inline_hint_style = if config .inline_hint_style .ansi_term_style .background .is_some() { Some(config.inline_hint_style) } else { None }; let diff_style = wrap_line( config, diff_style_sections.into_iter().flatten(), line_width, // To actually highlight inline hint characters: &Style { is_syntax_highlighted: true, ..config.null_style }, &inline_hint_style, ); states.resize_with(syntax_style.len(), || State::HunkZeroWrapped); (states, syntax_style, diff_style) } else { (states, syntax_style_sections, diff_style_sections) } } #[cfg(test)] mod tests { use lazy_static::lazy_static; use syntect::highlighting::Style as SyntectStyle; use super::wrap_line; use super::WrapConfig; use crate::config::Config; use crate::paint::LineSections; use crate::style::Style; use crate::tests::integration_test_utils::{make_config_from_args, DeltaTest}; lazy_static! { static ref S1: Style = Style { is_syntax_highlighted: true, ..Default::default() }; } lazy_static! { static ref S2: Style = Style { is_emph: true, ..Default::default() }; } lazy_static! { static ref SY: SyntectStyle = SyntectStyle::default(); } lazy_static! 
{ static ref SD: Style = Style::default(); } const W: &str = "+"; // wrap const WR: &str = "<"; // wrap-right const RA: &str = ">"; // right-align lazy_static! { static ref WRAP_DEFAULT_ARGS: Vec<&'static str> = vec![ "--wrap-left-symbol", W, "--wrap-right-symbol", WR, "--wrap-right-prefix-symbol", RA, "--wrap-max-lines", "4", "--wrap-right-percent", "37.0%", ]; } lazy_static! { static ref TEST_WRAP_CFG: WrapConfig = make_config_from_args(&WRAP_DEFAULT_ARGS).wrap_config; } fn default_wrap_cfg_plus<'a>(args: &[&'a str]) -> Vec<&'a str> { let mut result = WRAP_DEFAULT_ARGS.clone(); result.extend_from_slice(args); result } fn mk_wrap_cfg(wrap_cfg: &WrapConfig) -> Config { let mut cfg: Config = make_config_from_args(&[]); cfg.wrap_config = wrap_cfg.clone(); cfg } fn wrap_test<'a, I, S>(cfg: &'a Config, line: I, line_width: usize) -> Vec<LineSections<'a, S>> where I: IntoIterator<Item = (S, &'a str)> + std::fmt::Debug, <I as IntoIterator>::IntoIter: DoubleEndedIterator, S: Copy + Default + std::fmt::Debug, { wrap_line(cfg, line, line_width, &S::default(), &None) } #[test] fn test_wrap_line_single() { let cfg = mk_wrap_cfg(&TEST_WRAP_CFG); { let line = vec![(*SY, "0")]; let lines = wrap_test(&cfg, line, 6); assert_eq!(lines, vec![vec![(*SY, "0")]]); } { let line = vec![(*S1, "012"), (*S2, "34")]; let lines = wrap_test(&cfg, line, 6); assert_eq!(lines, vec![vec![(*S1, "012"), (*S2, "34")]]); } { let line = vec![(*S1, "012"), (*S2, "345")]; let lines = wrap_test(&cfg, line, 6); assert_eq!(lines, vec![vec![(*S1, "012"), (*S2, "345")]]); } { // Empty input usually does not happen let line = vec![(*S1, "")]; let lines = wrap_test(&cfg, line, 6); assert!(lines.is_empty()); } { // Partially empty should not happen either let line = vec![(*S1, ""), (*S2, "0")]; let lines = wrap_test(&cfg, line, 6); assert_eq!(lines, vec![vec![(*S1, ""), (*S2, "0")]]); } { let line = vec![(*S1, "0"), (*S2, "")]; let lines = wrap_test(&cfg, line, 6); assert_eq!(lines, vec![vec![(*S1, "0"), (*S2, "")]]); } { let line = vec![ (*S1, "0"), (*S2, ""), (*S1, ""), (*S2, ""), (*S1, ""), (*S2, ""), (*S1, ""), (*S2, ""), (*S1, ""), (*S2, ""), ]; let lines = wrap_test(&cfg, line, 6); assert_eq!( lines, vec![vec![ (*S1, "0"), (*S2, ""), (*S1, ""), (*S2, ""), (*S1, ""), (*S2, ""), (*S1, ""), (*S2, ""), (*S1, ""), (*S2, "") ]] ); } } #[test] fn test_wrap_line_align_right_1() { let cfg = mk_wrap_cfg(&TEST_WRAP_CFG); let line = vec![(*S1, "0123456789ab")]; let lines = wrap_test(&cfg, line, 11); assert_eq!(lines.len(), 2); assert_eq!(lines[0].last().unwrap().1, WR); assert_eq!(lines[1], [(*SD, " "), (*SD, ">"), (*S1, "ab")]); } #[test] fn test_wrap_line_align_right_2() { let line = vec![(*S1, "012"), (*S2, "3456")]; { // Right align lines on the second line let cfg = mk_wrap_cfg(&TEST_WRAP_CFG); let lines = wrap_test(&cfg, line.clone(), 6); assert_eq!( lines, vec![ vec![(*S1, "012"), (*S2, "34"), (*SD, WR)], vec![(*SD, " "), (*SD, RA), (*S2, "56")] ] ); } { // Set right align percentage lower, normal wrapping let mut no_align_right = TEST_WRAP_CFG.clone(); no_align_right.use_wrap_right_permille = 1; // 0.1% let cfg_no_align_right = mk_wrap_cfg(&no_align_right); let lines = wrap_test(&cfg_no_align_right, line, 6); assert_eq!( lines, vec![vec![(*S1, "012"), (*S2, "34"), (*SD, W)], vec![(*S2, "56")]] ); } } #[test] fn test_wrap_line_newlines() { fn mk_input(len: usize) -> LineSections<'static, Style> { const IN: &str = "0123456789abcdefZ"; let v = &[*S1, *S2]; let s1s2 = v.iter().cycle(); let text: Vec<_> = IN.matches(|_| true).take(len + 
1).collect(); s1s2.zip(text.iter()) .map(|(style, text)| (style.clone(), *text)) .collect() } fn mk_input_nl(len: usize) -> LineSections<'static, Style> { const NL: &str = "\n"; let mut line = mk_input(len); line.push((*S2, NL)); line } fn mk_expected<'a>( vec: &LineSections<'a, Style>, from: usize, to: usize, append: Option<(Style, &'a str)>, ) -> LineSections<'a, Style> { let mut result: Vec<_> = vec[from..to].iter().cloned().collect(); if let Some(val) = append { result.push(val); } result } let cfg = mk_wrap_cfg(&TEST_WRAP_CFG); { let line = vec![(*S1, "012"), (*S2, "345\n")]; let lines = wrap_test(&cfg, line, 6); assert_eq!(lines, vec![vec![(*S1, "012"), (*S2, "345\n")]]); } { for i in 0..=5 { let line = mk_input(i); let lines = wrap_test(&cfg, line, 6); assert_eq!(lines, vec![mk_input(i)]); let line = mk_input_nl(i); let lines = wrap_test(&cfg, line, 6); assert_eq!(lines, vec![mk_input_nl(i)]); } } { let line = mk_input_nl(9); let lines = wrap_test(&cfg, line, 3); let expected = mk_input_nl(9); let line1 = mk_expected(&expected, 0, 2, Some((*SD, W))); let line2 = mk_expected(&expected, 2, 4, Some((*SD, W))); let line3 = mk_expected(&expected, 4, 6, Some((*SD, W))); let line4 = mk_expected(&expected, 6, 8, Some((*SD, W))); let line5 = mk_expected(&expected, 8, 11, None); assert_eq!(lines, vec![line1, line2, line3, line4, line5]); } { let line = mk_input_nl(10); let lines = wrap_test(&cfg, line, 3); let expected = mk_input_nl(10); let line1 = mk_expected(&expected, 0, 2, Some((*SD, W))); let line2 = mk_expected(&expected, 2, 4, Some((*SD, W))); let line3 = mk_expected(&expected, 4, 6, Some((*SD, W))); let line4 = mk_expected(&expected, 6, 8, Some((*SD, W))); let line5 = mk_expected(&expected, 8, 11, Some((*S2, "\n"))); assert_eq!(lines, vec![line1, line2, line3, line4, line5]); } { let line = vec![(*S1, "abc"), (*S2, "01230123012301230123"), (*S1, "ZZZZZ")]; let wcfg1 = mk_wrap_cfg(&WrapConfig { max_lines: 1, ..TEST_WRAP_CFG.clone() }); let wcfg2 = mk_wrap_cfg(&WrapConfig { max_lines: 2, ..TEST_WRAP_CFG.clone() }); let wcfg3 = mk_wrap_cfg(&WrapConfig { max_lines: 3, ..TEST_WRAP_CFG.clone() }); let lines = wrap_line(&wcfg1, line.clone(), 4, &Style::default(), &None); assert_eq!(lines.len(), 1); assert_eq!(lines.last().unwrap().last().unwrap().1, "ZZZZZ"); let lines = wrap_line(&wcfg2, line.clone(), 4, &Style::default(), &None); assert_eq!(lines.len(), 2); assert_eq!(lines.last().unwrap().last().unwrap().1, "ZZZZZ"); let lines = wrap_line(&wcfg3, line.clone(), 4, &Style::default(), &None); assert_eq!(lines.len(), 3); assert_eq!(lines.last().unwrap().last().unwrap().1, "ZZZZZ"); } } #[test] fn test_wrap_line_unicode() { let cfg = mk_wrap_cfg(&TEST_WRAP_CFG); // from UnicodeSegmentation documentation and the linked // Unicode Standard Annex #29 let line = vec![(*S1, "abc"), (*S2, "mnö̲"), (*S1, "xyz")]; let lines = wrap_test(&cfg, line, 4); assert_eq!( lines, vec![ vec![(*S1, "abc"), (*SD, W)], vec![(*S2, "mnö̲"), (*SD, W)], vec![(*S1, "xyz")] ] ); // Not working: Tailored grapheme clusters: क्षि = क् + षि let line = vec![(*S1, "abc"), (*S2, "deநி"), (*S1, "ghij")]; let lines = wrap_test(&cfg, line, 4); assert_eq!( lines, vec![ vec![(*S1, "abc"), (*SD, W)], vec![(*S2, "deநி"), (*SD, W)], vec![(*S1, "ghij")] ] ); } const HUNK_ZERO_DIFF: &str = "\ diff --git i/a.py w/a.py index 223ca50..e69de29 100644 --- i/a.py +++ w/a.py @@ -4,3 +15,3 @@ abcdefghijklmnopqrstuvwxzy 0123456789 0123456789 0123456789 0123456789 0123456789 -a = 1 +a = 2 "; const HUNK_ZERO_LARGE_LINENUMBERS_DIFF: &str = "\ diff 
--git i/a.py w/a.py index 223ca50..e69de29 100644 --- i/a.py +++ w/a.py @@ -10,3 +101999,3 @@ abcdefghijklmnopqrstuvwxzy 0123456789 0123456789 0123456789 0123456789 0123456789 -a = 1 +a = 2 "; const HUNK_MP_DIFF: &str = "\ diff --git i/a.py w/a.py index 223ca50..e69de29 100644 --- i/a.py +++ w/a.py @@ -4,3 +15,3 @@ abcdefghijklmnopqrstuvwxzy 0123456789 0123456789 0123456789 0123456789 0123456789 -a = 0123456789 0123456789 0123456789 0123456789 0123456789 +b = 0123456789 0123456789 0123456789 0123456789 0123456789 "; const HUNK_ALIGN_DIFF_HEADER: &str = "--- a\n+++ b\n@@ -1,1 +1,1 @@\n"; const HUNK_ALIGN_DIFF_SHORT: &str = ".........1.........2....\n"; const HUNK_ALIGN_DIFF_LONG: &str = ".........1.........2.........3.........4.........5.........6\n"; #[test] fn test_wrap_with_unequal_hunk_zero_width() { DeltaTest::with(&default_wrap_cfg_plus(&[ "--side-by-side", "--line-numbers-left-format", "│L│", "--line-numbers-right-format", "│RRRR│", "--width", "40", "--line-fill-method", "spaces", ])) .set_cfg(|cfg| cfg.truncation_symbol = ">".into()) .with_input(HUNK_ZERO_DIFF) .expect( r#" │L│abcdefghijklm+ │RRRR│abcdefghijklm+ │L│nopqrstuvwxzy+ │RRRR│nopqrstuvwxzy+ │L│ 0123456789 0+ │RRRR│ 0123456789 0+ │L│123456789 012+ │RRRR│123456789 012+ │L│3456789 01234567>│RRRR│3456789 01234> │L│a = 1 │RRRR│a = 2 "#, ); } #[test] fn test_wrap_with_large_hunk_zero_line_numbers() { DeltaTest::with(&default_wrap_cfg_plus(&[ "--side-by-side", "--line-numbers-left-format", "│LLL│", "--line-numbers-right-format", "│WW {nm} +- {np:2} WW│", "--width", "60", "--line-fill-method", "ansi", ])) .set_cfg(|cfg| cfg.truncation_symbol = ">".into()) .with_input(HUNK_ZERO_LARGE_LINENUMBERS_DIFF) .expect( r#" │LLL│abcde+ │WW 10 +- 101999 WW│abcde+ │LLL│fghij+ │WW +- WW│fghij+ │LLL│klmno+ │WW +- WW│klmno+ │LLL│pqrst+ │WW +- WW│pqrst+ │LLL│uvwxzy 0123456789 012345>│WW +- WW│uvwxz> │LLL│a = 1 │WW +- 102000 WW│a = 2"#, ); } #[test] fn test_wrap_with_keep_markers() { use crate::features::side_by_side::ansifill::ODD_PAD_CHAR; let t = DeltaTest::with(&default_wrap_cfg_plus(&[ "--side-by-side", "--keep-plus-minus-markers", "--width", "45", ])) .set_cfg(|cfg| cfg.truncation_symbol = ">".into()) .with_input(HUNK_MP_DIFF); let output = t.expect( r#" │ 4 │ abcdefghijklmn+ │ 15 │ abcdefghijklmn+ │ │ opqrstuvwxzy 0+ │ │ opqrstuvwxzy 0+ │ │ 123456789 0123+ │ │ 123456789 0123+ │ │ 456789 0123456+ │ │ 456789 0123456+ │ │ 789 0123456789> │ │ 789 0123456789> │ 5 │-a = 0123456789+ │ 16 │+b = 0123456789+ │ │ 0123456789 01+ │ │ 0123456789 01+ │ │ 23456789 01234+ │ │ 23456789 01234+ │ │ 56789 01234567+ │ │ 56789 01234567+ │ │ 89 │ │ 89"#, // this column here is^ where ODD_PAD_CHAR is inserted due to the odd 45 width ); assert!(!output.is_empty()); for line in output.lines().skip(crate::config::HEADER_LEN) { assert_eq!(line.chars().nth(22), Some(ODD_PAD_CHAR)); } } #[test] fn test_alignment_2_lines_vs_3_lines() { let config = make_config_from_args(&default_wrap_cfg_plus(&["--side-by-side", "--width", "55"])); { DeltaTest::with_config_and_input( &config, &format!( "{}-{}+{}", HUNK_ALIGN_DIFF_HEADER, HUNK_ALIGN_DIFF_SHORT, HUNK_ALIGN_DIFF_LONG ), ) .expect( r#" │ 1 │.........1.........2< │ 1 │.........1.........2+ │ │ >.... 
│ │.........3.........4+ │ │ │ │.........5.........6"#, ); // the place where ODD_PAD_CHAR^ is inserted due to the odd 55 width } { DeltaTest::with_config_and_input( &config, &format!( "{}-{}+{}", HUNK_ALIGN_DIFF_HEADER, HUNK_ALIGN_DIFF_LONG, HUNK_ALIGN_DIFF_SHORT ), ) .expect( r#" │ 1 │.........1.........2+ │ 1 │.........1.........2< │ │.........3.........4+ │ │ >.... │ │.........5.........6 │ │"#, ); } } #[test] fn test_alignment_1_line_vs_3_lines() { let config = make_config_from_args(&default_wrap_cfg_plus(&[ "--side-by-side", "--width", "61", "--line-fill-method", "spaces", ])); { DeltaTest::with_config_and_input( &config, &format!( "{}-{}+{}", HUNK_ALIGN_DIFF_HEADER, HUNK_ALIGN_DIFF_SHORT, HUNK_ALIGN_DIFF_LONG ), ) .expect( r#" │ 1 │.........1.........2....│ 1 │.........1.........2...+ │ │ │ │......3.........4......+ │ │ │ │...5.........6 "#, ); } { DeltaTest::with_config_and_input( &config, &format!( "{}-{}+{}", HUNK_ALIGN_DIFF_HEADER, HUNK_ALIGN_DIFF_LONG, HUNK_ALIGN_DIFF_SHORT ), ) .expect( r#" │ 1 │.........1.........2...+│ 1 │.........1.........2.... │ │......3.........4......+│ │ │ │...5.........6 │ │"#, ); } } #[test] fn test_wrap_max_lines_2() { // TODO overriding is not possible, need to change config directly let mut config = make_config_from_args(&default_wrap_cfg_plus(&[ // "--wrap-max-lines", // "2", "--side-by-side", "--width", "72", "--line-fill-method", "spaces", ])); config.truncation_symbol = ">".into(); { DeltaTest::with_config_and_input( &config, &format!( "{}-{}+{}", HUNK_ALIGN_DIFF_HEADER, HUNK_ALIGN_DIFF_SHORT, HUNK_ALIGN_DIFF_LONG ), ) .expect( r#" │ 1 │.........1.........2.... │ 1 │.........1.........2.........+ │ │ │ │3.........4.........5........+ │ │ │ │.6 "#, ); } { config.wrap_config.max_lines = 2; DeltaTest::with_config_and_input( &config, &format!( "{}-{}+{}", HUNK_ALIGN_DIFF_HEADER, HUNK_ALIGN_DIFF_SHORT, HUNK_ALIGN_DIFF_LONG ), ) .expect( r#" │ 1 │.........1.........2.... │ 1 │.........1.........2.........+ │ │ │ │3.........4.........5........>"#, ); } } }
34.091316
101
0.495089
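The tests above exercise delta's side-by-side wrapping, which splits lines at grapheme-cluster boundaries rather than at bytes or chars. As a rough, self-contained sketch of that core idea (not delta's actual `wrap_line`, and counting clusters rather than display width), a hard wrap over graphemes could look like this, assuming the unicode-segmentation crate:

use unicode_segmentation::UnicodeSegmentation;

// Hard-wrap `line` after every `width` grapheme clusters, so that a combining
// mark such as the one in "ö̲" stays attached to its base character.
fn hard_wrap(line: &str, width: usize) -> Vec<String> {
    let mut out = Vec::new();
    let mut cur = String::new();
    let mut count = 0;
    for g in line.graphemes(true) {
        if count == width {
            out.push(std::mem::take(&mut cur));
            count = 0;
        }
        cur.push_str(g);
        count += 1;
    }
    if !cur.is_empty() {
        out.push(cur);
    }
    out
}

A real implementation additionally has to carry each section's style across the split points, which is what the `LineSections<Style>` plumbing in the tests above is about.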
ff8187d8716224e74a383349f7affd1a3cb0eca9
15,111
use crate::{
    pos::Spanned,
    syntax_pos::{BytePos, Span, DUMMY_SP},
};
use rustc_hash::FxHashMap;
use std::{
    cell::{Ref, RefCell},
    rc::Rc,
    sync::Arc,
};

/// Stores comments.
///
/// ## Implementation notes
///
/// Methods use `(&self)` instead of `(&mut self)` for several reasons. First,
/// this is similar to the previous API. Second, the TypeScript parser requires
/// backtracking, which requires [Clone]. To avoid cloning large vectors, we
/// must use [Rc<RefCell<Comments>>]. We have two options: we may implement it
/// in the parser or in the [Comments] implementation. If we go with the first
/// option, we have to pass [Comments] to the parser, and as a result we need
/// another method to take the comments back. If we go with the second option,
/// we can simply pass [&Comments] to the parser. Third, `(&self)` allows
/// multi-threaded use-cases such as swc itself.
///
/// We use [Option] instead of a no-op [Comments] implementation to avoid
/// allocation unless it is required.
pub trait Comments {
    fn add_leading(&self, pos: BytePos, cmt: Comment);
    fn add_leading_comments(&self, pos: BytePos, comments: Vec<Comment>);
    fn has_leading(&self, pos: BytePos) -> bool;
    fn move_leading(&self, from: BytePos, to: BytePos);
    fn take_leading(&self, pos: BytePos) -> Option<Vec<Comment>>;
    fn get_leading(&self, pos: BytePos) -> Option<Vec<Comment>>;
    fn add_trailing(&self, pos: BytePos, cmt: Comment);
    fn add_trailing_comments(&self, pos: BytePos, comments: Vec<Comment>);
    fn has_trailing(&self, pos: BytePos) -> bool;
    fn move_trailing(&self, from: BytePos, to: BytePos);
    fn take_trailing(&self, pos: BytePos) -> Option<Vec<Comment>>;
    fn get_trailing(&self, pos: BytePos) -> Option<Vec<Comment>>;
    fn add_pure_comment(&self, pos: BytePos);

    fn with_leading<F, Ret>(&self, pos: BytePos, f: F) -> Ret
    where
        Self: Sized,
        F: FnOnce(&[Comment]) -> Ret,
    {
        let cmts = self.take_leading(pos);
        let ret = if let Some(cmts) = &cmts {
            f(&cmts)
        } else {
            f(&[])
        };
        if let Some(cmts) = cmts {
            self.add_leading_comments(pos, cmts);
        }
        ret
    }

    fn with_trailing<F, Ret>(&self, pos: BytePos, f: F) -> Ret
    where
        Self: Sized,
        F: FnOnce(&[Comment]) -> Ret,
    {
        let cmts = self.take_trailing(pos);
        let ret = if let Some(cmts) = &cmts {
            f(&cmts)
        } else {
            f(&[])
        };
        if let Some(cmts) = cmts {
            self.add_trailing_comments(pos, cmts);
        }
        ret
    }
}

macro_rules!
delegate { () => { fn add_leading(&self, pos: BytePos, cmt: Comment) { (**self).add_leading(pos, cmt) } fn add_leading_comments(&self, pos: BytePos, comments: Vec<Comment>) { (**self).add_leading_comments(pos, comments) } fn has_leading(&self, pos: BytePos) -> bool { (**self).has_leading(pos) } fn move_leading(&self, from: BytePos, to: BytePos) { (**self).move_leading(from, to) } fn take_leading(&self, pos: BytePos) -> Option<Vec<Comment>> { (**self).take_leading(pos) } fn get_leading(&self, pos: BytePos) -> Option<Vec<Comment>> { (**self).get_leading(pos) } fn add_trailing(&self, pos: BytePos, cmt: Comment) { (**self).add_trailing(pos, cmt) } fn add_trailing_comments(&self, pos: BytePos, comments: Vec<Comment>) { (**self).add_trailing_comments(pos, comments) } fn has_trailing(&self, pos: BytePos) -> bool { (**self).has_trailing(pos) } fn move_trailing(&self, from: BytePos, to: BytePos) { (**self).move_trailing(from, to) } fn take_trailing(&self, pos: BytePos) -> Option<Vec<Comment>> { (**self).take_trailing(pos) } fn get_trailing(&self, pos: BytePos) -> Option<Vec<Comment>> { (**self).get_trailing(pos) } fn add_pure_comment(&self, pos: BytePos) { (**self).add_pure_comment(pos) } }; } impl<T> Comments for &'_ T where T: ?Sized + Comments, { delegate!(); } impl<T> Comments for Arc<T> where T: ?Sized + Comments, { delegate!(); } impl<T> Comments for Rc<T> where T: ?Sized + Comments, { delegate!(); } impl<T> Comments for Box<T> where T: ?Sized + Comments, { delegate!(); } /// Implementation of [Comments] which does not store any comments. #[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Hash)] pub struct NoopComments; impl Comments for NoopComments { #[cfg_attr(not(debug_assertions), inline(always))] fn add_leading(&self, _: BytePos, _: Comment) {} #[cfg_attr(not(debug_assertions), inline(always))] fn add_leading_comments(&self, _: BytePos, _: Vec<Comment>) {} #[cfg_attr(not(debug_assertions), inline(always))] fn has_leading(&self, _: BytePos) -> bool { false } #[cfg_attr(not(debug_assertions), inline(always))] fn move_leading(&self, _: BytePos, _: BytePos) {} #[cfg_attr(not(debug_assertions), inline(always))] fn take_leading(&self, _: BytePos) -> Option<Vec<Comment>> { None } #[cfg_attr(not(debug_assertions), inline(always))] fn get_leading(&self, _: BytePos) -> Option<Vec<Comment>> { None } #[cfg_attr(not(debug_assertions), inline(always))] fn add_trailing(&self, _: BytePos, _: Comment) {} #[cfg_attr(not(debug_assertions), inline(always))] fn add_trailing_comments(&self, _: BytePos, _: Vec<Comment>) {} #[cfg_attr(not(debug_assertions), inline(always))] fn has_trailing(&self, _: BytePos) -> bool { false } #[cfg_attr(not(debug_assertions), inline(always))] fn move_trailing(&self, _: BytePos, _: BytePos) {} #[cfg_attr(not(debug_assertions), inline(always))] fn take_trailing(&self, _: BytePos) -> Option<Vec<Comment>> { None } #[cfg_attr(not(debug_assertions), inline(always))] fn get_trailing(&self, _: BytePos) -> Option<Vec<Comment>> { None } #[cfg_attr(not(debug_assertions), inline(always))] fn add_pure_comment(&self, _: BytePos) {} } /// This implementation behaves like [NoopComments] if it's [None]. 
impl<C> Comments for Option<C> where C: Comments, { fn add_leading(&self, pos: BytePos, cmt: Comment) { if let Some(c) = self { c.add_leading(pos, cmt) } } fn add_leading_comments(&self, pos: BytePos, comments: Vec<Comment>) { if let Some(c) = self { c.add_leading_comments(pos, comments) } } fn has_leading(&self, pos: BytePos) -> bool { if let Some(c) = self { c.has_leading(pos) } else { false } } fn move_leading(&self, from: BytePos, to: BytePos) { if let Some(c) = self { c.move_leading(from, to) } } fn take_leading(&self, pos: BytePos) -> Option<Vec<Comment>> { if let Some(c) = self { c.take_leading(pos) } else { None } } fn get_leading(&self, pos: BytePos) -> Option<Vec<Comment>> { if let Some(c) = self { c.get_leading(pos) } else { None } } fn add_trailing(&self, pos: BytePos, cmt: Comment) { if let Some(c) = self { c.add_trailing(pos, cmt) } } fn add_trailing_comments(&self, pos: BytePos, comments: Vec<Comment>) { if let Some(c) = self { c.add_trailing_comments(pos, comments) } } fn has_trailing(&self, pos: BytePos) -> bool { if let Some(c) = self { c.has_trailing(pos) } else { false } } fn move_trailing(&self, from: BytePos, to: BytePos) { if let Some(c) = self { c.move_trailing(from, to) } } fn take_trailing(&self, pos: BytePos) -> Option<Vec<Comment>> { if let Some(c) = self { c.take_trailing(pos) } else { None } } fn get_trailing(&self, pos: BytePos) -> Option<Vec<Comment>> { if let Some(c) = self { c.get_trailing(pos) } else { None } } fn add_pure_comment(&self, pos: BytePos) { if let Some(c) = self { c.add_pure_comment(pos) } } fn with_leading<F, Ret>(&self, pos: BytePos, f: F) -> Ret where Self: Sized, F: FnOnce(&[Comment]) -> Ret, { if let Some(c) = self { c.with_leading(pos, f) } else { f(&[]) } } fn with_trailing<F, Ret>(&self, pos: BytePos, f: F) -> Ret where Self: Sized, F: FnOnce(&[Comment]) -> Ret, { if let Some(c) = self { c.with_trailing(pos, f) } else { f(&[]) } } } pub type SingleThreadedCommentsMapInner = FxHashMap<BytePos, Vec<Comment>>; pub type SingleThreadedCommentsMap = Rc<RefCell<SingleThreadedCommentsMapInner>>; /// Single-threaded storage for comments. 
#[derive(Debug, Clone, Default)] pub struct SingleThreadedComments { leading: SingleThreadedCommentsMap, trailing: SingleThreadedCommentsMap, } impl Comments for SingleThreadedComments { fn add_leading(&self, pos: BytePos, cmt: Comment) { self.leading.borrow_mut().entry(pos).or_default().push(cmt); } fn add_leading_comments(&self, pos: BytePos, comments: Vec<Comment>) { self.leading .borrow_mut() .entry(pos) .or_default() .extend(comments); } fn has_leading(&self, pos: BytePos) -> bool { if let Some(v) = self.leading.borrow().get(&pos) { !v.is_empty() } else { false } } fn move_leading(&self, from: BytePos, to: BytePos) { let cmt = self.leading.borrow_mut().remove(&from); if let Some(cmt) = cmt { self.leading.borrow_mut().entry(to).or_default().extend(cmt); } } fn take_leading(&self, pos: BytePos) -> Option<Vec<Comment>> { self.leading.borrow_mut().remove(&pos) } fn get_leading(&self, pos: BytePos) -> Option<Vec<Comment>> { self.leading.borrow().get(&pos).map(|c| c.to_owned()) } fn add_trailing(&self, pos: BytePos, cmt: Comment) { self.trailing.borrow_mut().entry(pos).or_default().push(cmt); } fn add_trailing_comments(&self, pos: BytePos, comments: Vec<Comment>) { self.trailing .borrow_mut() .entry(pos) .or_default() .extend(comments); } fn has_trailing(&self, pos: BytePos) -> bool { if let Some(v) = self.trailing.borrow().get(&pos) { !v.is_empty() } else { false } } fn move_trailing(&self, from: BytePos, to: BytePos) { let cmt = self.trailing.borrow_mut().remove(&from); if let Some(cmt) = cmt { self.trailing .borrow_mut() .entry(to) .or_default() .extend(cmt); } } fn take_trailing(&self, pos: BytePos) -> Option<Vec<Comment>> { self.trailing.borrow_mut().remove(&pos) } fn get_trailing(&self, pos: BytePos) -> Option<Vec<Comment>> { self.trailing.borrow().get(&pos).map(|c| c.to_owned()) } fn add_pure_comment(&self, pos: BytePos) { let mut leading_map = self.leading.borrow_mut(); let leading = leading_map.entry(pos).or_default(); let pure_comment = Comment { kind: CommentKind::Block, span: DUMMY_SP, text: "#__PURE__".into(), }; if !leading.iter().any(|c| c.text == pure_comment.text) { leading.push(pure_comment); } } fn with_leading<F, Ret>(&self, pos: BytePos, f: F) -> Ret where Self: Sized, F: FnOnce(&[Comment]) -> Ret, { let b = self.leading.borrow(); let cmts = b.get(&pos); let ret = if let Some(cmts) = &cmts { f(&cmts) } else { f(&[]) }; ret } fn with_trailing<F, Ret>(&self, pos: BytePos, f: F) -> Ret where Self: Sized, F: FnOnce(&[Comment]) -> Ret, { let b = self.trailing.borrow(); let cmts = b.get(&pos); let ret = if let Some(cmts) = &cmts { f(&cmts) } else { f(&[]) }; ret } } impl SingleThreadedComments { /// Creates a new `SingleThreadedComments` from the provided leading and /// trailing. pub fn from_leading_and_trailing( leading: SingleThreadedCommentsMap, trailing: SingleThreadedCommentsMap, ) -> Self { SingleThreadedComments { leading, trailing } } /// Takes all the comments as (leading, trailing). pub fn take_all(self) -> (SingleThreadedCommentsMap, SingleThreadedCommentsMap) { (self.leading, self.trailing) } /// Borrows all the comments as (leading, trailing). 
pub fn borrow_all<'a>( &'a self, ) -> ( Ref<'a, SingleThreadedCommentsMapInner>, Ref<'a, SingleThreadedCommentsMapInner>, ) { (self.leading.borrow(), self.trailing.borrow()) } pub fn with_leading<F, Ret>(&self, pos: BytePos, op: F) -> Ret where F: FnOnce(&[Comment]) -> Ret, { if let Some(comments) = self.leading.borrow().get(&pos) { op(&*comments) } else { op(&[]) } } pub fn with_trailing<F, Ret>(&self, pos: BytePos, op: F) -> Ret where F: FnOnce(&[Comment]) -> Ret, { if let Some(comments) = self.trailing.borrow().get(&pos) { op(&*comments) } else { op(&[]) } } } #[derive(Debug, Clone, PartialEq, Eq)] pub struct Comment { pub kind: CommentKind, pub span: Span, pub text: String, } impl Spanned for Comment { fn span(&self) -> Span { self.span } } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum CommentKind { Line, Block, } #[deprecated( since = "0.13.5", note = "helper methods are merged into Comments itself" )] pub trait CommentsExt: Comments { fn with_leading<F, Ret>(&self, pos: BytePos, op: F) -> Ret where F: FnOnce(&[Comment]) -> Ret, { if let Some(comments) = self.get_leading(pos) { op(&comments) } else { op(&[]) } } fn with_trailing<F, Ret>(&self, pos: BytePos, op: F) -> Ret where F: FnOnce(&[Comment]) -> Ret, { if let Some(comments) = self.get_trailing(pos) { op(&comments) } else { op(&[]) } } } #[allow(deprecated)] impl<C> CommentsExt for C where C: Comments {}
26.053448
85
0.554761
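A brief usage sketch of the storage defined above; it assumes the module's own items (`Comments`, `SingleThreadedComments`, `Comment`, `CommentKind`, `BytePos`, `DUMMY_SP`) are in scope and that `BytePos` is the usual tuple struct:

fn comments_demo() {
    let comments = SingleThreadedComments::default();
    let pos = BytePos(0);
    // Attach a leading comment at a position, then inspect it without taking it.
    comments.add_leading(
        pos,
        Comment {
            kind: CommentKind::Line,
            span: DUMMY_SP,
            text: "a leading comment".to_string(),
        },
    );
    assert!(comments.has_leading(pos));
    comments.with_leading(pos, |cmts| assert_eq!(cmts.len(), 1));
    // Consume the storage, splitting it into its leading/trailing maps.
    let (leading, trailing) = comments.take_all();
    assert_eq!(leading.borrow().len(), 1);
    assert!(trailing.borrow().is_empty());
}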
fc8f51a32282c12dcc09639b50fe42dd56cecad8
1,265
// Copyright (c) 2021 jmjoy // Helper is licensed under Mulan PSL v2. // You can use this software according to the terms and conditions of the Mulan PSL v2. // You may obtain a copy of Mulan PSL v2 at: // http://license.coscl.org.cn/MulanPSL2 // THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR // FIT FOR A PARTICULAR PURPOSE. // See the Mulan PSL v2 for more details. use helper::option; #[test] pub fn test_option_0() { let x = option!(unwrap Some(1) or 2); assert_eq!(x, 1); let x = option!(unwrap None or 2); assert_eq!(x, 2); } #[test] pub fn test_option_1() { let mut b = false; let mut f = || { let _ = option!(unwrap Some(1) or return); b = true; }; f(); assert_eq!(b, true); let mut b = false; let mut f = || { let _ = option!(unwrap None::<()> or return); b = true; }; f(); assert_eq!(b, false); } #[test] pub fn test_option_2() { let mut x = 0; for i in 0..100usize { x = i; let o = if i < 10 { Some(i) } else { None }; let _ = option!(unwrap o or break); } assert_eq!(x, 10); }
23
88
0.585771
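This row only contains the tests; the `option!` macro itself lives elsewhere in the `helper` crate. As a hedged sketch, one possible definition consistent with these tests is a small tt-muncher (the real crate may define it differently; a plain `$opt:expr or ...` matcher would be rejected by macro_rules follow-set rules, since an `expr` fragment may only be followed by `=>`, `,`, or `;`):

// Hypothetical definition, not the helper crate's actual source.
macro_rules! option {
    // Entry point: start munching the tokens after `unwrap`.
    (unwrap $($rest:tt)*) => { option!(@parse [] $($rest)*) };
    // Found the `or` separator: everything before it is the Option expression.
    (@parse [$($opt:tt)*] or $($fallback:tt)*) => {
        match $($opt)* {
            Some(v) => v,
            None => $($fallback)*,
        }
    };
    // Otherwise move one token into the accumulator and keep going.
    (@parse [$($opt:tt)*] $next:tt $($rest:tt)*) => {
        option!(@parse [$($opt)* $next] $($rest)*)
    };
}

Because `return` and `break` are expressions, `option!(unwrap o or break)` expands to a `match` whose `None` arm breaks out of the enclosing loop, which is exactly what `test_option_2` relies on.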
dd015398eb2423220baee63bd76849caa4ba58c9
24,741
//! Implementation for writing delta checkpoints.

use arrow::datatypes::Schema as ArrowSchema;
use arrow::error::ArrowError;
use arrow::json::reader::Decoder;
use chrono::Datelike;
use log::*;
use parquet::arrow::ArrowWriter;
use parquet::errors::ParquetError;
use parquet::file::writer::InMemoryWriteableCursor;
use serde_json::Value;
use std::collections::HashMap;
use std::convert::TryFrom;

use super::action;
use super::delta_arrow::delta_log_schema_for_table;
use super::open_table_with_version;
use super::schema::*;
use super::storage::{StorageBackend, StorageError};
use super::table_state::DeltaTableState;
use super::writer::time_utils;
use super::{CheckPoint, DeltaTableError};
use crate::DeltaTable;

/// Error returned when there is an error while creating a checkpoint.
#[derive(thiserror::Error, Debug)]
pub enum CheckpointError {
    /// Error returned when the DeltaTableState does not contain a metadata action.
    #[error("DeltaTableMetadata not present in DeltaTableState")]
    MissingMetaData,
    /// Error returned when a string-formatted partition value cannot be parsed to its appropriate
    /// data type.
    #[error("Partition value {0} cannot be parsed from string.")]
    PartitionValueNotParseable(String),
    /// Passthrough error returned when calling DeltaTable.
    #[error("DeltaTableError: {source}")]
    DeltaTable {
        /// The source DeltaTableError.
        #[from]
        source: DeltaTableError,
    },
    /// Error returned when the parquet writer fails while writing the checkpoint.
    #[error("Failed to write parquet: {}", .source)]
    ParquetError {
        /// Parquet error details returned when writing the checkpoint failed.
        #[from]
        source: ParquetError,
    },
    /// Error returned when converting the schema to Arrow format failed.
    #[error("Failed to convert into Arrow schema: {}", .source)]
    ArrowError {
        /// Arrow error details returned when converting the schema into Arrow format failed.
        #[from]
        source: ArrowError,
    },
    /// Passthrough error returned when calling StorageBackend.
    #[error("StorageError: {source}")]
    Storage {
        /// The source StorageError.
        #[from]
        source: StorageError,
    },
    /// Passthrough error returned by serde_json.
    #[error("serde_json::Error: {source}")]
    JSONSerialization {
        /// The source serde_json::Error.
        #[from]
        source: serde_json::Error,
    },
}

impl From<CheckpointError> for ArrowError {
    fn from(error: CheckpointError) -> Self {
        ArrowError::from_external_error(Box::new(error))
    }
}

/// Loads the table from the given `table_uri` at the given `version` and creates a checkpoint for it.
pub async fn create_checkpoint_from_table_uri(
    table_uri: &str,
    version: DeltaDataTypeVersion,
) -> Result<(), CheckpointError> {
    let table = open_table_with_version(table_uri, version).await?;
    create_checkpoint(
        version,
        table.get_state(),
        table.storage.as_ref(),
        table_uri,
    )
    .await?;
    Ok(())
}

/// Creates a checkpoint at `table.version` for the given `table`.
pub async fn create_checkpoint_from_table(table: &DeltaTable) -> Result<(), CheckpointError> {
    create_checkpoint(
        table.version,
        table.get_state(),
        table.storage.as_ref(),
        &table.table_uri,
    )
    .await?;
    Ok(())
}

async fn create_checkpoint(
    version: DeltaDataTypeVersion,
    state: &DeltaTableState,
    storage: &dyn StorageBackend,
    table_uri: &str,
) -> Result<(), CheckpointError> {
    // TODO: checkpoints _can_ be multi-part... we haven't found a good reference for
    // an appropriate split point yet, though, so only a single part is written currently.
    // See https://github.com/delta-io/delta-rs/issues/288
    let delta_log_uri = storage.join_path(table_uri, "_delta_log");
    let last_checkpoint_uri = storage.join_path(&delta_log_uri, "_last_checkpoint");

    debug!("Writing parquet bytes to checkpoint buffer.");
    let parquet_bytes = parquet_bytes_from_state(state)?;
    let size = parquet_bytes.len() as i64;
    let checkpoint = CheckPoint::new(version, size, None);

    let file_name = format!("{:020}.checkpoint.parquet", version);
    let checkpoint_uri = storage.join_path(&delta_log_uri, &file_name);

    debug!("Writing checkpoint to {:?}.", checkpoint_uri);
    storage.put_obj(&checkpoint_uri, &parquet_bytes).await?;

    let last_checkpoint_content: Value = serde_json::to_value(&checkpoint)?;
    let last_checkpoint_content = serde_json::to_string(&last_checkpoint_content)?;

    debug!("Writing _last_checkpoint to {:?}.", last_checkpoint_uri);
    storage
        .put_obj(&last_checkpoint_uri, last_checkpoint_content.as_bytes())
        .await?;

    Ok(())
}

fn parquet_bytes_from_state(state: &DeltaTableState) -> Result<Vec<u8>, CheckpointError> {
    let current_metadata = state
        .current_metadata()
        .ok_or(CheckpointError::MissingMetaData)?;

    // Collect partition fields along with their data type from the current schema.
    // JSON add actions contain a `partitionValues` field which is a map<string, string>.
    // When loading `partitionValues_parsed` we have to convert the stringified partition values
    // back to the correct data type.
    let partition_col_data_types: Vec<(&str, &SchemaDataType)> = current_metadata
        .schema
        .get_fields()
        .iter()
        .filter_map(|f| {
            if current_metadata
                .partition_columns
                .iter()
                .any(|s| s.as_str() == f.get_name())
            {
                Some((f.get_name(), f.get_type()))
            } else {
                None
            }
        })
        .collect();

    // Collect a map of paths that require special stats conversion.
    let mut stats_conversions: Vec<(SchemaPath, SchemaDataType)> = Vec::new();
    collect_stats_conversions(&mut stats_conversions, current_metadata.schema.get_fields());

    let mut tombstones = state.unexpired_tombstones().cloned().collect::<Vec<_>>();

    // If any tombstones do not include extended file metadata, we must omit the extended
    // metadata fields from the remove schema.
    // See https://github.com/delta-io/delta/blob/master/PROTOCOL.md#add-file-and-remove-file
    //
    // DBR version 8.x and greater behave differently when reading the parquet file depending
    // on the `extended_file_metadata` flag, hence it is safer to set `extended_file_metadata=false`
    // and omit metadata columns if at least one remove action has `extended_file_metadata=false`.
    // We've added the additional check on `size.is_some` because in delta-spark the primitive long
    // type is used, hence we want to avoid possible errors when `extended_file_metadata=true`,
    // but `size=null`.
    let use_extended_remove_schema = tombstones
        .iter()
        .all(|r| r.extended_file_metadata == Some(true) && r.size.is_some());

    // If use_extended_remove_schema=false for some of the tombstones, it must be false for all of them.
if !use_extended_remove_schema { for remove in tombstones.iter_mut() { remove.extended_file_metadata = Some(false); } } // protocol let mut jsons = std::iter::once(action::Action::protocol(action::Protocol { min_reader_version: state.min_reader_version(), min_writer_version: state.min_writer_version(), })) // metaData .chain(std::iter::once(action::Action::metaData( action::MetaData::try_from(current_metadata.clone())?, ))) // txns .chain( state .app_transaction_version() .iter() .map(|(app_id, version)| { action::Action::txn(action::Txn { app_id: app_id.clone(), version: *version, last_updated: None, }) }), ) // removes .chain(tombstones.iter().map(|r| { let mut r = (*r).clone(); // As a "new writer", we should always set `extendedFileMetadata` when writing, and include/ignore the other three fields accordingly. // https://github.com/delta-io/delta/blob/fb0452c2fb142310211c6d3604eefb767bb4a134/core/src/main/scala/org/apache/spark/sql/delta/actions/actions.scala#L311-L314 if None == r.extended_file_metadata { r.extended_file_metadata = Some(false); } action::Action::remove(r) })) .map(|a| serde_json::to_value(a).map_err(ArrowError::from)) // adds .chain(state.files().iter().map(|f| { checkpoint_add_from_state(f, partition_col_data_types.as_slice(), &stats_conversions) })); // Create the arrow schema that represents the Checkpoint parquet file. let arrow_schema = delta_log_schema_for_table( <ArrowSchema as TryFrom<&Schema>>::try_from(&current_metadata.schema)?, current_metadata.partition_columns.as_slice(), use_extended_remove_schema, ); debug!("Writing to checkpoint parquet buffer..."); // Write the Checkpoint parquet file. let writeable_cursor = InMemoryWriteableCursor::default(); let mut writer = ArrowWriter::try_new(writeable_cursor.clone(), arrow_schema.clone(), None)?; let batch_size = state.app_transaction_version().len() + tombstones.len() + state.files().len() + 2; // 1 (protocol) + 1 (metadata) let decoder = Decoder::new(arrow_schema, batch_size, None); while let Some(batch) = decoder.next_batch(&mut jsons)? 
{ writer.write(&batch)?; } let _ = writer.close()?; debug!("Finished writing checkpoint parquet buffer."); Ok(writeable_cursor.data()) } fn checkpoint_add_from_state( add: &action::Add, partition_col_data_types: &[(&str, &SchemaDataType)], stats_conversions: &[(SchemaPath, SchemaDataType)], ) -> Result<Value, ArrowError> { let mut v = serde_json::to_value(action::Action::add(add.clone()))?; v["add"]["dataChange"] = Value::Bool(false); if !add.partition_values.is_empty() { let mut partition_values_parsed: HashMap<String, Value> = HashMap::new(); for (field_name, data_type) in partition_col_data_types.iter() { if let Some(string_value) = add.partition_values.get(*field_name) { let v = typed_partition_value_from_option_string(string_value, data_type)?; partition_values_parsed.insert(field_name.to_string(), v); } } let partition_values_parsed = serde_json::to_value(partition_values_parsed)?; v["add"]["partitionValues_parsed"] = partition_values_parsed; } if let Ok(Some(stats)) = add.get_stats() { let mut stats = serde_json::to_value(stats)?; let min_values = stats.get_mut("minValues").and_then(|v| v.as_object_mut()); if let Some(min_values) = min_values { for (path, data_type) in stats_conversions { apply_stats_conversion(min_values, path.as_slice(), data_type) } } let max_values = stats.get_mut("maxValues").and_then(|v| v.as_object_mut()); if let Some(max_values) = max_values { for (path, data_type) in stats_conversions { apply_stats_conversion(max_values, path.as_slice(), data_type) } } v["add"]["stats_parsed"] = stats; } Ok(v) } fn typed_partition_value_from_string( string_value: &str, data_type: &SchemaDataType, ) -> Result<Value, CheckpointError> { match data_type { SchemaDataType::primitive(primitive_type) => match primitive_type.as_str() { "string" | "binary" => Ok(string_value.to_owned().into()), "long" | "integer" | "short" | "byte" => Ok(string_value .parse::<i64>() .map_err(|_| CheckpointError::PartitionValueNotParseable(string_value.to_owned()))? .into()), "boolean" => Ok(string_value .parse::<bool>() .map_err(|_| CheckpointError::PartitionValueNotParseable(string_value.to_owned()))? .into()), "float" | "double" => Ok(string_value .parse::<f64>() .map_err(|_| CheckpointError::PartitionValueNotParseable(string_value.to_owned()))? 
.into()), "date" => { let d = chrono::naive::NaiveDate::parse_from_str(string_value, "%Y-%m-%d") .map_err(|_| { CheckpointError::PartitionValueNotParseable(string_value.to_owned()) })?; // day 0 is 1970-01-01 (719163 days from ce) Ok((d.num_days_from_ce() - 719_163).into()) } "timestamp" => { let ts = chrono::naive::NaiveDateTime::parse_from_str(string_value, "%Y-%m-%d %H:%M:%S") .map_err(|_| { CheckpointError::PartitionValueNotParseable(string_value.to_owned()) })?; Ok((ts.timestamp_millis() * 1000).into()) } s => unimplemented!( "Primitive type {} is not supported for partition column values.", s ), }, d => unimplemented!( "Data type {:?} is not supported for partition column values.", d ), } } fn typed_partition_value_from_option_string( string_value: &Option<String>, data_type: &SchemaDataType, ) -> Result<Value, CheckpointError> { match string_value { Some(s) => { if s.is_empty() { Ok(Value::Null) // empty string should be deserialized as null } else { typed_partition_value_from_string(s, data_type) } } None => Ok(Value::Null), } } type SchemaPath = Vec<String>; fn collect_stats_conversions( paths: &mut Vec<(SchemaPath, SchemaDataType)>, fields: &[SchemaField], ) { let mut _path = SchemaPath::new(); fields .iter() .for_each(|f| collect_field_conversion(&mut _path, paths, f)); } fn collect_field_conversion( current_path: &mut SchemaPath, all_paths: &mut Vec<(SchemaPath, SchemaDataType)>, field: &SchemaField, ) { match field.get_type() { SchemaDataType::primitive(type_name) => { if let "timestamp" = type_name.as_str() { let mut key_path = current_path.clone(); key_path.push(field.get_name().to_owned()); all_paths.push((key_path, field.get_type().to_owned())); } } SchemaDataType::r#struct(struct_field) => { let struct_fields = struct_field.get_fields(); current_path.push(field.get_name().to_owned()); struct_fields .iter() .for_each(|f| collect_field_conversion(current_path, all_paths, f)); current_path.pop(); } _ => { /* noop */ } } } fn apply_stats_conversion( context: &mut serde_json::Map<String, Value>, path: &[String], data_type: &SchemaDataType, ) { if path.len() == 1 { match data_type { SchemaDataType::primitive(type_name) if type_name == "timestamp" => { let v = context.get_mut(&path[0]); if let Some(v) = v { let ts = v .as_str() .and_then(|s| time_utils::timestamp_micros_from_stats_string(s).ok()) .map(|n| Value::Number(serde_json::Number::from(n))); if let Some(ts) = ts { *v = ts; } } } _ => { /* noop */ } } } else { let next_context = context.get_mut(&path[0]).and_then(|v| v.as_object_mut()); if let Some(next_context) = next_context { apply_stats_conversion(next_context, &path[1..], data_type); } } } #[cfg(test)] mod tests { use super::*; use lazy_static::lazy_static; #[test] fn typed_partition_value_from_string_test() { let string_value: Value = "Hello World!".into(); assert_eq!( string_value, typed_partition_value_from_option_string( &Some("Hello World!".to_string()), &SchemaDataType::primitive("string".to_string()), ) .unwrap() ); let bool_value: Value = true.into(); assert_eq!( bool_value, typed_partition_value_from_option_string( &Some("true".to_string()), &SchemaDataType::primitive("boolean".to_string()), ) .unwrap() ); let number_value: Value = 42.into(); assert_eq!( number_value, typed_partition_value_from_option_string( &Some("42".to_string()), &SchemaDataType::primitive("integer".to_string()), ) .unwrap() ); for (s, v) in [ ("2021-08-08", 18_847), ("1970-01-02", 1), ("1970-01-01", 0), ("1969-12-31", -1), ("1-01-01", -719_162), ] { let date_value: Value = v.into(); 
assert_eq!( date_value, typed_partition_value_from_option_string( &Some(s.to_string()), &SchemaDataType::primitive("date".to_string()), ) .unwrap() ); } for (s, v) in [ ("2021-08-08 01:00:01", 1628384401000000i64), ("1970-01-02 12:59:59", 133199000000i64), ("1970-01-01 13:00:01", 46801000000i64), ("1969-12-31 00:00:00", -86400000000i64), ("1677-09-21 00:12:44", -9223372036000000i64), ] { let timestamp_value: Value = v.into(); assert_eq!( timestamp_value, typed_partition_value_from_option_string( &Some(s.to_string()), &SchemaDataType::primitive("timestamp".to_string()), ) .unwrap() ); } let binary_value: Value = "\u{2081}\u{2082}\u{2083}\u{2084}".into(); assert_eq!( binary_value, typed_partition_value_from_option_string( &Some("₁₂₃₄".to_string()), &SchemaDataType::primitive("binary".to_string()), ) .unwrap() ); } #[test] fn null_partition_value_from_string_test() { assert_eq!( Value::Null, typed_partition_value_from_option_string( &None, &SchemaDataType::primitive("integer".to_string()), ) .unwrap() ); // empty string should be treated as null assert_eq!( Value::Null, typed_partition_value_from_option_string( &Some("".to_string()), &SchemaDataType::primitive("integer".to_string()), ) .unwrap() ); } #[test] fn collect_stats_conversions_test() { let delta_schema: Schema = serde_json::from_value(SCHEMA.clone()).unwrap(); let fields = delta_schema.get_fields(); let mut paths = Vec::new(); collect_stats_conversions(&mut paths, fields.as_slice()); assert_eq!(2, paths.len()); assert_eq!( ( vec!["some_struct".to_string(), "struct_timestamp".to_string()], SchemaDataType::primitive("timestamp".to_string()) ), paths[0] ); assert_eq!( ( vec!["some_timestamp".to_string()], SchemaDataType::primitive("timestamp".to_string()) ), paths[1] ); } #[test] fn apply_stats_conversion_test() { let mut stats = STATS_JSON.clone(); let min_values = stats.get_mut("minValues").unwrap().as_object_mut().unwrap(); apply_stats_conversion( min_values, &["some_struct".to_string(), "struct_string".to_string()], &SchemaDataType::primitive("string".to_string()), ); apply_stats_conversion( min_values, &["some_struct".to_string(), "struct_timestamp".to_string()], &SchemaDataType::primitive("timestamp".to_string()), ); apply_stats_conversion( min_values, &["some_string".to_string()], &SchemaDataType::primitive("string".to_string()), ); apply_stats_conversion( min_values, &["some_timestamp".to_string()], &SchemaDataType::primitive("timestamp".to_string()), ); let max_values = stats.get_mut("maxValues").unwrap().as_object_mut().unwrap(); apply_stats_conversion( max_values, &["some_struct".to_string(), "struct_string".to_string()], &SchemaDataType::primitive("string".to_string()), ); apply_stats_conversion( max_values, &["some_struct".to_string(), "struct_timestamp".to_string()], &SchemaDataType::primitive("timestamp".to_string()), ); apply_stats_conversion( max_values, &["some_string".to_string()], &SchemaDataType::primitive("string".to_string()), ); apply_stats_conversion( max_values, &["some_timestamp".to_string()], &SchemaDataType::primitive("timestamp".to_string()), ); // minValues assert_eq!( "A", stats["minValues"]["some_struct"]["struct_string"] .as_str() .unwrap() ); assert_eq!( 1627668684594000i64, stats["minValues"]["some_struct"]["struct_timestamp"] .as_i64() .unwrap() ); assert_eq!("P", stats["minValues"]["some_string"].as_str().unwrap()); assert_eq!( 1627668684594000i64, stats["minValues"]["some_timestamp"].as_i64().unwrap() ); // maxValues assert_eq!( "B", stats["maxValues"]["some_struct"]["struct_string"] .as_str() 
.unwrap() ); assert_eq!( 1627668685594000i64, stats["maxValues"]["some_struct"]["struct_timestamp"] .as_i64() .unwrap() ); assert_eq!("Q", stats["maxValues"]["some_string"].as_str().unwrap()); assert_eq!( 1627668685594000i64, stats["maxValues"]["some_timestamp"].as_i64().unwrap() ); } lazy_static! { static ref SCHEMA: Value = json!({ "type": "struct", "fields": [ { "name": "some_struct", "type": { "type": "struct", "fields": [ { "name": "struct_string", "type": "string", "nullable": true, "metadata": {} }, { "name": "struct_timestamp", "type": "timestamp", "nullable": true, "metadata": {} }] }, "nullable": true, "metadata": {} }, { "name": "some_string", "type": "string", "nullable": true, "metadata": {} }, { "name": "some_timestamp", "type": "timestamp", "nullable": true, "metadata": {} }, ] }); static ref STATS_JSON: Value = json!({ "minValues": { "some_struct": { "struct_string": "A", "struct_timestamp": "2021-07-30T18:11:24.594Z" }, "some_string": "P", "some_timestamp": "2021-07-30T18:11:24.594Z" }, "maxValues": { "some_struct": { "struct_string": "B", "struct_timestamp": "2021-07-30T18:11:25.594Z" }, "some_string": "Q", "some_timestamp": "2021-07-30T18:11:25.594Z" } }); } }
35.39485
169
0.565741
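The `date` branch above encodes a partition date as days since the Unix epoch by subtracting 719_163, the days-from-Common-Era index of 1970-01-01. A standalone check of that arithmetic (expected values taken from the test table above):

use chrono::Datelike;

fn days_since_epoch(date: &str) -> Option<i32> {
    let d = chrono::NaiveDate::parse_from_str(date, "%Y-%m-%d").ok()?;
    // num_days_from_ce() counts from 0001-01-01; the epoch lands at 719_163.
    Some(d.num_days_from_ce() - 719_163)
}

fn main() {
    assert_eq!(days_since_epoch("1970-01-01"), Some(0));
    assert_eq!(days_since_epoch("1969-12-31"), Some(-1));
    assert_eq!(days_since_epoch("2021-08-08"), Some(18_847));
}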
39127b9703b6b0fcc0695b5ff966663792b85577
192
// build-fail // compile-flags: -C debug-assertions #![deny(arithmetic_overflow, const_err)] fn main() { let _x = -1_i32 >> -1; //~^ ERROR: this arithmetic operation will overflow }
19.2
55
0.65625
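The fixture above deliberately overflows: a shift by a negative amount is out of range for the primitive `>>` operator, so `-C debug-assertions` turns it into the `arithmetic_overflow` error shown. For completeness, a small sketch of the checked alternative that surfaces the failure as a value instead:

fn main() {
    let x: i32 = -1;
    // checked_shr takes the shift amount as u32 and returns None when it is
    // out of range, instead of overflowing.
    assert_eq!(x.checked_shr(1), Some(-1)); // arithmetic shift keeps the sign bit
    assert_eq!(x.checked_shr(32), None);    // 32 >= i32::BITS, so out of range
}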
87a7afcf1071c4004b23c5217625b756fec5ac28
2,291
use lightning_codegen::KernelSpecializationPolicy; use std::env; use std::path::PathBuf; use crate::prelude::*; #[derive(Debug)] pub struct Config { pub driver: DriverConfig, pub worker: WorkerConfig, } impl Config { pub fn new(driver: DriverConfig, worker: WorkerConfig) -> Self { Self { driver, worker } } pub fn from_env() -> Self { Self { driver: DriverConfig::from_env(), worker: WorkerConfig::from_env(), } } } #[derive(Debug)] pub struct WorkerConfig { pub storage_dir: Option<PathBuf>, pub storage_capacity: u64, pub host_mem_max: usize, pub host_mem_block: usize, pub device_mem_max: Option<usize>, pub scheduling_lookahead_size: usize, pub specialization_policy: KernelSpecializationPolicy, } impl WorkerConfig { pub fn from_env() -> Self { let mut specialization_policy = default(); if let Ok(level) = env::var("LIGHTNING_SPECIALIZATION") { use KernelSpecializationPolicy::*; specialization_policy = match level.trim() { "0" | "none" => None, "1" | "mild" => Mild, "2" | "standard" | "" => Standard, "3" | "aggressive" => Aggressive, "4" => VeryAggressive, s => { warn!("unknown specialization level {:?}, reverting to level 2", s); Standard } } } Self { storage_dir: None, storage_capacity: u64::MAX, host_mem_max: 40_000_000_000, host_mem_block: 1_000_000_000, scheduling_lookahead_size: 1_000_000_000, device_mem_max: None, specialization_policy, } } } #[derive(Debug, Clone)] pub struct DriverConfig { pub trace_file: Option<PathBuf>, } impl DriverConfig { pub fn from_env() -> Self { let mut out = Self { trace_file: None }; if let Ok(filename) = env::var("LIGHTNING_TRACE") { let filename = filename.trim(); if !filename.is_empty() { info!("writing trace to {:?}", filename); out.trace_file = Some(filename.into()); } } out } }
25.455556
88
0.552597
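A short usage sketch grounded in the parser above: choosing a specialization policy through the `LIGHTNING_SPECIALIZATION` environment variable that `WorkerConfig::from_env` reads.

fn main() {
    // Any of "0"/"none", "1"/"mild", "2"/"standard", "3"/"aggressive", "4" is accepted;
    // anything else logs a warning and falls back to Standard.
    std::env::set_var("LIGHTNING_SPECIALIZATION", "aggressive");
    let config = Config::from_env();
    println!("{:?}", config.worker.specialization_policy); // Aggressive
}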
2925941b93089ad0fdb8d2433f4acea63575fe50
10,859
/* AUTO GENERATED FILE DO NOT EDIT codegen/elder_dragon_quicktype.py */ use serde::{Serialize, Deserialize}; extern crate serde_json; use self::serde_json::Error; pub fn serialize(json: &str) -> Result<AlistarJson,Error>{ serde_json::from_str(json) } use std::collections::HashMap; #[derive(Serialize, Deserialize)] pub struct AlistarJson { #[serde(rename = "type")] alistar_json_type: GroupEnum, format: Format, version: Version, data: Data, } #[derive(Serialize, Deserialize)] pub struct Data { #[serde(rename = "Alistar")] alistar: Alistar, } #[derive(Serialize, Deserialize)] pub struct Alistar { id: ChampionEnum, key: String, name: String, title: String, image: Image, skins: Vec<Skin>, lore: String, blurb: String, allytips: Vec<String>, enemytips: Vec<String>, tags: Vec<Tag>, partype: String, info: Info, stats: HashMap<String, f64>, spells: Vec<Spell>, passive: Passive, recommended: Vec<Recommended>, } #[derive(Serialize, Deserialize)] pub struct Image { full: Full, sprite: Sprite, group: GroupEnum, x: i64, y: i64, w: i64, h: i64, } #[derive(Serialize, Deserialize)] pub struct Info { attack: i64, defense: i64, magic: i64, difficulty: i64, } #[derive(Serialize, Deserialize)] pub struct Passive { name: String, description: String, image: Image, } #[derive(Serialize, Deserialize)] pub struct Recommended { champion: ChampionEnum, title: Title, map: Map, mode: Mode, #[serde(rename = "type")] recommended_type: RecommendedType, #[serde(rename = "customTag")] custom_tag: Option<String>, sortrank: Option<i64>, #[serde(rename = "extensionPage")] extension_page: Option<bool>, #[serde(rename = "customPanel")] custom_panel: Option<serde_json::Value>, blocks: Vec<Block>, #[serde(rename = "useObviousCheckmark")] use_obvious_checkmark: Option<bool>, priority: Option<bool>, } #[derive(Serialize, Deserialize)] pub struct Block { #[serde(rename = "type")] block_type: BlockType, #[serde(rename = "recMath")] rec_math: Option<bool>, #[serde(rename = "recSteps")] rec_steps: Option<bool>, #[serde(rename = "minSummonerLevel")] min_summoner_level: Option<i64>, #[serde(rename = "maxSummonerLevel")] max_summoner_level: Option<i64>, #[serde(rename = "showIfSummonerSpell")] show_if_summoner_spell: Option<IfSummonerSpell>, #[serde(rename = "hideIfSummonerSpell")] hide_if_summoner_spell: Option<IfSummonerSpell>, #[serde(rename = "appendAfterSection")] append_after_section: Option<String>, #[serde(rename = "visibleWithAllOf")] visible_with_all_of: Option<Vec<String>>, #[serde(rename = "hiddenWithAnyOf")] hidden_with_any_of: Option<Vec<String>>, items: Vec<Item>, } #[derive(Serialize, Deserialize)] pub struct Item { id: String, count: i64, #[serde(rename = "hideCount")] hide_count: Option<bool>, } #[derive(Serialize, Deserialize)] pub struct Skin { id: String, num: i64, name: String, chromas: bool, } #[derive(Serialize, Deserialize)] pub struct Spell { id: SpellId, name: String, description: String, tooltip: String, leveltip: Leveltip, maxrank: i64, cooldown: Vec<f64>, #[serde(rename = "cooldownBurn")] cooldown_burn: CooldownBurn, cost: Vec<i64>, #[serde(rename = "costBurn")] cost_burn: String, datavalues: Datavalues, effect: Vec<Option<Vec<f64>>>, #[serde(rename = "effectBurn")] effect_burn: Vec<Option<String>>, vars: Vec<Var>, #[serde(rename = "costType")] cost_type: CostType, maxammo: String, range: Vec<i64>, #[serde(rename = "rangeBurn")] range_burn: String, image: Image, resource: Resource, } #[derive(Serialize, Deserialize)] pub struct Datavalues { } #[derive(Serialize, Deserialize)] pub struct Leveltip { label: 
Vec<String>, effect: Vec<Effect>, } #[derive(Serialize, Deserialize)] pub struct Var { link: Link, coeff: f64, key: Key, } #[derive(Serialize, Deserialize)] pub enum GroupEnum { #[serde(rename = "champion")] Champion, #[serde(rename = "passive")] Passive, #[serde(rename = "spell")] Spell, } #[derive(Serialize, Deserialize)] pub enum ChampionEnum { Alistar, } #[derive(Serialize, Deserialize)] pub enum Full { #[serde(rename = "Alistar_E.png")] AlistarEPng, #[serde(rename = "Alistar.png")] AlistarPng, #[serde(rename = "FerociousHowl.png")] FerociousHowlPng, #[serde(rename = "AlistarE.png")] FullAlistarEPng, #[serde(rename = "Headbutt.png")] HeadbuttPng, #[serde(rename = "Pulverize.png")] PulverizePng, } #[derive(Serialize, Deserialize)] pub enum Sprite { #[serde(rename = "champion0.png")] Champion0Png, #[serde(rename = "passive0.png")] Passive0Png, #[serde(rename = "spell0.png")] Spell0Png, } #[derive(Serialize, Deserialize)] pub enum BlockType { #[serde(rename = "aggressive")] Aggressive, #[serde(rename = "beginner_Advanced")] BeginnerAdvanced, #[serde(rename = "beginner_LegendaryItem")] BeginnerLegendaryItem, #[serde(rename = "beginner_MoreLegendaryItems")] BeginnerMoreLegendaryItems, #[serde(rename = "beginner_MovementSpeed")] BeginnerMovementSpeed, #[serde(rename = "beginner_Starter")] BeginnerStarter, #[serde(rename = "consumables")] Consumables, #[serde(rename = "defensive")] Defensive, #[serde(rename = "early")] Early, #[serde(rename = "earlyjungle")] Earlyjungle, #[serde(rename = "essential")] Essential, #[serde(rename = "essentialjungle")] Essentialjungle, KingPoroSnax, #[serde(rename = "offensive")] Offensive, #[serde(rename = "offmeta")] Offmeta, #[serde(rename = "protective")] Protective, #[serde(rename = "siegeDefense")] SiegeDefense, #[serde(rename = "siegeOffense")] SiegeOffense, #[serde(rename = "starting")] Starting, #[serde(rename = "startingjungle")] Startingjungle, } #[derive(Serialize, Deserialize)] pub enum IfSummonerSpell { #[serde(rename = "")] Empty, SummonerSmite, } #[derive(Serialize, Deserialize)] pub enum Map { CrystalScar, #[serde(rename = "HA")] Ha, #[serde(rename = "SL")] Sl, #[serde(rename = "SR")] Sr, #[serde(rename = "TT")] Tt, } #[derive(Serialize, Deserialize)] pub enum Mode { #[serde(rename = "ARAM")] Aram, #[serde(rename = "CLASSIC")] Classic, #[serde(rename = "FIRSTBLOOD")] Firstblood, #[serde(rename = "GAMEMODEX")] Gamemodex, #[serde(rename = "INTRO")] Intro, #[serde(rename = "KINGPORO")] Kingporo, #[serde(rename = "ODIN")] Odin, #[serde(rename = "SIEGE")] Siege, } #[derive(Serialize, Deserialize)] pub enum RecommendedType { #[serde(rename = "riot")] Riot, } #[derive(Serialize, Deserialize)] pub enum Title { #[serde(rename = "AlistarARAM")] AlistarAram, #[serde(rename = "AlistarCS")] AlistarCs, #[serde(rename = "AlistarFIRSTBLOOD")] AlistarFirstblood, #[serde(rename = "AlistarKINGPORO")] AlistarKingporo, #[serde(rename = "AlistarSIEGE")] AlistarSiege, #[serde(rename = "AlistarSL")] AlistarSl, #[serde(rename = "AlistarSR")] AlistarSr, #[serde(rename = "AlistarTT")] AlistarTt, Beginner, } #[derive(Serialize, Deserialize)] pub enum CooldownBurn { #[serde(rename = "120/100/80")] The12010080, #[serde(rename = "12/11.5/11/10.5/10")] The121151110510, #[serde(rename = "14/13/12/11/10")] The1413121110, #[serde(rename = "17/16/15/14/13")] The1716151413, } #[derive(Serialize, Deserialize)] pub enum CostType { #[serde(rename = " {{ abilityresourcename }}")] Abilityresourcename, #[serde(rename = ": {{ cost }}")] Cost, #[serde(rename = "{{ abilityresourcename }}")] 
CostTypeAbilityresourcename, #[serde(rename = " {{ cost }}")] CostTypeCost, #[serde(rename = " de {{ abilityresourcename }}")] DeAbilityresourcename, #[serde(rename = " pkt. ({{ abilityresourcename }})")] PktAbilityresourcename, } #[derive(Serialize, Deserialize)] pub enum SpellId { AlistarE, FerociousHowl, Headbutt, Pulverize, } #[derive(Serialize, Deserialize)] pub enum Effect { #[serde(rename = "{{ cooldown }} -> {{ cooldownNL }}")] CooldownCooldownNl, #[serde(rename = "{{ cost }} -> {{ costNL }}")] CostCostNl, #[serde(rename = "{{ e1 }} -> {{ e1NL }}")] E1E1Nl, #[serde(rename = "{{ e2 }} -> {{ e2NL }}")] E2E2Nl, #[serde(rename = "{{ cooldown }}->{{ cooldownNL }}")] EffectCooldownCooldownNl, #[serde(rename = "{{ cost }}->{{ costNL }}")] EffectCostCostNl, #[serde(rename = "{{ e1 }}->{{ e1NL }}")] EffectE1E1Nl, #[serde(rename = "{{ e2 }}->{{ e2NL }}")] EffectE2E2Nl, #[serde(rename = "{{ rdamagereduction }}% -> {{ rdamagereductionNL }}%")] EffectRdamagereductionRdamagereductionNl, #[serde(rename = "{{ rdamagereduction }}%->{{ rdamagereductionNL }}%")] FluffyRdamagereductionRdamagereductionNl, #[serde(rename = "%{{ rdamagereduction }} -> %{{ rdamagereductionNL }}")] PurpleRdamagereductionRdamagereductionNl, #[serde(rename = "{{ rdamagereduction }}&nbsp;% -> {{ rdamagereductionNL }}&nbsp;%")] RdamagereductionNbspRdamagereductionNlNbsp, #[serde(rename = "{{ rdamagereduction }} % -> {{ rdamagereductionNL }} %")] RdamagereductionRdamagereductionNl, } #[derive(Serialize, Deserialize)] pub enum Resource { #[serde(rename = "{{ abilityresourcename }}: {{ cost }}")] AbilityresourcenameCost, #[serde(rename = "{{ cost }} {{ abilityresourcename }}")] CostAbilityresourcename, #[serde(rename = "{{ cost }} de {{ abilityresourcename }}")] CostDeAbilityresourcename, #[serde(rename = "{{ cost }} pkt. ({{ abilityresourcename }})")] CostPktAbilityresourcename, #[serde(rename = "{{ abilityresourcename }} {{ cost }}")] ResourceAbilityresourcenameCost, #[serde(rename = "{{ cost }}{{ abilityresourcename }}")] ResourceCostAbilityresourcename, } #[derive(Serialize, Deserialize)] pub enum Key { #[serde(rename = "a1")] A1, } #[derive(Serialize, Deserialize)] pub enum Link { #[serde(rename = "spelldamage")] Spelldamage, } #[derive(Serialize, Deserialize)] pub enum Tag { Support, Tank, } #[derive(Serialize, Deserialize)] pub enum Format { #[serde(rename = "standAloneComplex")] StandAloneComplex, } #[derive(Serialize, Deserialize)] pub enum Version { #[serde(rename = "9.23.1")] The9231, }
23.453564
89
0.628143
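One quirk worth noting in the generated code above: the helper is named `serialize` but wraps `serde_json::from_str`, i.e. it deserializes a champion JSON document into the typed model. A usage sketch (the file name is hypothetical):

fn main() {
    // "alistar.json" stands in for a Data Dragon champion file on disk.
    let json = std::fs::read_to_string("alistar.json").expect("could not read file");
    let parsed: AlistarJson = serialize(&json).expect("invalid champion JSON");
    // The derived Serialize impls allow a round-trip back to a JSON string.
    let _out = serde_json::to_string(&parsed).expect("re-serialization failed");
}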
874f883b8695f78b53ba771b547ca5632ae2a6c1
4,075
use clap::Parser;
use serde_json::Value;
use std::fs::{self, File};
use std::io::{ErrorKind, Write};
use substrate_api_client::extrinsic::log::info;

#[derive(Debug, Parser)]
#[clap(version = "1.0")]
pub struct Config {
    /// URL address of the node RPC endpoint for the chain you are forking
    #[clap(long, default_value = "http://127.0.0.1:9933")]
    pub http_rpc_endpoint: String,

    /// path to write the initial chainspec of the fork
    /// as generated with the `bootstrap-chain` command
    #[clap(long, default_value = "../docker/data/chainspec.json")]
    pub fork_spec_path: String,

    /// where to write the forked genesis chainspec
    #[clap(long, default_value = "../docker/data/chainspec.fork.json")]
    pub write_to_path: String,

    /// which modules to set in forked spec
    #[clap(long, default_value = "Aura, Bankless")]
    pub prefixes: Vec<String>,
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let Config {
        http_rpc_endpoint,
        fork_spec_path,
        write_to_path,
        prefixes,
    } = Config::parse();

    env_logger::init();
    info!(
        "Running with config: \n\thttp_rpc_endpoint: {}\n \tfork_spec_path: {}\n \twrite_to_path: {}",
        http_rpc_endpoint, fork_spec_path, write_to_path
    );

    let mut fork_spec: Value = serde_json::from_str(
        &fs::read_to_string(&fork_spec_path).expect("Could not read chainspec file"),
    )?;

    let storage = get_chain_state(http_rpc_endpoint).await;
    info!("Successfully retrieved chain state");

    // move the desired storage values from the snapshot of the chain to the forked chain genesis spec
    info!(
        "Looking for the following storage items to be moved to the fork: {:?}",
        prefixes
    );

    storage
        .iter()
        .filter(|pair| {
            prefixes
                .iter()
                .map(|p| prefix_as_hex(p))
                // The well-known ":code" key, 0x3a636f6465, is already hex and must not be hashed.
                .chain(vec!["3a636f6465".to_string()])
                .any(|prefix| {
                    let pair = pair.as_array().unwrap();
                    let storage_key = pair[0].as_str().unwrap();
                    // `prefix` is already the hex-encoded twox_128 hash of the module name;
                    // hashing it a second time here could never match a real storage key.
                    storage_key.starts_with(&format!("0x{}", prefix))
                })
        })
        .for_each(|pair| {
            let pair = pair.as_array().unwrap();
            let k = &pair[0].as_str().unwrap();
            let v = &pair[1];
            info!("Moving {} to the fork", k);
            fork_spec["genesis"]["raw"]["top"][k] = v.to_owned();
        });

    // write out the fork spec
    let json = serde_json::to_string(&fork_spec)?;

    info!("Writing forked chain spec to {}", &write_to_path);
    write_to_file(write_to_path, json.as_bytes());

    info!("Done!");
    Ok(())
}

async fn get_chain_state(http_rpc_endpoint: String) -> Vec<Value> {
    let storage: Value = reqwest::Client::new()
        .post(http_rpc_endpoint)
        .json(&serde_json::json!({
            "jsonrpc": "2.0",
            "id": 1,
            "method": "state_getPairs",
            "params": ["0x"]
        }))
        .send()
        .await
        .expect("Storage request has failed")
        .json()
        .await
        .expect("Could not deserialize response as JSON");

    storage["result"]
        .as_array()
        .expect("No result in response")
        .to_owned()
}

fn write_to_file(write_to_path: String, data: &[u8]) {
    let mut file = match fs::OpenOptions::new()
        .write(true)
        .truncate(true)
        .open(&write_to_path)
    {
        Ok(file) => file,
        Err(error) => match error.kind() {
            ErrorKind::NotFound => match File::create(&write_to_path) {
                Ok(file) => file,
                Err(why) => panic!("Cannot create file: {:?}", why),
            },
            _ => panic!("Unexpected error when creating file: {}", &write_to_path),
        },
    };

    file.write_all(data).expect("Could not write to file");
}

fn prefix_as_hex(module: &str) -> String {
    let pallet_name = sp_io::hashing::twox_128(module.as_bytes());
    hex::encode(pallet_name)
}
30.639098
102
0.57227
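`prefix_as_hex` above follows the standard Substrate convention: a pallet's storage keys all start with `twox_128(pallet_name)`. A sketch of extending that to a single storage item, whose key prefix is the pallet hash followed by `twox_128(item_name)` (the pallet/item names here are illustrative):

// Prefix for one storage item: twox_128(pallet) ++ twox_128(item).
fn storage_item_prefix(pallet: &str, item: &str) -> String {
    let mut key = sp_io::hashing::twox_128(pallet.as_bytes()).to_vec();
    key.extend_from_slice(&sp_io::hashing::twox_128(item.as_bytes()));
    format!("0x{}", hex::encode(key))
}

fn main() {
    // E.g. the prefix under which System.Account entries would live.
    println!("{}", storage_item_prefix("System", "Account"));
}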
d7514f31551917de81a872d85d73594888ab1b65
10,955
// Copyright lowRISC contributors.
// Licensed under the Apache License, Version 2.0, see LICENSE for details.
// SPDX-License-Identifier: Apache-2.0

use anyhow::{anyhow, bail, Result};
use directories::{BaseDirs, ProjectDirs};
use erased_serde::Serialize;
use log::LevelFilter;
use nix::sys::signal::{self, Signal};
use nix::unistd::{dup2, setsid, Pid};
use std::env::{self, args_os, ArgsOs};
use std::ffi::OsString;
use std::fs::{self, read_to_string, File};
use std::io::{self, ErrorKind, Write};
use std::iter::{IntoIterator, Iterator};
use std::os::unix::io::AsRawFd;
use std::path::PathBuf;
use std::process::{self, ChildStdout, Command, Stdio};
use std::str::FromStr;
use std::time::Duration;
use structopt::StructOpt;

use opentitanlib::backend;
use opentitanlib::proxy::SessionHandler;

#[derive(Debug, StructOpt)]
#[structopt(
    name = "opentitansession",
    about = "A tool for interacting with OpenTitan chips."
)]
struct Opts {
    #[structopt(
        long,
        default_value = "config",
        help = "Filename of a default flagsfile. Relative to $XDG_CONFIG_HOME/opentitantool."
    )]
    rcfile: PathBuf,

    #[structopt(long, default_value = "off")]
    logging: LevelFilter,

    #[structopt(flatten)]
    backend_opts: backend::BackendOpts,

    #[structopt(
        long,
        help = "Stop a running session, optionally combine with --listen_port for disambiguation"
    )]
    stop: bool,

    #[structopt(
        long,
        help = "Optional, defaults to 9900 or nearest higher available port."
    )]
    listen_port: Option<u16>,

    #[structopt(long, help = "Start session, staying in foreground (do not daemonize)")]
    debug: bool,

    #[structopt(
        long,
        help = "Internal, used to tell the child process to run as a daemon."
    )]
    child: bool,
}

// Given some existing option configuration, maybe re-evaluate command
// line options by reading an `rcfile`.
fn parse_command_line(opts: Opts, mut args: ArgsOs) -> Result<Opts> {
    // Initialize the logger if the user requested the non-default option.
    let logging = opts.logging;
    if logging != LevelFilter::Off {
        env_logger::Builder::from_default_env()
            .filter(None, opts.logging)
            .init();
    }
    if opts.rcfile.as_os_str().is_empty() {
        // No rcfile to parse.
        return Ok(opts);
    }

    // Construct the rcfile path based on the user's config directory
    // (i.e. $HOME/.config/opentitantool/<filename>).
    let rcfile = if let Some(base) = ProjectDirs::from("org", "opentitan", "opentitantool") {
        base.config_dir().join(&opts.rcfile)
    } else {
        opts.rcfile
    };

    // argument[0] is the executable name.
    let mut arguments = vec![args.next().unwrap()];

    // Read in the rcfile and extend the argument list.
    match read_to_string(&rcfile) {
        Ok(content) => {
            for line in content.split('\n') {
                // Strip basic comments as shellwords won't handle comments.
                let (line, _) = line.split_once('#').unwrap_or((line, ""));
                arguments.extend(shellwords::split(line)?.iter().map(OsString::from));
            }
            Ok(())
        }
        Err(e) if e.kind() == ErrorKind::NotFound => {
            log::warn!("Could not read {:?}. Ignoring.", rcfile);
            Ok(())
        }
        Err(e) => Err(anyhow::Error::new(e).context(format!("Reading file {:?}", rcfile))),
    }?;

    // Extend the argument list with all remaining command line arguments.
    arguments.extend(args.into_iter());
    let opts = Opts::from_iter(&arguments);
    if opts.logging != logging {
        // Try re-initializing the logger. Ignore errors.
        let _ = env_logger::Builder::from_default_env()
            .filter(None, opts.logging)
            .try_init();
    }
    Ok(opts)
}

#[derive(serde::Serialize, serde::Deserialize)]
pub struct SessionStartResult {
    port: u16,
}

/// Spawn a child process, passing all the same arguments to the child, letting it instantiate a
/// Transport based on the command line arguments, listen on a TCP socket, and run as a daemon
/// process serving network requests. Success of the child is verified by means of a
/// `SessionStartResult` JSON message sent through the standard output pipe.
fn start_session(run_file_fn: impl FnOnce(u16) -> PathBuf) -> Result<Box<dyn Serialize>> {
    let mut child = Command::new(env::current_exe()?) // Same executable
        .arg("--child") // Add argument to let the new process know it is the daemon child
        .args(args_os().skip(1)) // Propagate all existing arguments: --interface, etc.
        .stdin(Stdio::null()) // Not used by child, disconnect from terminal
        .stdout(Stdio::piped()) // Used for signalling completion of daemon startup
        .stderr(Stdio::inherit()) // May be used for error messages during daemon startup
        .spawn()?;
    match serde_json::from_reader::<&mut ChildStdout, Result<SessionStartResult, String>>(
        child.stdout.as_mut().unwrap(),
    ) {
        Ok(Ok(result)) => {
            // Create a pid file corresponding to the requested TCP port.
            let path = run_file_fn(result.port);
            File::create(path)?.write_all(format!("{}\n", child.id()).as_bytes())?;
            Ok(Box::new(result))
        }
        Ok(Err(e)) => bail!(e),
        Err(e) => bail!("Child process failed to start: {}", e),
    }
}

// This method runs in the daemon child. It will instantiate SessionHandler to bind to a
// socket, then report the chosen port number to the parent process by means of a serialized
// `SessionStartResult` sent through the stdout anonymous pipe, and finally enter an infinite
// loop, processing connections on that socket.
fn session_child(listen_port: Option<u16>, backend_opts: &backend::BackendOpts) -> Result<()> {
    let transport = backend::create(backend_opts)?;
    let mut session = SessionHandler::init(&transport, listen_port)?;
    // Instantiation of the Transport backend and binding to a socket were successful; now go
    // through the process of making this process a daemon, disconnected from the
    // terminal that was used to start it.

    // All configuration files have been processed (relative to the current working directory),
    // so we can now drop the reference to the file system (in case the admin wants to unmount
    // while this daemon is still running).
    env::set_current_dir("/")?;

    // Close stderr, which remained open in order to allow any errors from the above code to
    // surface, but needs to be severed in order for the daemon to avoid being killed by SIGHUP
    // if the user closes the terminal window.
    dup2(File::open("/dev/null")?.as_raw_fd(), 2)?;

    // After severing the only connection to the controlling terminal inherited from the parent,
    // we can now establish a new Unix "session" for this process, which will not be
    // "controlled" by any terminal. This means that this daemon will not be killed by SIGHUP,
    // in case the terminal that was used for running `session start` is later closed.
    setsid()?;

    // Report startup success to parent process.
    serde_json::to_writer::<io::Stdout, Result<SessionStartResult, String>>(
        io::stdout(),
        &Ok(SessionStartResult {
            port: session.get_port(),
        }),
    )?;
    io::stdout().flush()?;

    // Closing the standard output pipe is the signal to the parent process that this child has
    // started up successfully.
We close the pipe indirectly, by replacing file descriptor 1 // with one pointing to /dev/null. This will ensure that any subsequent accidentally // executed println!() will be a no-op, rather than trigger termination via SIGPIPE. dup2(2, 1)?; // Indefinitely run command processing loop in this daemon process. session.run_loop() } #[derive(serde::Serialize, serde::Deserialize)] pub struct SessionStopResult {} /// Load .pid file based on given port number, and send SIGTERM to the process identified in the /// file, to request the daemon gracefully shut down. fn stop_session(run_file_fn: impl FnOnce(u16) -> PathBuf, port: u16) -> Result<Box<dyn Serialize>> { // Read the pid file corresponding to the requested TCP port. let path = run_file_fn(port); let pid: i32 = FromStr::from_str(&fs::read_to_string(&path)?.trim())?; // Send signal to daemon process, asking it to terminate. signal::kill(Pid::from_raw(pid), Signal::SIGTERM)?; // Wait for daemon process to stop. loop { std::thread::sleep(Duration::from_millis(100)); // Send "signal 0", meaning that the kernel performs error checks (among those, checking // that the target process exists), without actually sending any signal. match signal::kill(Pid::from_raw(pid), None) { Ok(()) => (), // Process still running, repeat. Err(nix::Error::Sys(nix::errno::Errno::ESRCH)) => { // Process could not be found, meaning that it has terminated, as expected. fs::remove_file(&path)?; return Ok(Box::new(SessionStopResult {})); } Err(e) => bail!("Unexpected error querying process presence: {}", e), } } } fn main() -> Result<()> { let opts = parse_command_line(Opts::from_args(), args_os())?; if opts.debug { // Start session process in foreground (do not daemonize) let transport = backend::create(&opts.backend_opts)?; let mut session = SessionHandler::init(&transport, opts.listen_port)?; println!("Listening on port {}", session.get_port()); session.run_loop()?; return Ok(()); } if opts.child { // This process is a child, which is supposed to stay running as a daemon. match session_child(opts.listen_port, &opts.backend_opts) { Ok(()) => process::exit(0), Err(e) => { // Report any error to parent process though stdout pipe. serde_json::to_writer::<io::Stdout, Result<SessionStartResult, String>>( io::stdout(), &Err(format!("{}", e).to_string()), )?; process::exit(1) } } } // Locate directory to use for .pid files let base_dirs = BaseDirs::new().unwrap(); let run_user_dir = base_dirs .runtime_dir() .ok_or(anyhow!("No /run/user directory"))?; let run_file_fn = |port: u16| { let mut p = PathBuf::from(run_user_dir); p.push(format!("opentitansession.{}.pid", port)); p }; let value = if opts.stop { // Send signal to daemon process to stop stop_session(run_file_fn, opts.listen_port.unwrap_or(9900))? } else { // Fork a daemon process start_session(run_file_fn)? }; println!("{}", serde_json::to_string_pretty(&value)?); Ok(()) }
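// Hedged illustration (added; not part of the original tool): the handshake
// above moves a serde-encoded `Result<SessionStartResult, String>` across the
// child's stdout pipe. Assuming serde's default externally tagged encoding
// for `Result`, the bytes on the pipe look like the strings asserted below.
#[cfg(test)]
mod handshake_format_tests {
    use super::SessionStartResult;

    #[test]
    fn start_result_wire_format() {
        // Success: the child reports which port it bound.
        let ok: Result<SessionStartResult, String> = Ok(SessionStartResult { port: 9900 });
        assert_eq!(
            serde_json::to_string(&ok).unwrap(),
            r#"{"Ok":{"port":9900}}"#
        );

        // Failure: the child forwards the error message as a plain string.
        let err: Result<SessionStartResult, String> = Err("no transport".to_string());
        assert_eq!(
            serde_json::to_string(&err).unwrap(),
            r#"{"Err":"no transport"}"#
        );
    }
}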
39.692029
100
0.638065
ef55eba4041a6c472a3ef1012e2d235c69687dc0
773
use oysterpack_smart_near::near_sdk::serde::{Deserialize, Serialize}; use std::fmt::{self, Display, Formatter}; use std::ops::Deref; #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] #[serde(crate = "oysterpack_smart_near::near_sdk::serde")] pub struct TransferCallMessage(pub String); impl Deref for TransferCallMessage { type Target = str; fn deref(&self) -> &Self::Target { &self.0 } } impl From<&str> for TransferCallMessage { fn from(memo: &str) -> Self { Self(memo.to_string()) } } impl From<String> for TransferCallMessage { fn from(memo: String) -> Self { Self(memo) } } impl Display for TransferCallMessage { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { self.0.fmt(f) } }
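// Minimal usage sketch (added for illustration; not in the original crate):
// exercises the `From` conversions, `Deref` to `str`, and `Display` above.
#[cfg(test)]
mod transfer_call_message_tests {
    use super::TransferCallMessage;

    #[test]
    fn conversions_deref_and_display() {
        let from_str: TransferCallMessage = "stake".into();
        let from_string: TransferCallMessage = "stake".to_string().into();
        assert_eq!(from_str, from_string);
        // `Deref<Target = str>` lets the message use `&str` methods directly.
        assert!(from_str.starts_with("st"));
        // `Display` echoes the inner string.
        assert_eq!(from_str.to_string(), "stake");
    }
}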
22.735294
69
0.648124
8ab59073536a336ce8b6eb1ceb201d123e1da660
417
use bytes::{Buf, BufMut}; pub(crate) fn peek_u8<B: Buf>(b: &B) -> Option<u8> { b.chunk().get(0).copied() } pub(crate) fn get_ssh_string<B: Buf>(mut b: B) -> Vec<u8> { let len = b.get_u32(); let mut s = vec![0u8; len as usize]; b.copy_to_slice(&mut s[..]); s } pub(crate) fn put_ssh_string<B: BufMut>(mut b: B, s: &[u8]) { let len = s.len() as u32; b.put_u32(len); b.put_slice(s); }
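// Illustrative roundtrip (added as a sketch): an SSH "string" is a big-endian
// u32 length prefix followed by the raw bytes (RFC 4251, section 5), which is
// exactly the framing `put_ssh_string` writes and `get_ssh_string` consumes.
#[cfg(test)]
mod ssh_string_tests {
    use super::{get_ssh_string, put_ssh_string};

    #[test]
    fn roundtrip() {
        let mut buf = Vec::new();
        put_ssh_string(&mut buf, b"ssh-ed25519");
        // 4-byte big-endian length prefix, then the 11 payload bytes.
        assert_eq!(&buf[..4], &[0, 0, 0, 11]);
        assert_eq!(buf.len(), 4 + 11);
        assert_eq!(get_ssh_string(&buf[..]), b"ssh-ed25519");
    }
}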
21.947368
61
0.561151
72e93aaf62defa226027a723af426ef87e98b27a
7,035
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Crate docs #![allow(bad_style, overflowing_literals, improper_ctypes, unknown_lints)] #![crate_type = "rlib"] #![crate_name = "libc"] #![cfg_attr(cross_platform_docs, feature(no_core, lang_items, const_fn))] #![cfg_attr(cross_platform_docs, no_core)] #![doc( html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://doc.rust-lang.org/favicon.ico" )] #![cfg_attr( all(target_os = "linux", target_arch = "x86_64"), doc( html_root_url = "https://rust-lang.github.io/libc/x86_64-unknown-linux-gnu" ) )] #![cfg_attr( all(target_os = "linux", target_arch = "x86"), doc( html_root_url = "https://rust-lang.github.io/libc/i686-unknown-linux-gnu" ) )] #![cfg_attr( all(target_os = "linux", target_arch = "arm"), doc( html_root_url = "https://rust-lang.github.io/libc/arm-unknown-linux-gnueabihf" ) )] #![cfg_attr( all(target_os = "linux", target_arch = "mips"), doc( html_root_url = "https://rust-lang.github.io/libc/mips-unknown-linux-gnu" ) )] #![cfg_attr( all(target_os = "linux", target_arch = "aarch64"), doc( html_root_url = "https://rust-lang.github.io/libc/aarch64-unknown-linux-gnu" ) )] #![cfg_attr( all(target_os = "linux", target_env = "musl"), doc( html_root_url = "https://rust-lang.github.io/libc/x86_64-unknown-linux-musl" ) )] #![cfg_attr( all(target_os = "macos", target_arch = "x86_64"), doc( html_root_url = "https://rust-lang.github.io/libc/x86_64-apple-darwin" ) )] #![cfg_attr( all(target_os = "macos", target_arch = "x86"), doc(html_root_url = "https://rust-lang.github.io/libc/i686-apple-darwin") )] #![cfg_attr( all(windows, target_arch = "x86_64", target_env = "gnu"), doc( html_root_url = "https://rust-lang.github.io/libc/x86_64-pc-windows-gnu" ) )] #![cfg_attr( all(windows, target_arch = "x86", target_env = "gnu"), doc( html_root_url = "https://rust-lang.github.io/libc/i686-pc-windows-gnu" ) )] #![cfg_attr( all(windows, target_arch = "x86_64", target_env = "msvc"), doc( html_root_url = "https://rust-lang.github.io/libc/x86_64-pc-windows-msvc" ) )] #![cfg_attr( all(windows, target_arch = "x86", target_env = "msvc"), doc( html_root_url = "https://rust-lang.github.io/libc/i686-pc-windows-msvc" ) )] #![cfg_attr( target_os = "android", doc( html_root_url = "https://rust-lang.github.io/libc/arm-linux-androideabi" ) )] #![cfg_attr( target_os = "freebsd", doc( html_root_url = "https://rust-lang.github.io/libc/x86_64-unknown-freebsd" ) )] #![cfg_attr( target_os = "openbsd", doc( html_root_url = "https://rust-lang.github.io/libc/x86_64-unknown-openbsd" ) )] #![cfg_attr( target_os = "bitrig", doc( html_root_url = "https://rust-lang.github.io/libc/x86_64-unknown-bitrig" ) )] #![cfg_attr( target_os = "netbsd", doc( html_root_url = "https://rust-lang.github.io/libc/x86_64-unknown-netbsd" ) )] #![cfg_attr( target_os = "dragonfly", doc( html_root_url = "https://rust-lang.github.io/libc/x86_64-unknown-dragonfly" ) )] #![cfg_attr( target_os = "solaris", doc( html_root_url = "https://rust-lang.github.io/libc/x86_64-sun-solaris" ) )] #![cfg_attr( all(target_os = "emscripten", target_arch = "asmjs"), 
doc( html_root_url = "https://rust-lang.github.io/libc/asmjs-unknown-emscripten" ) )] #![cfg_attr( all(target_os = "emscripten", target_arch = "wasm32"), doc( html_root_url = "https://rust-lang.github.io/libc/wasm32-unknown-emscripten" ) )] #![cfg_attr( all(target_os = "linux", target_arch = "sparc64"), doc( html_root_url = "https://rust-lang.github.io/libc/sparc64-unknown-linux-gnu" ) )] // Attributes needed when building as part of the standard library #![cfg_attr(feature = "rustc-dep-of-std", feature(cfg_target_vendor))] #![cfg_attr(feature = "rustc-dep-of-std", feature(link_cfg))] #![cfg_attr(feature = "rustc-dep-of-std", feature(no_core))] #![cfg_attr(feature = "rustc-dep-of-std", no_core)] #![cfg_attr(feature = "rustc-dep-of-std", allow(warnings))] #![cfg_attr( not(any(feature = "use_std", feature = "rustc-dep-of-std")), no_std )] // Enable lints #![cfg_attr(feature = "extra_traits", deny(missing_debug_implementations))] #![deny(missing_copy_implementations, safe_packed_borrows)] #[cfg(all(not(cross_platform_docs), feature = "use_std"))] extern crate std as core; #[macro_use] mod macros; cfg_if! { if #[cfg(feature = "rustc-dep-of-std")] { extern crate rustc_std_workspace_core as core; #[allow(unused_imports)] use core::iter; #[allow(unused_imports)] use core::option; } } cfg_if! { if #[cfg(not(cross_platform_docs))] { cfg_if! { if #[cfg(libc_priv_mod_use)] { #[cfg(libc_core_cvoid)] #[allow(unused_imports)] use core::ffi; #[allow(unused_imports)] use core::fmt; #[allow(unused_imports)] use core::hash; #[allow(unused_imports)] use core::num; #[allow(unused_imports)] use core::mem; } else { #[doc(hidden)] #[allow(unused_imports)] pub use core::fmt; #[doc(hidden)] #[allow(unused_imports)] pub use core::hash; #[doc(hidden)] #[allow(unused_imports)] pub use core::num; #[doc(hidden)] #[allow(unused_imports)] pub use core::mem; } } } } mod dox; cfg_if! { if #[cfg(windows)] { mod windows; pub use windows::*; } else if #[cfg(target_os = "redox")] { mod redox; pub use redox::*; } else if #[cfg(target_os = "cloudabi")] { mod cloudabi; pub use cloudabi::*; } else if #[cfg(target_os = "fuchsia")] { mod fuchsia; pub use fuchsia::*; } else if #[cfg(target_os = "switch")] { mod switch; pub use switch::*; } else if #[cfg(unix)] { mod unix; pub use unix::*; } else if #[cfg(target_env = "sgx")] { mod sgx; pub use sgx::*; } else { // non-supported targets: empty... } }
29.070248
86
0.59403
e572fe21e9ebafa1cb8e246a08c3e51d190934f1
1,173
// vim: tw=80

#[cfg(target_os = "freebsd")]
fn main() {
    use std::env;
    use std::path::PathBuf;

    println!("cargo:rerun-if-env-changed=LLVM_CONFIG_PATH");
    println!("cargo:rustc-link-lib=geom");

    let bindings = bindgen::Builder::default()
        .header("/usr/include/libgeom.h")
        .header("/usr/include/sys/devicestat.h")
        .whitelist_function("geom_.*")
        .whitelist_function("gctl_.*")
        .whitelist_function("g_.*")
        .whitelist_type("devstat_trans_flags")
        .parse_callbacks(Box::new(bindgen::CargoCallbacks))
        .generate()
        .expect("Unable to generate bindings");

    let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());
    bindings
        .write_to_file(out_path.join("bindings.rs"))
        .expect("Couldn't write bindings!");
}

#[cfg(not(target_os = "freebsd"))]
fn main() {
    // If we're not building on FreeBSD, there's no way the build can succeed.
    // This probably means we're building docs on docs.rs, so set this config
    // variable. We'll use it to stub out the crate well enough that
    // freebsd-libgeom's docs can build.
    println!("cargo:rustc-cfg=crossdocs");
}
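// Sketch of the consuming side (added for illustration; not part of this
// build script): with the bindgen pattern above, the crate's source would
// typically pull in the generated file roughly like
//
//     #[cfg(target_os = "freebsd")]
//     include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
//
// and could gate a hand-written stub behind `#[cfg(crossdocs)]` so that the
// docs.rs build configured above still compiles.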
34.5
78
0.638534
2938ae706e4901d2e14c65011a57a8dcf6f7a71d
362
use ndarray::prelude::*; use ndarray::ShapeError; pub struct Flat<'a> { pixels: ArrayView2<'a, u8>, } impl<'a> Flat<'a> { pub fn new(pixels: &[u8]) -> Result<Flat, ShapeError> { Ok(Flat { pixels: ArrayView2::from_shape((64, 64), pixels)?, }) } pub fn view(&self) -> ArrayView2<'a, u8> { self.pixels } }
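// Minimal usage sketch (added for illustration): `Flat::new` succeeds only
// for exactly 64 * 64 = 4096 bytes, since the view shape is fixed.
#[cfg(test)]
mod flat_tests {
    use super::Flat;

    #[test]
    fn shape_is_enforced() {
        let pixels = vec![0u8; 64 * 64];
        let flat = Flat::new(&pixels).expect("4096 bytes fit a 64x64 view");
        assert_eq!(flat.view().dim(), (64, 64));
        // Any other length is a shape error.
        assert!(Flat::new(&pixels[..100]).is_err());
    }
}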
19.052632
62
0.535912
509310a296a63503f80a3bba16d0f65a63bf4a61
23,039
// Copyright (c) The Diem Core Contributors // Copyright (c) The Move Contributors // SPDX-License-Identifier: Apache-2.0 //! This module contains verification of usage of dependencies for modules and scripts. use move_binary_format::{ access::{ModuleAccess, ScriptAccess}, binary_views::BinaryIndexedView, errors::{verification_error, Location, PartialVMError, PartialVMResult, VMResult}, file_format::{ AbilitySet, Bytecode, CodeOffset, CompiledModule, CompiledScript, FunctionDefinitionIndex, FunctionHandleIndex, ModuleHandleIndex, SignatureToken, StructHandleIndex, StructTypeParameter, TableIndex, Visibility, }, file_format_common::VERSION_5, IndexKind, }; use move_core_types::{identifier::Identifier, language_storage::ModuleId, vm_status::StatusCode}; use std::collections::{BTreeMap, BTreeSet}; struct Context<'a, 'b> { resolver: BinaryIndexedView<'a>, // (Module -> CompiledModule) for (at least) all immediate dependencies dependency_map: BTreeMap<ModuleId, &'b CompiledModule>, // (Module::StructName -> handle) for all types of all dependencies struct_id_to_handle_map: BTreeMap<(ModuleId, Identifier), StructHandleIndex>, // (Module::FunctionName -> handle) for all functions that can ever be called by this // module/script in all dependencies func_id_to_handle_map: BTreeMap<(ModuleId, Identifier), FunctionHandleIndex>, // (handle -> visibility) for all function handles found in the module being checked function_visibilities: BTreeMap<FunctionHandleIndex, Visibility>, // all function handles found in the module being checked that are script functions in <V5 // None if the current module/script >= V5 script_functions: Option<BTreeSet<FunctionHandleIndex>>, } impl<'a, 'b> Context<'a, 'b> { fn module( module: &'a CompiledModule, dependencies: impl IntoIterator<Item = &'b CompiledModule>, ) -> Self { Self::new(BinaryIndexedView::Module(module), dependencies) } fn script( script: &'a CompiledScript, dependencies: impl IntoIterator<Item = &'b CompiledModule>, ) -> Self { Self::new(BinaryIndexedView::Script(script), dependencies) } fn new( resolver: BinaryIndexedView<'a>, dependencies: impl IntoIterator<Item = &'b CompiledModule>, ) -> Self { let self_module = resolver.self_id(); let self_module_idx = resolver.self_handle_idx(); let empty_defs = &vec![]; let self_function_defs = match &resolver { BinaryIndexedView::Module(m) => m.function_defs(), BinaryIndexedView::Script(_) => empty_defs, }; let dependency_map = dependencies .into_iter() .filter(|d| Some(d.self_id()) != self_module) .map(|d| (d.self_id(), d)) .collect(); let script_functions = if resolver.version() < VERSION_5 { Some(BTreeSet::new()) } else { None }; let mut context = Self { resolver, dependency_map, struct_id_to_handle_map: BTreeMap::new(), func_id_to_handle_map: BTreeMap::new(), function_visibilities: BTreeMap::new(), script_functions, }; let mut dependency_visibilities = BTreeMap::new(); for (module_id, module) in &context.dependency_map { let friend_module_ids: BTreeSet<_> = module.immediate_friends().into_iter().collect(); // Module::StructName -> def handle idx for struct_def in module.struct_defs() { let struct_handle = module.struct_handle_at(struct_def.struct_handle); let struct_name = module.identifier_at(struct_handle.name); context.struct_id_to_handle_map.insert( (module_id.clone(), struct_name.to_owned()), struct_def.struct_handle, ); } // Module::FuncName -> def handle idx for func_def in module.function_defs() { let func_handle = module.function_handle_at(func_def.function); let func_name = 
module.identifier_at(func_handle.name); dependency_visibilities.insert( (module_id.clone(), func_name.to_owned()), (func_def.visibility, func_def.is_entry), ); let may_be_called = match func_def.visibility { Visibility::Public => true, Visibility::Friend => self_module .as_ref() .map_or(false, |self_id| friend_module_ids.contains(self_id)), Visibility::Private => false, }; if may_be_called { context .func_id_to_handle_map .insert((module_id.clone(), func_name.to_owned()), func_def.function); } } } for function_def in self_function_defs { context .function_visibilities .insert(function_def.function, function_def.visibility); if function_def.is_entry { context .script_functions .as_mut() .map(|s| s.insert(function_def.function)); } } for (idx, function_handle) in context.resolver.function_handles().iter().enumerate() { if Some(function_handle.module) == self_module_idx { continue; } let dep_module_id = context .resolver .module_id_for_handle(context.resolver.module_handle_at(function_handle.module)); let function_name = context.resolver.identifier_at(function_handle.name); let dep_file_format_version = context.dependency_map.get(&dep_module_id).unwrap().version; let dep_function = (dep_module_id, function_name.to_owned()); let (visibility, is_entry) = match dependency_visibilities.get(&dep_function) { // The visibility does not need to be set here. If the function does not // link, it will be reported by verify_imported_functions None => continue, Some(vis_entry) => *vis_entry, }; let fhandle_idx = FunctionHandleIndex(idx as TableIndex); context .function_visibilities .insert(fhandle_idx, visibility); if dep_file_format_version < VERSION_5 && is_entry { context .script_functions .as_mut() .map(|s| s.insert(fhandle_idx)); } } context } } pub fn verify_module<'a>( module: &CompiledModule, dependencies: impl IntoIterator<Item = &'a CompiledModule>, ) -> VMResult<()> { verify_module_impl(module, dependencies) .map_err(|e| e.finish(Location::Module(module.self_id()))) } fn verify_module_impl<'a>( module: &CompiledModule, dependencies: impl IntoIterator<Item = &'a CompiledModule>, ) -> PartialVMResult<()> { let context = &Context::module(module, dependencies); verify_imported_modules(context)?; verify_imported_structs(context)?; verify_imported_functions(context)?; verify_all_script_visibility_usage(context) } pub fn verify_script<'a>( script: &CompiledScript, dependencies: impl IntoIterator<Item = &'a CompiledModule>, ) -> VMResult<()> { verify_script_impl(script, dependencies).map_err(|e| e.finish(Location::Script)) } pub fn verify_script_impl<'a>( script: &CompiledScript, dependencies: impl IntoIterator<Item = &'a CompiledModule>, ) -> PartialVMResult<()> { let context = &Context::script(script, dependencies); verify_imported_modules(context)?; verify_imported_structs(context)?; verify_imported_functions(context)?; verify_all_script_visibility_usage(context) } fn verify_imported_modules(context: &Context) -> PartialVMResult<()> { let self_module = context.resolver.self_handle_idx(); for (idx, module_handle) in context.resolver.module_handles().iter().enumerate() { let module_id = context.resolver.module_id_for_handle(module_handle); if Some(ModuleHandleIndex(idx as u16)) != self_module && !context.dependency_map.contains_key(&module_id) { return Err(verification_error( StatusCode::MISSING_DEPENDENCY, IndexKind::ModuleHandle, idx as TableIndex, )); } } Ok(()) } fn verify_imported_structs(context: &Context) -> PartialVMResult<()> { let self_module = context.resolver.self_handle_idx(); for (idx, 
struct_handle) in context.resolver.struct_handles().iter().enumerate() { if Some(struct_handle.module) == self_module { continue; } let owner_module_id = context .resolver .module_id_for_handle(context.resolver.module_handle_at(struct_handle.module)); // TODO: remove unwrap let owner_module = context.dependency_map.get(&owner_module_id).unwrap(); let struct_name = context.resolver.identifier_at(struct_handle.name); match context .struct_id_to_handle_map .get(&(owner_module_id, struct_name.to_owned())) { Some(def_idx) => { let def_handle = owner_module.struct_handle_at(*def_idx); if !compatible_struct_abilities(struct_handle.abilities, def_handle.abilities) || !compatible_struct_type_parameters( &struct_handle.type_parameters, &def_handle.type_parameters, ) { return Err(verification_error( StatusCode::TYPE_MISMATCH, IndexKind::StructHandle, idx as TableIndex, )); } } None => { return Err(verification_error( StatusCode::LOOKUP_FAILED, IndexKind::StructHandle, idx as TableIndex, )) } } } Ok(()) } fn verify_imported_functions(context: &Context) -> PartialVMResult<()> { let self_module = context.resolver.self_handle_idx(); for (idx, function_handle) in context.resolver.function_handles().iter().enumerate() { if Some(function_handle.module) == self_module { continue; } let owner_module_id = context .resolver .module_id_for_handle(context.resolver.module_handle_at(function_handle.module)); let function_name = context.resolver.identifier_at(function_handle.name); // TODO: remove unwrap let owner_module = context.dependency_map.get(&owner_module_id).unwrap(); match context .func_id_to_handle_map .get(&(owner_module_id.clone(), function_name.to_owned())) { Some(def_idx) => { let def_handle = owner_module.function_handle_at(*def_idx); // compatible type parameter constraints if !compatible_fun_type_parameters( &function_handle.type_parameters, &def_handle.type_parameters, ) { return Err(verification_error( StatusCode::TYPE_MISMATCH, IndexKind::FunctionHandle, idx as TableIndex, )); } // same parameters let handle_params = context.resolver.signature_at(function_handle.parameters); let def_params = match context.dependency_map.get(&owner_module_id) { Some(module) => module.signature_at(def_handle.parameters), None => { return Err(verification_error( StatusCode::LOOKUP_FAILED, IndexKind::FunctionHandle, idx as TableIndex, )) } }; compare_cross_module_signatures( context, &handle_params.0, &def_params.0, owner_module, ) .map_err(|e| e.at_index(IndexKind::FunctionHandle, idx as TableIndex))?; // same return_ let handle_return = context.resolver.signature_at(function_handle.return_); let def_return = match context.dependency_map.get(&owner_module_id) { Some(module) => module.signature_at(def_handle.return_), None => { return Err(verification_error( StatusCode::LOOKUP_FAILED, IndexKind::FunctionHandle, idx as TableIndex, )) } }; compare_cross_module_signatures( context, &handle_return.0, &def_return.0, owner_module, ) .map_err(|e| e.at_index(IndexKind::FunctionHandle, idx as TableIndex))?; } None => { return Err(verification_error( StatusCode::LOOKUP_FAILED, IndexKind::FunctionHandle, idx as TableIndex, )); } } } Ok(()) } // The local view must be a subset of (or equal to) the defined set of abilities. Conceptually, the // local view can be more constrained than the defined one. Removing abilities locally does nothing // but limit the local usage. // (Note this works because there are no negative constraints, i.e. 
you cannot constrain a type // parameter with the absence of an ability) fn compatible_struct_abilities( local_struct_abilities_declaration: AbilitySet, defined_struct_abilities: AbilitySet, ) -> bool { local_struct_abilities_declaration.is_subset(defined_struct_abilities) } // - The number of type parameters must be the same // - Each pair of parameters must satisfy [`compatible_type_parameter_constraints`] fn compatible_fun_type_parameters( local_type_parameters_declaration: &[AbilitySet], defined_type_parameters: &[AbilitySet], ) -> bool { local_type_parameters_declaration.len() == defined_type_parameters.len() && local_type_parameters_declaration .iter() .zip(defined_type_parameters) .all( |( local_type_parameter_constraints_declaration, defined_type_parameter_constraints, )| { compatible_type_parameter_constraints( *local_type_parameter_constraints_declaration, *defined_type_parameter_constraints, ) }, ) } // - The number of type parameters must be the same // - Each pair of parameters must satisfy [`compatible_type_parameter_constraints`] and [`compatible_type_parameter_phantom_decl`] fn compatible_struct_type_parameters( local_type_parameters_declaration: &[StructTypeParameter], defined_type_parameters: &[StructTypeParameter], ) -> bool { local_type_parameters_declaration.len() == defined_type_parameters.len() && local_type_parameters_declaration .iter() .zip(defined_type_parameters) .all( |(local_type_parameter_declaration, defined_type_parameter)| { compatible_type_parameter_phantom_decl( local_type_parameter_declaration, defined_type_parameter, ) && compatible_type_parameter_constraints( local_type_parameter_declaration.constraints, defined_type_parameter.constraints, ) }, ) } // The local view of a type parameter must be a superset of (or equal to) the defined // constraints. Conceptually, the local view can be more constrained than the defined one as the // local context is only limiting usage, and cannot take advantage of the additional constraints. fn compatible_type_parameter_constraints( local_type_parameter_constraints_declaration: AbilitySet, defined_type_parameter_constraints: AbilitySet, ) -> bool { defined_type_parameter_constraints.is_subset(local_type_parameter_constraints_declaration) } // Adding phantom declarations relaxes the requirements for clients, thus, the local view may // lack a phantom declaration present in the definition. 
fn compatible_type_parameter_phantom_decl( local_type_parameter_declaration: &StructTypeParameter, defined_type_parameter: &StructTypeParameter, ) -> bool { // local_type_parameter_declaration.is_phantom => defined_type_parameter.is_phantom !local_type_parameter_declaration.is_phantom || defined_type_parameter.is_phantom } fn compare_cross_module_signatures( context: &Context, handle_sig: &[SignatureToken], def_sig: &[SignatureToken], def_module: &CompiledModule, ) -> PartialVMResult<()> { if handle_sig.len() != def_sig.len() { return Err(PartialVMError::new(StatusCode::TYPE_MISMATCH)); } for (handle_type, def_type) in handle_sig.iter().zip(def_sig) { compare_types(context, handle_type, def_type, def_module)?; } Ok(()) } fn compare_types( context: &Context, handle_type: &SignatureToken, def_type: &SignatureToken, def_module: &CompiledModule, ) -> PartialVMResult<()> { match (handle_type, def_type) { (SignatureToken::Bool, SignatureToken::Bool) | (SignatureToken::U8, SignatureToken::U8) | (SignatureToken::U64, SignatureToken::U64) | (SignatureToken::U128, SignatureToken::U128) | (SignatureToken::Address, SignatureToken::Address) | (SignatureToken::Signer, SignatureToken::Signer) => Ok(()), (SignatureToken::Vector(ty1), SignatureToken::Vector(ty2)) => { compare_types(context, ty1, ty2, def_module) } (SignatureToken::Struct(idx1), SignatureToken::Struct(idx2)) => { compare_structs(context, *idx1, *idx2, def_module) } ( SignatureToken::StructInstantiation(idx1, inst1), SignatureToken::StructInstantiation(idx2, inst2), ) => { compare_structs(context, *idx1, *idx2, def_module)?; compare_cross_module_signatures(context, inst1, inst2, def_module) } (SignatureToken::Reference(ty1), SignatureToken::Reference(ty2)) | (SignatureToken::MutableReference(ty1), SignatureToken::MutableReference(ty2)) => { compare_types(context, ty1, ty2, def_module) } (SignatureToken::TypeParameter(idx1), SignatureToken::TypeParameter(idx2)) => { if idx1 != idx2 { Err(PartialVMError::new(StatusCode::TYPE_MISMATCH)) } else { Ok(()) } } _ => Err(PartialVMError::new(StatusCode::TYPE_MISMATCH)), } } fn compare_structs( context: &Context, idx1: StructHandleIndex, idx2: StructHandleIndex, def_module: &CompiledModule, ) -> PartialVMResult<()> { // grab ModuleId and struct name for the module being verified let struct_handle = context.resolver.struct_handle_at(idx1); let module_handle = context.resolver.module_handle_at(struct_handle.module); let module_id = context.resolver.module_id_for_handle(module_handle); let struct_name = context.resolver.identifier_at(struct_handle.name); // grab ModuleId and struct name for the definition let def_struct_handle = def_module.struct_handle_at(idx2); let def_module_handle = def_module.module_handle_at(def_struct_handle.module); let def_module_id = def_module.module_id_for_handle(def_module_handle); let def_struct_name = def_module.identifier_at(def_struct_handle.name); if module_id != def_module_id || struct_name != def_struct_name { Err(PartialVMError::new(StatusCode::TYPE_MISMATCH)) } else { Ok(()) } } fn verify_all_script_visibility_usage(context: &Context) -> PartialVMResult<()> { // script visibility deprecated after V5 let script_functions = match &context.script_functions { None => return Ok(()), Some(s) => s, }; debug_assert!(context.resolver.version() < VERSION_5); match &context.resolver { BinaryIndexedView::Module(m) => { for (idx, fdef) in m.function_defs().iter().enumerate() { let code = match &fdef.code { None => continue, Some(code) => &code.code, }; 
verify_script_visibility_usage( &context.resolver, script_functions, fdef.is_entry, FunctionDefinitionIndex(idx as TableIndex), code, )? } Ok(()) } BinaryIndexedView::Script(s) => verify_script_visibility_usage( &context.resolver, script_functions, true, FunctionDefinitionIndex(0), &s.code().code, ), } } fn verify_script_visibility_usage( resolver: &BinaryIndexedView, script_functions: &BTreeSet<FunctionHandleIndex>, current_is_entry: bool, fdef_idx: FunctionDefinitionIndex, code: &[Bytecode], ) -> PartialVMResult<()> { for (idx, instr) in code.iter().enumerate() { let idx = idx as CodeOffset; let fhandle_idx = match instr { Bytecode::Call(fhandle_idx) => fhandle_idx, Bytecode::CallGeneric(finst_idx) => { &resolver.function_instantiation_at(*finst_idx).handle } _ => continue, }; match (current_is_entry, script_functions.contains(&fhandle_idx)) { (true, true) => (), (_, true) => { return Err(PartialVMError::new( StatusCode::CALLED_SCRIPT_VISIBLE_FROM_NON_SCRIPT_VISIBLE, ) .at_code_offset(fdef_idx, idx) .with_message( "script-visible functions can only be called from scripts or other \ script-visible functions" .to_string(), )); } _ => (), } } Ok(()) }
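// Worked example of the compatibility rules above (added for illustration):
// if a dependency defines `struct S has copy, store`, a local handle declaring
// `S has store` is accepted (the local abilities are a subset), while one
// declaring `S has key` is rejected. Conversely, for a type parameter defined
// with constraint `T: store`, a local declaration of `T: copy + store` is
// accepted (the local constraints are a superset), while an unconstrained `T`
// is rejected.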
40.277972
130
0.603021
fc23f8608393cc89fb7cf4005a38a8763b372ec2
3,972
// Copyright (c) 2018 Chef Software Inc. and/or applicable contributors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use reqwest::{header::HeaderMap, Body}; use builder_core::http_client::{HttpClient, ACCEPT_APPLICATION_JSON, CONTENT_TYPE_FORM_URL_ENCODED}; use crate::{config::OAuth2Cfg, error::{Error, Result}, types::*}; use async_trait::async_trait; pub struct AzureAD; #[derive(Deserialize)] struct AuthOk { pub access_token: String, } #[derive(Deserialize)] struct User { pub sub: String, pub upn: String, } impl AzureAD { async fn user(&self, config: &OAuth2Cfg, client: &HttpClient, token: &str) -> Result<OAuth2User> { let header_values = vec![ACCEPT_APPLICATION_JSON.clone(),]; let headers = header_values.into_iter().collect::<HeaderMap<_>>(); let resp = client.get(&config.userinfo_url) .headers(headers) .bearer_auth(token) .send() .await .map_err(Error::HttpClient)?; let status = resp.status(); let body = resp.text().await.map_err(Error::HttpClient)?; debug!("AzureAD response body: {}", body); if status.is_success() { let user = match serde_json::from_str::<User>(&body) { Ok(msg) => msg, Err(e) => return Err(Error::Serialization(e)), }; Ok(OAuth2User { id: user.sub, username: user.upn, email: None, }) } else { Err(Error::HttpResponse(status, body)) } } } #[async_trait] impl OAuth2Provider for AzureAD { async fn authenticate(&self, config: &OAuth2Cfg, client: &HttpClient, code: &str) -> Result<(String, OAuth2User)> { let url = config.token_url.to_string(); let body = format!("client_id={}&client_secret={}&grant_type=authorization_code&code={}&\ redirect_uri={}", config.client_id, config.client_secret, code, config.redirect_url); let header_values = vec![ACCEPT_APPLICATION_JSON.clone(), CONTENT_TYPE_FORM_URL_ENCODED.clone()]; let headers = header_values.into_iter().collect::<HeaderMap<_>>(); let body: Body = body.into(); let resp = client.post(&url) .headers(headers) .body(body) .send() .await .map_err(Error::HttpClient)?; let status = resp.status(); let body = resp.text().await.map_err(Error::HttpClient)?; debug!("AzureAD response body: {}", body); let token = if status.is_success() { match serde_json::from_str::<AuthOk>(&body) { Ok(msg) => msg.access_token, Err(e) => return Err(Error::Serialization(e)), } } else { return Err(Error::HttpResponse(status, body)); }; let user = self.user(config, client, &token).await?; Ok((token, user)) } }
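// Illustrative response shapes (added as a sketch, inferred from the two
// deserialization structs above): the token endpoint is expected to return
// JSON containing at least {"access_token": "..."}, and the userinfo endpoint
// at least {"sub": "...", "upn": "..."}; serde ignores any additional fields.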
33.948718
97
0.530715
feef12e97ebab4f0b44993e2a03bd39ca91c41a1
4,896
use std::sync::{ atomic::{AtomicUsize, Ordering}, Arc, }; use robespierre_cache::{Cache, CacheConfig, CommitToCache, HasCache}; use robespierre_events::{Authentication, Connection, RawEventHandler}; use robespierre_http::{Http, HttpAuthentication}; use robespierre_models::{autumn::AutumnTag, channel::ReplyData}; #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { tracing_subscriber::fmt::init(); let token = std::env::var("TOKEN") .expect("Cannot get token; set environment variable TOKEN=... and run again"); let http = Http::new(HttpAuthentication::BotToken { token: &token }).await?; let connection = Connection::connect(Authentication::Bot { token: &token }).await?; let cache = Cache::new(CacheConfig::default()); let context = Context(Arc::new(http), cache, Arc::new(AtomicUsize::new(0))); let handler = Handler; connection.run(context, handler).await?; Ok(()) } #[derive(Clone)] struct Context(Arc<Http>, Arc<Cache>, Arc<AtomicUsize>); impl HasCache for Context { fn get_cache(&self) -> Option<&Cache> { Some(&self.1) } } impl robespierre_events::Context for Context { fn set_messanger(self, _messanger: robespierre_events::ConnectionMessanger) -> Self { // ignore for now self } } #[derive(Copy, Clone)] struct Handler; #[async_trait::async_trait] impl RawEventHandler for Handler { type Context = Context; async fn handle( self, ctx: Self::Context, event: robespierre_models::events::ServerToClientEvent, ) { event.commit_to_cache_ref(&ctx).await; match event { robespierre_models::events::ServerToClientEvent::Message { message } => { if message.content == "Hello" { let author = ctx.0.fetch_user(message.author).await.unwrap(); let channel = ctx.0.fetch_channel(message.channel).await.unwrap(); let server = if let Some(server) = channel.server_id() { Some(ctx.0.fetch_server(server).await.unwrap()) } else { None }; // let _session = message.channel.start_typing(ctx); // tokio::time::sleep(std::time::Duration::from_secs(10)).await; let att_id = ctx .0 .upload_autumn( AutumnTag::Attachments, "help".to_string(), "help me".to_string().into_bytes(), ) .await .unwrap(); let _ = ctx .0 .send_message( message.channel, format!( "Hello <@{}> from <#{}>{}", author.id, channel.id(), server .map_or_else(Default::default, |it| format!(" in {}", it.name)) ), rusty_ulid::generate_ulid_string(), vec![att_id], vec![ReplyData { id: message.id, mention: true, }], ) .await; } // framework commands if message.content == "!ping" || message.content == "!pong" { let num = ctx.2.fetch_add(1, Ordering::Relaxed); let _ = ctx .0 .send_message( message.channel, "Who pinged me?!", rusty_ulid::generate_ulid_string(), vec![], vec![ReplyData { id: message.id, mention: false, }], ) .await; let _ = ctx .0 .send_message( message.channel, format!("I got {} pings since I came online", num), rusty_ulid::generate_ulid_string(), vec![], vec![ReplyData { id: message.id, mention: false, }], ) .await; } } _ => {} } } }
34.237762
99
0.426675
b9b23e44bfaa91ee009b6329aff076be0e495049
519
use std::io; use std::io::Read; use regex::Regex; fn main() { let mut input = String::new(); io::stdin().read_to_string(&mut input).unwrap(); let re = Regex::new(r"(?m)^(\d+)x(\d+)x(\d+)$").unwrap(); println!("{}", re.captures_iter(&input).map(|x| { let (l, w, h): (u32, u32, u32) = (x[1].parse().unwrap(), x[2].parse().unwrap(), x[3].parse().unwrap()); let sides = vec![l * w, w * h, l * h]; 2 * sides.iter().sum::<u32>() + sides.iter().min().unwrap() }).sum::<u32>()); }
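// Worked example (added for clarity): a 2x3x4 present has side areas
// {2*3, 3*4, 2*4} = {6, 12, 8}, so it needs 2*(6+12+8) + min(6,12,8)
// = 52 + 6 = 58 square feet of paper, matching the puzzle's example.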
32.4375
111
0.504817
1d8ca5c34edf133a322de29d360d88cd64e5ad40
487
use std::{iter, str::from_utf8}; use strip_ansi_escapes::strip; pub fn ansi_len(s: &str) -> usize { from_utf8(&strip(s.as_bytes()).unwrap()) .unwrap() .chars() .count() } pub fn pad_to_size(s: &str, rows: usize, columns: usize) -> String { s.lines() .map(|l| [l, &str::repeat(" ", columns - ansi_len(l))].concat()) .chain(iter::repeat(str::repeat(" ", columns))) .take(rows) .collect::<Vec<_>>() .join("\n\r") }
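// Illustrative checks (added; not in the original module): `ansi_len` counts
// the characters that remain after stripping ANSI escape sequences, and
// `pad_to_size` pads each line to `columns` and clamps the output to `rows`
// lines joined with "\n\r".
#[cfg(test)]
mod pad_tests {
    use super::{ansi_len, pad_to_size};

    #[test]
    fn strips_escapes_and_pads() {
        // "\x1b[31mhi\x1b[0m" renders as just the two characters "hi".
        assert_eq!(ansi_len("\x1b[31mhi\x1b[0m"), 2);
        // One input line padded to 4 columns, then one blank filler row.
        assert_eq!(pad_to_size("ab", 2, 4), "ab  \n\r    ");
    }
}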
24.35
72
0.533881
f55c3d6c765c9b061798276859961977b9fc1fc2
406
// TODO 5. Calling unsafe functions or methods
unsafe fn dangerous() {
    println!("do something dangerous");
}

fn foo() {
    let mut num = 5;

    let r1 = &num as *const i32;
    let r2 = &mut num as *mut i32;

    unsafe {
        println!("*r1 = {}", *r1);
        println!("*r2 = {}", *r2);
    }
}

fn main() {
    unsafe {
        dangerous();
    }
    // dangerous(); // error: calling an unsafe function requires an unsafe block

    foo();
    println!("Hello, world!");
}
15.615385
39
0.482759
03a64edffc88bfcca9a3c9a8abb9f85985c4455e
3,959
pub mod vcx;
pub mod connection;
pub mod issuer_credential;
pub mod utils;
pub mod proof;
pub mod credential_def;
pub mod schema;
pub mod credential;
pub mod disclosed_proof;
pub mod wallet;
pub mod logger;
pub mod return_types_u32;

use std::fmt;

/// This macro allows the VcxStateType to be
/// serialized within serde as an integer (represented as
/// a string, because it's still JSON).
macro_rules! enum_number {
    ($name:ident { $($variant:ident = $value:expr, )* }) => {
        #[derive(Clone, Copy, Debug, Eq, PartialEq)]
        pub enum $name {
            $($variant = $value,)*
        }

        impl ::serde::Serialize for $name {
            fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
                where S: ::serde::Serializer
            {
                // Serialize the enum as a u64.
                serializer.serialize_u64(*self as u64)
            }
        }

        impl<'de> ::serde::Deserialize<'de> for $name {
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
                where D: ::serde::Deserializer<'de>
            {
                struct Visitor;

                impl<'de> ::serde::de::Visitor<'de> for Visitor {
                    type Value = $name;

                    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                        formatter.write_str("positive integer")
                    }

                    fn visit_u64<E>(self, value: u64) -> Result<$name, E>
                        where E: ::serde::de::Error
                    {
                        // Rust does not come with a simple way of converting a
                        // number to an enum, so use a big `match`.
                        match value {
                            $( $value => Ok($name::$variant), )*
                            _ => Err(E::custom(
                                format!("unknown {} value: {}",
                                stringify!($name), value))),
                        }
                    }
                }

                // Deserialize the enum from a u64.
                deserializer.deserialize_u64(Visitor)
            }
        }
    }
}

enum_number!(VcxStateType {
    VcxStateNone = 0,
    VcxStateInitialized = 1,
    VcxStateOfferSent = 2,
    VcxStateRequestReceived = 3,
    VcxStateAccepted = 4,
    VcxStateUnfulfilled = 5,
    VcxStateExpired = 6,
    VcxStateRevoked = 7,
    VcxStateRedirected = 8,
    VcxStateRejected = 9,
});

impl VcxStateType {
    pub fn from_u32(state: u32) -> VcxStateType {
        match state {
            0 => VcxStateType::VcxStateNone,
            1 => VcxStateType::VcxStateInitialized,
            2 => VcxStateType::VcxStateOfferSent,
            3 => VcxStateType::VcxStateRequestReceived,
            4 => VcxStateType::VcxStateAccepted,
            5 => VcxStateType::VcxStateUnfulfilled,
            6 => VcxStateType::VcxStateExpired,
            7 => VcxStateType::VcxStateRevoked,
            _ => VcxStateType::VcxStateNone,
        }
    }
}

// Undefined is correlated with VcxStateNone -> Haven't received Proof
// Validated is both validated by indy-sdk and by comparing proof-request
// Invalid is that it failed one or both of the validation processes
enum_number!(ProofStateType {
    ProofUndefined = 0,
    ProofValidated = 1,
    ProofInvalid = 2,
});

enum_number!(PublicEntityStateType {
    Built = 0,
    Published = 1,
});

impl Default for PublicEntityStateType {
    fn default() -> Self {
        PublicEntityStateType::Published
    }
}

#[repr(C)]
pub struct VcxStatus {
    pub handle: libc::c_int,
    pub status: libc::c_int,
    pub msg: *mut libc::c_char,
}

#[cfg(test)]
mod tests {
    use super::*;
    use serde_json;
    use self::VcxStateType::*;

    #[test]
    fn test_serialize_vcx_state_type() {
        let z = VcxStateNone;
        let y = serde_json::to_string(&z).unwrap();
        assert_eq!(y, "0");
    }
}
27.880282
88
0.544077
39ab6cfb1b9c88a2e3ecb7a2d93cd29568e004e4
181
use catalysa_parser::tokenizer::Tokenizer; fn main() { let mut tokenizer = Tokenizer { code: "1 + 2;".to_string() }; println!("{:?}", tokenizer.tokenize()); }
18.1
43
0.58011
f7b9c3c9e4901bb44a4bf47a0be13ba47d63a932
8,444
//! Provides access to the symbolserver config use std::env; use std::fs; use std::path::{Path, PathBuf}; use std::borrow::Cow; use std::io::BufReader; use num_cpus; use serde_yaml; use url::Url; use rusoto::Region; use chrono::Duration; use log::LogLevelFilter; use super::{Result, ResultExt, ErrorKind}; use super::utils::{is_docker, IgnorePatterns}; #[derive(Deserialize, Debug, Default, Clone)] struct AwsConfig { access_key: Option<String>, secret_key: Option<String>, bucket_url: Option<String>, region: Option<String>, } #[derive(Deserialize, Debug, Default, Clone)] struct ServerConfig { host: Option<String>, port: Option<u16>, healthcheck_interval: Option<i64>, threads: Option<usize>, } #[derive(Deserialize, Debug, Default, Clone)] struct LogConfig { level: Option<String>, file: Option<PathBuf>, } #[derive(Deserialize, Debug, Default, Clone)] struct SyncConfig { #[serde(default)] ignore: IgnorePatterns, interval: Option<i64>, } /// Central config object that exposes the information from /// the symbolserver yaml config. #[derive(Deserialize, Debug, Default, Clone)] pub struct Config { #[serde(default)] aws: AwsConfig, #[serde(default)] server: ServerConfig, #[serde(default)] log: LogConfig, symbol_dir: Option<PathBuf>, #[serde(default)] sync: SyncConfig, } impl Config { /// Loads a config from a given file pub fn load_file<P: AsRef<Path>>(path: P) -> Result<Config> { let f = fs::File::open(path)?; serde_yaml::from_reader(BufReader::new(f)).map_err(|err| { ErrorKind::ConfigError(err).into() }) } /// Loads a config from the default location pub fn load_default() -> Result<Config> { let mut home = match env::home_dir() { Some(home) => home, None => { return Ok(Default::default()) }, }; home.push(".sentry-symbolserver.yml"); Ok(if let Ok(_) = fs::metadata(&home) { Config::load_file(&home)? } else { Default::default() }) } /// Return the AWS access key pub fn get_aws_access_key<'a>(&'a self) -> Option<&str> { self.aws.access_key.as_ref().map(|x| &**x) } /// Return the AWS secret key pub fn get_aws_secret_key<'a>(&'a self) -> Option<&str> { self.aws.secret_key.as_ref().map(|x| &**x) } /// Return the AWS S3 bucket URL pub fn get_aws_bucket_url<'a>(&'a self) -> Result<Url> { let url = if let Some(ref value) = self.aws.bucket_url { Url::parse(value)? } else if let Ok(value) = env::var("SYMBOLSERVER_BUCKET_URL") { Url::parse(&value)? } else { return Err(ErrorKind::MissingConfigKey( "aws.bucket_url").into()); }; if url.scheme() != "s3" { return Err(ErrorKind::BadConfigKey( "aws.bucket_url", "The scheme for the bucket URL needs to be s3").into()); } else if url.host_str().is_none() { return Err(ErrorKind::BadConfigKey( "aws.bucket_url", "The bucket URL is missing a name").into()); } Ok(url) } /// Overrides the AWS bucket URL. pub fn set_aws_bucket_url(&mut self, value: &str) { self.aws.bucket_url = Some(value.to_string()); } /// Return the AWS region pub fn get_aws_region(&self) -> Result<Region> { let region_opt = self.aws.region .as_ref() .map(|x| x.to_string()) .or_else(|| env::var("AWS_DEFAULT_REGION").ok()); if let Some(region) = region_opt { if let Ok(rv) = region.parse() { Ok(rv) } else { Err(ErrorKind::BadConfigKey( "aws.region", "An unknown AWS region was provided").into()) } } else { Ok(Region::UsEast1) } } /// Overrides the AWS region pub fn set_aws_region(&mut self, value: Region) { self.aws.region = Some(value.to_string()); } /// Return the path where symbols are stored. 
pub fn get_symbol_dir<'a>(&'a self) -> Result<Cow<'a, Path>> { if let Some(ref path) = self.symbol_dir { Ok(Cow::Borrowed(path.as_path())) } else if let Ok(dir) = env::var("SYMBOLSERVER_SYMBOL_DIR") { Ok(Cow::Owned(PathBuf::from(dir))) } else { Err(ErrorKind::MissingConfigKey("symbol_dir").into()) } } /// Override the symbol dir. pub fn set_symbol_dir<P: AsRef<Path>>(&mut self, value: P) { self.symbol_dir = Some(value.as_ref().to_path_buf()); } fn get_server_host(&self) -> Result<String> { if let Some(ref host) = self.server.host { Ok(host.clone()) } else if let Ok(var) = env::var("IP") { Ok(var) } else if is_docker() { Ok("0.0.0.0".into()) } else { Ok("127.0.0.1".into()) } } fn get_server_port(&self) -> Result<u16> { if let Some(port) = self.server.port { Ok(port) } else if let Ok(portstr) = env::var("PORT") { Ok(portstr.parse().chain_err(|| "Invalid value for port")?) } else { Ok(3000) } } /// Return the bind target for the http server pub fn get_server_socket_addr(&self) -> Result<(String, u16)> { Ok((self.get_server_host()?, self.get_server_port()?)) } /// Return the server healthcheck interval pub fn get_server_healthcheck_interval(&self) -> Result<Duration> { let ttl = if let Some(ttl) = self.server.healthcheck_interval { ttl } else if let Ok(ttlstr) = env::var("SYMBOLSERVER_HEALTHCHECK_INTERVAL") { ttlstr.parse().chain_err(|| "Invalid value for healthcheck interval")? } else { return Ok(Duration::seconds(30)); }; if ttl < 0 { return Err(ErrorKind::BadConfigKey( "server.healthcheck_interval", "Healthcheck interval has to be positive").into()); } Ok(Duration::seconds(ttl)) } /// Return the server sync interval pub fn get_server_sync_interval(&self) -> Result<Duration> { let interval = if let Some(interval) = self.sync.interval { interval } else if let Ok(intervalstr) = env::var("SYMBOLSERVER_SYNC_INTERVAL") { intervalstr.parse().chain_err(|| "Invalid value for sync interval")? } else { return Ok(Duration::minutes(1)); }; if interval < 0 { return Err(ErrorKind::BadConfigKey( "sync.interval", "Sync interval has to be positive").into()); } Ok(Duration::seconds(interval)) } /// Return the number of threads to listen on pub fn get_server_threads(&self) -> Result<usize> { if let Some(threads) = self.server.threads { Ok(threads) } else if let Ok(threadstr) = env::var("SYMBOLSERVER_THREADS") { Ok(threadstr.parse().chain_err(|| "Invalid value for thread count")?) } else { Ok(num_cpus::get() * 5 / 4) } } /// Return the log level filter pub fn get_log_level_filter(&self) -> Result<LogLevelFilter> { let level_opt = self.log.level .as_ref() .map(|x| x.to_string()) .or_else(|| env::var("SYMBOLSERVER_LOG_LEVEL").ok()); if let Some(lvl) = level_opt { lvl.parse().map_err(|_| ErrorKind::BadConfigKey( "log.level", "unknown log level").into()) } else { Ok(LogLevelFilter::Info) } } /// Override the log level filter in the config pub fn set_log_level_filter(&mut self, value: LogLevelFilter) { self.log.level = Some(value.to_string()); } /// Return the log filename pub fn get_log_filename<'a>(&'a self) -> Result<Option<Cow<'a, Path>>> { if let Some(ref path) = self.log.file { Ok(Some(Cow::Borrowed(&*path))) } else if let Ok(path) = env::var("SYMBOLSERVER_LOG_FILE") { Ok(Some(Cow::Owned(PathBuf::from(path)))) } else { Ok(None) } } /// Return the sync ignore patterns pub fn get_ignore_patterns(&self) -> Result<&IgnorePatterns> { Ok(&self.sync.ignore) } }
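// Illustrative config file (added as a sketch): a YAML document matching the
// structs above could look like the following; every key is optional and
// falls back to the environment variables or defaults handled above.
//
//     aws:
//       access_key: "..."
//       secret_key: "..."
//       bucket_url: s3://my-symbol-bucket
//       region: us-west-2
//     server:
//       host: 0.0.0.0
//       port: 3000
//       threads: 8
//     log:
//       level: info
//       file: /var/log/symbolserver.log
//     symbol_dir: /var/lib/symbolserver/symbols
//     sync:
//       interval: 60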
31.390335
90
0.568451
878c60d74d22ad4dc6ef97cb82ac253b3946088f
3,716
// Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0

use crate::{block::Block, common::Payload};
use anyhow::ensure;
use libra_crypto::hash::HashValue;
use libra_types::validator_verifier::ValidatorVerifier;
use serde::{Deserialize, Serialize};
use std::fmt;

/// RPC to get a chain of blocks of the given length starting from the given block id.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
pub struct BlockRetrievalRequest {
    block_id: HashValue,
    num_blocks: u64,
}

impl BlockRetrievalRequest {
    pub fn new(block_id: HashValue, num_blocks: u64) -> Self {
        Self {
            block_id,
            num_blocks,
        }
    }
    pub fn block_id(&self) -> HashValue {
        self.block_id
    }
    pub fn num_blocks(&self) -> u64 {
        self.num_blocks
    }
}

impl fmt::Display for BlockRetrievalRequest {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "[BlockRetrievalRequest starting from id {} with {} blocks]",
            self.block_id, self.num_blocks
        )
    }
}

#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
pub enum BlockRetrievalStatus {
    // Successfully filled in the request.
    Succeeded,
    // Cannot find the block corresponding to block_id.
    IdNotFound,
    // Cannot find enough blocks, but found some.
    NotEnoughBlocks,
}

/// Carries the returned blocks and the retrieval status.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
pub struct BlockRetrievalResponse<T> {
    status: BlockRetrievalStatus,
    #[serde(bound(deserialize = "Block<T>: Deserialize<'de>"))]
    blocks: Vec<Block<T>>,
}

impl<T: Payload> BlockRetrievalResponse<T> {
    pub fn new(status: BlockRetrievalStatus, blocks: Vec<Block<T>>) -> Self {
        Self { status, blocks }
    }

    pub fn status(&self) -> BlockRetrievalStatus {
        self.status.clone()
    }

    pub fn blocks(&self) -> &Vec<Block<T>> {
        &self.blocks
    }

    pub fn verify(
        &self,
        block_id: HashValue,
        num_blocks: u64,
        sig_verifier: &ValidatorVerifier,
    ) -> anyhow::Result<()> {
        ensure!(
            self.status != BlockRetrievalStatus::Succeeded
                || self.blocks.len() as u64 == num_blocks,
            "not enough blocks returned, expect {}, get {}",
            num_blocks,
            self.blocks.len(),
        );
        self.blocks
            .iter()
            .try_fold(block_id, |expected_id, block| {
                block.validate_signatures(sig_verifier)?;
                block.verify_well_formed()?;
                ensure!(
                    block.id() == expected_id,
                    "blocks do not form a chain: expect {}, get {}",
                    expected_id,
                    block.id()
                );
                Ok(block.parent_id())
            })
            .map(|_| ())
    }
}

impl<T: Payload> fmt::Display for BlockRetrievalResponse<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.status() {
            BlockRetrievalStatus::Succeeded => {
                let block_ids = self
                    .blocks
                    .iter()
                    .map(|b| b.id().short_str())
                    .collect::<Vec<String>>();
                write!(
                    f,
                    "[BlockRetrievalResponse: status: {:?}, num_blocks: {}, block_ids: {:?}]",
                    self.status(),
                    self.blocks().len(),
                    block_ids
                )
            }
            _ => write!(f, "[BlockRetrievalResponse: status: {:?}]", self.status()),
        }
    }
}
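// Worked example of `verify` above (added for illustration): for a request
// with block_id = h3 and num_blocks = 3, a valid response carries blocks
// [B3, B2, B1] where B3.id() == h3, B3.parent_id() == B2.id(), and
// B2.parent_id() == B1.id(); the try_fold threads each parent_id through as
// the expected id of the next block in the returned chain.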
29.728
94
0.547632
69092c1af55d46ada6a7e1310dad5fb02ea403b8
50,972
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

use crate::{
    buffer::MutableBuffer,
    datatypes::DataType,
    error::{ArrowError, Result},
    util::bit_util,
};
use std::mem;

use super::{
    data::{into_buffers, new_buffers},
    ArrayData, ArrayDataBuilder,
};
use crate::array::StringOffsetSizeTrait;

mod boolean;
mod fixed_binary;
mod list;
mod null;
mod primitive;
mod structure;
mod utils;
mod variable_size;

type ExtendNullBits<'a> = Box<dyn Fn(&mut _MutableArrayData, usize, usize) + 'a>;
// function that extends `[start..start+len]` into the mutable array.
// this is dynamic because different data_types influence how buffers and children are extended.
type Extend<'a> = Box<dyn Fn(&mut _MutableArrayData, usize, usize, usize) + 'a>;

type ExtendNulls = Box<dyn Fn(&mut _MutableArrayData, usize)>;

/// A mutable [ArrayData] that knows how to freeze itself into an [ArrayData].
/// This is just a data container.
#[derive(Debug)]
struct _MutableArrayData<'a> {
    pub data_type: DataType,
    pub null_count: usize,

    pub len: usize,
    pub null_buffer: MutableBuffer,

    // the arrow specification only allows up to 3 buffers (2 ignoring the nulls above).
    // Thus, we place them on the stack to avoid bound checks and gain greater data locality.
    pub buffer1: MutableBuffer,
    pub buffer2: MutableBuffer,
    pub child_data: Vec<MutableArrayData<'a>>,
}

impl<'a> _MutableArrayData<'a> {
    fn freeze(self, dictionary: Option<ArrayData>) -> ArrayDataBuilder {
        let buffers = into_buffers(&self.data_type, self.buffer1, self.buffer2);

        let child_data = match self.data_type {
            DataType::Dictionary(_, _) => vec![dictionary.unwrap()],
            _ => {
                let mut child_data = Vec::with_capacity(self.child_data.len());
                for child in self.child_data {
                    child_data.push(child.freeze());
                }
                child_data
            }
        };

        let mut array_data_builder = ArrayDataBuilder::new(self.data_type)
            .offset(0)
            .len(self.len)
            .null_count(self.null_count)
            .buffers(buffers)
            .child_data(child_data);
        if self.null_count > 0 {
            array_data_builder =
                array_data_builder.null_bit_buffer(self.null_buffer.into());
        }

        array_data_builder
    }
}

fn build_extend_null_bits(array: &ArrayData, use_nulls: bool) -> ExtendNullBits {
    if let Some(bitmap) = array.null_bitmap() {
        let bytes = bitmap.bits.as_slice();
        Box::new(move |mutable, start, len| {
            utils::resize_for_bits(&mut mutable.null_buffer, mutable.len + len);
            mutable.null_count += utils::set_bits(
                mutable.null_buffer.as_slice_mut(),
                bytes,
                mutable.len,
                array.offset() + start,
                len,
            );
        })
    } else if use_nulls {
        Box::new(|mutable, _, len| {
            utils::resize_for_bits(&mut mutable.null_buffer, mutable.len + len);
            let write_data = mutable.null_buffer.as_slice_mut();
            let offset = mutable.len;
            (0..len).for_each(|i| {
                bit_util::set_bit(write_data, offset + i);
            });
        })
    } else {
        Box::new(|_, _, _| {})
    }
}

/// Struct to efficiently and interactively create an [ArrayData] from an existing [ArrayData] by
/// copying chunks.
/// The main use case of this struct is to perform unary operations on arrays of arbitrary types, such as `filter` and `take`.
/// # Example:
///
/// ```
/// use arrow::{array::{Int32Array, Array, MutableArrayData}};
///
/// let array = Int32Array::from(vec![1, 2, 3, 4, 5]);
/// let array = array.data();
/// // Create a new `MutableArrayData` from an array and with a capacity of 4.
/// // Capacity here is equivalent to `Vec::with_capacity`
/// let arrays = vec![array];
/// let mut mutable = MutableArrayData::new(arrays, false, 4);
/// mutable.extend(0, 1, 3); // extend from the slice [1..3], [2,3]
/// mutable.extend(0, 0, 3); // extend from the slice [0..3], [1,2,3]
/// // `.freeze()` to convert `MutableArrayData` into a `ArrayData`.
/// let new_array = Int32Array::from(mutable.freeze());
/// assert_eq!(Int32Array::from(vec![2, 3, 1, 2, 3]), new_array);
/// ```
pub struct MutableArrayData<'a> {
    arrays: Vec<&'a ArrayData>,
    // The attributes in [_MutableArrayData] cannot be in [MutableArrayData] due to
    // mutability invariants (interior mutability):
    // [MutableArrayData] contains a function that can only mutate [_MutableArrayData], not
    // [MutableArrayData] itself
    data: _MutableArrayData<'a>,

    // the child data of the `Array` in Dictionary arrays.
    // This is not stored in `MutableArrayData` because these values are constant and only
    // needed at the end, when freezing [_MutableArrayData].
    dictionary: Option<ArrayData>,

    // function used to extend values from arrays. This function's lifetime is bound to the array
    // because it reads values from it.
    extend_values: Vec<Extend<'a>>,
    // function used to extend nulls from arrays. This function's lifetime is bound to the array
    // because it reads nulls from it.
    extend_null_bits: Vec<ExtendNullBits<'a>>,
    // function used to extend nulls.
    // this is independent of the arrays and therefore has no lifetime.
    extend_nulls: ExtendNulls,
}

impl<'a> std::fmt::Debug for MutableArrayData<'a> {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // ignores the closures.
        f.debug_struct("MutableArrayData")
            .field("data", &self.data)
            .finish()
    }
}

/// Builds an extend that adds `offset` to the source primitive.
/// Additionally validates that `max` fits into the underlying primitive,
/// returning None if not.
fn build_extend_dictionary(
    array: &ArrayData,
    offset: usize,
    max: usize,
) -> Option<Extend> {
    use crate::datatypes::*;
    use std::convert::TryInto;

    match array.data_type() {
        DataType::Dictionary(child_data_type, _) => match child_data_type.as_ref() {
            DataType::UInt8 => {
                let _: u8 = max.try_into().ok()?;
                let offset: u8 = offset.try_into().ok()?;
                Some(primitive::build_extend_with_offset(array, offset))
            }
            DataType::UInt16 => {
                let _: u16 = max.try_into().ok()?;
                let offset: u16 = offset.try_into().ok()?;
                Some(primitive::build_extend_with_offset(array, offset))
            }
            DataType::UInt32 => {
                let _: u32 = max.try_into().ok()?;
                let offset: u32 = offset.try_into().ok()?;
                Some(primitive::build_extend_with_offset(array, offset))
            }
            DataType::UInt64 => {
                let _: u64 = max.try_into().ok()?;
                let offset: u64 = offset.try_into().ok()?;
                Some(primitive::build_extend_with_offset(array, offset))
            }
            DataType::Int8 => {
                let _: i8 = max.try_into().ok()?;
                let offset: i8 = offset.try_into().ok()?;
                Some(primitive::build_extend_with_offset(array, offset))
            }
            DataType::Int16 => {
                let _: i16 = max.try_into().ok()?;
                let offset: i16 = offset.try_into().ok()?;
                Some(primitive::build_extend_with_offset(array, offset))
            }
            DataType::Int32 => {
                let _: i32 = max.try_into().ok()?;
                let offset: i32 = offset.try_into().ok()?;
                Some(primitive::build_extend_with_offset(array, offset))
            }
            DataType::Int64 => {
                let _: i64 = max.try_into().ok()?;
                let offset: i64 = offset.try_into().ok()?;
                Some(primitive::build_extend_with_offset(array, offset))
            }
            _ => unreachable!(),
        },
        _ => None,
    }
}

fn build_extend(array: &ArrayData) -> Extend {
    use crate::datatypes::*;
    match array.data_type() {
        DataType::Null => null::build_extend(array),
        DataType::Boolean => boolean::build_extend(array),
        DataType::UInt8 => primitive::build_extend::<u8>(array),
        DataType::UInt16 => primitive::build_extend::<u16>(array),
        DataType::UInt32 => primitive::build_extend::<u32>(array),
        DataType::UInt64 => primitive::build_extend::<u64>(array),
        DataType::Int8 => primitive::build_extend::<i8>(array),
        DataType::Int16 => primitive::build_extend::<i16>(array),
        DataType::Int32 => primitive::build_extend::<i32>(array),
        DataType::Int64 => primitive::build_extend::<i64>(array),
        DataType::Float32 => primitive::build_extend::<f32>(array),
        DataType::Float64 => primitive::build_extend::<f64>(array),
        DataType::Date32
        | DataType::Time32(_)
        | DataType::Interval(IntervalUnit::YearMonth) => {
            primitive::build_extend::<i32>(array)
        }
        DataType::Date64
        | DataType::Time64(_)
        | DataType::Timestamp(_, _)
        | DataType::Duration(_)
        | DataType::Interval(IntervalUnit::DayTime) => {
            primitive::build_extend::<i64>(array)
        }
        DataType::Utf8 | DataType::Binary => variable_size::build_extend::<i32>(array),
        DataType::LargeUtf8 | DataType::LargeBinary => {
            variable_size::build_extend::<i64>(array)
        }
        DataType::List(_) => list::build_extend::<i32>(array),
        DataType::LargeList(_) => list::build_extend::<i64>(array),
        DataType::Dictionary(_, _) => unreachable!("should use build_extend_dictionary"),
        DataType::Struct(_) => structure::build_extend(array),
        DataType::FixedSizeBinary(_) => fixed_binary::build_extend(array),
DataType::Float16 => unreachable!(), /* DataType::FixedSizeList(_, _) => {} DataType::Union(_) => {} */ _ => todo!("Take and filter operations still not supported for this datatype"), } } fn build_extend_nulls(data_type: &DataType) -> ExtendNulls { use crate::datatypes::*; Box::new(match data_type { DataType::Null => null::extend_nulls, DataType::Boolean => boolean::extend_nulls, DataType::UInt8 => primitive::extend_nulls::<u8>, DataType::UInt16 => primitive::extend_nulls::<u16>, DataType::UInt32 => primitive::extend_nulls::<u32>, DataType::UInt64 => primitive::extend_nulls::<u64>, DataType::Int8 => primitive::extend_nulls::<i8>, DataType::Int16 => primitive::extend_nulls::<i16>, DataType::Int32 => primitive::extend_nulls::<i32>, DataType::Int64 => primitive::extend_nulls::<i64>, DataType::Float32 => primitive::extend_nulls::<f32>, DataType::Float64 => primitive::extend_nulls::<f64>, DataType::Date32 | DataType::Time32(_) | DataType::Interval(IntervalUnit::YearMonth) => primitive::extend_nulls::<i32>, DataType::Date64 | DataType::Time64(_) | DataType::Timestamp(_, _) | DataType::Duration(_) | DataType::Interval(IntervalUnit::DayTime) => primitive::extend_nulls::<i64>, DataType::Utf8 | DataType::Binary => variable_size::extend_nulls::<i32>, DataType::LargeUtf8 | DataType::LargeBinary => variable_size::extend_nulls::<i64>, DataType::List(_) => list::extend_nulls::<i32>, DataType::LargeList(_) => list::extend_nulls::<i64>, DataType::Dictionary(child_data_type, _) => match child_data_type.as_ref() { DataType::UInt8 => primitive::extend_nulls::<u8>, DataType::UInt16 => primitive::extend_nulls::<u16>, DataType::UInt32 => primitive::extend_nulls::<u32>, DataType::UInt64 => primitive::extend_nulls::<u64>, DataType::Int8 => primitive::extend_nulls::<i8>, DataType::Int16 => primitive::extend_nulls::<i16>, DataType::Int32 => primitive::extend_nulls::<i32>, DataType::Int64 => primitive::extend_nulls::<i64>, _ => unreachable!(), }, DataType::Struct(_) => structure::extend_nulls, DataType::FixedSizeBinary(_) => fixed_binary::extend_nulls, DataType::Float16 => unreachable!(), /* DataType::FixedSizeList(_, _) => {} DataType::Union(_) => {} */ _ => todo!("Take and filter operations still not supported for this datatype"), }) } fn preallocate_offset_and_binary_buffer<Offset: StringOffsetSizeTrait>( capacity: usize, binary_size: usize, ) -> [MutableBuffer; 2] { // offsets let mut buffer = MutableBuffer::new((1 + capacity) * mem::size_of::<Offset>()); // safety: `unsafe` code assumes that this buffer is initialized with one element if Offset::is_large() { buffer.push(0i64); } else { buffer.push(0i32) } [ buffer, MutableBuffer::new(binary_size * mem::size_of::<u8>()), ] } /// Define capacities of child data or data buffers. 
#[derive(Debug, Clone)]
pub enum Capacities {
    /// Binary, Utf8 and LargeUtf8 data types
    /// Define
    /// * the capacity of the array offsets
    /// * the capacity of the binary/str buffer
    Binary(usize, Option<usize>),
    /// List and LargeList data types
    /// Define
    /// * the capacity of the array offsets
    /// * the capacity of the child data
    List(usize, Option<Box<Capacities>>),
    /// Struct type
    /// * the capacity of the array
    /// * the capacities of the fields
    Struct(usize, Option<Vec<Capacities>>),
    /// Dictionary type
    /// * the capacity of the array/keys
    /// * the capacity of the values
    Dictionary(usize, Option<Box<Capacities>>),
    /// Don't preallocate inner buffers and rely on the array growth strategy
    Array(usize),
}

impl<'a> MutableArrayData<'a> {
    /// Returns a new [MutableArrayData] with capacity for `capacity` slots, specialized to create
    /// an [ArrayData] from multiple `arrays`.
    ///
    /// `use_nulls` is a flag used to optimize insertions. It should be `false` if the only source
    /// of nulls are the arrays themselves and `true` if the user plans to call
    /// [MutableArrayData::extend_nulls]. In other words, if `use_nulls` is `false`,
    /// [MutableArrayData::extend_nulls] should not be called.
    pub fn new(arrays: Vec<&'a ArrayData>, use_nulls: bool, capacity: usize) -> Self {
        Self::with_capacities(arrays, use_nulls, Capacities::Array(capacity))
    }

    /// Similar to [MutableArrayData::new], but lets users define the preallocated capacities of
    /// the array. See also [MutableArrayData::new] for more information on the arguments.
    ///
    /// # Panic
    /// This function panics if the given `capacities` don't match the data type of `arrays`, or
    /// when a [Capacities] variant is not yet supported.
    pub fn with_capacities(
        arrays: Vec<&'a ArrayData>,
        mut use_nulls: bool,
        capacities: Capacities,
    ) -> Self {
        let data_type = arrays[0].data_type();
        use crate::datatypes::*;

        // if any of the arrays has nulls, insertions from any array require setting bits
        // as there is at least one array with nulls.
if arrays.iter().any(|array| array.null_count() > 0) { use_nulls = true; }; let mut array_capacity; let [buffer1, buffer2] = match (data_type, &capacities) { (DataType::LargeUtf8, Capacities::Binary(capacity, Some(value_cap))) | (DataType::LargeBinary, Capacities::Binary(capacity, Some(value_cap))) => { array_capacity = *capacity; preallocate_offset_and_binary_buffer::<i64>(*capacity, *value_cap) } (DataType::Utf8, Capacities::Binary(capacity, Some(value_cap))) | (DataType::Binary, Capacities::Binary(capacity, Some(value_cap))) => { array_capacity = *capacity; preallocate_offset_and_binary_buffer::<i32>(*capacity, *value_cap) } (_, Capacities::Array(capacity)) => { array_capacity = *capacity; new_buffers(data_type, *capacity) } _ => panic!("Capacities: {:?} not yet supported", capacities), }; let child_data = match &data_type { DataType::Null | DataType::Boolean | DataType::UInt8 | DataType::UInt16 | DataType::UInt32 | DataType::UInt64 | DataType::Int8 | DataType::Int16 | DataType::Int32 | DataType::Int64 | DataType::Float32 | DataType::Float64 | DataType::Date32 | DataType::Date64 | DataType::Time32(_) | DataType::Time64(_) | DataType::Duration(_) | DataType::Timestamp(_, _) | DataType::Utf8 | DataType::Binary | DataType::LargeUtf8 | DataType::LargeBinary | DataType::Interval(_) | DataType::FixedSizeBinary(_) => vec![], DataType::List(_) | DataType::LargeList(_) => { let childs = arrays .iter() .map(|array| &array.child_data()[0]) .collect::<Vec<_>>(); let capacities = if let Capacities::List(capacity, ref child_capacities) = capacities { array_capacity = capacity; child_capacities .clone() .map(|c| *c) .unwrap_or(Capacities::Array(array_capacity)) } else { Capacities::Array(array_capacity) }; vec![MutableArrayData::with_capacities( childs, use_nulls, capacities, )] } // the dictionary type just appends keys and clones the values. 
            DataType::Dictionary(_, _) => vec![],
            DataType::Float16 => unreachable!(),
            DataType::Struct(fields) => match capacities {
                Capacities::Struct(capacity, Some(ref child_capacities)) => {
                    array_capacity = capacity;
                    (0..fields.len())
                        .zip(child_capacities)
                        .map(|(i, child_cap)| {
                            let child_arrays = arrays
                                .iter()
                                .map(|array| &array.child_data()[i])
                                .collect::<Vec<_>>();
                            MutableArrayData::with_capacities(
                                child_arrays,
                                use_nulls,
                                child_cap.clone(),
                            )
                        })
                        .collect::<Vec<_>>()
                }
                Capacities::Struct(capacity, None) => {
                    array_capacity = capacity;
                    (0..fields.len())
                        .map(|i| {
                            let child_arrays = arrays
                                .iter()
                                .map(|array| &array.child_data()[i])
                                .collect::<Vec<_>>();
                            MutableArrayData::new(child_arrays, use_nulls, capacity)
                        })
                        .collect::<Vec<_>>()
                }
                _ => (0..fields.len())
                    .map(|i| {
                        let child_arrays = arrays
                            .iter()
                            .map(|array| &array.child_data()[i])
                            .collect::<Vec<_>>();
                        MutableArrayData::new(child_arrays, use_nulls, array_capacity)
                    })
                    .collect::<Vec<_>>(),
            },
            _ => {
                todo!("Take and filter operations still not supported for this datatype")
            }
        };

        let dictionary = match &data_type {
            DataType::Dictionary(_, _) => match arrays.len() {
                0 => unreachable!(),
                1 => Some(arrays[0].child_data()[0].clone()),
                _ => {
                    if let Capacities::Dictionary(_, _) = capacities {
                        panic!("dictionary capacity not yet supported")
                    }
                    // Concat dictionaries together
                    let dictionaries: Vec<_> =
                        arrays.iter().map(|array| &array.child_data()[0]).collect();
                    let lengths: Vec<_> = dictionaries
                        .iter()
                        .map(|dictionary| dictionary.len())
                        .collect();
                    let capacity = lengths.iter().sum();

                    let mut mutable = MutableArrayData::new(dictionaries, false, capacity);

                    for (i, len) in lengths.iter().enumerate() {
                        mutable.extend(i, 0, *len)
                    }

                    Some(mutable.freeze())
                }
            },
            _ => None,
        };

        let extend_nulls = build_extend_nulls(data_type);

        let extend_null_bits = arrays
            .iter()
            .map(|array| build_extend_null_bits(array, use_nulls))
            .collect();

        let null_buffer = if use_nulls {
            let null_bytes = bit_util::ceil(array_capacity, 8);
            MutableBuffer::from_len_zeroed(null_bytes)
        } else {
            // create 0 capacity mutable buffer with the intention that it won't be used
            MutableBuffer::with_capacity(0)
        };

        let extend_values = match &data_type {
            DataType::Dictionary(_, _) => {
                let mut next_offset = 0;
                let extend_values: Result<Vec<_>> = arrays
                    .iter()
                    .map(|array| {
                        let offset = next_offset;
                        next_offset += array.child_data()[0].len();
                        build_extend_dictionary(array, offset, next_offset)
                            .ok_or(ArrowError::DictionaryKeyOverflowError)
                    })
                    .collect();

                extend_values.expect("MutableArrayData::new is infallible")
            }
            _ => arrays.iter().map(|array| build_extend(array)).collect(),
        };

        let data = _MutableArrayData {
            data_type: data_type.clone(),
            len: 0,
            null_count: 0,
            null_buffer,
            buffer1,
            buffer2,
            child_data,
        };

        Self {
            arrays,
            data,
            dictionary,
            extend_values,
            extend_null_bits,
            extend_nulls,
        }
    }

    /// Extends this [MutableArrayData] with elements from the bound [ArrayData] at position
    /// `index`, copying the half-open range `start..end`.
    /// # Panic
    /// This function panics if the range is out of bounds, i.e. if `end > array.len()`.
    pub fn extend(&mut self, index: usize, start: usize, end: usize) {
        let len = end - start;
        (self.extend_null_bits[index])(&mut self.data, start, len);
        (self.extend_values[index])(&mut self.data, index, start, len);
        self.data.len += len;
    }

    /// Extends this [MutableArrayData] with null elements, disregarding the bound arrays
    pub fn extend_nulls(&mut self, len: usize) {
        // TODO: null_buffer should probably be extended here as well
        // otherwise is_valid() could later panic
        // add test to confirm
        self.data.null_count += len;
        (self.extend_nulls)(&mut self.data, len);
        self.data.len += len;
    }

    /// Returns the current length
    #[inline]
    pub fn len(&self) -> usize {
        self.data.len
    }

    /// Returns true if len is 0
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.data.len == 0
    }

    /// Returns the current null count
    #[inline]
    pub fn null_count(&self) -> usize {
        self.data.null_count
    }

    /// Creates a [ArrayData] from the pushed regions up to this point, consuming `self`.
    pub fn freeze(self) -> ArrayData {
        self.data.freeze(self.dictionary).build()
    }

    /// Creates a [ArrayDataBuilder] from the pushed regions up to this point, consuming `self`.
    /// This is useful for extending the default behavior of MutableArrayData.
    pub fn into_builder(self) -> ArrayDataBuilder {
        self.data.freeze(self.dictionary)
    }
}

#[cfg(test)]
mod tests {
    use std::{convert::TryFrom, sync::Arc};

    use super::*;

    use crate::{
        array::{
            Array, ArrayData, ArrayRef, BooleanArray, DictionaryArray,
            FixedSizeBinaryArray, Int16Array, Int16Type, Int32Array, Int64Array,
            Int64Builder, ListBuilder, NullArray, PrimitiveBuilder, StringArray,
            StringDictionaryBuilder, StructArray, UInt8Array,
        },
        buffer::Buffer,
        datatypes::Field,
    };
    use crate::{
        array::{ListArray, StringBuilder},
        error::Result,
    };

    /// tests extending from a primitive array without an offset or nulls
    #[test]
    fn test_primitive() {
        let b = UInt8Array::from(vec![Some(1), Some(2), Some(3)]);
        let arrays = vec![b.data()];
        let mut a = MutableArrayData::new(arrays, false, 3);
        a.extend(0, 0, 2);
        let result = a.freeze();
        let array = UInt8Array::from(result);
        let expected = UInt8Array::from(vec![Some(1), Some(2)]);
        assert_eq!(array, expected);
    }

    /// tests extending from a primitive array with an offset but without nulls
    #[test]
    fn test_primitive_offset() {
        let b = UInt8Array::from(vec![Some(1), Some(2), Some(3)]);
        let b = b.slice(1, 2);
        let arrays = vec![b.data()];
        let mut a = MutableArrayData::new(arrays, false, 2);
        a.extend(0, 0, 2);
        let result = a.freeze();
        let array = UInt8Array::from(result);
        let expected = UInt8Array::from(vec![Some(2), Some(3)]);
        assert_eq!(array, expected);
    }

    /// tests extending from a primitive array with an offset and nulls
    #[test]
    fn test_primitive_null_offset() {
        let b = UInt8Array::from(vec![Some(1), None, Some(3)]);
        let b = b.slice(1, 2);
        let arrays = vec![b.data()];
        let mut a = MutableArrayData::new(arrays, false, 2);
        a.extend(0, 0, 2);
        let result = a.freeze();
        let array = UInt8Array::from(result);
        let expected = UInt8Array::from(vec![None, Some(3)]);
        assert_eq!(array, expected);
    }

    #[test]
    fn test_primitive_null_offset_nulls() {
        let b = UInt8Array::from(vec![Some(1), Some(2), Some(3)]);
        let b = b.slice(1, 2);
        let arrays = vec![b.data()];
        let mut a = MutableArrayData::new(arrays, true, 2);
        a.extend(0, 0, 2);
        a.extend_nulls(3);
        a.extend(0, 1, 2);
        let result = a.freeze();
        let array = UInt8Array::from(result);
        let expected =
            UInt8Array::from(vec![Some(2), Some(3), None, None, None, Some(3)]);
        assert_eq!(array, expected);
    }

    #[test]
    fn test_list_null_offset() -> Result<()> {
        let int_builder =
            Int64Builder::new(24);
        let mut builder = ListBuilder::<Int64Builder>::new(int_builder);
        builder.values().append_slice(&[1, 2, 3])?;
        builder.append(true)?;
        builder.values().append_slice(&[4, 5])?;
        builder.append(true)?;
        builder.values().append_slice(&[6, 7, 8])?;
        builder.append(true)?;
        let array = builder.finish();
        let arrays = vec![array.data()];
        let mut mutable = MutableArrayData::new(arrays, false, 0);
        mutable.extend(0, 0, 1);
        let result = mutable.freeze();
        let array = ListArray::from(result);

        let int_builder = Int64Builder::new(24);
        let mut builder = ListBuilder::<Int64Builder>::new(int_builder);
        builder.values().append_slice(&[1, 2, 3])?;
        builder.append(true)?;
        let expected = builder.finish();

        assert_eq!(array, expected);

        Ok(())
    }

    /// tests extending from a variable-sized (strings and binary) array
    /// without an offset but with nulls
    #[test]
    fn test_variable_sized_nulls() {
        let array = StringArray::from(vec![Some("a"), Some("bc"), None, Some("defh")]);
        let arrays = vec![array.data()];

        let mut mutable = MutableArrayData::new(arrays, false, 0);

        mutable.extend(0, 1, 3);

        let result = mutable.freeze();
        let result = StringArray::from(result);

        let expected = StringArray::from(vec![Some("bc"), None]);
        assert_eq!(result, expected);
    }

    /// tests extending from a variable-sized (strings and binary) array
    /// with an offset and nulls
    #[test]
    fn test_variable_sized_offsets() {
        let array = StringArray::from(vec![Some("a"), Some("bc"), None, Some("defh")]);
        let array = array.slice(1, 3);

        let arrays = vec![array.data()];

        let mut mutable = MutableArrayData::new(arrays, false, 0);

        mutable.extend(0, 0, 3);

        let result = mutable.freeze();
        let result = StringArray::from(result);

        let expected = StringArray::from(vec![Some("bc"), None, Some("defh")]);
        assert_eq!(result, expected);
    }

    #[test]
    fn test_string_offsets() {
        let array = StringArray::from(vec![Some("a"), Some("bc"), None, Some("defh")]);
        let array = array.slice(1, 3);

        let arrays = vec![array.data()];

        let mut mutable = MutableArrayData::new(arrays, false, 0);

        mutable.extend(0, 0, 3);

        let result = mutable.freeze();
        let result = StringArray::from(result);

        let expected = StringArray::from(vec![Some("bc"), None, Some("defh")]);
        assert_eq!(result, expected);
    }

    #[test]
    fn test_multiple_with_nulls() {
        let array1 = StringArray::from(vec!["hello", "world"]);
        let array2 = StringArray::from(vec![Some("1"), None]);

        let arrays = vec![array1.data(), array2.data()];

        let mut mutable = MutableArrayData::new(arrays, false, 5);

        mutable.extend(0, 0, 2);
        mutable.extend(1, 0, 2);

        let result = mutable.freeze();
        let result = StringArray::from(result);

        let expected =
            StringArray::from(vec![Some("hello"), Some("world"), Some("1"), None]);
        assert_eq!(result, expected);
    }

    #[test]
    fn test_string_null_offset_nulls() {
        let array = StringArray::from(vec![Some("a"), Some("bc"), None, Some("defh")]);
        let array = array.slice(1, 3);

        let arrays = vec![array.data()];

        let mut mutable = MutableArrayData::new(arrays, true, 0);

        mutable.extend(0, 1, 3);
        mutable.extend_nulls(1);

        let result = mutable.freeze();
        let result = StringArray::from(result);

        let expected = StringArray::from(vec![None, Some("defh"), None]);
        assert_eq!(result, expected);
    }

    #[test]
    fn test_bool() {
        let array = BooleanArray::from(vec![Some(false), Some(true), None, Some(false)]);
        let arrays = vec![array.data()];

        let mut mutable = MutableArrayData::new(arrays, false, 0);

        mutable.extend(0, 1, 3);

        let result = mutable.freeze();
        let result = BooleanArray::from(result);

        let expected = BooleanArray::from(vec![Some(true), None]);
        assert_eq!(result,
expected); } #[test] fn test_null() { let array1 = NullArray::new(10); let array2 = NullArray::new(5); let arrays = vec![array1.data(), array2.data()]; let mut mutable = MutableArrayData::new(arrays, false, 0); mutable.extend(0, 1, 3); mutable.extend(1, 0, 1); let result = mutable.freeze(); let result = NullArray::from(result); let expected = NullArray::new(3); assert_eq!(result, expected); } fn create_dictionary_array(values: &[&str], keys: &[Option<&str>]) -> ArrayData { let values = StringArray::from(values.to_vec()); let mut builder = StringDictionaryBuilder::new_with_dictionary( PrimitiveBuilder::<Int16Type>::new(3), &values, ) .unwrap(); for key in keys { if let Some(v) = key { builder.append(v).unwrap(); } else { builder.append_null().unwrap() } } builder.finish().data().clone() } #[test] fn test_dictionary() { // (a, b, c), (0, 1, 0, 2) => (a, b, a, c) let array = create_dictionary_array( &["a", "b", "c"], &[Some("a"), Some("b"), None, Some("c")], ); let arrays = vec![&array]; let mut mutable = MutableArrayData::new(arrays, false, 0); mutable.extend(0, 1, 3); let result = mutable.freeze(); let result = DictionaryArray::from(result); let expected = Int16Array::from(vec![Some(1), None]); assert_eq!(result.keys(), &expected); } #[test] fn test_struct() { let strings: ArrayRef = Arc::new(StringArray::from(vec![ Some("joe"), None, None, Some("mark"), Some("doe"), ])); let ints: ArrayRef = Arc::new(Int32Array::from(vec![ Some(1), Some(2), Some(3), Some(4), Some(5), ])); let array = StructArray::try_from(vec![("f1", strings.clone()), ("f2", ints.clone())]) .unwrap(); let arrays = vec![array.data()]; let mut mutable = MutableArrayData::new(arrays, false, 0); mutable.extend(0, 1, 3); let data = mutable.freeze(); let array = StructArray::from(data); let expected = StructArray::try_from(vec![ ("f1", strings.slice(1, 2)), ("f2", ints.slice(1, 2)), ]) .unwrap(); assert_eq!(array, expected) } #[test] fn test_struct_offset() { let strings: ArrayRef = Arc::new(StringArray::from(vec![ Some("joe"), None, None, Some("mark"), Some("doe"), ])); let ints: ArrayRef = Arc::new(Int32Array::from(vec![ Some(1), Some(2), Some(3), Some(4), Some(5), ])); let array = StructArray::try_from(vec![("f1", strings.clone()), ("f2", ints.clone())]) .unwrap() .slice(1, 3); let arrays = vec![array.data()]; let mut mutable = MutableArrayData::new(arrays, false, 0); mutable.extend(0, 1, 3); let data = mutable.freeze(); let array = StructArray::from(data); let expected_strings: ArrayRef = Arc::new(StringArray::from(vec![None, Some("mark")])); let expected = StructArray::try_from(vec![ ("f1", expected_strings), ("f2", ints.slice(2, 2)), ]) .unwrap(); assert_eq!(array, expected); } #[test] fn test_struct_nulls() { let strings: ArrayRef = Arc::new(StringArray::from(vec![ Some("joe"), None, None, Some("mark"), Some("doe"), ])); let ints: ArrayRef = Arc::new(Int32Array::from(vec![ Some(1), Some(2), None, Some(4), Some(5), ])); let array = StructArray::try_from(vec![("f1", strings.clone()), ("f2", ints.clone())]) .unwrap(); let arrays = vec![array.data()]; let mut mutable = MutableArrayData::new(arrays, false, 0); mutable.extend(0, 1, 3); let data = mutable.freeze(); let array = StructArray::from(data); let expected_string = Arc::new(StringArray::from(vec![None, None])) as ArrayRef; let expected_int = Arc::new(Int32Array::from(vec![Some(2), None])) as ArrayRef; let expected = StructArray::try_from(vec![("f1", expected_string), ("f2", expected_int)]) .unwrap(); assert_eq!(array, expected) } #[test] fn test_struct_many() { let 
strings: ArrayRef = Arc::new(StringArray::from(vec![ Some("joe"), None, None, Some("mark"), Some("doe"), ])); let ints: ArrayRef = Arc::new(Int32Array::from(vec![ Some(1), Some(2), None, Some(4), Some(5), ])); let array = StructArray::try_from(vec![("f1", strings.clone()), ("f2", ints.clone())]) .unwrap(); let arrays = vec![array.data(), array.data()]; let mut mutable = MutableArrayData::new(arrays, false, 0); mutable.extend(0, 1, 3); mutable.extend(1, 0, 2); let data = mutable.freeze(); let array = StructArray::from(data); let expected_string = Arc::new(StringArray::from(vec![None, None, Some("joe"), None])) as ArrayRef; let expected_int = Arc::new(Int32Array::from(vec![Some(2), None, Some(1), Some(2)])) as ArrayRef; let expected = StructArray::try_from(vec![("f1", expected_string), ("f2", expected_int)]) .unwrap(); assert_eq!(array, expected) } #[test] fn test_binary_fixed_sized_offsets() { let array = FixedSizeBinaryArray::try_from_iter( vec![vec![0, 0], vec![0, 1], vec![0, 2]].into_iter(), ) .expect("Failed to create FixedSizeBinaryArray from iterable"); let array = array.slice(1, 2); // = [[0, 1], [0, 2]] due to the offset = 1 let arrays = vec![array.data()]; let mut mutable = MutableArrayData::new(arrays, false, 0); mutable.extend(0, 1, 2); mutable.extend(0, 0, 1); let result = mutable.freeze(); let result = FixedSizeBinaryArray::from(result); let expected = FixedSizeBinaryArray::try_from_iter(vec![vec![0, 2], vec![0, 1]].into_iter()) .expect("Failed to create FixedSizeBinaryArray from iterable"); assert_eq!(result, expected); } #[test] fn test_list_append() -> Result<()> { let mut builder = ListBuilder::<Int64Builder>::new(Int64Builder::new(24)); builder.values().append_slice(&[1, 2, 3])?; builder.append(true)?; builder.values().append_slice(&[4, 5])?; builder.append(true)?; builder.values().append_slice(&[6, 7, 8])?; builder.values().append_slice(&[9, 10, 11])?; builder.append(true)?; let a = builder.finish(); let a_builder = Int64Builder::new(24); let mut a_builder = ListBuilder::<Int64Builder>::new(a_builder); a_builder.values().append_slice(&[12, 13])?; a_builder.append(true)?; a_builder.append(true)?; a_builder.values().append_slice(&[14, 15])?; a_builder.append(true)?; let b = a_builder.finish(); let c = b.slice(1, 2); let mut mutable = MutableArrayData::new(vec![a.data(), b.data(), c.data()], false, 1); mutable.extend(0, 0, a.len()); mutable.extend(1, 0, b.len()); mutable.extend(2, 0, c.len()); let finished = mutable.freeze(); let expected_int_array = Int64Array::from(vec![ Some(1), Some(2), Some(3), Some(4), Some(5), Some(6), Some(7), Some(8), Some(9), Some(10), Some(11), // append first array Some(12), Some(13), Some(14), Some(15), // append second array Some(14), Some(15), ]); let list_value_offsets = Buffer::from_slice_ref(&[0i32, 3, 5, 11, 13, 13, 15, 15, 17]); let expected_list_data = ArrayData::new( DataType::List(Box::new(Field::new("item", DataType::Int64, true))), 8, None, None, 0, vec![list_value_offsets], vec![expected_int_array.data().clone()], ); assert_eq!(finished, expected_list_data); Ok(()) } #[test] fn test_list_nulls_append() -> Result<()> { let mut builder = ListBuilder::<Int64Builder>::new(Int64Builder::new(32)); builder.values().append_slice(&[1, 2, 3])?; builder.append(true)?; builder.values().append_slice(&[4, 5])?; builder.append(true)?; builder.append(false)?; builder.values().append_slice(&[6, 7, 8])?; builder.values().append_null()?; builder.values().append_null()?; builder.values().append_slice(&[9, 10, 11])?; builder.append(true)?; let a = 
builder.finish();
        let a = a.data();

        let mut builder = ListBuilder::<Int64Builder>::new(Int64Builder::new(32));
        builder.values().append_slice(&[12, 13])?;
        builder.append(true)?;
        builder.append(false)?;
        builder.append(true)?;
        builder.values().append_null()?;
        builder.values().append_null()?;
        builder.values().append_slice(&[14, 15])?;
        builder.append(true)?;
        let b = builder.finish();
        let b = b.data();
        let c = b.slice(1, 2);
        let d = b.slice(2, 2);

        let mut mutable = MutableArrayData::new(vec![a, b, &c, &d], false, 10);

        mutable.extend(0, 0, a.len());
        mutable.extend(1, 0, b.len());
        mutable.extend(2, 0, c.len());
        mutable.extend(3, 0, d.len());
        let result = mutable.freeze();

        let expected_int_array = Int64Array::from(vec![
            Some(1),
            Some(2),
            Some(3),
            Some(4),
            Some(5),
            Some(6),
            Some(7),
            Some(8),
            None,
            None,
            Some(9),
            Some(10),
            Some(11),
            // second array
            Some(12),
            Some(13),
            None,
            None,
            Some(14),
            Some(15),
            // slice(1, 2) results in no values added
            None,
            None,
            Some(14),
            Some(15),
        ]);
        let list_value_offsets =
            Buffer::from_slice_ref(&[0, 3, 5, 5, 13, 15, 15, 15, 19, 19, 19, 19, 23]);
        let expected_list_data = ArrayData::new(
            DataType::List(Box::new(Field::new("item", DataType::Int64, true))),
            12,
            None,
            Some(Buffer::from(&[0b11011011, 0b1110])),
            0,
            vec![list_value_offsets],
            vec![expected_int_array.data().clone()],
        );
        assert_eq!(result, expected_list_data);

        Ok(())
    }

    #[test]
    fn test_list_of_strings_append() -> Result<()> {
        // [["Hello", "Arrow", None]]
        let mut builder = ListBuilder::new(StringBuilder::new(32));
        builder.values().append_value("Hello")?;
        builder.values().append_value("Arrow")?;
        builder.values().append_null()?;
        builder.append(true)?;
        let a = builder.finish();

        // [["alpha", "beta"], [None], ["gamma", "delta", None]]
        let mut builder = ListBuilder::new(StringBuilder::new(32));
        builder.values().append_value("alpha")?;
        builder.values().append_value("beta")?;
        builder.append(true)?;
        builder.values().append_null()?;
        builder.append(true)?;
        builder.values().append_value("gamma")?;
        builder.values().append_value("delta")?;
        builder.values().append_null()?;
        builder.append(true)?;
        let b = builder.finish();

        let mut mutable = MutableArrayData::new(vec![a.data(), b.data()], false, 10);
        mutable.extend(0, 0, a.len());
        mutable.extend(1, 0, b.len());
        mutable.extend(1, 1, 3);
        mutable.extend(1, 0, 0);
        let result = mutable.freeze();

        let expected_string_array = StringArray::from(vec![
            // extend a[0..a.len()]
            // a[0]
            Some("Hello"),
            Some("Arrow"),
            None,
            // extend b[0..b.len()]
            // b[0]
            Some("alpha"),
            Some("beta"),
            // b[1]
            None,
            // b[2]
            Some("gamma"),
            Some("delta"),
            None,
            // extend b[1..3]
            // b[1]
            None,
            // b[2]
            Some("gamma"),
            Some("delta"),
            None,
            // extend b[0..0]
        ]);
        let list_value_offsets = Buffer::from_slice_ref(&[0, 3, 5, 6, 9, 10, 13]);
        let expected_list_data = ArrayData::new(
            DataType::List(Box::new(Field::new("item", DataType::Utf8, true))),
            6,
            None,
            None,
            0,
            vec![list_value_offsets],
            vec![expected_string_array.data().clone()],
        );
        assert_eq!(result, expected_list_data);
        Ok(())
    }

    #[test]
    fn test_fixed_size_binary_append() {
        let a = vec![Some(vec![1, 2]), Some(vec![3, 4]), Some(vec![5, 6])];
        let a = FixedSizeBinaryArray::try_from_sparse_iter(a.into_iter())
            .expect("Failed to create FixedSizeBinaryArray from iterable");

        let b = vec![
            None,
            Some(vec![7, 8]),
            Some(vec![9, 10]),
            None,
            Some(vec![13, 14]),
            None,
        ];
        let b = FixedSizeBinaryArray::try_from_sparse_iter(b.into_iter())
            .expect("Failed to create FixedSizeBinaryArray from iterable");

        let mut mutable = MutableArrayData::new(vec![a.data(), b.data()], false, 10);
mutable.extend(0, 0, a.len()); mutable.extend(1, 0, b.len()); mutable.extend(1, 1, 4); mutable.extend(1, 2, 3); mutable.extend(1, 5, 5); let result = mutable.freeze(); let expected = vec![ // a Some(vec![1, 2]), Some(vec![3, 4]), Some(vec![5, 6]), // b None, Some(vec![7, 8]), Some(vec![9, 10]), None, Some(vec![13, 14]), None, // b[1..4] Some(vec![7, 8]), Some(vec![9, 10]), None, // b[2..3] Some(vec![9, 10]), // b[4..4] ]; let expected = FixedSizeBinaryArray::try_from_sparse_iter(expected.into_iter()) .expect("Failed to create FixedSizeBinaryArray from iterable"); assert_eq!(&result, expected.data()); } /* // this is an old test used on a meanwhile removed dead code // that is still useful when `MutableArrayData` supports fixed-size lists. #[test] fn test_fixed_size_list_append() -> Result<()> { let int_builder = UInt16Builder::new(64); let mut builder = FixedSizeListBuilder::<UInt16Builder>::new(int_builder, 2); builder.values().append_slice(&[1, 2])?; builder.append(true)?; builder.values().append_slice(&[3, 4])?; builder.append(false)?; builder.values().append_slice(&[5, 6])?; builder.append(true)?; let a_builder = UInt16Builder::new(64); let mut a_builder = FixedSizeListBuilder::<UInt16Builder>::new(a_builder, 2); a_builder.values().append_slice(&[7, 8])?; a_builder.append(true)?; a_builder.values().append_slice(&[9, 10])?; a_builder.append(true)?; a_builder.values().append_slice(&[11, 12])?; a_builder.append(false)?; a_builder.values().append_slice(&[13, 14])?; a_builder.append(true)?; a_builder.values().append_null()?; a_builder.values().append_null()?; a_builder.append(true)?; let a = a_builder.finish(); // append array builder.append_data(&[ a.data(), a.slice(1, 3).data(), a.slice(2, 1).data(), a.slice(5, 0).data(), ])?; let finished = builder.finish(); let expected_int_array = UInt16Array::from(vec![ Some(1), Some(2), Some(3), Some(4), Some(5), Some(6), // append first array Some(7), Some(8), Some(9), Some(10), Some(11), Some(12), Some(13), Some(14), None, None, // append slice(1, 3) Some(9), Some(10), Some(11), Some(12), Some(13), Some(14), // append slice(2, 1) Some(11), Some(12), ]); let expected_list_data = ArrayData::new( DataType::FixedSizeList( Box::new(Field::new("item", DataType::UInt16, true)), 2, ), 12, None, None, 0, vec![], vec![expected_int_array.data()], ); let expected_list = FixedSizeListArray::from(Arc::new(expected_list_data) as ArrayData); assert_eq!(&expected_list.values(), &finished.values()); assert_eq!(expected_list.len(), finished.len()); Ok(()) } */ }
34.984214
126
0.536412
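A quick way to see how the `MutableArrayData` API above fits together is a small end-to-end sketch. The example below is modeled on the struct's doc comment and the `test_primitive_null_offset_nulls` test; it assumes the same arrow version and API as the file above, in particular `extend(index, start, end)` taking a half-open range and `freeze()` producing an `ArrayData`.

```rust
use arrow::array::{Array, Int32Array, MutableArrayData};

fn main() {
    // Two source arrays to copy slices from.
    let a = Int32Array::from(vec![1, 2, 3]);
    let b = Int32Array::from(vec![Some(10), None, Some(30)]);
    let arrays = vec![a.data(), b.data()];

    // `use_nulls = true` because we also call `extend_nulls` below;
    // capacity 5 matches the final length.
    let mut mutable = MutableArrayData::new(arrays, true, 5);
    mutable.extend(0, 0, 2); // copy a[0..2] => 1, 2
    mutable.extend(1, 1, 3); // copy b[1..3] => null, 30
    mutable.extend_nulls(1); // append one explicit null

    let result = Int32Array::from(mutable.freeze());
    let expected = Int32Array::from(vec![Some(1), Some(2), None, Some(30), None]);
    assert_eq!(result, expected);
}
```

Because `extend_nulls` is used here, `use_nulls` must be `true`; with `use_nulls = false`, only the validity bitmaps of the source arrays themselves are tracked.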
014ea07f8e7d28b3753a11f213acd15271e5fb37
4,004
use crate::trans::TRANS;
use crate::CONFIG;
use anyhow::Context;
use futures::TryFutureExt;
use once_cell::sync::Lazy;
use regex::Regex;
use reqwest::header::*;
use reqwest::{Client, Response};
use std::borrow::Cow;
use std::io::Write;
use std::time::SystemTime;
use tempfile::NamedTempFile;

pub static HOST: Lazy<&'static str> = Lazy::new(|| {
    CONFIG
        .exhentai
        .search_url
        .host_str()
        .expect("failed to extract host from search_url")
});

/// Format image URLs as HTML
pub fn img_urls_to_html(img_urls: &[String]) -> String {
    img_urls
        .iter()
        .filter(|s| !s.is_empty())
        .map(|s| format!(r#"<img src="{}">"#, s))
        .collect::<Vec<_>>()
        .join("")
}

/// Left-pad a string with spaces
fn pad_left(s: &str, len: usize) -> Cow<str> {
    let width = unicode_width::UnicodeWidthStr::width(s);
    if width >= len {
        Cow::Borrowed(s)
    } else {
        Cow::Owned(" ".repeat(len - width) + s)
    }
}

/// Convert tags into a text format that can be sent to Telegram as-is
pub fn tags_to_string(tags: &[(String, Vec<String>)]) -> String {
    let replace_table = vec![
        (" ", "_"),
        ("_|_", " #"),
        ("-", "_"),
        ("/", "_"),
        ("·", "_"),
    ];
    let trans = |namespace: &str, string: &str| -> String {
        // For tags like "usashiro mani | mani", only the first part needs to be translated
        let to_translate = string.split(" | ").next().unwrap();
        let mut result = TRANS.trans(namespace, to_translate).to_owned();
        // If there is no translation, fall back to the original string
        if result == to_translate {
            result = string.to_owned();
        }
        for (from, to) in replace_table.iter() {
            result = result.replace(from, to);
        }
        format!("#{}", result)
    };

    let mut ret = vec![];
    for (k, v) in tags {
        let v = v.iter().map(|s| trans(k, s)).collect::<Vec<_>>().join(" ");
        ret.push(format!(
            "<code>{}</code>: {}",
            pad_left(TRANS.trans("rows", k), 6),
            v
        ))
    }
    ret.join("\n")
}

/// Extract the numeric id from an e-hentai gallery URL; the second item is the token
pub fn get_id_from_gallery(url: &str) -> (i32, String) {
    let url = url.split('/').collect::<Vec<_>>();
    (url[4].parse::<i32>().unwrap(), url[5].to_owned())
}

/// Extract the numeric id (the `fileindex` parameter) from an image URL
/// Example image URL:
/// https://bhoxhym.oddgxmtpzgse.hath.network/h/33f789fab8ecb4667521e6b1ad3b201936a96415-382043-1280-1817-jpg/keystamp=1619024700-fff70cfa32;fileindex=91876552;xres=2400/00000000.jpg
pub fn get_id_from_image(url: &str) -> Option<i32> {
    static RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"fileindex=(\d+)").unwrap());
    let caps = RE.captures(url)?;
    caps.get(1).and_then(|s| s.as_str().parse::<i32>().ok())
}

/// Extract the image hash, here the first ten characters of the original image's hash
/// Example link: https://exhentai.org/s/03af734602/1932743-1
pub fn get_hash_from_image(url: &str) -> Option<&str> {
    static RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"/s/([0-9a-f]+)/").unwrap());
    let caps = RE.captures(url)?;
    caps.get(1).map(|s| s.as_str())
}

/// Build a direct link to a message in the current channel from its message id
pub fn get_message_url(id: i32) -> String {
    format!("https://t.me/{}/{}", CONFIG.telegram.channel_id, id)
        .replace("/-100", "/")
        .replace("@", "")
}

pub fn get_timestamp() -> u64 {
    SystemTime::now()
        .duration_since(SystemTime::UNIX_EPOCH)
        .expect("did you time travel?")
        .as_secs()
}

pub fn extract_telegraph_path(s: &str) -> &str {
    s.split('/')
        .last()
        .and_then(|s| s.split('?').next())
        .unwrap()
}

pub async fn download_to_temp(client: &Client, url: &str) -> anyhow::Result<NamedTempFile> {
    let bytes = client
        .get(url)
        .header(CONNECTION, "keep-alive")
        .header(REFERER, "https://exhentai.org/")
        .send()
        .and_then(Response::bytes)
        .await?;
    let suffix =
        String::from(".") + url.rsplit_once('.').context("image file extension not found")?.1;
    let mut tmp = tempfile::Builder::new()
        .prefix("exloli_")
        .suffix(&suffix)
        .rand_bytes(5)
        .tempfile()?;
    tmp.write_all(bytes.as_ref())?;
    Ok(tmp)
}
29.226277
182
0.56019
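The regex helpers in the module above are easy to exercise in isolation. Below is a minimal sketch that copies `get_id_from_image` verbatim and runs it against a shortened, made-up URL in the same shape as the one in its doc comment; the crate names (`regex`, `once_cell`) match the file's own imports.

```rust
use once_cell::sync::Lazy;
use regex::Regex;

/// Standalone copy of `get_id_from_image` from the module above.
fn get_id_from_image(url: &str) -> Option<i32> {
    // The id is carried in the `fileindex=` query segment of the image URL.
    static RE: Lazy<Regex> = Lazy::new(|| Regex::new(r"fileindex=(\d+)").unwrap());
    let caps = RE.captures(url)?;
    caps.get(1).and_then(|s| s.as_str().parse::<i32>().ok())
}

fn main() {
    // Hypothetical URL, shortened from the shape shown in the doc comment.
    let url = "https://example.hath.network/h/abc-jpg/keystamp=1;fileindex=91876552;xres=2400/00000000.jpg";
    assert_eq!(get_id_from_image(url), Some(91876552));
    // URLs without a `fileindex` segment yield None.
    assert_eq!(get_id_from_image("https://example.org/no-index"), None);
}
```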
22acf1d1378fc0d662d5dbac45cf0e621c46c70d
10,074
#![allow(non_snake_case, non_upper_case_globals)] #![allow(non_camel_case_types)] //! Serial peripheral interface/Inter-IC sound //! //! Used by: stm32g431, stm32g441 #[cfg(not(feature = "nosync"))] pub use crate::stm32g4::peripherals::spi::Instance; pub use crate::stm32g4::peripherals::spi::{RegisterBlock, ResetValues}; pub use crate::stm32g4::peripherals::spi::{ CR1, CR2, CRCPR, DR, I2SCFGR, I2SPR, RXCRCR, SR, TXCRCR, }; /// Access functions for the SPI1 peripheral instance pub mod SPI1 { use super::ResetValues; #[cfg(not(feature = "nosync"))] use super::Instance; #[cfg(not(feature = "nosync"))] const INSTANCE: Instance = Instance { addr: 0x40013000, _marker: ::core::marker::PhantomData, }; /// Reset values for each field in SPI1 pub const reset: ResetValues = ResetValues { CR1: 0x00000000, CR2: 0x00000700, SR: 0x00000002, DR: 0x00000000, CRCPR: 0x00000007, RXCRCR: 0x00000000, TXCRCR: 0x00000000, I2SCFGR: 0x00000000, I2SPR: 0x00000002, }; #[cfg(not(feature = "nosync"))] #[allow(renamed_and_removed_lints)] #[allow(private_no_mangle_statics)] #[no_mangle] static mut SPI1_TAKEN: bool = false; /// Safe access to SPI1 /// /// This function returns `Some(Instance)` if this instance is not /// currently taken, and `None` if it is. This ensures that if you /// do get `Some(Instance)`, you are ensured unique access to /// the peripheral and there cannot be data races (unless other /// code uses `unsafe`, of course). You can then pass the /// `Instance` around to other functions as required. When you're /// done with it, you can call `release(instance)` to return it. /// /// `Instance` itself dereferences to a `RegisterBlock`, which /// provides access to the peripheral's registers. #[cfg(not(feature = "nosync"))] #[inline] pub fn take() -> Option<Instance> { external_cortex_m::interrupt::free(|_| unsafe { if SPI1_TAKEN { None } else { SPI1_TAKEN = true; Some(INSTANCE) } }) } /// Release exclusive access to SPI1 /// /// This function allows you to return an `Instance` so that it /// is available to `take()` again. This function will panic if /// you return a different `Instance` or if this instance is not /// already taken. #[cfg(not(feature = "nosync"))] #[inline] pub fn release(inst: Instance) { external_cortex_m::interrupt::free(|_| unsafe { if SPI1_TAKEN && inst.addr == INSTANCE.addr { SPI1_TAKEN = false; } else { panic!("Released a peripheral which was not taken"); } }); } /// Unsafely steal SPI1 /// /// This function is similar to take() but forcibly takes the /// Instance, marking it as taken irregardless of its previous /// state. #[cfg(not(feature = "nosync"))] #[inline] pub unsafe fn steal() -> Instance { SPI1_TAKEN = true; INSTANCE } } /// Raw pointer to SPI1 /// /// Dereferencing this is unsafe because you are not ensured unique /// access to the peripheral, so you may encounter data races with /// other users of this peripheral. It is up to you to ensure you /// will not cause data races. /// /// This constant is provided for ease of use in unsafe code: you can /// simply call for example `write_reg!(gpio, GPIOA, ODR, 1);`. 
pub const SPI1: *const RegisterBlock = 0x40013000 as *const _; /// Access functions for the SPI2 peripheral instance pub mod SPI2 { use super::ResetValues; #[cfg(not(feature = "nosync"))] use super::Instance; #[cfg(not(feature = "nosync"))] const INSTANCE: Instance = Instance { addr: 0x40003800, _marker: ::core::marker::PhantomData, }; /// Reset values for each field in SPI2 pub const reset: ResetValues = ResetValues { CR1: 0x00000000, CR2: 0x00000700, SR: 0x00000002, DR: 0x00000000, CRCPR: 0x00000007, RXCRCR: 0x00000000, TXCRCR: 0x00000000, I2SCFGR: 0x00000000, I2SPR: 0x00000002, }; #[cfg(not(feature = "nosync"))] #[allow(renamed_and_removed_lints)] #[allow(private_no_mangle_statics)] #[no_mangle] static mut SPI2_TAKEN: bool = false; /// Safe access to SPI2 /// /// This function returns `Some(Instance)` if this instance is not /// currently taken, and `None` if it is. This ensures that if you /// do get `Some(Instance)`, you are ensured unique access to /// the peripheral and there cannot be data races (unless other /// code uses `unsafe`, of course). You can then pass the /// `Instance` around to other functions as required. When you're /// done with it, you can call `release(instance)` to return it. /// /// `Instance` itself dereferences to a `RegisterBlock`, which /// provides access to the peripheral's registers. #[cfg(not(feature = "nosync"))] #[inline] pub fn take() -> Option<Instance> { external_cortex_m::interrupt::free(|_| unsafe { if SPI2_TAKEN { None } else { SPI2_TAKEN = true; Some(INSTANCE) } }) } /// Release exclusive access to SPI2 /// /// This function allows you to return an `Instance` so that it /// is available to `take()` again. This function will panic if /// you return a different `Instance` or if this instance is not /// already taken. #[cfg(not(feature = "nosync"))] #[inline] pub fn release(inst: Instance) { external_cortex_m::interrupt::free(|_| unsafe { if SPI2_TAKEN && inst.addr == INSTANCE.addr { SPI2_TAKEN = false; } else { panic!("Released a peripheral which was not taken"); } }); } /// Unsafely steal SPI2 /// /// This function is similar to take() but forcibly takes the /// Instance, marking it as taken irregardless of its previous /// state. #[cfg(not(feature = "nosync"))] #[inline] pub unsafe fn steal() -> Instance { SPI2_TAKEN = true; INSTANCE } } /// Raw pointer to SPI2 /// /// Dereferencing this is unsafe because you are not ensured unique /// access to the peripheral, so you may encounter data races with /// other users of this peripheral. It is up to you to ensure you /// will not cause data races. /// /// This constant is provided for ease of use in unsafe code: you can /// simply call for example `write_reg!(gpio, GPIOA, ODR, 1);`. 
pub const SPI2: *const RegisterBlock = 0x40003800 as *const _; /// Access functions for the SPI3 peripheral instance pub mod SPI3 { use super::ResetValues; #[cfg(not(feature = "nosync"))] use super::Instance; #[cfg(not(feature = "nosync"))] const INSTANCE: Instance = Instance { addr: 0x40003c00, _marker: ::core::marker::PhantomData, }; /// Reset values for each field in SPI3 pub const reset: ResetValues = ResetValues { CR1: 0x00000000, CR2: 0x00000700, SR: 0x00000002, DR: 0x00000000, CRCPR: 0x00000007, RXCRCR: 0x00000000, TXCRCR: 0x00000000, I2SCFGR: 0x00000000, I2SPR: 0x00000002, }; #[cfg(not(feature = "nosync"))] #[allow(renamed_and_removed_lints)] #[allow(private_no_mangle_statics)] #[no_mangle] static mut SPI3_TAKEN: bool = false; /// Safe access to SPI3 /// /// This function returns `Some(Instance)` if this instance is not /// currently taken, and `None` if it is. This ensures that if you /// do get `Some(Instance)`, you are ensured unique access to /// the peripheral and there cannot be data races (unless other /// code uses `unsafe`, of course). You can then pass the /// `Instance` around to other functions as required. When you're /// done with it, you can call `release(instance)` to return it. /// /// `Instance` itself dereferences to a `RegisterBlock`, which /// provides access to the peripheral's registers. #[cfg(not(feature = "nosync"))] #[inline] pub fn take() -> Option<Instance> { external_cortex_m::interrupt::free(|_| unsafe { if SPI3_TAKEN { None } else { SPI3_TAKEN = true; Some(INSTANCE) } }) } /// Release exclusive access to SPI3 /// /// This function allows you to return an `Instance` so that it /// is available to `take()` again. This function will panic if /// you return a different `Instance` or if this instance is not /// already taken. #[cfg(not(feature = "nosync"))] #[inline] pub fn release(inst: Instance) { external_cortex_m::interrupt::free(|_| unsafe { if SPI3_TAKEN && inst.addr == INSTANCE.addr { SPI3_TAKEN = false; } else { panic!("Released a peripheral which was not taken"); } }); } /// Unsafely steal SPI3 /// /// This function is similar to take() but forcibly takes the /// Instance, marking it as taken irregardless of its previous /// state. #[cfg(not(feature = "nosync"))] #[inline] pub unsafe fn steal() -> Instance { SPI3_TAKEN = true; INSTANCE } } /// Raw pointer to SPI3 /// /// Dereferencing this is unsafe because you are not ensured unique /// access to the peripheral, so you may encounter data races with /// other users of this peripheral. It is up to you to ensure you /// will not cause data races. /// /// This constant is provided for ease of use in unsafe code: you can /// simply call for example `write_reg!(gpio, GPIOA, ODR, 1);`. pub const SPI3: *const RegisterBlock = 0x40003c00 as *const _;
32.496774
71
0.608497
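The `take`/`release`/`steal` functions above implement a runtime singleton: a global flag guards a zero-sized `Instance` token so that at most one owner exists at a time. The sketch below is a host-runnable re-implementation of that protocol for illustration only; it substitutes an `AtomicBool` for the `interrupt::free` critical section used on the actual Cortex-M target, and the `Instance`/`take`/`release` names mirror the file's API rather than coming from any real crate.

```rust
use std::sync::atomic::{AtomicBool, Ordering};

/// Zero-sized token proving exclusive access to a (mock) peripheral.
struct Instance {
    _private: (),
}

static TAKEN: AtomicBool = AtomicBool::new(false);

/// Mirrors `SPI1::take`: hands out the singleton only once until released.
fn take() -> Option<Instance> {
    // swap returns the previous value; if it was already true, someone owns it.
    if TAKEN.swap(true, Ordering::AcqRel) {
        None
    } else {
        Some(Instance { _private: () })
    }
}

/// Mirrors `SPI1::release`: returning the token makes `take` succeed again.
fn release(_inst: Instance) {
    TAKEN.store(false, Ordering::Release);
}

fn main() {
    let first = take().expect("first take succeeds");
    assert!(take().is_none()); // a second take is refused while `first` is live
    release(first);
    assert!(take().is_some()); // available again after release
}
```

The same reasoning explains why `steal` is `unsafe` in the file above: it sets the flag unconditionally and can therefore alias an `Instance` that is still live elsewhere.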
330e1211af2b9e979f9af8400c1607d7fafcd229
243
// functions5.rs // Make me compile! Execute `rustlings hint functions5` for hints :) fn main() { let answer = square(3); println!("The answer is {}", answer); } fn square(num: i32) -> i32 { let result = num * num; result }
17.357143
68
0.604938
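The fix in the exercise above hinges on Rust's implicit return: the final expression of a function body, written without a trailing semicolon, becomes the function's value (a trailing semicolon would turn it into a statement of type `()` and break compilation against the declared `-> i32`). A short sketch contrasting the two equivalent spellings:

```rust
// Tail expression: no semicolon, so `num * num` is the return value.
fn square_tail(num: i32) -> i32 {
    num * num
}

// Explicit `return` statement; behaves identically.
fn square_explicit(num: i32) -> i32 {
    return num * num;
}

fn main() {
    assert_eq!(square_tail(3), 9);
    assert_eq!(square_explicit(3), 9);
}
```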
ef747638e33ec83459e4b9f9fe02d052e41c4812
50,465
// Format list-like macro invocations. These are invocations whose token trees // can be interpreted as expressions and separated by commas. // Note that these token trees do not actually have to be interpreted as // expressions by the compiler. An example of an invocation we would reformat is // foo!( x, y, z ). The token x may represent an identifier in the code, but we // interpreted as an expression. // Macro uses which are not-list like, such as bar!(key => val), will not be // reformatted. // List-like invocations with parentheses will be formatted as function calls, // and those with brackets will be formatted as array literals. use std::collections::HashMap; use std::panic::{catch_unwind, AssertUnwindSafe}; use rustc_ast::token::{BinOpToken, DelimToken, Token, TokenKind}; use rustc_ast::tokenstream::{Cursor, Spacing, TokenStream, TokenTree}; use rustc_ast::{ast, ptr}; use rustc_ast_pretty::pprust; use rustc_parse::parser::{ForceCollect, Parser}; use rustc_parse::{stream_to_parser, MACRO_ARGUMENTS}; use rustc_span::{ symbol::{self, kw}, BytePos, Span, Symbol, DUMMY_SP, }; use crate::comment::{ contains_comment, CharClasses, FindUncommented, FullCodeCharKind, LineClasses, }; use crate::config::lists::*; use crate::expr::rewrite_array; use crate::lists::{itemize_list, write_list, ListFormatting}; use crate::overflow; use crate::rewrite::{Rewrite, RewriteContext}; use crate::shape::{Indent, Shape}; use crate::source_map::SpanUtils; use crate::spanned::Spanned; use crate::utils::{ format_visibility, indent_next_line, is_empty_line, mk_sp, remove_trailing_white_spaces, rewrite_ident, trim_left_preserve_layout, wrap_str, NodeIdExt, }; use crate::visitor::FmtVisitor; const FORCED_BRACKET_MACROS: &[&str] = &["vec!"]; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub(crate) enum MacroPosition { Item, Statement, Expression, Pat, } #[derive(Debug)] pub(crate) enum MacroArg { Expr(ptr::P<ast::Expr>), Ty(ptr::P<ast::Ty>), Pat(ptr::P<ast::Pat>), Item(ptr::P<ast::Item>), Keyword(symbol::Ident, Span), } impl MacroArg { fn is_item(&self) -> bool { match self { MacroArg::Item(..) => true, _ => false, } } } impl Rewrite for ast::Item { fn rewrite(&self, context: &RewriteContext<'_>, shape: Shape) -> Option<String> { let mut visitor = crate::visitor::FmtVisitor::from_context(context); visitor.block_indent = shape.indent; visitor.last_pos = self.span().lo(); visitor.visit_item(self); Some(visitor.buffer.to_owned()) } } impl Rewrite for MacroArg { fn rewrite(&self, context: &RewriteContext<'_>, shape: Shape) -> Option<String> { match *self { MacroArg::Expr(ref expr) => expr.rewrite(context, shape), MacroArg::Ty(ref ty) => ty.rewrite(context, shape), MacroArg::Pat(ref pat) => pat.rewrite(context, shape), MacroArg::Item(ref item) => item.rewrite(context, shape), MacroArg::Keyword(ident, _) => Some(ident.name.to_string()), } } } fn build_parser<'a>(context: &RewriteContext<'a>, cursor: Cursor) -> Parser<'a> { stream_to_parser( context.parse_sess.inner(), cursor.collect(), MACRO_ARGUMENTS, ) } fn parse_macro_arg<'a, 'b: 'a>(parser: &'a mut Parser<'b>) -> Option<MacroArg> { macro_rules! parse_macro_arg { ($macro_arg:ident, $parser:expr, $f:expr) => { let mut cloned_parser = (*parser).clone(); match $parser(&mut cloned_parser) { Ok(x) => { if parser.sess.span_diagnostic.has_errors() { parser.sess.span_diagnostic.reset_err_count(); } else { // Parsing succeeded. 
*parser = cloned_parser; return Some(MacroArg::$macro_arg($f(x)?)); } } Err(mut e) => { e.cancel(); parser.sess.span_diagnostic.reset_err_count(); } } }; } parse_macro_arg!( Expr, |parser: &mut rustc_parse::parser::Parser<'b>| parser.parse_expr(), |x: ptr::P<ast::Expr>| Some(x) ); parse_macro_arg!( Ty, |parser: &mut rustc_parse::parser::Parser<'b>| parser.parse_ty(), |x: ptr::P<ast::Ty>| Some(x) ); parse_macro_arg!( Pat, |parser: &mut rustc_parse::parser::Parser<'b>| parser.parse_pat_no_top_alt(None), |x: ptr::P<ast::Pat>| Some(x) ); // `parse_item` returns `Option<ptr::P<ast::Item>>`. parse_macro_arg!( Item, |parser: &mut rustc_parse::parser::Parser<'b>| parser.parse_item(ForceCollect::No), |x: Option<ptr::P<ast::Item>>| x ); None } /// Rewrite macro name without using pretty-printer if possible. fn rewrite_macro_name( context: &RewriteContext<'_>, path: &ast::Path, extra_ident: Option<symbol::Ident>, ) -> String { let name = if path.segments.len() == 1 { // Avoid using pretty-printer in the common case. format!("{}!", rewrite_ident(context, path.segments[0].ident)) } else { format!("{}!", pprust::path_to_string(path)) }; match extra_ident { Some(ident) if ident.name != kw::Empty => format!("{} {}", name, ident), _ => name, } } // Use this on failing to format the macro call. fn return_macro_parse_failure_fallback( context: &RewriteContext<'_>, indent: Indent, span: Span, ) -> Option<String> { // Mark this as a failure however we format it context.macro_rewrite_failure.replace(true); // Heuristically determine whether the last line of the macro uses "Block" style // rather than using "Visual" style, or another indentation style. let is_like_block_indent_style = context .snippet(span) .lines() .last() .map(|closing_line| { closing_line .trim() .chars() .all(|ch| matches!(ch, '}' | ')' | ']')) }) .unwrap_or(false); if is_like_block_indent_style { return trim_left_preserve_layout(context.snippet(span), indent, context.config); } context.skipped_range.borrow_mut().push(( context.parse_sess.line_of_byte_pos(span.lo()), context.parse_sess.line_of_byte_pos(span.hi()), )); // Return the snippet unmodified if the macro is not block-like Some(context.snippet(span).to_owned()) } pub(crate) fn rewrite_macro( mac: &ast::MacCall, extra_ident: Option<symbol::Ident>, context: &RewriteContext<'_>, shape: Shape, position: MacroPosition, ) -> Option<String> { let should_skip = context .skip_context .skip_macro(&context.snippet(mac.path.span).to_owned()); if should_skip { None } else { let guard = context.enter_macro(); let result = catch_unwind(AssertUnwindSafe(|| { rewrite_macro_inner( mac, extra_ident, context, shape, position, guard.is_nested(), ) })); match result { Err(..) 
| Ok(None) => { context.macro_rewrite_failure.replace(true); None } Ok(rw) => rw, } } } fn check_keyword<'a, 'b: 'a>(parser: &'a mut Parser<'b>) -> Option<MacroArg> { for &keyword in RUST_KW.iter() { if parser.token.is_keyword(keyword) && parser.look_ahead(1, |t| { t.kind == TokenKind::Eof || t.kind == TokenKind::Comma || t.kind == TokenKind::CloseDelim(DelimToken::NoDelim) }) { parser.bump(); return Some(MacroArg::Keyword( symbol::Ident::with_dummy_span(keyword), parser.prev_token.span, )); } } None } fn rewrite_macro_inner( mac: &ast::MacCall, extra_ident: Option<symbol::Ident>, context: &RewriteContext<'_>, shape: Shape, position: MacroPosition, is_nested_macro: bool, ) -> Option<String> { if context.config.use_try_shorthand() { if let Some(expr) = convert_try_mac(mac, context) { context.leave_macro(); return expr.rewrite(context, shape); } } let original_style = macro_style(mac, context); let macro_name = rewrite_macro_name(context, &mac.path, extra_ident); let style = if FORCED_BRACKET_MACROS.contains(&&macro_name[..]) && !is_nested_macro { DelimToken::Bracket } else { original_style }; let ts = mac.args.inner_tokens(); let has_comment = contains_comment(context.snippet(mac.span())); if ts.is_empty() && !has_comment { return match style { DelimToken::Paren if position == MacroPosition::Item => { Some(format!("{}();", macro_name)) } DelimToken::Bracket if position == MacroPosition::Item => { Some(format!("{}[];", macro_name)) } DelimToken::Paren => Some(format!("{}()", macro_name)), DelimToken::Bracket => Some(format!("{}[]", macro_name)), DelimToken::Brace => Some(format!("{} {{}}", macro_name)), _ => unreachable!(), }; } // Format well-known macros which cannot be parsed as a valid AST. if macro_name == "lazy_static!" && !has_comment { if let success @ Some(..) 
= format_lazy_static(context, shape, &ts) { return success; } } let mut parser = build_parser(context, ts.trees()); let mut arg_vec = Vec::new(); let mut vec_with_semi = false; let mut trailing_comma = false; if DelimToken::Brace != style { loop { if let Some(arg) = check_keyword(&mut parser) { arg_vec.push(arg); } else if let Some(arg) = parse_macro_arg(&mut parser) { arg_vec.push(arg); } else { return return_macro_parse_failure_fallback(context, shape.indent, mac.span()); } match parser.token.kind { TokenKind::Eof => break, TokenKind::Comma => (), TokenKind::Semi => { // Try to parse `vec![expr; expr]` if FORCED_BRACKET_MACROS.contains(&&macro_name[..]) { parser.bump(); if parser.token.kind != TokenKind::Eof { match parse_macro_arg(&mut parser) { Some(arg) => { arg_vec.push(arg); parser.bump(); if parser.token.kind == TokenKind::Eof && arg_vec.len() == 2 { vec_with_semi = true; break; } } None => { return return_macro_parse_failure_fallback( context, shape.indent, mac.span(), ); } } } } return return_macro_parse_failure_fallback(context, shape.indent, mac.span()); } _ if arg_vec.last().map_or(false, MacroArg::is_item) => continue, _ => return return_macro_parse_failure_fallback(context, shape.indent, mac.span()), } parser.bump(); if parser.token.kind == TokenKind::Eof { trailing_comma = true; break; } } } if !arg_vec.is_empty() && arg_vec.iter().all(MacroArg::is_item) { return rewrite_macro_with_items( context, &arg_vec, &macro_name, shape, style, position, mac.span(), ); } match style { DelimToken::Paren => { // Handle special case: `vec!(expr; expr)` if vec_with_semi { handle_vec_semi(context, shape, arg_vec, macro_name, style) } else { // Format macro invocation as function call, preserve the trailing // comma because not all macros support them. overflow::rewrite_with_parens( context, &macro_name, arg_vec.iter(), shape, mac.span(), context.config.fn_call_width(), if trailing_comma { Some(SeparatorTactic::Always) } else { Some(SeparatorTactic::Never) }, ) .map(|rw| match position { MacroPosition::Item => format!("{};", rw), _ => rw, }) } } DelimToken::Bracket => { // Handle special case: `vec![expr; expr]` if vec_with_semi { handle_vec_semi(context, shape, arg_vec, macro_name, style) } else { // If we are rewriting `vec!` macro or other special macros, // then we can rewrite this as a usual array literal. // Otherwise, we must preserve the original existence of trailing comma. let macro_name = &macro_name.as_str(); let mut force_trailing_comma = if trailing_comma { Some(SeparatorTactic::Always) } else { Some(SeparatorTactic::Never) }; if FORCED_BRACKET_MACROS.contains(macro_name) && !is_nested_macro { context.leave_macro(); if context.use_block_indent() { force_trailing_comma = Some(SeparatorTactic::Vertical); }; } let rewrite = rewrite_array( macro_name, arg_vec.iter(), mac.span(), context, shape, force_trailing_comma, Some(original_style), )?; let comma = match position { MacroPosition::Item => ";", _ => "", }; Some(format!("{}{}", rewrite, comma)) } } DelimToken::Brace => { // For macro invocations with braces, always put a space between // the `macro_name!` and `{ /* macro_body */ }` but skip modifying // anything in between the braces (for now). 
let snippet = context.snippet(mac.span()).trim_start_matches(|c| c != '{'); match trim_left_preserve_layout(snippet, shape.indent, context.config) { Some(macro_body) => Some(format!("{} {}", macro_name, macro_body)), None => Some(format!("{} {}", macro_name, snippet)), } } _ => unreachable!(), } } fn handle_vec_semi( context: &RewriteContext<'_>, shape: Shape, arg_vec: Vec<MacroArg>, macro_name: String, delim_token: DelimToken, ) -> Option<String> { let (left, right) = match delim_token { DelimToken::Paren => ("(", ")"), DelimToken::Bracket => ("[", "]"), _ => unreachable!(), }; let mac_shape = shape.offset_left(macro_name.len())?; // 8 = `vec![]` + `; ` or `vec!()` + `; ` let total_overhead = 8; let nested_shape = mac_shape.block_indent(context.config.tab_spaces()); let lhs = arg_vec[0].rewrite(context, nested_shape)?; let rhs = arg_vec[1].rewrite(context, nested_shape)?; if !lhs.contains('\n') && !rhs.contains('\n') && lhs.len() + rhs.len() + total_overhead <= shape.width { // macro_name(lhs; rhs) or macro_name[lhs; rhs] Some(format!("{}{}{}; {}{}", macro_name, left, lhs, rhs, right)) } else { // macro_name(\nlhs;\nrhs\n) or macro_name[\nlhs;\nrhs\n] Some(format!( "{}{}{}{};{}{}{}{}", macro_name, left, nested_shape.indent.to_string_with_newline(context.config), lhs, nested_shape.indent.to_string_with_newline(context.config), rhs, shape.indent.to_string_with_newline(context.config), right )) } } pub(crate) fn rewrite_macro_def( context: &RewriteContext<'_>, shape: Shape, indent: Indent, def: &ast::MacroDef, ident: symbol::Ident, vis: &ast::Visibility, span: Span, ) -> Option<String> { let snippet = Some(remove_trailing_white_spaces(context.snippet(span))); if snippet.as_ref().map_or(true, |s| s.ends_with(';')) { return snippet; } let ts = def.body.inner_tokens(); let mut parser = MacroParser::new(ts.into_trees()); let parsed_def = match parser.parse() { Some(def) => def, None => return snippet, }; let mut result = if def.macro_rules { String::from("macro_rules!") } else { format!("{}macro", format_visibility(context, vis)) }; result += " "; result += rewrite_ident(context, ident); let multi_branch_style = def.macro_rules || parsed_def.branches.len() != 1; let arm_shape = if multi_branch_style { shape .block_indent(context.config.tab_spaces()) .with_max_width(context.config) } else { shape }; let branch_items = itemize_list( context.snippet_provider, parsed_def.branches.iter(), "}", ";", |branch| branch.span.lo(), |branch| branch.span.hi(), |branch| match branch.rewrite(context, arm_shape, multi_branch_style) { Some(v) => Some(v), // if the rewrite returned None because a macro could not be rewritten, then return the // original body None if context.macro_rewrite_failure.get() => { Some(context.snippet(branch.body).trim().to_string()) } None => None, }, context.snippet_provider.span_after(span, "{"), span.hi(), false, ) .collect::<Vec<_>>(); let fmt = ListFormatting::new(arm_shape, context.config) .separator(if def.macro_rules { ";" } else { "" }) .trailing_separator(SeparatorTactic::Always) .preserve_newline(true); if multi_branch_style { result += " {"; result += &arm_shape.indent.to_string_with_newline(context.config); } match write_list(&branch_items, &fmt) { Some(ref s) => result += s, None => return snippet, } if multi_branch_style { result += &indent.to_string_with_newline(context.config); result += "}"; } Some(result) } fn register_metavariable( map: &mut HashMap<String, String>, result: &mut String, name: &str, dollar_count: usize, ) { let mut new_name = "$".repeat(dollar_count - 
1); let mut old_name = "$".repeat(dollar_count); new_name.push('z'); new_name.push_str(name); old_name.push_str(name); result.push_str(&new_name); map.insert(old_name, new_name); } // Replaces `$foo` with `zfoo`. We must check for name overlap to ensure we // aren't causing problems. // This should also work for escaped `$` variables, where we leave earlier `$`s. fn replace_names(input: &str) -> Option<(String, HashMap<String, String>)> { // Each substitution will require five or six extra bytes. let mut result = String::with_capacity(input.len() + 64); let mut substs = HashMap::new(); let mut dollar_count = 0; let mut cur_name = String::new(); for (kind, c) in CharClasses::new(input.chars()) { if kind != FullCodeCharKind::Normal { result.push(c); } else if c == '$' { dollar_count += 1; } else if dollar_count == 0 { result.push(c); } else if !c.is_alphanumeric() && !cur_name.is_empty() { // Terminates a name following one or more dollars. register_metavariable(&mut substs, &mut result, &cur_name, dollar_count); result.push(c); dollar_count = 0; cur_name.clear(); } else if c == '(' && cur_name.is_empty() { // FIXME: Support macro def with repeat. return None; } else if c.is_alphanumeric() || c == '_' { cur_name.push(c); } } if !cur_name.is_empty() { register_metavariable(&mut substs, &mut result, &cur_name, dollar_count); } debug!("replace_names `{}` {:?}", result, substs); Some((result, substs)) } #[derive(Debug, Clone)] enum MacroArgKind { /// e.g., `$x: expr`. MetaVariable(Symbol, String), /// e.g., `$($foo: expr),*` Repeat( /// `()`, `[]` or `{}`. DelimToken, /// Inner arguments inside delimiters. Vec<ParsedMacroArg>, /// Something after the closing delimiter and the repeat token, if available. Option<Box<ParsedMacroArg>>, /// The repeat token. This could be one of `*`, `+` or `?`. Token, ), /// e.g., `[derive(Debug)]` Delimited(DelimToken, Vec<ParsedMacroArg>), /// A possible separator. e.g., `,` or `;`. Separator(String, String), /// Other random stuff that does not fit to other kinds. /// e.g., `== foo` in `($x: expr == foo)`. Other(String, String), } fn delim_token_to_str( context: &RewriteContext<'_>, delim_token: DelimToken, shape: Shape, use_multiple_lines: bool, inner_is_empty: bool, ) -> (String, String) { let (lhs, rhs) = match delim_token { DelimToken::Paren => ("(", ")"), DelimToken::Bracket => ("[", "]"), DelimToken::Brace => { if inner_is_empty || use_multiple_lines { ("{", "}") } else { ("{ ", " }") } } DelimToken::NoDelim => ("", ""), }; if use_multiple_lines { let indent_str = shape.indent.to_string_with_newline(context.config); let nested_indent_str = shape .indent .block_indent(context.config) .to_string_with_newline(context.config); ( format!("{}{}", lhs, nested_indent_str), format!("{}{}", indent_str, rhs), ) } else { (lhs.to_owned(), rhs.to_owned()) } } impl MacroArgKind { fn starts_with_brace(&self) -> bool { matches!( *self, MacroArgKind::Repeat(DelimToken::Brace, _, _, _) | MacroArgKind::Delimited(DelimToken::Brace, _) ) } fn starts_with_dollar(&self) -> bool { matches!( *self, MacroArgKind::Repeat(..) | MacroArgKind::MetaVariable(..) ) } fn ends_with_space(&self) -> bool { matches!(*self, MacroArgKind::Separator(..)) } fn has_meta_var(&self) -> bool { match *self { MacroArgKind::MetaVariable(..) 
=> true, MacroArgKind::Repeat(_, ref args, _, _) => args.iter().any(|a| a.kind.has_meta_var()), _ => false, } } fn rewrite( &self, context: &RewriteContext<'_>, shape: Shape, use_multiple_lines: bool, ) -> Option<String> { let rewrite_delimited_inner = |delim_tok, args| -> Option<(String, String, String)> { let inner = wrap_macro_args(context, args, shape)?; let (lhs, rhs) = delim_token_to_str(context, delim_tok, shape, false, inner.is_empty()); if lhs.len() + inner.len() + rhs.len() <= shape.width { return Some((lhs, inner, rhs)); } let (lhs, rhs) = delim_token_to_str(context, delim_tok, shape, true, false); let nested_shape = shape .block_indent(context.config.tab_spaces()) .with_max_width(context.config); let inner = wrap_macro_args(context, args, nested_shape)?; Some((lhs, inner, rhs)) }; match *self { MacroArgKind::MetaVariable(ty, ref name) => Some(format!("${}:{}", name, ty)), MacroArgKind::Repeat(delim_tok, ref args, ref another, ref tok) => { let (lhs, inner, rhs) = rewrite_delimited_inner(delim_tok, args)?; let another = another .as_ref() .and_then(|a| a.rewrite(context, shape, use_multiple_lines)) .unwrap_or_else(|| "".to_owned()); let repeat_tok = pprust::token_to_string(tok); Some(format!("${}{}{}{}{}", lhs, inner, rhs, another, repeat_tok)) } MacroArgKind::Delimited(delim_tok, ref args) => { rewrite_delimited_inner(delim_tok, args) .map(|(lhs, inner, rhs)| format!("{}{}{}", lhs, inner, rhs)) } MacroArgKind::Separator(ref sep, ref prefix) => Some(format!("{}{} ", prefix, sep)), MacroArgKind::Other(ref inner, ref prefix) => Some(format!("{}{}", prefix, inner)), } } } #[derive(Debug, Clone)] struct ParsedMacroArg { kind: MacroArgKind, } impl ParsedMacroArg { fn rewrite( &self, context: &RewriteContext<'_>, shape: Shape, use_multiple_lines: bool, ) -> Option<String> { self.kind.rewrite(context, shape, use_multiple_lines) } } /// Parses macro arguments on macro def. struct MacroArgParser { /// Either a name of the next metavariable, a separator, or junk. buf: String, /// The first token of the current buffer. start_tok: Token, /// `true` if we are parsing a metavariable or a repeat. is_meta_var: bool, /// The last token parsed. last_tok: Token, /// Holds the parsed arguments. result: Vec<ParsedMacroArg>, } fn last_tok(tt: &TokenTree) -> Token { match *tt { TokenTree::Token(ref t) => t.clone(), TokenTree::Delimited(delim_span, delim, _) => Token { kind: TokenKind::CloseDelim(delim), span: delim_span.close, }, } } impl MacroArgParser { fn new() -> MacroArgParser { MacroArgParser { buf: String::new(), is_meta_var: false, last_tok: Token { kind: TokenKind::Eof, span: DUMMY_SP, }, start_tok: Token { kind: TokenKind::Eof, span: DUMMY_SP, }, result: vec![], } } fn set_last_tok(&mut self, tok: &TokenTree) { self.last_tok = last_tok(tok); } fn add_separator(&mut self) { let prefix = if self.need_space_prefix() { " ".to_owned() } else { "".to_owned() }; self.result.push(ParsedMacroArg { kind: MacroArgKind::Separator(self.buf.clone(), prefix), }); self.buf.clear(); } fn add_other(&mut self) { let prefix = if self.need_space_prefix() { " ".to_owned() } else { "".to_owned() }; self.result.push(ParsedMacroArg { kind: MacroArgKind::Other(self.buf.clone(), prefix), }); self.buf.clear(); } fn add_meta_variable(&mut self, iter: &mut Cursor) -> Option<()> { match iter.next() { Some(TokenTree::Token(Token { kind: TokenKind::Ident(name, _), .. 
})) => { self.result.push(ParsedMacroArg { kind: MacroArgKind::MetaVariable(name, self.buf.clone()), }); self.buf.clear(); self.is_meta_var = false; Some(()) } _ => None, } } fn add_delimited(&mut self, inner: Vec<ParsedMacroArg>, delim: DelimToken) { self.result.push(ParsedMacroArg { kind: MacroArgKind::Delimited(delim, inner), }); } // $($foo: expr),? fn add_repeat( &mut self, inner: Vec<ParsedMacroArg>, delim: DelimToken, iter: &mut Cursor, ) -> Option<()> { let mut buffer = String::new(); let mut first = true; // Parse '*', '+' or '?. for tok in iter { self.set_last_tok(&tok); if first { first = false; } match tok { TokenTree::Token(Token { kind: TokenKind::BinOp(BinOpToken::Plus), .. }) | TokenTree::Token(Token { kind: TokenKind::Question, .. }) | TokenTree::Token(Token { kind: TokenKind::BinOp(BinOpToken::Star), .. }) => { break; } TokenTree::Token(ref t) => { buffer.push_str(&pprust::token_to_string(t)); } _ => return None, } } // There could be some random stuff between ')' and '*', '+' or '?'. let another = if buffer.trim().is_empty() { None } else { Some(Box::new(ParsedMacroArg { kind: MacroArgKind::Other(buffer, "".to_owned()), })) }; self.result.push(ParsedMacroArg { kind: MacroArgKind::Repeat(delim, inner, another, self.last_tok.clone()), }); Some(()) } fn update_buffer(&mut self, t: &Token) { if self.buf.is_empty() { self.start_tok = t.clone(); } else { let needs_space = match next_space(&self.last_tok.kind) { SpaceState::Ident => ident_like(t), SpaceState::Punctuation => !ident_like(t), SpaceState::Always => true, SpaceState::Never => false, }; if force_space_before(&t.kind) || needs_space { self.buf.push(' '); } } self.buf.push_str(&pprust::token_to_string(t)); } fn need_space_prefix(&self) -> bool { if self.result.is_empty() { return false; } let last_arg = self.result.last().unwrap(); if let MacroArgKind::MetaVariable(..) = last_arg.kind { if ident_like(&self.start_tok) { return true; } if self.start_tok.kind == TokenKind::Colon { return true; } } if force_space_before(&self.start_tok.kind) { return true; } false } /// Returns a collection of parsed macro def's arguments. fn parse(mut self, tokens: TokenStream) -> Option<Vec<ParsedMacroArg>> { let mut iter = tokens.trees(); while let Some(tok) = iter.next() { match tok { TokenTree::Token(Token { kind: TokenKind::Dollar, span, }) => { // We always want to add a separator before meta variables. if !self.buf.is_empty() { self.add_separator(); } // Start keeping the name of this metavariable in the buffer. self.is_meta_var = true; self.start_tok = Token { kind: TokenKind::Dollar, span, }; } TokenTree::Token(Token { kind: TokenKind::Colon, .. }) if self.is_meta_var => { self.add_meta_variable(&mut iter)?; } TokenTree::Token(ref t) => self.update_buffer(t), TokenTree::Delimited(_delimited_span, delimited, ref tts) => { if !self.buf.is_empty() { if next_space(&self.last_tok.kind) == SpaceState::Always { self.add_separator(); } else { self.add_other(); } } // Parse the stuff inside delimiters. let parser = MacroArgParser::new(); let delimited_arg = parser.parse(tts.clone())?; if self.is_meta_var { self.add_repeat(delimited_arg, delimited, &mut iter)?; self.is_meta_var = false; } else { self.add_delimited(delimited_arg, delimited); } } } self.set_last_tok(&tok); } // We are left with some stuff in the buffer. Since there is nothing // left to separate, add this as `Other`. 
if !self.buf.is_empty() { self.add_other(); } Some(self.result) } } fn wrap_macro_args( context: &RewriteContext<'_>, args: &[ParsedMacroArg], shape: Shape, ) -> Option<String> { wrap_macro_args_inner(context, args, shape, false) .or_else(|| wrap_macro_args_inner(context, args, shape, true)) } fn wrap_macro_args_inner( context: &RewriteContext<'_>, args: &[ParsedMacroArg], shape: Shape, use_multiple_lines: bool, ) -> Option<String> { let mut result = String::with_capacity(128); let mut iter = args.iter().peekable(); let indent_str = shape.indent.to_string_with_newline(context.config); while let Some(arg) = iter.next() { result.push_str(&arg.rewrite(context, shape, use_multiple_lines)?); if use_multiple_lines && (arg.kind.ends_with_space() || iter.peek().map_or(false, |a| a.kind.has_meta_var())) { if arg.kind.ends_with_space() { result.pop(); } result.push_str(&indent_str); } else if let Some(next_arg) = iter.peek() { let space_before_dollar = !arg.kind.ends_with_space() && next_arg.kind.starts_with_dollar(); let space_before_brace = next_arg.kind.starts_with_brace(); if space_before_dollar || space_before_brace { result.push(' '); } } } if !use_multiple_lines && result.len() >= shape.width { None } else { Some(result) } } // This is a bit sketchy. The token rules probably need tweaking, but it works // for some common cases. I hope the basic logic is sufficient. Note that the // meaning of some tokens is a bit different here from usual Rust, e.g., `*` // and `(`/`)` have special meaning. // // We always try and format on one line. // FIXME: Use multi-line when every thing does not fit on one line. fn format_macro_args( context: &RewriteContext<'_>, token_stream: TokenStream, shape: Shape, ) -> Option<String> { if !context.config.format_macro_matchers() { let span = span_for_token_stream(&token_stream); return Some(match span { Some(span) => context.snippet(span).to_owned(), None => String::new(), }); } let parsed_args = MacroArgParser::new().parse(token_stream)?; wrap_macro_args(context, &parsed_args, shape) } fn span_for_token_stream(token_stream: &TokenStream) -> Option<Span> { token_stream.trees().next().map(|tt| tt.span()) } // We should insert a space if the next token is a: #[derive(Copy, Clone, PartialEq)] enum SpaceState { Never, Punctuation, Ident, // Or ident/literal-like thing. Always, } fn force_space_before(tok: &TokenKind) -> bool { debug!("tok: force_space_before {:?}", tok); match tok { TokenKind::Eq | TokenKind::Lt | TokenKind::Le | TokenKind::EqEq | TokenKind::Ne | TokenKind::Ge | TokenKind::Gt | TokenKind::AndAnd | TokenKind::OrOr | TokenKind::Not | TokenKind::Tilde | TokenKind::BinOpEq(_) | TokenKind::At | TokenKind::RArrow | TokenKind::LArrow | TokenKind::FatArrow | TokenKind::BinOp(_) | TokenKind::Pound | TokenKind::Dollar => true, _ => false, } } fn ident_like(tok: &Token) -> bool { matches!( tok.kind, TokenKind::Ident(..) | TokenKind::Literal(..) | TokenKind::Lifetime(_) ) } fn next_space(tok: &TokenKind) -> SpaceState { debug!("next_space: {:?}", tok); match tok { TokenKind::Not | TokenKind::BinOp(BinOpToken::And) | TokenKind::Tilde | TokenKind::At | TokenKind::Comma | TokenKind::Dot | TokenKind::DotDot | TokenKind::DotDotDot | TokenKind::DotDotEq | TokenKind::Question => SpaceState::Punctuation, TokenKind::ModSep | TokenKind::Pound | TokenKind::Dollar | TokenKind::OpenDelim(_) | TokenKind::CloseDelim(_) => SpaceState::Never, TokenKind::Literal(..) | TokenKind::Ident(..) 
| TokenKind::Lifetime(_) => SpaceState::Ident, _ => SpaceState::Always, } } /// Tries to convert a macro use into a short hand try expression. Returns `None` /// when the macro is not an instance of `try!` (or parsing the inner expression /// failed). pub(crate) fn convert_try_mac( mac: &ast::MacCall, context: &RewriteContext<'_>, ) -> Option<ast::Expr> { let path = &pprust::path_to_string(&mac.path); if path == "try" || path == "r#try" { let ts = mac.args.inner_tokens(); let mut parser = build_parser(context, ts.trees()); Some(ast::Expr { id: ast::NodeId::root(), // dummy value kind: ast::ExprKind::Try(parser.parse_expr().ok()?), span: mac.span(), // incorrect span, but shouldn't matter too much attrs: ast::AttrVec::new(), tokens: None, }) } else { None } } pub(crate) fn macro_style(mac: &ast::MacCall, context: &RewriteContext<'_>) -> DelimToken { let snippet = context.snippet(mac.span()); let paren_pos = snippet.find_uncommented("(").unwrap_or(usize::max_value()); let bracket_pos = snippet.find_uncommented("[").unwrap_or(usize::max_value()); let brace_pos = snippet.find_uncommented("{").unwrap_or(usize::max_value()); if paren_pos < bracket_pos && paren_pos < brace_pos { DelimToken::Paren } else if bracket_pos < brace_pos { DelimToken::Bracket } else { DelimToken::Brace } } // A very simple parser that just parses a macros 2.0 definition into its branches. // Currently we do not attempt to parse any further than that. #[derive(new)] struct MacroParser { toks: Cursor, } impl MacroParser { // (`(` ... `)` `=>` `{` ... `}`)* fn parse(&mut self) -> Option<Macro> { let mut branches = vec![]; while self.toks.look_ahead(1).is_some() { branches.push(self.parse_branch()?); } Some(Macro { branches }) } // `(` ... `)` `=>` `{` ... `}` fn parse_branch(&mut self) -> Option<MacroBranch> { let tok = self.toks.next()?; let (lo, args_paren_kind) = match tok { TokenTree::Token(..) => return None, TokenTree::Delimited(delimited_span, d, _) => (delimited_span.open.lo(), d), }; let args = TokenStream::new(vec![(tok, Spacing::Joint)]); match self.toks.next()? { TokenTree::Token(Token { kind: TokenKind::FatArrow, .. }) => {} _ => return None, } let (mut hi, body, whole_body) = match self.toks.next()? { TokenTree::Token(..) => return None, TokenTree::Delimited(delimited_span, ..) => { let data = delimited_span.entire().data(); ( data.hi, Span::new( data.lo + BytePos(1), data.hi - BytePos(1), data.ctxt, data.parent, ), delimited_span.entire(), ) } }; if let Some(TokenTree::Token(Token { kind: TokenKind::Semi, span, })) = self.toks.look_ahead(0) { hi = span.hi(); self.toks.next(); } Some(MacroBranch { span: mk_sp(lo, hi), args_paren_kind, args, body, whole_body, }) } } // A parsed macros 2.0 macro definition. struct Macro { branches: Vec<MacroBranch>, } // FIXME: it would be more efficient to use references to the token streams // rather than clone them, if we can make the borrowing work out. struct MacroBranch { span: Span, args_paren_kind: DelimToken, args: TokenStream, body: Span, whole_body: Span, } impl MacroBranch { fn rewrite( &self, context: &RewriteContext<'_>, shape: Shape, multi_branch_style: bool, ) -> Option<String> { // Only attempt to format function-like macros. if self.args_paren_kind != DelimToken::Paren { // FIXME(#1539): implement for non-sugared macros. 
return None; } // 5 = " => {" let mut result = format_macro_args(context, self.args.clone(), shape.sub_width(5)?)?; if multi_branch_style { result += " =>"; } if !context.config.format_macro_bodies() { result += " "; result += context.snippet(self.whole_body); return Some(result); } // The macro body is the most interesting part. It might end up as various // AST nodes, but also has special variables (e.g, `$foo`) which can't be // parsed as regular Rust code (and note that these can be escaped using // `$$`). We'll try and format like an AST node, but we'll substitute // variables for new names with the same length first. let old_body = context.snippet(self.body).trim(); let (body_str, substs) = replace_names(old_body)?; let has_block_body = old_body.starts_with('{'); let mut config = context.config.clone(); config.set().hide_parse_errors(true); result += " {"; let body_indent = if has_block_body { shape.indent } else { shape.indent.block_indent(&config) }; let new_width = config.max_width() - body_indent.width(); config.set().max_width(new_width); // First try to format as items, then as statements. let new_body_snippet = match crate::format_snippet(&body_str, &config, true) { Some(new_body) => new_body, None => { let new_width = new_width + config.tab_spaces(); config.set().max_width(new_width); match crate::format_code_block(&body_str, &config, true) { Some(new_body) => new_body, None => return None, } } }; let new_body = wrap_str( new_body_snippet.snippet.to_string(), config.max_width(), shape, )?; // Indent the body since it is in a block. let indent_str = body_indent.to_string(&config); let mut new_body = LineClasses::new(new_body.trim_end()) .enumerate() .fold( (String::new(), true), |(mut s, need_indent), (i, (kind, ref l))| { if !is_empty_line(l) && need_indent && !new_body_snippet.is_line_non_formatted(i + 1) { s += &indent_str; } (s + l + "\n", indent_next_line(kind, l, &config)) }, ) .0; // Undo our replacement of macro variables. // FIXME: this could be *much* more efficient. for (old, new) in &substs { if old_body.contains(new) { debug!("rewrite_macro_def: bailing matching variable: `{}`", new); return None; } new_body = new_body.replace(new, old); } if has_block_body { result += new_body.trim(); } else if !new_body.is_empty() { result += "\n"; result += &new_body; result += &shape.indent.to_string(&config); } result += "}"; Some(result) } } /// Format `lazy_static!` from <https://crates.io/crates/lazy_static>. /// /// # Expected syntax /// /// ```text /// lazy_static! { /// [pub] static ref NAME_1: TYPE_1 = EXPR_1; /// [pub] static ref NAME_2: TYPE_2 = EXPR_2; /// ... /// [pub] static ref NAME_N: TYPE_N = EXPR_N; /// } /// ``` fn format_lazy_static( context: &RewriteContext<'_>, shape: Shape, ts: &TokenStream, ) -> Option<String> { let mut result = String::with_capacity(1024); let mut parser = build_parser(context, ts.trees()); let nested_shape = shape .block_indent(context.config.tab_spaces()) .with_max_width(context.config); result.push_str("lazy_static! {"); result.push_str(&nested_shape.indent.to_string_with_newline(context.config)); macro_rules! parse_or { ($method:ident $(,)* $($arg:expr),* $(,)*) => { match parser.$method($($arg,)*) { Ok(val) => { if parser.sess.span_diagnostic.has_errors() { parser.sess.span_diagnostic.reset_err_count(); return None; } else { val } } Err(mut err) => { err.cancel(); parser.sess.span_diagnostic.reset_err_count(); return None; } } } } while parser.token.kind != TokenKind::Eof { // Parse a `lazy_static!` item. 
let vis = crate::utils::format_visibility( context, &parse_or!(parse_visibility, rustc_parse::parser::FollowedByType::No), ); parser.eat_keyword(kw::Static); parser.eat_keyword(kw::Ref); let id = parse_or!(parse_ident); parser.eat(&TokenKind::Colon); let ty = parse_or!(parse_ty); parser.eat(&TokenKind::Eq); let expr = parse_or!(parse_expr); parser.eat(&TokenKind::Semi); // Rewrite as a static item. let mut stmt = String::with_capacity(128); stmt.push_str(&format!( "{}static ref {}: {} =", vis, id, ty.rewrite(context, nested_shape)? )); result.push_str(&crate::expr::rewrite_assign_rhs( context, stmt, &*expr, nested_shape.sub_width(1)?, )?); result.push(';'); if parser.token.kind != TokenKind::Eof { result.push_str(&nested_shape.indent.to_string_with_newline(context.config)); } } result.push_str(&shape.indent.to_string_with_newline(context.config)); result.push('}'); Some(result) } fn rewrite_macro_with_items( context: &RewriteContext<'_>, items: &[MacroArg], macro_name: &str, shape: Shape, style: DelimToken, position: MacroPosition, span: Span, ) -> Option<String> { let (opener, closer) = match style { DelimToken::Paren => ("(", ")"), DelimToken::Bracket => ("[", "]"), DelimToken::Brace => (" {", "}"), _ => return None, }; let trailing_semicolon = match style { DelimToken::Paren | DelimToken::Bracket if position == MacroPosition::Item => ";", _ => "", }; let mut visitor = FmtVisitor::from_context(context); visitor.block_indent = shape.indent.block_indent(context.config); visitor.last_pos = context.snippet_provider.span_after(span, opener.trim()); for item in items { let item = match item { MacroArg::Item(item) => item, _ => return None, }; visitor.visit_item(item); } let mut result = String::with_capacity(256); result.push_str(macro_name); result.push_str(opener); result.push_str(&visitor.block_indent.to_string_with_newline(context.config)); result.push_str(visitor.buffer.trim()); result.push_str(&shape.indent.to_string_with_newline(context.config)); result.push_str(closer); result.push_str(trailing_semicolon); Some(result) } const RUST_KW: [Symbol; 59] = [ kw::PathRoot, kw::DollarCrate, kw::Underscore, kw::As, kw::Box, kw::Break, kw::Const, kw::Continue, kw::Crate, kw::Else, kw::Enum, kw::Extern, kw::False, kw::Fn, kw::For, kw::If, kw::Impl, kw::In, kw::Let, kw::Loop, kw::Match, kw::Mod, kw::Move, kw::Mut, kw::Pub, kw::Ref, kw::Return, kw::SelfLower, kw::SelfUpper, kw::Static, kw::Struct, kw::Super, kw::Trait, kw::True, kw::Type, kw::Unsafe, kw::Use, kw::Where, kw::While, kw::Abstract, kw::Become, kw::Do, kw::Final, kw::Macro, kw::Override, kw::Priv, kw::Typeof, kw::Unsized, kw::Virtual, kw::Yield, kw::Dyn, kw::Async, kw::Try, kw::UnderscoreLifetime, kw::StaticLifetime, kw::Auto, kw::Catch, kw::Default, kw::Union, ];
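// ---------------------------------------------------------------------
// Illustrative sketch (not part of the original file). `replace_names`
// above swaps `$foo` for a same-width plain identifier (`zfoo`) so the
// macro body can be parsed and formatted as ordinary Rust, with the
// substitutions undone afterwards. A minimal standalone version of that
// idea, deliberately ignoring `$$` escapes and repeat groups:
fn rename_metavars(input: &str) -> (String, Vec<(String, String)>) {
    let mut out = String::with_capacity(input.len());
    let mut substs = Vec::new();
    let mut chars = input.chars().peekable();
    while let Some(c) = chars.next() {
        if c != '$' {
            out.push(c);
            continue;
        }
        // Collect the metavariable name following the `$`.
        let mut name = String::new();
        while let Some(&n) = chars.peek() {
            if n.is_alphanumeric() || n == '_' {
                name.push(n);
                chars.next();
            } else {
                break;
            }
        }
        if name.is_empty() {
            out.push('$'); // lone `$`: leave untouched
        } else {
            // `$foo` -> `zfoo`: same width, parses as a plain identifier.
            let new = format!("z{}", name);
            out.push_str(&new);
            substs.push((format!("${}", name), new));
        }
    }
    (out, substs)
}

fn main() {
    let (renamed, substs) = rename_metavars("$x + $y");
    assert_eq!(renamed, "zx + zy");
    assert_eq!(
        substs,
        [
            ("$x".to_string(), "zx".to_string()),
            ("$y".to_string(), "zy".to_string()),
        ]
    );
}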
31.699121
100
0.528208
e976a70613a81f68ca57817f3f0b80713171703f
5,872
use cgmath::Vector3; use num::{FromPrimitive, ToPrimitive}; use std::{any::type_name, fmt, marker::PhantomData}; pub const NINTENDO_VENDOR_ID: u16 = 1406; pub const JOYCON_L_BT: u16 = 0x2006; pub const JOYCON_R_BT: u16 = 0x2007; pub const PRO_CONTROLLER: u16 = 0x2009; pub const JOYCON_CHARGING_GRIP: u16 = 0x200e; pub const HID_IDS: &[u16] = &[ JOYCON_L_BT, JOYCON_R_BT, PRO_CONTROLLER, JOYCON_CHARGING_GRIP, ]; #[repr(u8)] #[derive(Copy, Clone, Debug, FromPrimitive, ToPrimitive, PartialEq, Eq)] pub enum InputReportId { Normal = 0x3F, StandardAndSubcmd = 0x21, MCUFwUpdate = 0x23, StandardFull = 0x30, StandardFullMCU = 0x31, // 0x32 not used // 0x33 not used } // All unused values are a Nop #[repr(u8)] #[derive(Copy, Clone, Debug, FromPrimitive, ToPrimitive, PartialEq, Eq)] pub enum SubcommandId { GetOnlyControllerState = 0x00, BluetoothManualPairing = 0x01, RequestDeviceInfo = 0x02, SetInputReportMode = 0x03, GetTriggerButtonsElapsedTime = 0x04, SetShipmentMode = 0x08, SPIRead = 0x10, SPIWrite = 0x11, SetMCUConf = 0x21, SetMCUState = 0x22, SetUnknownData = 0x24, SetPlayerLights = 0x30, SetHomeLight = 0x38, SetIMUMode = 0x40, SetIMUSens = 0x41, EnableVibration = 0x48, // arg [4,0,0,2], ret [0,8,0,0,0,0,0,44] // arg [4,4,5,2], ret [0,8,0,0,0,0,200] // arg [4,4,50,2], ret [0,8,0,0,0,0,5,0,0,14] // arg [4,4,10,2], ret [0,20,0,0,0,0,244,22,0,0,230,5,0,0,243,11,0,0,234,12, 0, 0] // get ringcon calibration: arg [4,4,26,2] // ret [0,20,0,0,0,0] + [135, 8, 28, 0, 48, 247, 243, 0, 44, 12, 224] // write ringcon calibration: arg [20,4,26,1,16] + [135, 8, 28, 0, 48, 247, 243, 0, 44, 12, 224] // ret [0, 4] // get number steps offline ringcon: arg [4,4,49,2], ret [0,8,0,0,0,0,nb_steps, 0,0, 127|143] // reset number steps offline ringcon: arg [8,4,49,1,4], ret [0,4] // Possibly accessory interaction like ringcon MaybeAccessory = 0x58, // Always [] arg, [0, 32] return Unknown0x59 = 0x59, // Always [4, 1, 1, 2] arg, [] return Unknown0x5a = 0x5a, // Always [] arg, [] return Unknown0x5b = 0x5b, // Always variable arg, [] return Unknown0x5c = 0x5c, } #[derive(Copy, Clone, Default, Eq, PartialEq)] pub struct U16LE([u8; 2]); impl From<u16> for U16LE { fn from(u: u16) -> Self { U16LE(u.to_le_bytes()) } } impl From<U16LE> for u16 { fn from(u: U16LE) -> u16 { u16::from_le_bytes(u.0) } } impl fmt::Debug for U16LE { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_fmt(format_args!("0x{:x}", u16::from(*self))) } } impl fmt::Display for U16LE { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { u16::from(*self).fmt(f) } } #[derive(Copy, Clone, Default, Eq, PartialEq)] pub struct I16LE(pub [u8; 2]); impl From<i16> for I16LE { fn from(u: i16) -> I16LE { I16LE(u.to_le_bytes()) } } impl From<I16LE> for i16 { fn from(u: I16LE) -> i16 { i16::from_le_bytes(u.0) } } impl fmt::Debug for I16LE { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_fmt(format_args!("0x{:x}", i16::from(*self))) } } #[derive(Copy, Clone, Default, Eq, PartialEq)] pub struct U32LE([u8; 4]); impl From<u32> for U32LE { fn from(u: u32) -> Self { U32LE(u.to_le_bytes()) } } impl From<U32LE> for u32 { fn from(u: U32LE) -> u32 { u32::from_le_bytes(u.0) } } impl fmt::Debug for U32LE { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_fmt(format_args!("0x{:x}", u32::from(*self))) } } #[cfg(test)] pub(crate) fn offset_of<A, B>(a: &A, b: &B) -> usize { b as *const _ as usize - a as *const _ as usize } pub fn vector_from_raw(raw: [I16LE; 3]) -> Vector3<f64> { Vector3::new( i16::from(raw[0]) as f64, i16::from(raw[1]) as 
f64, i16::from(raw[2]) as f64, ) } pub fn raw_from_vector(v: Vector3<f64>) -> [I16LE; 3] { [ (v.x as i16).into(), (v.y as i16).into(), (v.z as i16).into(), ] } #[repr(transparent)] #[derive(Copy, Clone, Default, PartialEq, Eq)] pub struct RawId<Id>(u8, PhantomData<Id>); impl<Id> RawId<Id> { pub fn new(id: u8) -> Self { RawId(id, PhantomData) } } impl<Id: FromPrimitive> RawId<Id> { pub fn try_into(self) -> Option<Id> { Id::from_u8(self.0) } } impl<Id: ToPrimitive> From<Id> for RawId<Id> { fn from(id: Id) -> Self { RawId(id.to_u8().expect("always one byte"), PhantomData) } } impl<Id: fmt::Debug + FromPrimitive + Copy> fmt::Debug for RawId<Id> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if let Some(id) = self.try_into() { write!(f, "{:?}", id) } else { f.debug_tuple(&format!("RawId<{}>", type_name::<Id>())) .field(&format!("0x{:x}", self.0)) .finish() } } } impl<Id: fmt::Display + FromPrimitive + Copy> fmt::Display for RawId<Id> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if let Some(id) = self.try_into() { write!(f, "{}", id) } else { f.debug_tuple("RawId") .field(&format!("0x{:x}", self.0)) .finish() } } } impl<Id: FromPrimitive + PartialEq + Copy> PartialEq<Id> for RawId<Id> { fn eq(&self, other: &Id) -> bool { self.try_into().map(|x| x == *other).unwrap_or(false) } } #[derive(Debug, Clone, Copy, FromPrimitive, ToPrimitive)] pub enum Bool { False = 0, True = 1, } impl From<bool> for Bool { fn from(b: bool) -> Self { match b { false => Bool::False, true => Bool::True, } } }
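// ---------------------------------------------------------------------
// Illustrative sketch (not part of the original file). The `U16LE`/`I16LE`
// wrappers above keep multi-byte HID report fields in wire (little-endian)
// order and convert only at the edges. The underlying round-trip, shown on
// plain byte arrays:
fn main() {
    let value: u16 = 0x2009; // PRO_CONTROLLER product id from above
    let wire: [u8; 2] = value.to_le_bytes(); // low byte first on the wire
    assert_eq!(wire, [0x09, 0x20]);
    assert_eq!(u16::from_le_bytes(wire), value); // lossless round-trip
}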
25.530435
100
0.565565
b92a8e494c388b02c63ce8bef0fed326a3fc836a
966
use std::fmt;

pub struct Route<'r> {
    pub path: Vec<&'r str>,
}

#[derive(Debug, Clone)]
pub struct MalformedRouteError(String);

impl fmt::Display for MalformedRouteError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "This route is malformed: {}", self.0)
    }
}

impl std::error::Error for MalformedRouteError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        // Generic error, underlying cause isn't tracked.
        None
    }
}

#[allow(dead_code)]
pub fn new_route(p: &str) -> Result<Route, MalformedRouteError> {
    // TODO: richer error type
    if p.is_empty() {
        return Err(MalformedRouteError(p.to_owned()));
    }
    if !p.starts_with('/') {
        return Err(MalformedRouteError(p.to_owned()));
    }
    if p.ends_with('/') {
        // Maybe just trim instead? Not sure.
        return Err(MalformedRouteError(p.to_owned()));
    }
    Ok(Route {
        path: p.split('/').collect(),
    })
}
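// ---------------------------------------------------------------------
// Illustrative sketch (not part of the original file): hypothetical tests
// exercising `new_route`, including the leading-slash quirk that `split`
// yields an empty first segment:
#[cfg(test)]
mod route_sketch_tests {
    use super::*;

    #[test]
    fn splits_segments() {
        let r = new_route("/users/42").unwrap();
        assert_eq!(r.path, vec!["", "users", "42"]);
    }

    #[test]
    fn rejects_malformed() {
        assert!(new_route("").is_err()); // empty
        assert!(new_route("users").is_err()); // missing leading slash
        assert!(new_route("/users/").is_err()); // trailing slash
    }
}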
24.15
68
0.593168
f73956ceee0d562e623dfea07d7979f8c3b92f6f
15,415
use std::fmt::{Debug, Display}; use std::io; use std::str::FromStr; use console::Term; use theme::{get_default_theme, TermThemeRenderer, Theme}; use validate::Validator; /// Renders a simple confirmation prompt. /// /// ## Example usage /// /// ```rust,no_run /// # fn test() -> Result<(), Box<std::error::Error>> { /// use dialoguer::Confirmation; /// /// if Confirmation::new().with_text("Do you want to continue?").interact()? { /// println!("Looks like you want to continue"); /// } else { /// println!("nevermind then :("); /// } /// # Ok(()) } fn main() { test().unwrap(); } /// ``` pub struct Confirmation<'a> { text: String, default: bool, show_default: bool, theme: &'a dyn Theme, } /// Renders a confirmation prompt with several options. /// /// ## Example usage /// /// ```rust,no_run /// # fn test() -> Result<(), Box<std::error::Error>> { /// use dialoguer::Confirmation; /// /// let rv = KeyPrompt::with_theme(&ColoredTheme::default()) /// .with_text("Execute or preview?") /// .items(&['y', 'n', 'p']) /// .interact() /// .unwrap(); /// if rv == 'y' { /// println!("Looks like you want to continue"); /// } else { /// println!("nevermind then :("); /// } /// # Ok(()) } fn main() { test().unwrap(); } /// ``` pub struct KeyPrompt<'a> { text: String, default: usize, items: Vec<char>, show_default: bool, theme: &'a dyn Theme, } /// Renders a simple input prompt. /// /// ## Example usage /// /// ```rust,no_run /// # fn test() -> Result<(), Box<std::error::Error>> { /// use dialoguer::Input; /// /// let name = Input::<String>::new().with_prompt("Your name").interact()?; /// println!("Name: {}", name); /// # Ok(()) } fn main() { test().unwrap(); } /// ``` pub struct Input<'a, T> { prompt: String, default: Option<T>, show_default: bool, initial_text: Option<String>, theme: &'a dyn Theme, permit_empty: bool, validator: Option<Box<dyn Fn(&str) -> Option<String>>>, } /// Renders a password input prompt. /// /// ## Example usage /// /// ```rust,no_run /// # fn test() -> Result<(), Box<std::error::Error>> { /// use dialoguer::PasswordInput; /// /// let password = PasswordInput::new().with_prompt("New Password") /// .with_confirmation("Confirm password", "Passwords mismatching") /// .interact()?; /// println!("Length of the password is: {}", password.len()); /// # Ok(()) } fn main() { test().unwrap(); } /// ``` pub struct PasswordInput<'a> { prompt: String, theme: &'a dyn Theme, allow_empty_password: bool, confirmation_prompt: Option<(String, String)>, } impl<'a> Default for Confirmation<'a> { fn default() -> Confirmation<'a> { Confirmation::new() } } impl<'a> Confirmation<'a> { /// Creates the prompt with a specific text. pub fn new() -> Confirmation<'static> { Confirmation::with_theme(get_default_theme()) } /// Sets a theme other than the default one. pub fn with_theme(theme: &'a dyn Theme) -> Confirmation<'a> { Confirmation { text: "".into(), default: true, show_default: true, theme, } } /// Sets the confirmation text. pub fn with_text(&mut self, text: &str) -> &mut Confirmation<'a> { self.text = text.into(); self } /// Overrides the default. pub fn default(&mut self, val: bool) -> &mut Confirmation<'a> { self.default = val; self } /// Disables or enables the default value display. /// /// The default is to append `[y/n]` to the prompt to tell the /// user which keys to press. This also renders the default choice /// in uppercase. The default is selected on enter. 
pub fn show_default(&mut self, val: bool) -> &mut Confirmation<'a> { self.show_default = val; self } /// Enables user interaction and returns the result. /// /// If the user confirms the result is `true`, `false` otherwise. /// The dialog is rendered on stderr. pub fn interact(&self) -> io::Result<bool> { self.interact_on(&Term::stderr()) } /// Like `interact` but allows a specific terminal to be set. pub fn interact_on(&self, term: &Term) -> io::Result<bool> { let mut render = TermThemeRenderer::new(term, self.theme); render.confirmation_prompt( &self.text, if self.show_default { Some(self.default) } else { None }, )?; loop { let input = term.read_char()?; let rv = match input { 'y' | 'Y' => true, 'n' | 'N' => false, '\n' | '\r' => self.default, _ => { continue; } }; term.clear_line()?; render.confirmation_prompt_selection(&self.text, rv)?; return Ok(rv); } } } impl<'a> Default for KeyPrompt<'a> { fn default() -> KeyPrompt<'a> { KeyPrompt::new() } } impl<'a> KeyPrompt<'a> { /// Creates the prompt with a specific text. pub fn new() -> KeyPrompt<'static> { KeyPrompt::with_theme(get_default_theme()) } /// Sets a theme other than the default one. pub fn with_theme(theme: &'a dyn Theme) -> KeyPrompt<'a> { KeyPrompt { text: "".into(), default: 100, items: vec![], show_default: true, theme, } } /// Sets the KeyPrompt text. pub fn with_text(&mut self, text: &str) -> &mut KeyPrompt<'a> { self.text = text.into(); self } /// Adds multiple items to the selector. pub fn items(&mut self, items: &[char]) -> &mut KeyPrompt<'a> { for item in items { self.items.push(*item); } self } /// Overrides the default. pub fn default(&mut self, val: usize) -> &mut KeyPrompt<'a> { self.default = val; self } /// Disables or enables the default value display. /// /// The default is to append `[y/n]` to the prompt to tell the /// user which keys to press. This also renders the default choice /// in uppercase. The default is selected on enter. pub fn show_default(&mut self, val: bool) -> &mut KeyPrompt<'a> { self.show_default = val; self } /// Enables user interaction and returns the result. /// /// If the user confirms the result is `true`, `false` otherwise. /// The dialog is rendered on stderr. pub fn interact(&self) -> io::Result<char> { self.interact_on(&Term::stderr()) } /// Like `interact` but allows a specific terminal to be set. pub fn interact_on(&self, term: &Term) -> io::Result<char> { if self.items.is_empty() { panic!("Expected items to be specified") } let mut render = TermThemeRenderer::new(term, self.theme); render.key_prompt( &self.text, if self.show_default { Some(self.default) } else { None }, &self.items, )?; loop { let input = term.read_char()?.to_ascii_lowercase(); let rv = if input == '\n' || input == '\r' { let c = self.items.get(self.default); match c { Some(c) => c, _ => { continue; } } } else if self.items.contains(&input) { &input } else { continue; }; term.clear_line()?; render.key_prompt_selection(&self.text, *rv)?; return Ok(*rv); } } } impl<'a, T> Default for Input<'a, T> where T: Clone + FromStr + Display, T::Err: Display + Debug, { fn default() -> Input<'a, T> { Input::new() } } impl<'a, T> Input<'a, T> where T: Clone + FromStr + Display, T::Err: Display + Debug, { /// Creates a new input prompt. pub fn new() -> Input<'static, T> { Input::with_theme(get_default_theme()) } /// Creates an input with a specific theme. 
pub fn with_theme(theme: &'a dyn Theme) -> Input<'a, T> { Input { prompt: "".into(), default: None, show_default: true, initial_text: None, theme, permit_empty: false, validator: None, } } /// Sets the input prompt. pub fn with_prompt(&mut self, prompt: &str) -> &mut Input<'a, T> { self.prompt = prompt.into(); self } /// Sets whether the default can be editable. pub fn with_initial_text(&mut self, val: &str) -> &mut Input<'a, T> { self.initial_text = Some(val.into()); self } /// Sets a default. /// /// Out of the box the prompt does not have a default and will continue /// to display until the user hit enter. If a default is set the user /// can instead accept the default with enter. pub fn default(&mut self, value: Option<T>) -> &mut Input<'a, T> { self.default = value; self } /// Enables or disables an empty input /// /// By default, if there is no default value set for the input, the user must input a non-empty string. pub fn allow_empty(&mut self, val: bool) -> &mut Input<'a, T> { self.permit_empty = val; self } /// Disables or enables the default value display. /// /// The default is to append `[default]` to the prompt to tell the /// user that a default is acceptable. pub fn show_default(&mut self, val: bool) -> &mut Input<'a, T> { self.show_default = val; self } /// Registers a validator. pub fn validate_with<V: Validator + 'static>(&mut self, validator: V) -> &mut Input<'a, T> { let old_validator_func = self.validator.take(); self.validator = Some(Box::new(move |value: &str| -> Option<String> { if let Some(old) = old_validator_func.as_ref() { if let Some(err) = old(value) { return Some(err); } } match validator.validate(value) { Ok(()) => None, Err(err) => Some(err.to_string()), } })); self } /// Enables user interaction and returns the result. /// /// If the user confirms the result is `true`, `false` otherwise. /// The dialog is rendered on stderr. pub fn interact(&self) -> io::Result<T> { self.interact_on(&Term::stderr()) } /// Like `interact` but allows a specific terminal to be set. pub fn interact_on(&self, term: &Term) -> io::Result<T> { let mut render = TermThemeRenderer::new(term, self.theme); loop { let default_string = self.default.as_ref().map(|x| x.to_string()); render.input_prompt( &self.prompt, if self.show_default { default_string.as_deref() } else { None }, )?; let input = if let Some(initial_text) = self.initial_text.as_ref() { term.read_line_initial_text(initial_text)? } else { term.read_line()? }; render.add_line(); term.clear_line()?; if input.is_empty() { render.clear()?; if let Some(ref default) = self.default { render.single_prompt_selection(&self.prompt, &default.to_string())?; return Ok(default.clone()); } else if !self.permit_empty { continue; } } render.clear()?; if let Some(ref validator) = self.validator { if let Some(err) = validator(&input) { render.error(&err)?; continue; } } match input.parse::<T>() { Ok(value) => { render.single_prompt_selection(&self.prompt, &input)?; return Ok(value); } Err(err) => { render.error(&err.to_string())?; continue; } } } } } impl<'a> Default for PasswordInput<'a> { fn default() -> PasswordInput<'a> { PasswordInput::new() } } impl<'a> PasswordInput<'a> { /// Creates a new input prompt. pub fn new() -> PasswordInput<'static> { PasswordInput::with_theme(get_default_theme()) } /// Creates the password input with a specific theme. pub fn with_theme(theme: &'a dyn Theme) -> PasswordInput<'a> { PasswordInput { prompt: "".into(), theme, allow_empty_password: false, confirmation_prompt: None, } } /// Sets the prompt. 
pub fn with_prompt(&mut self, prompt: &str) -> &mut PasswordInput<'a> { self.prompt = prompt.into(); self } /// Enables confirmation prompting. pub fn with_confirmation( &mut self, prompt: &str, mismatch_err: &str, ) -> &mut PasswordInput<'a> { self.confirmation_prompt = Some((prompt.into(), mismatch_err.into())); self } /// Allows/Disables empty password. /// /// By default this setting is set to false (i.e. password is not empty). pub fn allow_empty_password(&mut self, allow_empty_password: bool) -> &mut PasswordInput<'a> { self.allow_empty_password = allow_empty_password; self } /// Enables user interaction and returns the result. /// /// If the user confirms the result is `true`, `false` otherwise. /// The dialog is rendered on stderr. pub fn interact(&self) -> io::Result<String> { self.interact_on(&Term::stderr()) } /// Like `interact` but allows a specific terminal to be set. pub fn interact_on(&self, term: &Term) -> io::Result<String> { let mut render = TermThemeRenderer::new(term, self.theme); render.set_prompts_reset_height(false); loop { let password = self.prompt_password(&mut render, &self.prompt)?; if let Some((ref prompt, ref err)) = self.confirmation_prompt { let pw2 = self.prompt_password(&mut render, &prompt)?; if password == pw2 { render.clear()?; render.password_prompt_selection(&self.prompt)?; return Ok(password); } render.error(err)?; } else { render.clear()?; render.password_prompt_selection(&self.prompt)?; return Ok(password); } } } fn prompt_password(&self, render: &mut TermThemeRenderer, prompt: &str) -> io::Result<String> { loop { render.password_prompt(prompt)?; let input = render.term().read_secure_line()?; render.add_line(); if !input.is_empty() || self.allow_empty_password { return Ok(input); } } } }
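// ---------------------------------------------------------------------
// Illustrative sketch (not part of the original crate). The heart of
// `Input::interact_on` above is a read/parse/re-prompt loop: empty input
// accepts the default, a parse failure prompts again. A self-contained
// stdin version of that loop (names are hypothetical):
use std::io::{self, BufRead, Write};
use std::str::FromStr;

fn prompt_parsed<T: FromStr>(prompt: &str, mut default: Option<T>) -> io::Result<T> {
    let stdin = io::stdin();
    loop {
        print!("{}: ", prompt);
        io::stdout().flush()?;
        let mut line = String::new();
        if stdin.lock().read_line(&mut line)? == 0 {
            // EOF: fall back to the default if there is one.
            if let Some(d) = default.take() {
                return Ok(d);
            }
            return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "no input"));
        }
        let trimmed = line.trim();
        if trimmed.is_empty() {
            if let Some(d) = default.take() {
                return Ok(d); // bare enter accepts the default
            }
            continue; // no default: keep prompting
        }
        if let Ok(v) = trimmed.parse::<T>() {
            return Ok(v);
        }
        // Parse failure: loop and prompt again, mirroring `Input`.
    }
}

fn main() -> io::Result<()> {
    let n: u32 = prompt_parsed("Pick a number", Some(7))?;
    println!("got {}", n);
    Ok(())
}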
30.048733
107
0.527473
e6e9625c17c7a30e8f30e2790d5871653e283343
142
mod ppm; mod ray; mod vec; pub use ppm::render; pub use ray::Ray; pub use vec::Vec3; pub const WIDTH: u32 = 400; pub const HEIGHT: u32 = 300;
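// ---------------------------------------------------------------------
// Illustrative sketch (not part of the original file): a hypothetical use
// of the exported render dimensions, deriving the aspect ratio a camera
// setup would need for a WIDTH x HEIGHT image:
fn main() {
    let aspect = WIDTH as f64 / HEIGHT as f64; // 400 / 300
    assert!((aspect - 4.0 / 3.0).abs() < 1e-9);
    println!("aspect ratio: {}", aspect);
}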
15.777778
28
0.683099
48b5cacc5aaa81469f3fa8966a7bc6dfe4101f0b
70
pub mod client; pub mod clock; pub mod notifications; pub mod server;
14
22
0.771429
8fcd1f7c77b5bda0ff809634a729a627f4f87824
10,103
use crate::component::entry::TxEntry; use crate::pool::TxPool; use ckb_error::{Error, InternalErrorKind}; use ckb_snapshot::Snapshot; use ckb_types::{ core::{ cell::{ resolve_transaction, OverlayCellProvider, ResolvedTransaction, TransactionsProvider, }, Capacity, Cycle, TransactionView, }, packed::Byte32, }; use ckb_verification::{ContextualTransactionVerifier, TransactionVerifier}; use futures::future::Future; use std::collections::{HashMap, HashSet}; use std::sync::Arc; use tokio::prelude::{Async, Poll}; use tokio::sync::lock::Lock; pub struct PreResolveTxsProcess { pub tx_pool: Lock<TxPool>, pub txs: Option<Vec<TransactionView>>, } impl PreResolveTxsProcess { pub fn new(tx_pool: Lock<TxPool>, txs: Vec<TransactionView>) -> PreResolveTxsProcess { PreResolveTxsProcess { tx_pool, txs: Some(txs), } } } type PreResolveTxsItem = ( Byte32, Arc<Snapshot>, Vec<ResolvedTransaction>, Vec<(usize, Capacity, TxStatus)>, ); impl Future for PreResolveTxsProcess { type Item = PreResolveTxsItem; type Error = Error; fn poll(&mut self) -> Poll<Self::Item, Self::Error> { match self.tx_pool.poll_lock() { Async::Ready(tx_pool) => { let txs = self.txs.take().expect("cannot execute twice"); debug_assert!(!txs.is_empty(), "txs should not be empty!"); let snapshot = tx_pool.cloned_snapshot(); let tip_hash = snapshot.tip_hash(); let mut txs_provider = TransactionsProvider::default(); let resolved = txs .iter() .map(|tx| { let ret = resolve_tx(&tx_pool, &snapshot, &txs_provider, tx.clone()); txs_provider.insert(tx); ret }) .collect::<Result<Vec<(ResolvedTransaction, usize, Capacity, TxStatus)>, _>>( )?; let (rtxs, status) = resolved .into_iter() .map(|(rtx, tx_size, fee, status)| (rtx, (tx_size, fee, status))) .unzip(); Ok(Async::Ready((tip_hash, snapshot, rtxs, status))) } Async::NotReady => Ok(Async::NotReady), } } } pub struct VerifyTxsProcess { pub snapshot: Arc<Snapshot>, pub txs_verify_cache: HashMap<Byte32, Cycle>, pub txs: Option<Vec<ResolvedTransaction>>, } impl VerifyTxsProcess { pub fn new( snapshot: Arc<Snapshot>, txs_verify_cache: HashMap<Byte32, Cycle>, txs: Vec<ResolvedTransaction>, ) -> VerifyTxsProcess { VerifyTxsProcess { snapshot, txs_verify_cache, txs: Some(txs), } } } impl Future for VerifyTxsProcess { type Item = Vec<(ResolvedTransaction, Cycle)>; type Error = Error; fn poll(&mut self) -> Poll<Self::Item, Self::Error> { let txs = self.txs.take().expect("cannot execute twice"); Ok(Async::Ready(verify_rtxs( &self.snapshot, txs, &self.txs_verify_cache, )?)) } } pub struct SubmitTxsProcess { pub tx_pool: Lock<TxPool>, pub txs: Option<Vec<(ResolvedTransaction, Cycle)>>, pub pre_resolve_tip: Byte32, pub status: Option<Vec<(usize, Capacity, TxStatus)>>, } impl SubmitTxsProcess { pub fn new( tx_pool: Lock<TxPool>, txs: Vec<(ResolvedTransaction, Cycle)>, pre_resolve_tip: Byte32, status: Vec<(usize, Capacity, TxStatus)>, ) -> SubmitTxsProcess { SubmitTxsProcess { tx_pool, pre_resolve_tip, status: Some(status), txs: Some(txs), } } } impl Future for SubmitTxsProcess { type Item = (HashMap<Byte32, Cycle>, Vec<Cycle>); type Error = Error; fn poll(&mut self) -> Poll<Self::Item, Self::Error> { match self.tx_pool.poll_lock() { Async::Ready(mut guard) => { let executor = SubmitTxsExecutor { tx_pool: &mut guard, }; let txs = self.txs.take().expect("cannot execute twice"); let status = self.status.take().expect("cannot execute twice"); Ok(Async::Ready(executor.execute( &self.pre_resolve_tip, txs, status, )?)) } Async::NotReady => Ok(Async::NotReady), } } } pub enum TxStatus { Fresh, Gap, Proposed, } struct 
SubmitTxsExecutor<'a> { tx_pool: &'a mut TxPool, } impl<'a> SubmitTxsExecutor<'a> { fn execute( self, pre_resolve_tip: &Byte32, txs: Vec<(ResolvedTransaction, Cycle)>, status: Vec<(usize, Capacity, TxStatus)>, ) -> Result<(HashMap<Byte32, Cycle>, Vec<Cycle>), Error> { let snapshot = self.tx_pool.snapshot(); if pre_resolve_tip != &snapshot.tip_hash() { let mut txs_provider = TransactionsProvider::default(); for (tx, _) in &txs { resolve_tx( self.tx_pool, snapshot, &txs_provider, tx.transaction.clone(), )?; txs_provider.insert(&tx.transaction); } } let cache = txs .iter() .map(|(tx, cycles)| (tx.transaction.hash(), *cycles)) .collect(); let cycles_vec = txs.iter().map(|(_, cycles)| *cycles).collect(); for ((rtx, cycles), (tx_size, fee, status)) in txs.into_iter().zip(status.into_iter()) { if self.tx_pool.reach_cycles_limit(cycles) { return Err(InternalErrorKind::TransactionPoolFull.into()); } let related_dep_out_points = rtx.related_dep_out_points(); let entry = TxEntry::new( rtx.transaction, cycles, fee, tx_size, related_dep_out_points, ); if match status { TxStatus::Fresh => self.tx_pool.add_pending(entry), TxStatus::Gap => self.tx_pool.add_gap(entry), TxStatus::Proposed => self.tx_pool.add_proposed(entry), } { self.tx_pool.update_statics_for_add_tx(tx_size, cycles); } } Ok((cache, cycles_vec)) } } fn resolve_tx<'a>( tx_pool: &TxPool, snapshot: &Snapshot, txs_provider: &'a TransactionsProvider<'a>, tx: TransactionView, ) -> Result<(ResolvedTransaction, usize, Capacity, TxStatus), Error> { let tx_size = tx.data().serialized_size_in_block(); if tx_pool.reach_size_limit(tx_size) { return Err(InternalErrorKind::TransactionPoolFull.into()); } let short_id = tx.proposal_short_id(); if snapshot.proposals().contains_proposed(&short_id) { resolve_tx_from_proposed(tx_pool, snapshot, txs_provider, tx).and_then(|rtx| { let fee = tx_pool.calculate_transaction_fee(snapshot, &rtx); fee.map(|fee| (rtx, tx_size, fee, TxStatus::Proposed)) }) } else { resolve_tx_from_pending_and_proposed(tx_pool, snapshot, txs_provider, tx).and_then(|rtx| { let status = if snapshot.proposals().contains_gap(&short_id) { TxStatus::Gap } else { TxStatus::Fresh }; let fee = tx_pool.calculate_transaction_fee(snapshot, &rtx); fee.map(|fee| (rtx, tx_size, fee, status)) }) } } fn resolve_tx_from_proposed<'a>( tx_pool: &TxPool, snapshot: &Snapshot, txs_provider: &'a TransactionsProvider<'a>, tx: TransactionView, ) -> Result<ResolvedTransaction, Error> { let cell_provider = OverlayCellProvider::new(&tx_pool.proposed, snapshot); let provider = OverlayCellProvider::new(txs_provider, &cell_provider); resolve_transaction(tx, &mut HashSet::new(), &provider, snapshot) } fn resolve_tx_from_pending_and_proposed<'a>( tx_pool: &TxPool, snapshot: &Snapshot, txs_provider: &'a TransactionsProvider<'a>, tx: TransactionView, ) -> Result<ResolvedTransaction, Error> { let proposed_provider = OverlayCellProvider::new(&tx_pool.proposed, snapshot); let gap_and_proposed_provider = OverlayCellProvider::new(&tx_pool.gap, &proposed_provider); let pending_and_proposed_provider = OverlayCellProvider::new(&tx_pool.pending, &gap_and_proposed_provider); let provider = OverlayCellProvider::new(txs_provider, &pending_and_proposed_provider); resolve_transaction(tx, &mut HashSet::new(), &provider, snapshot) } fn verify_rtxs( snapshot: &Snapshot, txs: Vec<ResolvedTransaction>, txs_verify_cache: &HashMap<Byte32, Cycle>, ) -> Result<Vec<(ResolvedTransaction, Cycle)>, Error> { let tip_header = snapshot.tip_header(); let tip_number = tip_header.number(); let epoch = 
tip_header.epoch(); let consensus = snapshot.consensus(); txs.into_iter() .map(|tx| { let tx_hash = tx.transaction.hash(); if let Some(cycles) = txs_verify_cache.get(&tx_hash) { ContextualTransactionVerifier::new( &tx, snapshot, tip_number + 1, epoch, tip_header.hash(), consensus, ) .verify() .map(|_| (tx, *cycles)) } else { TransactionVerifier::new( &tx, snapshot, tip_number + 1, epoch, tip_header.hash(), consensus, snapshot, ) .verify(consensus.max_block_cycles()) .map(|cycles| (tx, cycles)) } }) .collect::<Result<Vec<_>, _>>() }
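// ---------------------------------------------------------------------
// Illustrative sketch (not part of the original file).
// `PreResolveTxsProcess::poll` above maps each resolved transaction to
// `(rtx, (size, fee, status))` and splits the pairs apart with `unzip`.
// The same collect-then-unzip shape, shown on plain placeholder data:
fn main() {
    let resolved = vec![("tx-a", 200usize, 5u64), ("tx-b", 310, 9)];
    let (txs, meta): (Vec<_>, Vec<_>) = resolved
        .into_iter()
        .map(|(tx, size, fee)| (tx, (size, fee)))
        .unzip();
    assert_eq!(txs, ["tx-a", "tx-b"]);
    assert_eq!(meta, [(200, 5), (310, 9)]);
}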
31.77044
98
0.560824
69a3947df2b5bf4b132aaed334fd8f9231800755
80,252
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! A growable list type, written `Vec<T>` but pronounced 'vector.' //! //! Vectors have `O(1)` indexing, push (to the end) and pop (from the end). //! //! # Examples //! //! Explicitly creating a `Vec<T>` with `new()`: //! //! ``` //! let xs: Vec<i32> = Vec::new(); //! ``` //! //! Using the `vec!` macro: //! //! ``` //! let ys: Vec<i32> = vec![]; //! //! let zs = vec![1i32, 2, 3, 4, 5]; //! ``` //! //! Push: //! //! ``` //! let mut xs = vec![1i32, 2]; //! //! xs.push(3); //! ``` //! //! And pop: //! //! ``` //! let mut xs = vec![1i32, 2]; //! //! let two = xs.pop(); //! ``` #![stable] use core::prelude::*; use alloc::boxed::Box; use alloc::heap::{EMPTY, allocate, reallocate, deallocate}; use core::borrow::{Cow, IntoCow}; use core::cmp::max; use core::cmp::{Ordering}; use core::default::Default; use core::fmt; use core::hash::{self, Hash}; use core::iter::{repeat, FromIterator}; use core::marker::{ContravariantLifetime, InvariantType}; use core::mem; use core::nonzero::NonZero; use core::num::{Int, UnsignedInt}; use core::ops::{Index, IndexMut, Deref, Add}; use core::ops; use core::ptr; use core::raw::Slice as RawSlice; use core::uint; /// A growable list type, written `Vec<T>` but pronounced 'vector.' /// /// # Examples /// /// ``` /// let mut vec = Vec::new(); /// vec.push(1i); /// vec.push(2i); /// /// assert_eq!(vec.len(), 2); /// assert_eq!(vec[0], 1); /// /// assert_eq!(vec.pop(), Some(2)); /// assert_eq!(vec.len(), 1); /// /// vec[0] = 7i; /// assert_eq!(vec[0], 7); /// /// vec.push_all(&[1, 2, 3]); /// /// for x in vec.iter() { /// println!("{}", x); /// } /// assert_eq!(vec, vec![7i, 1, 2, 3]); /// ``` /// /// The `vec!` macro is provided to make initialization more convenient: /// /// ``` /// let mut vec = vec![1i, 2i, 3i]; /// vec.push(4); /// assert_eq!(vec, vec![1, 2, 3, 4]); /// ``` /// /// Use a `Vec<T>` as an efficient stack: /// /// ``` /// let mut stack = Vec::new(); /// /// stack.push(1i); /// stack.push(2i); /// stack.push(3i); /// /// loop { /// let top = match stack.pop() { /// None => break, // empty /// Some(x) => x, /// }; /// // Prints 3, 2, 1 /// println!("{}", top); /// } /// ``` /// /// # Capacity and reallocation /// /// The capacity of a vector is the amount of space allocated for any future elements that will be /// added onto the vector. This is not to be confused with the *length* of a vector, which /// specifies the number of actual elements within the vector. If a vector's length exceeds its /// capacity, its capacity will automatically be increased, but its elements will have to be /// reallocated. /// /// For example, a vector with capacity 10 and length 0 would be an empty vector with space for 10 /// more elements. Pushing 10 or fewer elements onto the vector will not change its capacity or /// cause reallocation to occur. However, if the vector's length is increased to 11, it will have /// to reallocate, which can be slow. For this reason, it is recommended to use /// `Vec::with_capacity` whenever possible to specify how big the vector is expected to get. 
#[unsafe_no_drop_flag] #[stable] pub struct Vec<T> { ptr: NonZero<*mut T>, len: uint, cap: uint, } unsafe impl<T: Send> Send for Vec<T> { } unsafe impl<T: Sync> Sync for Vec<T> { } //////////////////////////////////////////////////////////////////////////////// // Inherent methods //////////////////////////////////////////////////////////////////////////////// impl<T> Vec<T> { /// Constructs a new, empty `Vec<T>`. /// /// The vector will not allocate until elements are pushed onto it. /// /// # Examples /// /// ``` /// let mut vec: Vec<int> = Vec::new(); /// ``` #[inline] #[stable] pub fn new() -> Vec<T> { // We want ptr to never be NULL so instead we set it to some arbitrary // non-null value which is fine since we never call deallocate on the ptr // if cap is 0. The reason for this is because the pointer of a slice // being NULL would break the null pointer optimization for enums. Vec { ptr: unsafe { NonZero::new(EMPTY as *mut T) }, len: 0, cap: 0 } } /// Constructs a new, empty `Vec<T>` with the specified capacity. /// /// The vector will be able to hold exactly `capacity` elements without reallocating. If /// `capacity` is 0, the vector will not allocate. /// /// It is important to note that this function does not specify the *length* of the returned /// vector, but only the *capacity*. (For an explanation of the difference between length and /// capacity, see the main `Vec<T>` docs above, 'Capacity and reallocation'.) /// /// # Examples /// /// ``` /// let mut vec: Vec<int> = Vec::with_capacity(10); /// /// // The vector contains no items, even though it has capacity for more /// assert_eq!(vec.len(), 0); /// /// // These are all done without reallocating... /// for i in range(0i, 10) { /// vec.push(i); /// } /// /// // ...but this may make the vector reallocate /// vec.push(11); /// ``` #[inline] #[stable] pub fn with_capacity(capacity: uint) -> Vec<T> { if mem::size_of::<T>() == 0 { Vec { ptr: unsafe { NonZero::new(EMPTY as *mut T) }, len: 0, cap: uint::MAX } } else if capacity == 0 { Vec::new() } else { let size = capacity.checked_mul(mem::size_of::<T>()) .expect("capacity overflow"); let ptr = unsafe { allocate(size, mem::min_align_of::<T>()) }; if ptr.is_null() { ::alloc::oom() } Vec { ptr: unsafe { NonZero::new(ptr as *mut T) }, len: 0, cap: capacity } } } /// Creates a `Vec<T>` directly from the raw components of another vector. /// /// This is highly unsafe, due to the number of invariants that aren't checked. /// /// # Examples /// /// ``` /// use std::ptr; /// use std::mem; /// /// fn main() { /// let mut v = vec![1i, 2, 3]; /// /// // Pull out the various important pieces of information about `v` /// let p = v.as_mut_ptr(); /// let len = v.len(); /// let cap = v.capacity(); /// /// unsafe { /// // Cast `v` into the void: no destructor run, so we are in /// // complete control of the allocation to which `p` points. /// mem::forget(v); /// /// // Overwrite memory with 4, 5, 6 /// for i in range(0, len as int) { /// ptr::write(p.offset(i), 4 + i); /// } /// /// // Put everything back together into a Vec /// let rebuilt = Vec::from_raw_parts(p, len, cap); /// assert_eq!(rebuilt, vec![4i, 5i, 6i]); /// } /// } /// ``` #[stable] pub unsafe fn from_raw_parts(ptr: *mut T, length: uint, capacity: uint) -> Vec<T> { Vec { ptr: NonZero::new(ptr), len: length, cap: capacity } } /// Creates a vector by copying the elements from a raw pointer. /// /// This function will copy `elts` contiguous elements starting at `ptr` into a new allocation /// owned by the returned `Vec<T>`. 
The elements of the buffer are copied into the vector /// without cloning, as if `ptr::read()` were called on them. #[inline] #[unstable = "may be better expressed via composition"] pub unsafe fn from_raw_buf(ptr: *const T, elts: uint) -> Vec<T> { let mut dst = Vec::with_capacity(elts); dst.set_len(elts); ptr::copy_nonoverlapping_memory(dst.as_mut_ptr(), ptr, elts); dst } /// Returns the number of elements the vector can hold without /// reallocating. /// /// # Examples /// /// ``` /// let vec: Vec<int> = Vec::with_capacity(10); /// assert_eq!(vec.capacity(), 10); /// ``` #[inline] #[stable] pub fn capacity(&self) -> uint { self.cap } /// Reserves capacity for at least `additional` more elements to be inserted in the given /// `Vec<T>`. The collection may reserve more space to avoid frequent reallocations. /// /// # Panics /// /// Panics if the new capacity overflows `uint`. /// /// # Examples /// /// ``` /// let mut vec: Vec<int> = vec![1]; /// vec.reserve(10); /// assert!(vec.capacity() >= 11); /// ``` #[stable] pub fn reserve(&mut self, additional: uint) { if self.cap - self.len < additional { let err_msg = "Vec::reserve: `uint` overflow"; let new_cap = self.len.checked_add(additional).expect(err_msg) .checked_next_power_of_two().expect(err_msg); self.grow_capacity(new_cap); } } /// Reserves the minimum capacity for exactly `additional` more elements to /// be inserted in the given `Vec<T>`. Does nothing if the capacity is already /// sufficient. /// /// Note that the allocator may give the collection more space than it /// requests. Therefore capacity can not be relied upon to be precisely /// minimal. Prefer `reserve` if future insertions are expected. /// /// # Panics /// /// Panics if the new capacity overflows `uint`. /// /// # Examples /// /// ``` /// let mut vec: Vec<int> = vec![1]; /// vec.reserve_exact(10); /// assert!(vec.capacity() >= 11); /// ``` #[stable] pub fn reserve_exact(&mut self, additional: uint) { if self.cap - self.len < additional { match self.len.checked_add(additional) { None => panic!("Vec::reserve: `uint` overflow"), Some(new_cap) => self.grow_capacity(new_cap) } } } /// Shrinks the capacity of the vector as much as possible. /// /// It will drop down as close as possible to the length but the allocator /// may still inform the vector that there is space for a few more elements. /// /// # Examples /// /// ``` /// let mut vec: Vec<int> = Vec::with_capacity(10); /// vec.push_all(&[1, 2, 3]); /// assert_eq!(vec.capacity(), 10); /// vec.shrink_to_fit(); /// assert!(vec.capacity() >= 3); /// ``` #[stable] pub fn shrink_to_fit(&mut self) { if mem::size_of::<T>() == 0 { return } if self.len == 0 { if self.cap != 0 { unsafe { dealloc(*self.ptr, self.cap) } self.cap = 0; } } else { unsafe { // Overflow check is unnecessary as the vector is already at // least this large. let ptr = reallocate(*self.ptr as *mut u8, self.cap * mem::size_of::<T>(), self.len * mem::size_of::<T>(), mem::min_align_of::<T>()) as *mut T; if ptr.is_null() { ::alloc::oom() } self.ptr = NonZero::new(ptr); } self.cap = self.len; } } /// Convert the vector into Box<[T]>. /// /// Note that this will drop any excess capacity. Calling this and /// converting back to a vector with `into_vec()` is equivalent to calling /// `shrink_to_fit()`. #[experimental] pub fn into_boxed_slice(mut self) -> Box<[T]> { self.shrink_to_fit(); unsafe { let xs: Box<[T]> = mem::transmute(self.as_mut_slice()); mem::forget(self); xs } } /// Shorten a vector, dropping excess elements. 
/// /// If `len` is greater than the vector's current length, this has no /// effect. /// /// # Examples /// /// ``` /// let mut vec = vec![1i, 2, 3, 4]; /// vec.truncate(2); /// assert_eq!(vec, vec![1, 2]); /// ``` #[stable] pub fn truncate(&mut self, len: uint) { unsafe { // drop any extra elements while len < self.len { // decrement len before the read(), so a panic on Drop doesn't // re-drop the just-failed value. self.len -= 1; ptr::read(self.get_unchecked(self.len)); } } } /// Returns a mutable slice of the elements of `self`. /// /// # Examples /// /// ``` /// fn foo(slice: &mut [int]) {} /// /// let mut vec = vec![1i, 2]; /// foo(vec.as_mut_slice()); /// ``` #[inline] #[stable] pub fn as_mut_slice<'a>(&'a mut self) -> &'a mut [T] { unsafe { mem::transmute(RawSlice { data: *self.ptr as *const T, len: self.len, }) } } /// Creates a consuming iterator, that is, one that moves each value out of /// the vector (from start to end). The vector cannot be used after calling /// this. /// /// # Examples /// /// ``` /// let v = vec!["a".to_string(), "b".to_string()]; /// for s in v.into_iter() { /// // s has type String, not &String /// println!("{}", s); /// } /// ``` #[inline] #[stable] pub fn into_iter(self) -> IntoIter<T> { unsafe { let ptr = *self.ptr; let cap = self.cap; let begin = ptr as *const T; let end = if mem::size_of::<T>() == 0 { (ptr as uint + self.len()) as *const T } else { ptr.offset(self.len() as int) as *const T }; mem::forget(self); IntoIter { allocation: ptr, cap: cap, ptr: begin, end: end } } } /// Sets the length of a vector. /// /// This will explicitly set the size of the vector, without actually /// modifying its buffers, so it is up to the caller to ensure that the /// vector is actually the specified size. /// /// # Examples /// /// ``` /// let mut v = vec![1u, 2, 3, 4]; /// unsafe { /// v.set_len(1); /// } /// ``` #[inline] #[stable] pub unsafe fn set_len(&mut self, len: uint) { self.len = len; } /// Removes an element from anywhere in the vector and return it, replacing /// it with the last element. /// /// This does not preserve ordering, but is O(1). /// /// # Panics /// /// Panics if `index` is out of bounds. /// /// # Examples /// /// ``` /// let mut v = vec!["foo", "bar", "baz", "qux"]; /// /// assert_eq!(v.swap_remove(1), "bar"); /// assert_eq!(v, vec!["foo", "qux", "baz"]); /// /// assert_eq!(v.swap_remove(0), "foo"); /// assert_eq!(v, vec!["baz", "qux"]); /// ``` #[inline] #[stable] pub fn swap_remove(&mut self, index: uint) -> T { let length = self.len(); self.swap(index, length - 1); self.pop().unwrap() } /// Inserts an element at position `index` within the vector, shifting all /// elements after position `i` one position to the right. /// /// # Panics /// /// Panics if `index` is not between `0` and the vector's length (both /// bounds inclusive). /// /// # Examples /// /// ``` /// let mut vec = vec![1i, 2, 3]; /// vec.insert(1, 4); /// assert_eq!(vec, vec![1, 4, 2, 3]); /// vec.insert(4, 5); /// assert_eq!(vec, vec![1, 4, 2, 3, 5]); /// ``` #[stable] pub fn insert(&mut self, index: uint, element: T) { let len = self.len(); assert!(index <= len); // space for the new element self.reserve(1); unsafe { // infallible // The spot to put the new value { let p = self.as_mut_ptr().offset(index as int); // Shift everything over to make space. (Duplicating the // `index`th element into two consecutive places.) ptr::copy_memory(p.offset(1), &*p, len - index); // Write it in, overwriting the first copy of the `index`th // element. 
ptr::write(&mut *p, element); } self.set_len(len + 1); } } /// Removes and returns the element at position `index` within the vector, /// shifting all elements after position `index` one position to the left. /// /// # Panics /// /// Panics if `i` is out of bounds. /// /// # Examples /// /// ``` /// let mut v = vec![1i, 2, 3]; /// assert_eq!(v.remove(1), 2); /// assert_eq!(v, vec![1, 3]); /// ``` #[stable] pub fn remove(&mut self, index: uint) -> T { let len = self.len(); assert!(index < len); unsafe { // infallible let ret; { // the place we are taking from. let ptr = self.as_mut_ptr().offset(index as int); // copy it out, unsafely having a copy of the value on // the stack and in the vector at the same time. ret = ptr::read(ptr as *const T); // Shift everything down to fill in that spot. ptr::copy_memory(ptr, &*ptr.offset(1), len - index - 1); } self.set_len(len - 1); ret } } /// Retains only the elements specified by the predicate. /// /// In other words, remove all elements `e` such that `f(&e)` returns false. /// This method operates in place and preserves the order of the retained /// elements. /// /// # Examples /// /// ``` /// let mut vec = vec![1i, 2, 3, 4]; /// vec.retain(|&x| x%2 == 0); /// assert_eq!(vec, vec![2, 4]); /// ``` #[stable] pub fn retain<F>(&mut self, mut f: F) where F: FnMut(&T) -> bool { let len = self.len(); let mut del = 0u; { let v = self.as_mut_slice(); for i in range(0u, len) { if !f(&v[i]) { del += 1; } else if del > 0 { v.swap(i-del, i); } } } if del > 0 { self.truncate(len - del); } } /// Appends an element to the back of a collection. /// /// # Panics /// /// Panics if the number of elements in the vector overflows a `uint`. /// /// # Examples /// /// ```rust /// let mut vec = vec!(1i, 2); /// vec.push(3); /// assert_eq!(vec, vec!(1, 2, 3)); /// ``` #[inline] #[stable] pub fn push(&mut self, value: T) { if mem::size_of::<T>() == 0 { // zero-size types consume no memory, so we can't rely on the // address space running out self.len = self.len.checked_add(1).expect("length overflow"); unsafe { mem::forget(value); } return } if self.len == self.cap { let old_size = self.cap * mem::size_of::<T>(); let size = max(old_size, 2 * mem::size_of::<T>()) * 2; if old_size > size { panic!("capacity overflow") } unsafe { let ptr = alloc_or_realloc(*self.ptr, old_size, size); if ptr.is_null() { ::alloc::oom() } self.ptr = NonZero::new(ptr); } self.cap = max(self.cap, 2) * 2; } unsafe { let end = (*self.ptr).offset(self.len as int); ptr::write(&mut *end, value); self.len += 1; } } /// Removes the last element from a vector and returns it, or `None` if it is empty. /// /// # Examples /// /// ```rust /// let mut vec = vec![1i, 2, 3]; /// assert_eq!(vec.pop(), Some(3)); /// assert_eq!(vec, vec![1, 2]); /// ``` #[inline] #[stable] pub fn pop(&mut self) -> Option<T> { if self.len == 0 { None } else { unsafe { self.len -= 1; Some(ptr::read(self.get_unchecked(self.len()))) } } } /// Creates a draining iterator that clears the `Vec` and iterates over /// the removed items from start to end. 
/// /// # Examples /// /// ``` /// let mut v = vec!["a".to_string(), "b".to_string()]; /// for s in v.drain() { /// // s has type String, not &String /// println!("{}", s); /// } /// assert!(v.is_empty()); /// ``` #[inline] #[unstable = "matches collection reform specification, waiting for dust to settle"] pub fn drain<'a>(&'a mut self) -> Drain<'a, T> { unsafe { let begin = *self.ptr as *const T; let end = if mem::size_of::<T>() == 0 { (*self.ptr as uint + self.len()) as *const T } else { (*self.ptr).offset(self.len() as int) as *const T }; self.set_len(0); Drain { ptr: begin, end: end, marker: ContravariantLifetime, } } } /// Clears the vector, removing all values. /// /// # Examples /// /// ``` /// let mut v = vec![1i, 2, 3]; /// /// v.clear(); /// /// assert!(v.is_empty()); /// ``` #[inline] #[stable] pub fn clear(&mut self) { self.truncate(0) } /// Returns the number of elements in the vector. /// /// # Examples /// /// ``` /// let a = vec![1i, 2, 3]; /// assert_eq!(a.len(), 3); /// ``` #[inline] #[stable] pub fn len(&self) -> uint { self.len } /// Returns `true` if the vector contains no elements. /// /// # Examples /// /// ``` /// let mut v = Vec::new(); /// assert!(v.is_empty()); /// /// v.push(1i); /// assert!(!v.is_empty()); /// ``` #[stable] pub fn is_empty(&self) -> bool { self.len() == 0 } /// Converts a `Vec<T>` to a `Vec<U>` where `T` and `U` have the same /// size and in case they are not zero-sized the same minimal alignment. /// /// # Panics /// /// Panics if `T` and `U` have differing sizes or are not zero-sized and /// have differing minimal alignments. /// /// # Examples /// /// ``` /// let v = vec![0u, 1, 2]; /// let w = v.map_in_place(|i| i + 3); /// assert_eq!(w.as_slice(), [3, 4, 5].as_slice()); /// /// #[derive(PartialEq, Show)] /// struct Newtype(u8); /// let bytes = vec![0x11, 0x22]; /// let newtyped_bytes = bytes.map_in_place(|x| Newtype(x)); /// assert_eq!(newtyped_bytes.as_slice(), [Newtype(0x11), Newtype(0x22)].as_slice()); /// ``` #[experimental = "API may change to provide stronger guarantees"] pub fn map_in_place<U, F>(self, mut f: F) -> Vec<U> where F: FnMut(T) -> U { // FIXME: Assert statically that the types `T` and `U` have the same // size. assert!(mem::size_of::<T>() == mem::size_of::<U>()); let mut vec = self; if mem::size_of::<T>() != 0 { // FIXME: Assert statically that the types `T` and `U` have the // same minimal alignment in case they are not zero-sized. // These asserts are necessary because the `min_align_of` of the // types are passed to the allocator by `Vec`. assert!(mem::min_align_of::<T>() == mem::min_align_of::<U>()); // This `as int` cast is safe, because the size of the elements of the // vector is not 0, and: // // 1) If the size of the elements in the vector is 1, the `int` may // overflow, but it has the correct bit pattern so that the // `.offset()` function will work. // // Example: // Address space 0x0-0xF. // `u8` array at: 0x1. // Size of `u8` array: 0x8. // Calculated `offset`: -0x8. // After `array.offset(offset)`: 0x9. // (0x1 + 0x8 = 0x1 - 0x8) // // 2) If the size of the elements in the vector is >1, the `uint` -> // `int` conversion can't overflow. let offset = vec.len() as int; let start = vec.as_mut_ptr(); let mut pv = PartialVecNonZeroSized { vec: vec, start_t: start, // This points inside the vector, as the vector has length // `offset`. 
end_t: unsafe { start.offset(offset) }, start_u: start as *mut U, end_u: start as *mut U, }; // start_t // start_u // | // +-+-+-+-+-+-+ // |T|T|T|...|T| // +-+-+-+-+-+-+ // | | // end_u end_t while pv.end_u as *mut T != pv.end_t { unsafe { // start_u start_t // | | // +-+-+-+-+-+-+-+-+-+ // |U|...|U|T|T|...|T| // +-+-+-+-+-+-+-+-+-+ // | | // end_u end_t let t = ptr::read(pv.start_t as *const T); // start_u start_t // | | // +-+-+-+-+-+-+-+-+-+ // |U|...|U|X|T|...|T| // +-+-+-+-+-+-+-+-+-+ // | | // end_u end_t // We must not panic here, one cell is marked as `T` // although it is not `T`. pv.start_t = pv.start_t.offset(1); // start_u start_t // | | // +-+-+-+-+-+-+-+-+-+ // |U|...|U|X|T|...|T| // +-+-+-+-+-+-+-+-+-+ // | | // end_u end_t // We may panic again. // The function given by the user might panic. let u = f(t); ptr::write(pv.end_u, u); // start_u start_t // | | // +-+-+-+-+-+-+-+-+-+ // |U|...|U|U|T|...|T| // +-+-+-+-+-+-+-+-+-+ // | | // end_u end_t // We should not panic here, because that would leak the `U` // pointed to by `end_u`. pv.end_u = pv.end_u.offset(1); // start_u start_t // | | // +-+-+-+-+-+-+-+-+-+ // |U|...|U|U|T|...|T| // +-+-+-+-+-+-+-+-+-+ // | | // end_u end_t // We may panic again. } } // start_u start_t // | | // +-+-+-+-+-+-+ // |U|...|U|U|U| // +-+-+-+-+-+-+ // | // end_t // end_u // Extract `vec` and prevent the destructor of // `PartialVecNonZeroSized` from running. Note that none of the // function calls can panic, thus no resources can be leaked (as the // `vec` member of `PartialVec` is the only one which holds // allocations -- and it is returned from this function. None of // this can panic. unsafe { let vec_len = pv.vec.len(); let vec_cap = pv.vec.capacity(); let vec_ptr = pv.vec.as_mut_ptr() as *mut U; mem::forget(pv); Vec::from_raw_parts(vec_ptr, vec_len, vec_cap) } } else { // Put the `Vec` into the `PartialVecZeroSized` structure and // prevent the destructor of the `Vec` from running. Since the // `Vec` contained zero-sized objects, it did not allocate, so we // are not leaking memory here. let mut pv = PartialVecZeroSized::<T,U> { num_t: vec.len(), num_u: 0, marker_t: InvariantType, marker_u: InvariantType, }; unsafe { mem::forget(vec); } while pv.num_t != 0 { unsafe { // Create a `T` out of thin air and decrement `num_t`. This // must not panic between these steps, as otherwise a // destructor of `T` which doesn't exist runs. let t = mem::uninitialized(); pv.num_t -= 1; // The function given by the user might panic. let u = f(t); // Forget the `U` and increment `num_u`. This increment // cannot overflow the `uint` as we only do this for a // number of times that fits into a `uint` (and start with // `0`). Again, we should not panic between these steps. mem::forget(u); pv.num_u += 1; } } // Create a `Vec` from our `PartialVecZeroSized` and make sure the // destructor of the latter will not run. None of this can panic. let mut result = Vec::new(); unsafe { result.set_len(pv.num_u); mem::forget(pv); } result } } } impl<T: Clone> Vec<T> { /// Resizes the `Vec` in-place so that `len()` is equal to `new_len`. /// /// Calls either `extend()` or `truncate()` depending on whether `new_len` /// is larger than the current value of `len()` or not. 
/// /// # Examples /// /// ``` /// let mut vec = vec!["hello"]; /// vec.resize(3, "world"); /// assert_eq!(vec, vec!["hello", "world", "world"]); /// /// let mut vec = vec![1i, 2, 3, 4]; /// vec.resize(2, 0); /// assert_eq!(vec, vec![1, 2]); /// ``` #[unstable = "matches collection reform specification; waiting for dust to settle"] pub fn resize(&mut self, new_len: uint, value: T) { let len = self.len(); if new_len > len { self.extend(repeat(value).take(new_len - len)); } else { self.truncate(new_len); } } /// Appends all elements in a slice to the `Vec`. /// /// Iterates over the slice `other`, clones each element, and then appends /// it to this `Vec`. The `other` vector is traversed in-order. /// /// # Examples /// /// ``` /// let mut vec = vec![1i]; /// vec.push_all(&[2i, 3, 4]); /// assert_eq!(vec, vec![1, 2, 3, 4]); /// ``` #[inline] #[experimental = "likely to be replaced by a more optimized extend"] pub fn push_all(&mut self, other: &[T]) { self.reserve(other.len()); for i in range(0, other.len()) { let len = self.len(); // Unsafe code so this can be optimised to a memcpy (or something similarly // fast) when T is Copy. LLVM is easily confused, so any extra operations // during the loop can prevent this optimisation. unsafe { ptr::write( self.get_unchecked_mut(len), other.get_unchecked(i).clone()); self.set_len(len + 1); } } } } impl<T: PartialEq> Vec<T> { /// Removes consecutive repeated elements in the vector. /// /// If the vector is sorted, this removes all duplicates. /// /// # Examples /// /// ``` /// let mut vec = vec![1i, 2, 2, 3, 2]; /// /// vec.dedup(); /// /// assert_eq!(vec, vec![1i, 2, 3, 2]); /// ``` #[stable] pub fn dedup(&mut self) { unsafe { // Although we have a mutable reference to `self`, we cannot make // *arbitrary* changes. The `PartialEq` comparisons could panic, so we // must ensure that the vector is in a valid state at all time. // // The way that we handle this is by using swaps; we iterate // over all the elements, swapping as we go so that at the end // the elements we wish to keep are in the front, and those we // wish to reject are at the back. We can then truncate the // vector. This operation is still O(n). // // Example: We start in this state, where `r` represents "next // read" and `w` represents "next_write`. // // r // +---+---+---+---+---+---+ // | 0 | 1 | 1 | 2 | 3 | 3 | // +---+---+---+---+---+---+ // w // // Comparing self[r] against self[w-1], this is not a duplicate, so // we swap self[r] and self[w] (no effect as r==w) and then increment both // r and w, leaving us with: // // r // +---+---+---+---+---+---+ // | 0 | 1 | 1 | 2 | 3 | 3 | // +---+---+---+---+---+---+ // w // // Comparing self[r] against self[w-1], this value is a duplicate, // so we increment `r` but leave everything else unchanged: // // r // +---+---+---+---+---+---+ // | 0 | 1 | 1 | 2 | 3 | 3 | // +---+---+---+---+---+---+ // w // // Comparing self[r] against self[w-1], this is not a duplicate, // so swap self[r] and self[w] and advance r and w: // // r // +---+---+---+---+---+---+ // | 0 | 1 | 2 | 1 | 3 | 3 | // +---+---+---+---+---+---+ // w // // Not a duplicate, repeat: // // r // +---+---+---+---+---+---+ // | 0 | 1 | 2 | 3 | 1 | 3 | // +---+---+---+---+---+---+ // w // // Duplicate, advance r. End of vec. Truncate to w. let ln = self.len(); if ln < 1 { return; } // Avoid bounds checks by using unsafe pointers. 
let p = self.as_mut_ptr(); let mut r = 1; let mut w = 1; while r < ln { let p_r = p.offset(r as int); let p_wm1 = p.offset((w - 1) as int); if *p_r != *p_wm1 { if r != w { let p_w = p_wm1.offset(1); mem::swap(&mut *p_r, &mut *p_w); } w += 1; } r += 1; } self.truncate(w); } } } //////////////////////////////////////////////////////////////////////////////// // Internal methods and functions //////////////////////////////////////////////////////////////////////////////// impl<T> Vec<T> { /// Reserves capacity for exactly `capacity` elements in the given vector. /// /// If the capacity for `self` is already equal to or greater than the /// requested capacity, then no action is taken. fn grow_capacity(&mut self, capacity: uint) { if mem::size_of::<T>() == 0 { return } if capacity > self.cap { let size = capacity.checked_mul(mem::size_of::<T>()) .expect("capacity overflow"); unsafe { let ptr = alloc_or_realloc(*self.ptr, self.cap * mem::size_of::<T>(), size); if ptr.is_null() { ::alloc::oom() } self.ptr = NonZero::new(ptr); } self.cap = capacity; } } } // FIXME: #13996: need a way to mark the return value as `noalias` #[inline(never)] unsafe fn alloc_or_realloc<T>(ptr: *mut T, old_size: uint, size: uint) -> *mut T { if old_size == 0 { allocate(size, mem::min_align_of::<T>()) as *mut T } else { reallocate(ptr as *mut u8, old_size, size, mem::min_align_of::<T>()) as *mut T } } #[inline] unsafe fn dealloc<T>(ptr: *mut T, len: uint) { if mem::size_of::<T>() != 0 { deallocate(ptr as *mut u8, len * mem::size_of::<T>(), mem::min_align_of::<T>()) } } //////////////////////////////////////////////////////////////////////////////// // Common trait implementations for Vec //////////////////////////////////////////////////////////////////////////////// #[unstable] impl<T:Clone> Clone for Vec<T> { fn clone(&self) -> Vec<T> { ::slice::SliceExt::to_vec(self.as_slice()) } fn clone_from(&mut self, other: &Vec<T>) { // drop anything in self that will not be overwritten if self.len() > other.len() { self.truncate(other.len()) } // reuse the contained values' allocations/resources. for (place, thing) in self.iter_mut().zip(other.iter()) { place.clone_from(thing) } // self.len <= other.len due to the truncate above, so the // slice here is always in-bounds. 
let slice = &other[self.len()..]; self.push_all(slice); } } #[cfg(stage0)] impl<S: hash::Writer, T: Hash<S>> Hash<S> for Vec<T> { #[inline] fn hash(&self, state: &mut S) { self.as_slice().hash(state); } } #[cfg(not(stage0))] impl<S: hash::Writer + hash::Hasher, T: Hash<S>> Hash<S> for Vec<T> { #[inline] fn hash(&self, state: &mut S) { self.as_slice().hash(state); } } #[experimental = "waiting on Index stability"] impl<T> Index<uint> for Vec<T> { type Output = T; #[inline] fn index<'a>(&'a self, index: &uint) -> &'a T { &self.as_slice()[*index] } } impl<T> IndexMut<uint> for Vec<T> { type Output = T; #[inline] fn index_mut<'a>(&'a mut self, index: &uint) -> &'a mut T { &mut self.as_mut_slice()[*index] } } impl<T> ops::Index<ops::Range<uint>> for Vec<T> { type Output = [T]; #[inline] fn index(&self, index: &ops::Range<uint>) -> &[T] { self.as_slice().index(index) } } impl<T> ops::Index<ops::RangeTo<uint>> for Vec<T> { type Output = [T]; #[inline] fn index(&self, index: &ops::RangeTo<uint>) -> &[T] { self.as_slice().index(index) } } impl<T> ops::Index<ops::RangeFrom<uint>> for Vec<T> { type Output = [T]; #[inline] fn index(&self, index: &ops::RangeFrom<uint>) -> &[T] { self.as_slice().index(index) } } impl<T> ops::Index<ops::FullRange> for Vec<T> { type Output = [T]; #[inline] fn index(&self, _index: &ops::FullRange) -> &[T] { self.as_slice() } } impl<T> ops::IndexMut<ops::Range<uint>> for Vec<T> { type Output = [T]; #[inline] fn index_mut(&mut self, index: &ops::Range<uint>) -> &mut [T] { self.as_mut_slice().index_mut(index) } } impl<T> ops::IndexMut<ops::RangeTo<uint>> for Vec<T> { type Output = [T]; #[inline] fn index_mut(&mut self, index: &ops::RangeTo<uint>) -> &mut [T] { self.as_mut_slice().index_mut(index) } } impl<T> ops::IndexMut<ops::RangeFrom<uint>> for Vec<T> { type Output = [T]; #[inline] fn index_mut(&mut self, index: &ops::RangeFrom<uint>) -> &mut [T] { self.as_mut_slice().index_mut(index) } } impl<T> ops::IndexMut<ops::FullRange> for Vec<T> { type Output = [T]; #[inline] fn index_mut(&mut self, _index: &ops::FullRange) -> &mut [T] { self.as_mut_slice() } } #[stable] impl<T> ops::Deref for Vec<T> { type Target = [T]; fn deref<'a>(&'a self) -> &'a [T] { self.as_slice() } } #[stable] impl<T> ops::DerefMut for Vec<T> { fn deref_mut<'a>(&'a mut self) -> &'a mut [T] { self.as_mut_slice() } } #[stable] impl<T> FromIterator<T> for Vec<T> { #[inline] fn from_iter<I:Iterator<Item=T>>(mut iterator: I) -> Vec<T> { let (lower, _) = iterator.size_hint(); let mut vector = Vec::with_capacity(lower); for element in iterator { vector.push(element) } vector } } #[experimental = "waiting on Extend stability"] impl<T> Extend<T> for Vec<T> { #[inline] fn extend<I: Iterator<Item=T>>(&mut self, mut iterator: I) { let (lower, _) = iterator.size_hint(); self.reserve(lower); for element in iterator { self.push(element) } } } impl<A, B> PartialEq<Vec<B>> for Vec<A> where A: PartialEq<B> { #[inline] fn eq(&self, other: &Vec<B>) -> bool { PartialEq::eq(&**self, &**other) } #[inline] fn ne(&self, other: &Vec<B>) -> bool { PartialEq::ne(&**self, &**other) } } macro_rules! 
impl_eq { ($lhs:ty, $rhs:ty) => { impl<'b, A, B> PartialEq<$rhs> for $lhs where A: PartialEq<B> { #[inline] fn eq(&self, other: &$rhs) -> bool { PartialEq::eq(&**self, &**other) } #[inline] fn ne(&self, other: &$rhs) -> bool { PartialEq::ne(&**self, &**other) } } impl<'b, A, B> PartialEq<$lhs> for $rhs where B: PartialEq<A> { #[inline] fn eq(&self, other: &$lhs) -> bool { PartialEq::eq(&**self, &**other) } #[inline] fn ne(&self, other: &$lhs) -> bool { PartialEq::ne(&**self, &**other) } } } } impl_eq! { Vec<A>, &'b [B] } impl_eq! { Vec<A>, &'b mut [B] } impl<'a, A, B> PartialEq<Vec<B>> for CowVec<'a, A> where A: PartialEq<B> + Clone { #[inline] fn eq(&self, other: &Vec<B>) -> bool { PartialEq::eq(&**self, &**other) } #[inline] fn ne(&self, other: &Vec<B>) -> bool { PartialEq::ne(&**self, &**other) } } impl<'a, A, B> PartialEq<CowVec<'a, A>> for Vec<B> where A: Clone, B: PartialEq<A> { #[inline] fn eq(&self, other: &CowVec<'a, A>) -> bool { PartialEq::eq(&**self, &**other) } #[inline] fn ne(&self, other: &CowVec<'a, A>) -> bool { PartialEq::ne(&**self, &**other) } } macro_rules! impl_eq_for_cowvec { ($rhs:ty) => { impl<'a, 'b, A, B> PartialEq<$rhs> for CowVec<'a, A> where A: PartialEq<B> + Clone { #[inline] fn eq(&self, other: &$rhs) -> bool { PartialEq::eq(&**self, &**other) } #[inline] fn ne(&self, other: &$rhs) -> bool { PartialEq::ne(&**self, &**other) } } impl<'a, 'b, A, B> PartialEq<CowVec<'a, A>> for $rhs where A: Clone, B: PartialEq<A> { #[inline] fn eq(&self, other: &CowVec<'a, A>) -> bool { PartialEq::eq(&**self, &**other) } #[inline] fn ne(&self, other: &CowVec<'a, A>) -> bool { PartialEq::ne(&**self, &**other) } } } } impl_eq_for_cowvec! { &'b [B] } impl_eq_for_cowvec! { &'b mut [B] } #[unstable = "waiting on PartialOrd stability"] impl<T: PartialOrd> PartialOrd for Vec<T> { #[inline] fn partial_cmp(&self, other: &Vec<T>) -> Option<Ordering> { self.as_slice().partial_cmp(other.as_slice()) } } #[unstable = "waiting on Eq stability"] impl<T: Eq> Eq for Vec<T> {} #[unstable = "waiting on Ord stability"] impl<T: Ord> Ord for Vec<T> { #[inline] fn cmp(&self, other: &Vec<T>) -> Ordering { self.as_slice().cmp(other.as_slice()) } } impl<T> AsSlice<T> for Vec<T> { /// Returns a slice into `self`. /// /// # Examples /// /// ``` /// fn foo(slice: &[int]) {} /// /// let vec = vec![1i, 2]; /// foo(vec.as_slice()); /// ``` #[inline] #[stable] fn as_slice<'a>(&'a self) -> &'a [T] { unsafe { mem::transmute(RawSlice { data: *self.ptr as *const T, len: self.len }) } } } #[unstable = "recent addition, needs more experience"] impl<'a, T: Clone> Add<&'a [T]> for Vec<T> { type Output = Vec<T>; #[inline] fn add(mut self, rhs: &[T]) -> Vec<T> { self.push_all(rhs); self } } #[unsafe_destructor] #[stable] impl<T> Drop for Vec<T> { fn drop(&mut self) { // This is (and should always remain) a no-op if the fields are // zeroed (when moving out, because of #[unsafe_no_drop_flag]). 
if self.cap != 0 { unsafe { for x in self.iter() { ptr::read(x); } dealloc(*self.ptr, self.cap) } } } } #[stable] impl<T> Default for Vec<T> { #[stable] fn default() -> Vec<T> { Vec::new() } } #[experimental = "waiting on Show stability"] impl<T: fmt::Show> fmt::Show for Vec<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Show::fmt(self.as_slice(), f) } } impl<'a> fmt::Writer for Vec<u8> { fn write_str(&mut self, s: &str) -> fmt::Result { self.push_all(s.as_bytes()); Ok(()) } } //////////////////////////////////////////////////////////////////////////////// // Clone-on-write //////////////////////////////////////////////////////////////////////////////// #[experimental = "unclear how valuable this alias is"] /// A clone-on-write vector pub type CowVec<'a, T> = Cow<'a, Vec<T>, [T]>; #[unstable] impl<'a, T> FromIterator<T> for CowVec<'a, T> where T: Clone { fn from_iter<I: Iterator<Item=T>>(it: I) -> CowVec<'a, T> { Cow::Owned(FromIterator::from_iter(it)) } } impl<'a, T: 'a> IntoCow<'a, Vec<T>, [T]> for Vec<T> where T: Clone { fn into_cow(self) -> CowVec<'a, T> { Cow::Owned(self) } } impl<'a, T> IntoCow<'a, Vec<T>, [T]> for &'a [T] where T: Clone { fn into_cow(self) -> CowVec<'a, T> { Cow::Borrowed(self) } } //////////////////////////////////////////////////////////////////////////////// // Iterators //////////////////////////////////////////////////////////////////////////////// /// An iterator that moves out of a vector. #[stable] pub struct IntoIter<T> { allocation: *mut T, // the block of memory allocated for the vector cap: uint, // the capacity of the vector ptr: *const T, end: *const T } impl<T> IntoIter<T> { #[inline] /// Drops all items that have not yet been moved and returns the empty vector. #[unstable] pub fn into_inner(mut self) -> Vec<T> { unsafe { for _x in self { } let IntoIter { allocation, cap, ptr: _ptr, end: _end } = self; mem::forget(self); Vec { ptr: NonZero::new(allocation), cap: cap, len: 0 } } } } #[stable] impl<T> Iterator for IntoIter<T> { type Item = T; #[inline] fn next<'a>(&'a mut self) -> Option<T> { unsafe { if self.ptr == self.end { None } else { if mem::size_of::<T>() == 0 { // purposefully don't use 'ptr.offset' because for // vectors with 0-size elements this would return the // same pointer. self.ptr = mem::transmute(self.ptr as uint + 1); // Use a non-null pointer value Some(ptr::read(mem::transmute(1u))) } else { let old = self.ptr; self.ptr = self.ptr.offset(1); Some(ptr::read(old)) } } } } #[inline] fn size_hint(&self) -> (uint, Option<uint>) { let diff = (self.end as uint) - (self.ptr as uint); let size = mem::size_of::<T>(); let exact = diff / (if size == 0 {1} else {size}); (exact, Some(exact)) } } #[stable] impl<T> DoubleEndedIterator for IntoIter<T> { #[inline] fn next_back<'a>(&'a mut self) -> Option<T> { unsafe { if self.end == self.ptr { None } else { if mem::size_of::<T>() == 0 { // See above for why 'ptr.offset' isn't used self.end = mem::transmute(self.end as uint - 1); // Use a non-null pointer value Some(ptr::read(mem::transmute(1u))) } else { self.end = self.end.offset(-1); Some(ptr::read(mem::transmute(self.end))) } } } } } #[stable] impl<T> ExactSizeIterator for IntoIter<T> {} #[unsafe_destructor] #[stable] impl<T> Drop for IntoIter<T> { fn drop(&mut self) { // destroy the remaining elements if self.cap != 0 { for _x in *self {} unsafe { dealloc(self.allocation, self.cap); } } } } /// An iterator that drains a vector. 
#[unsafe_no_drop_flag] #[unstable = "recently added as part of collections reform 2"] pub struct Drain<'a, T> { ptr: *const T, end: *const T, marker: ContravariantLifetime<'a>, } #[stable] impl<'a, T> Iterator for Drain<'a, T> { type Item = T; #[inline] fn next(&mut self) -> Option<T> { unsafe { if self.ptr == self.end { None } else { if mem::size_of::<T>() == 0 { // purposefully don't use 'ptr.offset' because for // vectors with 0-size elements this would return the // same pointer. self.ptr = mem::transmute(self.ptr as uint + 1); // Use a non-null pointer value Some(ptr::read(mem::transmute(1u))) } else { let old = self.ptr; self.ptr = self.ptr.offset(1); Some(ptr::read(old)) } } } } #[inline] fn size_hint(&self) -> (uint, Option<uint>) { let diff = (self.end as uint) - (self.ptr as uint); let size = mem::size_of::<T>(); let exact = diff / (if size == 0 {1} else {size}); (exact, Some(exact)) } } #[stable] impl<'a, T> DoubleEndedIterator for Drain<'a, T> { #[inline] fn next_back(&mut self) -> Option<T> { unsafe { if self.end == self.ptr { None } else { if mem::size_of::<T>() == 0 { // See above for why 'ptr.offset' isn't used self.end = mem::transmute(self.end as uint - 1); // Use a non-null pointer value Some(ptr::read(mem::transmute(1u))) } else { self.end = self.end.offset(-1); Some(ptr::read(self.end)) } } } } } #[stable] impl<'a, T> ExactSizeIterator for Drain<'a, T> {} #[unsafe_destructor] #[stable] impl<'a, T> Drop for Drain<'a, T> { fn drop(&mut self) { // self.ptr == self.end == null if drop has already been called, // so we can use #[unsafe_no_drop_flag]. // destroy the remaining elements for _x in *self {} } } //////////////////////////////////////////////////////////////////////////////// // Conversion from &[T] to &Vec<T> //////////////////////////////////////////////////////////////////////////////// /// Wrapper type providing a `&Vec<T>` reference via `Deref`. #[experimental] pub struct DerefVec<'a, T> { x: Vec<T>, l: ContravariantLifetime<'a> } #[experimental] impl<'a, T> Deref for DerefVec<'a, T> { type Target = Vec<T>; fn deref<'b>(&'b self) -> &'b Vec<T> { &self.x } } // Prevent the inner `Vec<T>` from attempting to deallocate memory. #[unsafe_destructor] #[stable] impl<'a, T> Drop for DerefVec<'a, T> { fn drop(&mut self) { self.x.len = 0; self.x.cap = 0; } } /// Convert a slice to a wrapper type providing a `&Vec<T>` reference. #[experimental] pub fn as_vec<'a, T>(x: &'a [T]) -> DerefVec<'a, T> { unsafe { DerefVec { x: Vec::from_raw_parts(x.as_ptr() as *mut T, x.len(), x.len()), l: ContravariantLifetime::<'a> } } } //////////////////////////////////////////////////////////////////////////////// // Partial vec, used for map_in_place //////////////////////////////////////////////////////////////////////////////// /// An owned, partially type-converted vector of elements with non-zero size. /// /// `T` and `U` must have the same, non-zero size. They must also have the same /// alignment. /// /// When the destructor of this struct runs, all `U`s from `start_u` (incl.) to /// `end_u` (excl.) and all `T`s from `start_t` (incl.) to `end_t` (excl.) are /// destructed. Additionally the underlying storage of `vec` will be freed. struct PartialVecNonZeroSized<T,U> { vec: Vec<T>, start_u: *mut U, end_u: *mut U, start_t: *mut T, end_t: *mut T, } /// An owned, partially type-converted vector of zero-sized elements. /// /// When the destructor of this struct runs, all `num_t` `T`s and `num_u` `U`s /// are destructed. 
struct PartialVecZeroSized<T,U> { num_t: uint, num_u: uint, marker_t: InvariantType<T>, marker_u: InvariantType<U>, } #[unsafe_destructor] impl<T,U> Drop for PartialVecNonZeroSized<T,U> { fn drop(&mut self) { unsafe { // `vec` hasn't been modified until now. As it has a length // currently, this would run destructors of `T`s which might not be // there. So at first, set `vec`s length to `0`. This must be done // at first to remain memory-safe as the destructors of `U` or `T` // might cause unwinding where `vec`s destructor would be executed. self.vec.set_len(0); // We have instances of `U`s and `T`s in `vec`. Destruct them. while self.start_u != self.end_u { let _ = ptr::read(self.start_u as *const U); // Run a `U` destructor. self.start_u = self.start_u.offset(1); } while self.start_t != self.end_t { let _ = ptr::read(self.start_t as *const T); // Run a `T` destructor. self.start_t = self.start_t.offset(1); } // After this destructor ran, the destructor of `vec` will run, // deallocating the underlying memory. } } } #[unsafe_destructor] impl<T,U> Drop for PartialVecZeroSized<T,U> { fn drop(&mut self) { unsafe { // Destruct the instances of `T` and `U` this struct owns. while self.num_t != 0 { let _: T = mem::uninitialized(); // Run a `T` destructor. self.num_t -= 1; } while self.num_u != 0 { let _: U = mem::uninitialized(); // Run a `U` destructor. self.num_u -= 1; } } } } #[cfg(test)] mod tests { use prelude::*; use core::mem::size_of; use core::iter::repeat; use core::ops::FullRange; use test::Bencher; use super::as_vec; struct DropCounter<'a> { count: &'a mut int } #[unsafe_destructor] impl<'a> Drop for DropCounter<'a> { fn drop(&mut self) { *self.count += 1; } } #[test] fn test_as_vec() { let xs = [1u8, 2u8, 3u8]; assert_eq!(as_vec(&xs).as_slice(), xs); } #[test] fn test_as_vec_dtor() { let (mut count_x, mut count_y) = (0, 0); { let xs = &[DropCounter { count: &mut count_x }, DropCounter { count: &mut count_y }]; assert_eq!(as_vec(xs).len(), 2); } assert_eq!(count_x, 1); assert_eq!(count_y, 1); } #[test] fn test_small_vec_struct() { assert!(size_of::<Vec<u8>>() == size_of::<uint>() * 3); } #[test] fn test_double_drop() { struct TwoVec<T> { x: Vec<T>, y: Vec<T> } let (mut count_x, mut count_y) = (0, 0); { let mut tv = TwoVec { x: Vec::new(), y: Vec::new() }; tv.x.push(DropCounter {count: &mut count_x}); tv.y.push(DropCounter {count: &mut count_y}); // If Vec had a drop flag, here is where it would be zeroed. // Instead, it should rely on its internal state to prevent // doing anything significant when dropped multiple times. drop(tv.x); // Here tv goes out of scope, tv.y should be dropped, but not tv.x. 
} assert_eq!(count_x, 1); assert_eq!(count_y, 1); } #[test] fn test_reserve() { let mut v = Vec::new(); assert_eq!(v.capacity(), 0); v.reserve(2); assert!(v.capacity() >= 2); for i in range(0i, 16) { v.push(i); } assert!(v.capacity() >= 16); v.reserve(16); assert!(v.capacity() >= 32); v.push(16); v.reserve(16); assert!(v.capacity() >= 33) } #[test] fn test_extend() { let mut v = Vec::new(); let mut w = Vec::new(); v.extend(range(0i, 3)); for i in range(0i, 3) { w.push(i) } assert_eq!(v, w); v.extend(range(3i, 10)); for i in range(3i, 10) { w.push(i) } assert_eq!(v, w); } #[test] fn test_slice_from_mut() { let mut values = vec![1u8,2,3,4,5]; { let slice = values.slice_from_mut(2); assert!(slice == [3, 4, 5]); for p in slice.iter_mut() { *p += 2; } } assert!(values == [1, 2, 5, 6, 7]); } #[test] fn test_slice_to_mut() { let mut values = vec![1u8,2,3,4,5]; { let slice = values.slice_to_mut(2); assert!(slice == [1, 2]); for p in slice.iter_mut() { *p += 1; } } assert!(values == [2, 3, 3, 4, 5]); } #[test] fn test_split_at_mut() { let mut values = vec![1u8,2,3,4,5]; { let (left, right) = values.split_at_mut(2); { let left: &[_] = left; assert!(&left[..left.len()] == &[1, 2][]); } for p in left.iter_mut() { *p += 1; } { let right: &[_] = right; assert!(&right[..right.len()] == &[3, 4, 5][]); } for p in right.iter_mut() { *p += 2; } } assert!(values == vec![2u8, 3, 5, 6, 7]); } #[test] fn test_clone() { let v: Vec<int> = vec!(); let w = vec!(1i, 2, 3); assert_eq!(v, v.clone()); let z = w.clone(); assert_eq!(w, z); // they should be disjoint in memory. assert!(w.as_ptr() != z.as_ptr()) } #[test] fn test_clone_from() { let mut v = vec!(); let three = vec!(box 1i, box 2, box 3); let two = vec!(box 4i, box 5); // zero, long v.clone_from(&three); assert_eq!(v, three); // equal v.clone_from(&three); assert_eq!(v, three); // long, short v.clone_from(&two); assert_eq!(v, two); // short, long v.clone_from(&three); assert_eq!(v, three) } #[test] fn test_retain() { let mut vec = vec![1u, 2, 3, 4]; vec.retain(|&x| x % 2 == 0); assert!(vec == vec![2u, 4]); } #[test] fn zero_sized_values() { let mut v = Vec::new(); assert_eq!(v.len(), 0); v.push(()); assert_eq!(v.len(), 1); v.push(()); assert_eq!(v.len(), 2); assert_eq!(v.pop(), Some(())); assert_eq!(v.pop(), Some(())); assert_eq!(v.pop(), None); assert_eq!(v.iter().count(), 0); v.push(()); assert_eq!(v.iter().count(), 1); v.push(()); assert_eq!(v.iter().count(), 2); for &() in v.iter() {} assert_eq!(v.iter_mut().count(), 2); v.push(()); assert_eq!(v.iter_mut().count(), 3); v.push(()); assert_eq!(v.iter_mut().count(), 4); for &mut () in v.iter_mut() {} unsafe { v.set_len(0); } assert_eq!(v.iter_mut().count(), 0); } #[test] fn test_partition() { assert_eq!(vec![].into_iter().partition(|x: &int| *x < 3), (vec![], vec![])); assert_eq!(vec![1i, 2, 3].into_iter().partition(|x: &int| *x < 4), (vec![1, 2, 3], vec![])); assert_eq!(vec![1i, 2, 3].into_iter().partition(|x: &int| *x < 2), (vec![1], vec![2, 3])); assert_eq!(vec![1i, 2, 3].into_iter().partition(|x: &int| *x < 0), (vec![], vec![1, 2, 3])); } #[test] fn test_zip_unzip() { let z1 = vec![(1i, 4i), (2, 5), (3, 6)]; let (left, right): (Vec<_>, Vec<_>) = z1.iter().map(|&x| x).unzip(); assert_eq!((1, 4), (left[0], right[0])); assert_eq!((2, 5), (left[1], right[1])); assert_eq!((3, 6), (left[2], right[2])); } #[test] fn test_unsafe_ptrs() { unsafe { // Test on-stack copy-from-buf. 
let a = [1i, 2, 3]; let ptr = a.as_ptr(); let b = Vec::from_raw_buf(ptr, 3u); assert_eq!(b, vec![1, 2, 3]); // Test on-heap copy-from-buf. let c = vec![1i, 2, 3, 4, 5]; let ptr = c.as_ptr(); let d = Vec::from_raw_buf(ptr, 5u); assert_eq!(d, vec![1, 2, 3, 4, 5]); } } #[test] fn test_vec_truncate_drop() { static mut drops: uint = 0; struct Elem(int); impl Drop for Elem { fn drop(&mut self) { unsafe { drops += 1; } } } let mut v = vec![Elem(1), Elem(2), Elem(3), Elem(4), Elem(5)]; assert_eq!(unsafe { drops }, 0); v.truncate(3); assert_eq!(unsafe { drops }, 2); v.truncate(0); assert_eq!(unsafe { drops }, 5); } #[test] #[should_fail] fn test_vec_truncate_fail() { struct BadElem(int); impl Drop for BadElem { fn drop(&mut self) { let BadElem(ref mut x) = *self; if *x == 0xbadbeef { panic!("BadElem panic: 0xbadbeef") } } } let mut v = vec![BadElem(1), BadElem(2), BadElem(0xbadbeef), BadElem(4)]; v.truncate(0); } #[test] fn test_index() { let vec = vec!(1i, 2, 3); assert!(vec[1] == 2); } #[test] #[should_fail] fn test_index_out_of_bounds() { let vec = vec!(1i, 2, 3); let _ = vec[3]; } #[test] #[should_fail] fn test_slice_out_of_bounds_1() { let x: Vec<int> = vec![1, 2, 3, 4, 5]; &x[(-1)..]; } #[test] #[should_fail] fn test_slice_out_of_bounds_2() { let x: Vec<int> = vec![1, 2, 3, 4, 5]; &x[..6]; } #[test] #[should_fail] fn test_slice_out_of_bounds_3() { let x: Vec<int> = vec![1, 2, 3, 4, 5]; &x[(-1)..4]; } #[test] #[should_fail] fn test_slice_out_of_bounds_4() { let x: Vec<int> = vec![1, 2, 3, 4, 5]; &x[1..6]; } #[test] #[should_fail] fn test_slice_out_of_bounds_5() { let x: Vec<int> = vec![1, 2, 3, 4, 5]; &x[3..2]; } #[test] #[should_fail] fn test_swap_remove_empty() { let mut vec: Vec<uint> = vec!(); vec.swap_remove(0); } #[test] fn test_move_iter_unwrap() { let mut vec: Vec<uint> = Vec::with_capacity(7); vec.push(1); vec.push(2); let ptr = vec.as_ptr(); vec = vec.into_iter().into_inner(); assert_eq!(vec.as_ptr(), ptr); assert_eq!(vec.capacity(), 7); assert_eq!(vec.len(), 0); } #[test] #[should_fail] fn test_map_in_place_incompatible_types_fail() { let v = vec![0u, 1, 2]; v.map_in_place(|_| ()); } #[test] fn test_map_in_place() { let v = vec![0u, 1, 2]; assert_eq!(v.map_in_place(|i: uint| i as int - 1), [-1i, 0, 1]); } #[test] fn test_map_in_place_zero_sized() { let v = vec![(), ()]; #[derive(PartialEq, Show)] struct ZeroSized; assert_eq!(v.map_in_place(|_| ZeroSized), [ZeroSized, ZeroSized]); } #[test] fn test_map_in_place_zero_drop_count() { use std::sync::atomic::{AtomicUint, Ordering, ATOMIC_UINT_INIT}; #[derive(Clone, PartialEq, Show)] struct Nothing; impl Drop for Nothing { fn drop(&mut self) { } } #[derive(Clone, PartialEq, Show)] struct ZeroSized; impl Drop for ZeroSized { fn drop(&mut self) { DROP_COUNTER.fetch_add(1, Ordering::Relaxed); } } const NUM_ELEMENTS: uint = 2; static DROP_COUNTER: AtomicUint = ATOMIC_UINT_INIT; let v = repeat(Nothing).take(NUM_ELEMENTS).collect::<Vec<_>>(); DROP_COUNTER.store(0, Ordering::Relaxed); let v = v.map_in_place(|_| ZeroSized); assert_eq!(DROP_COUNTER.load(Ordering::Relaxed), 0); drop(v); assert_eq!(DROP_COUNTER.load(Ordering::Relaxed), NUM_ELEMENTS); } #[test] fn test_move_items() { let vec = vec![1, 2, 3]; let mut vec2 : Vec<i32> = vec![]; for i in vec.into_iter() { vec2.push(i); } assert!(vec2 == vec![1, 2, 3]); } #[test] fn test_move_items_reverse() { let vec = vec![1, 2, 3]; let mut vec2 : Vec<i32> = vec![]; for i in vec.into_iter().rev() { vec2.push(i); } assert!(vec2 == vec![3, 2, 1]); } #[test] fn test_move_items_zero_sized() { let vec = 
vec![(), (), ()]; let mut vec2 : Vec<()> = vec![]; for i in vec.into_iter() { vec2.push(i); } assert!(vec2 == vec![(), (), ()]); } #[test] fn test_drain_items() { let mut vec = vec![1, 2, 3]; let mut vec2: Vec<i32> = vec![]; for i in vec.drain() { vec2.push(i); } assert_eq!(vec, []); assert_eq!(vec2, [ 1, 2, 3 ]); } #[test] fn test_drain_items_reverse() { let mut vec = vec![1, 2, 3]; let mut vec2: Vec<i32> = vec![]; for i in vec.drain().rev() { vec2.push(i); } assert_eq!(vec, []); assert_eq!(vec2, [ 3, 2, 1 ]); } #[test] fn test_drain_items_zero_sized() { let mut vec = vec![(), (), ()]; let mut vec2: Vec<()> = vec![]; for i in vec.drain() { vec2.push(i); } assert_eq!(vec, []); assert_eq!(vec2, [(), (), ()]); } #[test] fn test_into_boxed_slice() { let xs = vec![1u, 2, 3]; let ys = xs.into_boxed_slice(); assert_eq!(ys.as_slice(), [1u, 2, 3]); } #[bench] fn bench_new(b: &mut Bencher) { b.iter(|| { let v: Vec<uint> = Vec::new(); assert_eq!(v.len(), 0); assert_eq!(v.capacity(), 0); }) } fn do_bench_with_capacity(b: &mut Bencher, src_len: uint) { b.bytes = src_len as u64; b.iter(|| { let v: Vec<uint> = Vec::with_capacity(src_len); assert_eq!(v.len(), 0); assert_eq!(v.capacity(), src_len); }) } #[bench] fn bench_with_capacity_0000(b: &mut Bencher) { do_bench_with_capacity(b, 0) } #[bench] fn bench_with_capacity_0010(b: &mut Bencher) { do_bench_with_capacity(b, 10) } #[bench] fn bench_with_capacity_0100(b: &mut Bencher) { do_bench_with_capacity(b, 100) } #[bench] fn bench_with_capacity_1000(b: &mut Bencher) { do_bench_with_capacity(b, 1000) } fn do_bench_from_fn(b: &mut Bencher, src_len: uint) { b.bytes = src_len as u64; b.iter(|| { let dst = range(0, src_len).collect::<Vec<_>>(); assert_eq!(dst.len(), src_len); assert!(dst.iter().enumerate().all(|(i, x)| i == *x)); }) } #[bench] fn bench_from_fn_0000(b: &mut Bencher) { do_bench_from_fn(b, 0) } #[bench] fn bench_from_fn_0010(b: &mut Bencher) { do_bench_from_fn(b, 10) } #[bench] fn bench_from_fn_0100(b: &mut Bencher) { do_bench_from_fn(b, 100) } #[bench] fn bench_from_fn_1000(b: &mut Bencher) { do_bench_from_fn(b, 1000) } fn do_bench_from_elem(b: &mut Bencher, src_len: uint) { b.bytes = src_len as u64; b.iter(|| { let dst: Vec<uint> = repeat(5).take(src_len).collect(); assert_eq!(dst.len(), src_len); assert!(dst.iter().all(|x| *x == 5)); }) } #[bench] fn bench_from_elem_0000(b: &mut Bencher) { do_bench_from_elem(b, 0) } #[bench] fn bench_from_elem_0010(b: &mut Bencher) { do_bench_from_elem(b, 10) } #[bench] fn bench_from_elem_0100(b: &mut Bencher) { do_bench_from_elem(b, 100) } #[bench] fn bench_from_elem_1000(b: &mut Bencher) { do_bench_from_elem(b, 1000) } fn do_bench_from_slice(b: &mut Bencher, src_len: uint) { let src: Vec<uint> = FromIterator::from_iter(range(0, src_len)); b.bytes = src_len as u64; b.iter(|| { let dst = src.clone()[].to_vec(); assert_eq!(dst.len(), src_len); assert!(dst.iter().enumerate().all(|(i, x)| i == *x)); }); } #[bench] fn bench_from_slice_0000(b: &mut Bencher) { do_bench_from_slice(b, 0) } #[bench] fn bench_from_slice_0010(b: &mut Bencher) { do_bench_from_slice(b, 10) } #[bench] fn bench_from_slice_0100(b: &mut Bencher) { do_bench_from_slice(b, 100) } #[bench] fn bench_from_slice_1000(b: &mut Bencher) { do_bench_from_slice(b, 1000) } fn do_bench_from_iter(b: &mut Bencher, src_len: uint) { let src: Vec<uint> = FromIterator::from_iter(range(0, src_len)); b.bytes = src_len as u64; b.iter(|| { let dst: Vec<uint> = FromIterator::from_iter(src.clone().into_iter()); assert_eq!(dst.len(), src_len); 
assert!(dst.iter().enumerate().all(|(i, x)| i == *x)); }); } #[bench] fn bench_from_iter_0000(b: &mut Bencher) { do_bench_from_iter(b, 0) } #[bench] fn bench_from_iter_0010(b: &mut Bencher) { do_bench_from_iter(b, 10) } #[bench] fn bench_from_iter_0100(b: &mut Bencher) { do_bench_from_iter(b, 100) } #[bench] fn bench_from_iter_1000(b: &mut Bencher) { do_bench_from_iter(b, 1000) } fn do_bench_extend(b: &mut Bencher, dst_len: uint, src_len: uint) { let dst: Vec<uint> = FromIterator::from_iter(range(0, dst_len)); let src: Vec<uint> = FromIterator::from_iter(range(dst_len, dst_len + src_len)); b.bytes = src_len as u64; b.iter(|| { let mut dst = dst.clone(); dst.extend(src.clone().into_iter()); assert_eq!(dst.len(), dst_len + src_len); assert!(dst.iter().enumerate().all(|(i, x)| i == *x)); }); } #[bench] fn bench_extend_0000_0000(b: &mut Bencher) { do_bench_extend(b, 0, 0) } #[bench] fn bench_extend_0000_0010(b: &mut Bencher) { do_bench_extend(b, 0, 10) } #[bench] fn bench_extend_0000_0100(b: &mut Bencher) { do_bench_extend(b, 0, 100) } #[bench] fn bench_extend_0000_1000(b: &mut Bencher) { do_bench_extend(b, 0, 1000) } #[bench] fn bench_extend_0010_0010(b: &mut Bencher) { do_bench_extend(b, 10, 10) } #[bench] fn bench_extend_0100_0100(b: &mut Bencher) { do_bench_extend(b, 100, 100) } #[bench] fn bench_extend_1000_1000(b: &mut Bencher) { do_bench_extend(b, 1000, 1000) } fn do_bench_push_all(b: &mut Bencher, dst_len: uint, src_len: uint) { let dst: Vec<uint> = FromIterator::from_iter(range(0, dst_len)); let src: Vec<uint> = FromIterator::from_iter(range(dst_len, dst_len + src_len)); b.bytes = src_len as u64; b.iter(|| { let mut dst = dst.clone(); dst.push_all(src.as_slice()); assert_eq!(dst.len(), dst_len + src_len); assert!(dst.iter().enumerate().all(|(i, x)| i == *x)); }); } #[bench] fn bench_push_all_0000_0000(b: &mut Bencher) { do_bench_push_all(b, 0, 0) } #[bench] fn bench_push_all_0000_0010(b: &mut Bencher) { do_bench_push_all(b, 0, 10) } #[bench] fn bench_push_all_0000_0100(b: &mut Bencher) { do_bench_push_all(b, 0, 100) } #[bench] fn bench_push_all_0000_1000(b: &mut Bencher) { do_bench_push_all(b, 0, 1000) } #[bench] fn bench_push_all_0010_0010(b: &mut Bencher) { do_bench_push_all(b, 10, 10) } #[bench] fn bench_push_all_0100_0100(b: &mut Bencher) { do_bench_push_all(b, 100, 100) } #[bench] fn bench_push_all_1000_1000(b: &mut Bencher) { do_bench_push_all(b, 1000, 1000) } fn do_bench_push_all_move(b: &mut Bencher, dst_len: uint, src_len: uint) { let dst: Vec<uint> = FromIterator::from_iter(range(0u, dst_len)); let src: Vec<uint> = FromIterator::from_iter(range(dst_len, dst_len + src_len)); b.bytes = src_len as u64; b.iter(|| { let mut dst = dst.clone(); dst.extend(src.clone().into_iter()); assert_eq!(dst.len(), dst_len + src_len); assert!(dst.iter().enumerate().all(|(i, x)| i == *x)); }); } #[bench] fn bench_push_all_move_0000_0000(b: &mut Bencher) { do_bench_push_all_move(b, 0, 0) } #[bench] fn bench_push_all_move_0000_0010(b: &mut Bencher) { do_bench_push_all_move(b, 0, 10) } #[bench] fn bench_push_all_move_0000_0100(b: &mut Bencher) { do_bench_push_all_move(b, 0, 100) } #[bench] fn bench_push_all_move_0000_1000(b: &mut Bencher) { do_bench_push_all_move(b, 0, 1000) } #[bench] fn bench_push_all_move_0010_0010(b: &mut Bencher) { do_bench_push_all_move(b, 10, 10) } #[bench] fn bench_push_all_move_0100_0100(b: &mut Bencher) { do_bench_push_all_move(b, 100, 100) } #[bench] fn bench_push_all_move_1000_1000(b: &mut Bencher) { do_bench_push_all_move(b, 1000, 1000) } fn do_bench_clone(b: &mut 
Bencher, src_len: uint) { let src: Vec<uint> = FromIterator::from_iter(range(0, src_len)); b.bytes = src_len as u64; b.iter(|| { let dst = src.clone(); assert_eq!(dst.len(), src_len); assert!(dst.iter().enumerate().all(|(i, x)| i == *x)); }); } #[bench] fn bench_clone_0000(b: &mut Bencher) { do_bench_clone(b, 0) } #[bench] fn bench_clone_0010(b: &mut Bencher) { do_bench_clone(b, 10) } #[bench] fn bench_clone_0100(b: &mut Bencher) { do_bench_clone(b, 100) } #[bench] fn bench_clone_1000(b: &mut Bencher) { do_bench_clone(b, 1000) } fn do_bench_clone_from(b: &mut Bencher, times: uint, dst_len: uint, src_len: uint) { let dst: Vec<uint> = FromIterator::from_iter(range(0, src_len)); let src: Vec<uint> = FromIterator::from_iter(range(dst_len, dst_len + src_len)); b.bytes = (times * src_len) as u64; b.iter(|| { let mut dst = dst.clone(); for _ in range(0, times) { dst.clone_from(&src); assert_eq!(dst.len(), src_len); assert!(dst.iter().enumerate().all(|(i, x)| dst_len + i == *x)); } }); } #[bench] fn bench_clone_from_01_0000_0000(b: &mut Bencher) { do_bench_clone_from(b, 1, 0, 0) } #[bench] fn bench_clone_from_01_0000_0010(b: &mut Bencher) { do_bench_clone_from(b, 1, 0, 10) } #[bench] fn bench_clone_from_01_0000_0100(b: &mut Bencher) { do_bench_clone_from(b, 1, 0, 100) } #[bench] fn bench_clone_from_01_0000_1000(b: &mut Bencher) { do_bench_clone_from(b, 1, 0, 1000) } #[bench] fn bench_clone_from_01_0010_0010(b: &mut Bencher) { do_bench_clone_from(b, 1, 10, 10) } #[bench] fn bench_clone_from_01_0100_0100(b: &mut Bencher) { do_bench_clone_from(b, 1, 100, 100) } #[bench] fn bench_clone_from_01_1000_1000(b: &mut Bencher) { do_bench_clone_from(b, 1, 1000, 1000) } #[bench] fn bench_clone_from_01_0010_0100(b: &mut Bencher) { do_bench_clone_from(b, 1, 10, 100) } #[bench] fn bench_clone_from_01_0100_1000(b: &mut Bencher) { do_bench_clone_from(b, 1, 100, 1000) } #[bench] fn bench_clone_from_01_0010_0000(b: &mut Bencher) { do_bench_clone_from(b, 1, 10, 0) } #[bench] fn bench_clone_from_01_0100_0010(b: &mut Bencher) { do_bench_clone_from(b, 1, 100, 10) } #[bench] fn bench_clone_from_01_1000_0100(b: &mut Bencher) { do_bench_clone_from(b, 1, 1000, 100) } #[bench] fn bench_clone_from_10_0000_0000(b: &mut Bencher) { do_bench_clone_from(b, 10, 0, 0) } #[bench] fn bench_clone_from_10_0000_0010(b: &mut Bencher) { do_bench_clone_from(b, 10, 0, 10) } #[bench] fn bench_clone_from_10_0000_0100(b: &mut Bencher) { do_bench_clone_from(b, 10, 0, 100) } #[bench] fn bench_clone_from_10_0000_1000(b: &mut Bencher) { do_bench_clone_from(b, 10, 0, 1000) } #[bench] fn bench_clone_from_10_0010_0010(b: &mut Bencher) { do_bench_clone_from(b, 10, 10, 10) } #[bench] fn bench_clone_from_10_0100_0100(b: &mut Bencher) { do_bench_clone_from(b, 10, 100, 100) } #[bench] fn bench_clone_from_10_1000_1000(b: &mut Bencher) { do_bench_clone_from(b, 10, 1000, 1000) } #[bench] fn bench_clone_from_10_0010_0100(b: &mut Bencher) { do_bench_clone_from(b, 10, 10, 100) } #[bench] fn bench_clone_from_10_0100_1000(b: &mut Bencher) { do_bench_clone_from(b, 10, 100, 1000) } #[bench] fn bench_clone_from_10_0010_0000(b: &mut Bencher) { do_bench_clone_from(b, 10, 10, 0) } #[bench] fn bench_clone_from_10_0100_0010(b: &mut Bencher) { do_bench_clone_from(b, 10, 100, 10) } #[bench] fn bench_clone_from_10_1000_0100(b: &mut Bencher) { do_bench_clone_from(b, 10, 1000, 100) } }
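The `dedup` implementation above walks the buffer with two raw-pointer cursors, a read position `r` and a write position `w`, swapping kept elements forward and truncating to `w` at the end. Below is a minimal sketch of the same read/write two-pointer technique in safe, present-day Rust (the `dedup_by_swap` name and the `main` driver are illustrative, not part of the original source):

fn dedup_by_swap<T: PartialEq>(v: &mut Vec<T>) {
    let len = v.len();
    if len < 2 {
        return;
    }
    // `w` is the next write slot; everything before it is the retained,
    // deduplicated prefix. `r` scans ahead over the original elements.
    let mut w = 1;
    for r in 1..len {
        if v[r] != v[w - 1] {
            // Not a duplicate of the last kept element: swap it into the
            // prefix and grow the prefix by one.
            v.swap(r, w);
            w += 1;
        }
    }
    // Rejected duplicates have been swapped behind `w`; drop them.
    v.truncate(w);
}

fn main() {
    let mut v = vec![1, 2, 2, 3, 2];
    dedup_by_swap(&mut v);
    assert_eq!(v, vec![1, 2, 3, 2]); // matches the `dedup` doc example
}

The swaps keep the operation O(n) and leave the vector in a valid state between every comparison, which is why the original can tolerate a panicking `PartialEq` implementation.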
avg_line_length: 28.888409
max_line_length: 100
alphanum_fraction: 0.483801
hexsha: 8fd42ed498012ef2f137cfd15cff9df0126ba8fd
size: 3,229
// Copyright 2020 WeDPR Lab Project Authors. Licensed under Apache-2.0. #![cfg(not(tarpaulin_include))] //! ECIES function wrappers. #![cfg(feature = "wedpr_f_ecies_secp256k1")] extern crate jni; use wedpr_l_utils::traits::Ecies; use crate::get_result_jobject; #[cfg(feature = "wedpr_f_ecies_secp256k1")] use crate::config::ECIES_SECP256K1; use jni::{ objects::{JClass, JObject, JValue}, sys::jobject, JNIEnv, }; use jni::sys::jbyteArray; use wedpr_ffi_common::utils::{ java_bytes_to_jbyte_array, java_jbytes_to_bytes, java_set_error_field_and_extract_jobject, }; // Secp256k1 implementation. #[cfg(feature = "wedpr_f_ecies_secp256k1")] #[no_mangle] /// Java interface for /// 'com.webank.wedpr.crypto.NativeInterface->secp256k1EciesEncrypt'. pub extern "system" fn Java_com_webank_wedpr_crypto_NativeInterface_secp256k1EciesEncrypt( _env: JNIEnv, _class: JClass, public_key_jbyte_array: jbyteArray, message_hash_jbyte_array: jbyteArray, ) -> jobject { let result_jobject = get_result_jobject(&_env); let public_key = java_safe_jbytes_to_bytes!( _env, result_jobject, public_key_jbyte_array ); let encoded_message = java_safe_jbytes_to_bytes!( _env, result_jobject, message_hash_jbyte_array ); let encrypted_data = match ECIES_SECP256K1.encrypt(&public_key, &encoded_message) { Ok(v) => v, Err(_) => { return java_set_error_field_and_extract_jobject( &_env, &result_jobject, &format!( "ECIES encrypt failed, encoded_message={:?}, \ public_key={:?}", &encoded_message, &public_key ), ) }, }; java_safe_set_byte_array_field!( _env, result_jobject, &encrypted_data, "encryptedData" ); result_jobject.into_inner() } #[cfg(feature = "wedpr_f_ecies_secp256k1")] #[no_mangle] /// Java interface for /// 'com.webank.wedpr.crypto.NativeInterface->secp256k1EciesDecrypt'. pub extern "system" fn Java_com_webank_wedpr_crypto_NativeInterface_secp256k1EciesDecrypt( _env: JNIEnv, _class: JClass, private_key_jbyte_array: jbyteArray, ciphertext_jbyte_array: jbyteArray, ) -> jobject { let result_jobject = get_result_jobject(&_env); let private_key = java_safe_jbytes_to_bytes!( _env, result_jobject, private_key_jbyte_array ); let ciphertext = java_safe_jbytes_to_bytes!( _env, result_jobject, ciphertext_jbyte_array ); let decrypted_data = match ECIES_SECP256K1 .decrypt(&private_key, &ciphertext) { Ok(v) => v, Err(_) => { return java_set_error_field_and_extract_jobject( &_env, &result_jobject, &format!("ECIES decrypt failed, ciphertext={:?}", &ciphertext), ) }, }; java_safe_set_byte_array_field!( _env, result_jobject, &decrypted_data, "decryptedData" ); result_jobject.into_inner() }
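Both JNI entry points above follow the same shape: decode the Java byte arrays, run the ECIES operation, and report either the payload or a formatted error through a single result object. A rough sketch of that wrapper shape in plain Rust, with the JNI plumbing stripped out (the `Ecies` trait here is a simplified stand-in for `wedpr_l_utils::traits::Ecies`, and `CryptoResult` is a hypothetical analogue of the Java result object):

trait Ecies {
    fn encrypt(&self, public_key: &[u8], message: &[u8]) -> Result<Vec<u8>, ()>;
}

#[derive(Default)]
struct CryptoResult {
    encrypted_data: Option<Vec<u8>>,
    error: Option<String>,
}

fn ecies_encrypt_wrapper<E: Ecies>(
    ecies: &E,
    public_key: &[u8],
    message: &[u8],
) -> CryptoResult {
    let mut result = CryptoResult::default();
    // Mirror the wrapper above: on success populate the payload field; on
    // failure record a descriptive error instead of unwinding across the
    // FFI boundary.
    match ecies.encrypt(public_key, message) {
        Ok(data) => result.encrypted_data = Some(data),
        Err(_) => {
            result.error = Some(format!(
                "ECIES encrypt failed, message={:?}, public_key={:?}",
                message, public_key
            ));
        }
    }
    result
}

The decrypt path is symmetric, differing only in its inputs (private key and ciphertext) and the result field it populates.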
avg_line_length: 26.040323
max_line_length: 90
alphanum_fraction: 0.630845
hexsha: 8975faec487657dafa81c3ff2530e6cceff56766
size: 21,206
//! Dataflow analyses are built upon some interpretation of the //! bitvectors attached to each basic block, represented via a //! zero-sized structure. use rustc_index::bit_set::BitSet; use rustc_index::vec::Idx; use rustc_middle::mir::{self, Body, Location}; use rustc_middle::ty::{self, TyCtxt}; use rustc_target::abi::VariantIdx; use super::MoveDataParamEnv; use crate::util::elaborate_drops::DropFlagState; use super::move_paths::{HasMoveData, InitIndex, InitKind, MoveData, MovePathIndex}; use super::{AnalysisDomain, BottomValue, GenKill, GenKillAnalysis}; use super::drop_flag_effects_for_function_entry; use super::drop_flag_effects_for_location; use super::on_lookup_result_bits; use crate::dataflow::drop_flag_effects; mod borrowed_locals; pub(super) mod borrows; mod init_locals; mod liveness; mod storage_liveness; pub use self::borrowed_locals::{MaybeBorrowedLocals, MaybeMutBorrowedLocals}; pub use self::borrows::Borrows; pub use self::init_locals::MaybeInitializedLocals; pub use self::liveness::MaybeLiveLocals; pub use self::storage_liveness::{MaybeRequiresStorage, MaybeStorageLive}; /// `MaybeInitializedPlaces` tracks all places that might be /// initialized upon reaching a particular point in the control flow /// for a function. /// /// For example, in code like the following, we have corresponding /// dataflow information shown in the right-hand comments. /// /// ```rust /// struct S; /// fn foo(pred: bool) { // maybe-init: /// // {} /// let a = S; let b = S; let c; let d; // {a, b} /// /// if pred { /// drop(a); // { b} /// b = S; // { b} /// /// } else { /// drop(b); // {a} /// d = S; // {a, d} /// /// } // {a, b, d} /// /// c = S; // {a, b, c, d} /// } /// ``` /// /// To determine whether a place *must* be initialized at a /// particular control-flow point, one can take the set-difference /// between this data and the data from `MaybeUninitializedPlaces` at the /// corresponding control-flow point. /// /// Similarly, at a given `drop` statement, the set-intersection /// between this data and `MaybeUninitializedPlaces` yields the set of /// places that would require a dynamic drop-flag at that statement. pub struct MaybeInitializedPlaces<'a, 'tcx> { tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>, } impl<'a, 'tcx> MaybeInitializedPlaces<'a, 'tcx> { pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>) -> Self { MaybeInitializedPlaces { tcx, body, mdpe } } } impl<'a, 'tcx> HasMoveData<'tcx> for MaybeInitializedPlaces<'a, 'tcx> { fn move_data(&self) -> &MoveData<'tcx> { &self.mdpe.move_data } } /// `MaybeUninitializedPlaces` tracks all places that might be /// uninitialized upon reaching a particular point in the control flow /// for a function. /// /// For example, in code like the following, we have corresponding /// dataflow information shown in the right-hand comments. /// /// ```rust /// struct S; /// fn foo(pred: bool) { // maybe-uninit: /// // {a, b, c, d} /// let a = S; let b = S; let c; let d; // { c, d} /// /// if pred { /// drop(a); // {a, c, d} /// b = S; // {a, c, d} /// /// } else { /// drop(b); // { b, c, d} /// d = S; // { b, c } /// /// } // {a, b, c, d} /// /// c = S; // {a, b, d} /// } /// ``` /// /// To determine whether a place *must* be uninitialized at a /// particular control-flow point, one can take the set-difference /// between this data and the data from `MaybeInitializedPlaces` at the /// corresponding control-flow point. 
/// /// Similarly, at a given `drop` statement, the set-intersection /// between this data and `MaybeInitializedPlaces` yields the set of /// places that would require a dynamic drop-flag at that statement. pub struct MaybeUninitializedPlaces<'a, 'tcx> { tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>, mark_inactive_variants_as_uninit: bool, } impl<'a, 'tcx> MaybeUninitializedPlaces<'a, 'tcx> { pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>) -> Self { MaybeUninitializedPlaces { tcx, body, mdpe, mark_inactive_variants_as_uninit: false } } /// Causes inactive enum variants to be marked as "maybe uninitialized" after a switch on an /// enum discriminant. /// /// This is correct in a vacuum but is not the default because it causes problems in the borrow /// checker, where this information gets propagated along `FakeEdge`s. pub fn mark_inactive_variants_as_uninit(mut self) -> Self { self.mark_inactive_variants_as_uninit = true; self } } impl<'a, 'tcx> HasMoveData<'tcx> for MaybeUninitializedPlaces<'a, 'tcx> { fn move_data(&self) -> &MoveData<'tcx> { &self.mdpe.move_data } } /// `DefinitelyInitializedPlaces` tracks all places that are definitely /// initialized upon reaching a particular point in the control flow /// for a function. /// /// For example, in code like the following, we have corresponding /// dataflow information shown in the right-hand comments. /// /// ```rust /// struct S; /// fn foo(pred: bool) { // definite-init: /// // { } /// let a = S; let b = S; let c; let d; // {a, b } /// /// if pred { /// drop(a); // { b, } /// b = S; // { b, } /// /// } else { /// drop(b); // {a, } /// d = S; // {a, d} /// /// } // { } /// /// c = S; // { c } /// } /// ``` /// /// To determine whether a place *may* be uninitialized at a /// particular control-flow point, one can take the set-complement /// of this data. /// /// Similarly, at a given `drop` statement, the set-difference between /// this data and `MaybeInitializedPlaces` yields the set of places /// that would require a dynamic drop-flag at that statement. pub struct DefinitelyInitializedPlaces<'a, 'tcx> { tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>, } impl<'a, 'tcx> DefinitelyInitializedPlaces<'a, 'tcx> { pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>) -> Self { DefinitelyInitializedPlaces { tcx, body, mdpe } } } impl<'a, 'tcx> HasMoveData<'tcx> for DefinitelyInitializedPlaces<'a, 'tcx> { fn move_data(&self) -> &MoveData<'tcx> { &self.mdpe.move_data } } /// `EverInitializedPlaces` tracks all places that might have ever been /// initialized upon reaching a particular point in the control flow /// for a function, without an intervening `Storage Dead`. /// /// This dataflow is used to determine if an immutable local variable may /// be assigned to. /// /// For example, in code like the following, we have corresponding /// dataflow information shown in the right-hand comments. 
/// /// ```rust /// struct S; /// fn foo(pred: bool) { // ever-init: /// // { } /// let a = S; let b = S; let c; let d; // {a, b } /// /// if pred { /// drop(a); // {a, b, } /// b = S; // {a, b, } /// /// } else { /// drop(b); // {a, b, } /// d = S; // {a, b, d } /// /// } // {a, b, d } /// /// c = S; // {a, b, c, d } /// } /// ``` pub struct EverInitializedPlaces<'a, 'tcx> { #[allow(dead_code)] tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>, } impl<'a, 'tcx> EverInitializedPlaces<'a, 'tcx> { pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>) -> Self { EverInitializedPlaces { tcx, body, mdpe } } } impl<'a, 'tcx> HasMoveData<'tcx> for EverInitializedPlaces<'a, 'tcx> { fn move_data(&self) -> &MoveData<'tcx> { &self.mdpe.move_data } } impl<'a, 'tcx> MaybeInitializedPlaces<'a, 'tcx> { fn update_bits( trans: &mut impl GenKill<MovePathIndex>, path: MovePathIndex, state: DropFlagState, ) { match state { DropFlagState::Absent => trans.kill(path), DropFlagState::Present => trans.gen(path), } } } impl<'a, 'tcx> MaybeUninitializedPlaces<'a, 'tcx> { fn update_bits( trans: &mut impl GenKill<MovePathIndex>, path: MovePathIndex, state: DropFlagState, ) { match state { DropFlagState::Absent => trans.gen(path), DropFlagState::Present => trans.kill(path), } } } impl<'a, 'tcx> DefinitelyInitializedPlaces<'a, 'tcx> { fn update_bits( trans: &mut impl GenKill<MovePathIndex>, path: MovePathIndex, state: DropFlagState, ) { match state { DropFlagState::Absent => trans.kill(path), DropFlagState::Present => trans.gen(path), } } } impl<'tcx> AnalysisDomain<'tcx> for MaybeInitializedPlaces<'_, 'tcx> { type Idx = MovePathIndex; const NAME: &'static str = "maybe_init"; fn bits_per_block(&self, _: &mir::Body<'tcx>) -> usize { self.move_data().move_paths.len() } fn initialize_start_block(&self, _: &mir::Body<'tcx>, state: &mut BitSet<Self::Idx>) { drop_flag_effects_for_function_entry(self.tcx, self.body, self.mdpe, |path, s| { assert!(s == DropFlagState::Present); state.insert(path); }); } fn pretty_print_idx(&self, w: &mut impl std::io::Write, mpi: Self::Idx) -> std::io::Result<()> { write!(w, "{}", self.move_data().move_paths[mpi]) } } impl<'tcx> GenKillAnalysis<'tcx> for MaybeInitializedPlaces<'_, 'tcx> { fn statement_effect( &self, trans: &mut impl GenKill<Self::Idx>, _statement: &mir::Statement<'tcx>, location: Location, ) { drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| { Self::update_bits(trans, path, s) }) } fn terminator_effect( &self, trans: &mut impl GenKill<Self::Idx>, _terminator: &mir::Terminator<'tcx>, location: Location, ) { drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| { Self::update_bits(trans, path, s) }) } fn call_return_effect( &self, trans: &mut impl GenKill<Self::Idx>, _block: mir::BasicBlock, _func: &mir::Operand<'tcx>, _args: &[mir::Operand<'tcx>], dest_place: mir::Place<'tcx>, ) { // when a call returns successfully, that means we need to set // the bits for that dest_place to 1 (initialized). on_lookup_result_bits( self.tcx, self.body, self.move_data(), self.move_data().rev_lookup.find(dest_place.as_ref()), |mpi| { trans.gen(mpi); }, ); } fn discriminant_switch_effect( &self, trans: &mut impl GenKill<Self::Idx>, _block: mir::BasicBlock, enum_place: mir::Place<'tcx>, _adt: &ty::AdtDef, variant: VariantIdx, ) { // Kill all move paths that correspond to variants we know to be inactive along this // particular outgoing edge of a `SwitchInt`. 
drop_flag_effects::on_all_inactive_variants( self.tcx, self.body, self.move_data(), enum_place, variant, |mpi| trans.kill(mpi), ); } } impl<'tcx> AnalysisDomain<'tcx> for MaybeUninitializedPlaces<'_, 'tcx> { type Idx = MovePathIndex; const NAME: &'static str = "maybe_uninit"; fn bits_per_block(&self, _: &mir::Body<'tcx>) -> usize { self.move_data().move_paths.len() } // sets on_entry bits for Arg places fn initialize_start_block(&self, body: &mir::Body<'tcx>, state: &mut BitSet<Self::Idx>) { // set all bits to 1 (uninit) before gathering counterevidence assert!(self.bits_per_block(body) == state.domain_size()); state.insert_all(); drop_flag_effects_for_function_entry(self.tcx, self.body, self.mdpe, |path, s| { assert!(s == DropFlagState::Present); state.remove(path); }); } fn pretty_print_idx(&self, w: &mut impl std::io::Write, mpi: Self::Idx) -> std::io::Result<()> { write!(w, "{}", self.move_data().move_paths[mpi]) } } impl<'tcx> GenKillAnalysis<'tcx> for MaybeUninitializedPlaces<'_, 'tcx> { fn statement_effect( &self, trans: &mut impl GenKill<Self::Idx>, _statement: &mir::Statement<'tcx>, location: Location, ) { drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| { Self::update_bits(trans, path, s) }) } fn terminator_effect( &self, trans: &mut impl GenKill<Self::Idx>, _terminator: &mir::Terminator<'tcx>, location: Location, ) { drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| { Self::update_bits(trans, path, s) }) } fn call_return_effect( &self, trans: &mut impl GenKill<Self::Idx>, _block: mir::BasicBlock, _func: &mir::Operand<'tcx>, _args: &[mir::Operand<'tcx>], dest_place: mir::Place<'tcx>, ) { // when a call returns successfully, that means we need to set // the bits for that dest_place to 0 (initialized). on_lookup_result_bits( self.tcx, self.body, self.move_data(), self.move_data().rev_lookup.find(dest_place.as_ref()), |mpi| { trans.kill(mpi); }, ); } fn discriminant_switch_effect( &self, trans: &mut impl GenKill<Self::Idx>, _block: mir::BasicBlock, enum_place: mir::Place<'tcx>, _adt: &ty::AdtDef, variant: VariantIdx, ) { if !self.mark_inactive_variants_as_uninit { return; } // Mark all move paths that correspond to variants other than this one as maybe // uninitialized (in reality, they are *definitely* uninitialized). 
drop_flag_effects::on_all_inactive_variants( self.tcx, self.body, self.move_data(), enum_place, variant, |mpi| trans.gen(mpi), ); } } impl<'a, 'tcx> AnalysisDomain<'tcx> for DefinitelyInitializedPlaces<'a, 'tcx> { type Idx = MovePathIndex; const NAME: &'static str = "definite_init"; fn bits_per_block(&self, _: &mir::Body<'tcx>) -> usize { self.move_data().move_paths.len() } // sets on_entry bits for Arg places fn initialize_start_block(&self, _: &mir::Body<'tcx>, state: &mut BitSet<Self::Idx>) { state.clear(); drop_flag_effects_for_function_entry(self.tcx, self.body, self.mdpe, |path, s| { assert!(s == DropFlagState::Present); state.insert(path); }); } fn pretty_print_idx(&self, w: &mut impl std::io::Write, mpi: Self::Idx) -> std::io::Result<()> { write!(w, "{}", self.move_data().move_paths[mpi]) } } impl<'tcx> GenKillAnalysis<'tcx> for DefinitelyInitializedPlaces<'_, 'tcx> { fn statement_effect( &self, trans: &mut impl GenKill<Self::Idx>, _statement: &mir::Statement<'tcx>, location: Location, ) { drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| { Self::update_bits(trans, path, s) }) } fn terminator_effect( &self, trans: &mut impl GenKill<Self::Idx>, _terminator: &mir::Terminator<'tcx>, location: Location, ) { drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| { Self::update_bits(trans, path, s) }) } fn call_return_effect( &self, trans: &mut impl GenKill<Self::Idx>, _block: mir::BasicBlock, _func: &mir::Operand<'tcx>, _args: &[mir::Operand<'tcx>], dest_place: mir::Place<'tcx>, ) { // when a call returns successfully, that means we need to set // the bits for that dest_place to 1 (initialized). on_lookup_result_bits( self.tcx, self.body, self.move_data(), self.move_data().rev_lookup.find(dest_place.as_ref()), |mpi| { trans.gen(mpi); }, ); } } impl<'tcx> AnalysisDomain<'tcx> for EverInitializedPlaces<'_, 'tcx> { type Idx = InitIndex; const NAME: &'static str = "ever_init"; fn bits_per_block(&self, _: &mir::Body<'tcx>) -> usize { self.move_data().inits.len() } fn initialize_start_block(&self, body: &mir::Body<'tcx>, state: &mut BitSet<Self::Idx>) { for arg_init in 0..body.arg_count { state.insert(InitIndex::new(arg_init)); } } } impl<'tcx> GenKillAnalysis<'tcx> for EverInitializedPlaces<'_, 'tcx> { fn statement_effect( &self, trans: &mut impl GenKill<Self::Idx>, stmt: &mir::Statement<'tcx>, location: Location, ) { let move_data = self.move_data(); let init_path_map = &move_data.init_path_map; let init_loc_map = &move_data.init_loc_map; let rev_lookup = &move_data.rev_lookup; debug!( "statement {:?} at loc {:?} initializes move_indexes {:?}", stmt, location, &init_loc_map[location] ); trans.gen_all(init_loc_map[location].iter().copied()); if let mir::StatementKind::StorageDead(local) = stmt.kind { // End inits for StorageDead, so that an immutable variable can // be reinitialized on the next iteration of the loop. 
let move_path_index = rev_lookup.find_local(local); debug!( "stmt {:?} at loc {:?} clears the ever initialized status of {:?}", stmt, location, &init_path_map[move_path_index] ); trans.kill_all(init_path_map[move_path_index].iter().copied()); } } fn terminator_effect( &self, trans: &mut impl GenKill<Self::Idx>, _terminator: &mir::Terminator<'tcx>, location: Location, ) { let (body, move_data) = (self.body, self.move_data()); let term = body[location.block].terminator(); let init_loc_map = &move_data.init_loc_map; debug!( "terminator {:?} at loc {:?} initializes move_indexes {:?}", term, location, &init_loc_map[location] ); trans.gen_all( init_loc_map[location] .iter() .filter(|init_index| { move_data.inits[**init_index].kind != InitKind::NonPanicPathOnly }) .copied(), ); } fn call_return_effect( &self, trans: &mut impl GenKill<Self::Idx>, block: mir::BasicBlock, _func: &mir::Operand<'tcx>, _args: &[mir::Operand<'tcx>], _dest_place: mir::Place<'tcx>, ) { let move_data = self.move_data(); let init_loc_map = &move_data.init_loc_map; let call_loc = self.body.terminator_loc(block); for init_index in &init_loc_map[call_loc] { trans.gen(*init_index); } } } impl<'a, 'tcx> BottomValue for MaybeInitializedPlaces<'a, 'tcx> { /// bottom = uninitialized const BOTTOM_VALUE: bool = false; } impl<'a, 'tcx> BottomValue for MaybeUninitializedPlaces<'a, 'tcx> { /// bottom = initialized (start_block_effect counters this at outset) const BOTTOM_VALUE: bool = false; } impl<'a, 'tcx> BottomValue for DefinitelyInitializedPlaces<'a, 'tcx> { /// bottom = initialized (start_block_effect counters this at outset) const BOTTOM_VALUE: bool = true; } impl<'a, 'tcx> BottomValue for EverInitializedPlaces<'a, 'tcx> { /// bottom = no initialized variables by default const BOTTOM_VALUE: bool = false; }
32.725309
100
0.559842
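The doc comments in the record above describe the gen/kill discipline and the set operations built on it (e.g. must-init = maybe-init minus maybe-uninit). A self-contained toy version of that bookkeeping, using Vec<bool> in place of rustc's BitSet (an illustration of the idea only, not rustc's API):

// Toy gen/kill rows: "maybe-init" gens a place on assignment and kills it on
// move/drop; "maybe-uninit" does the mirror image, as in the update_bits impls.
struct BitRow(Vec<bool>);

impl BitRow {
    fn gen_bit(&mut self, i: usize) { self.0[i] = true; }   // like trans.gen(path)
    fn kill_bit(&mut self, i: usize) { self.0[i] = false; } // like trans.kill(path)
}

fn main() {
    // Two tracked places: 0 = `a`, 1 = `b`.
    let mut maybe_init = BitRow(vec![false, false]);
    let mut maybe_uninit = BitRow(vec![true, true]);

    // `a = S;` then `drop(a);`
    maybe_init.gen_bit(0);
    maybe_uninit.kill_bit(0);
    maybe_init.kill_bit(0);
    maybe_uninit.gen_bit(0);

    // `b = S;` with no later drop.
    maybe_init.gen_bit(1);
    maybe_uninit.kill_bit(1);

    // "must be initialized" = maybe-init AND NOT maybe-uninit — the
    // set-difference described in the doc comments above.
    let must_init: Vec<bool> = maybe_init
        .0
        .iter()
        .zip(&maybe_uninit.0)
        .map(|(i, u)| *i && !*u)
        .collect();
    assert_eq!(must_init, vec![false, true]); // only `b` must be initialized
}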
dd6bd8f11f618071c15db5349c2e444297b9e58a
533
// option1.rs
// Make me compile! Execute `rustlings hint option1` for hints
// you can modify anything EXCEPT for this function's sig
fn print_number(maybe_number: Option<u16>) {
    println!("printing: {}", maybe_number.is_some())
}

fn main() {
    print_number(Some(13));
    print_number(Some(99));

    let mut numbers: [Option<u16>; 5] = [None; 5];
    for iter in 0..5 {
        let number_to_add: u16 = { ((iter * 1235) + 2) / (4 * 16) };

        numbers[iter as usize] = Some(number_to_add);
    }
}
23.173913
62
0.596623
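A natural follow-on to the exercise above is printing the contained value rather than `is_some()`. A small sketch that keeps the required signature (one way to complete the exercise, not the canonical rustlings answer):

// Pattern-match the Option instead of just reporting its presence.
fn print_number(maybe_number: Option<u16>) {
    match maybe_number {
        Some(n) => println!("printing: {}", n),
        None => println!("printing: nothing"),
    }
}

fn main() {
    print_number(Some(13));
    print_number(None);
}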
eb6f60ec7d72710a40909bbb74e48f445a7c313a
9,939
#[cfg(test)] mod prf_test; use std::convert::TryInto; use std::fmt; use hmac::{Hmac, Mac, NewMac}; use sha1::Sha1; use sha2::Digest; use sha2::Sha256; type HmacSha256 = Hmac<Sha256>; type HmacSha1 = Hmac<Sha1>; use crate::cipher_suite::CipherSuiteHash; use crate::content::ContentType; use crate::curve::named_curve::*; use crate::error::*; use crate::record_layer::record_layer_header::ProtocolVersion; pub(crate) const PRF_MASTER_SECRET_LABEL: &str = "master secret"; pub(crate) const PRF_EXTENDED_MASTER_SECRET_LABEL: &str = "extended master secret"; pub(crate) const PRF_KEY_EXPANSION_LABEL: &str = "key expansion"; pub(crate) const PRF_VERIFY_DATA_CLIENT_LABEL: &str = "client finished"; pub(crate) const PRF_VERIFY_DATA_SERVER_LABEL: &str = "server finished"; #[derive(PartialEq, Debug, Clone)] pub(crate) struct EncryptionKeys { pub(crate) master_secret: Vec<u8>, pub(crate) client_mac_key: Vec<u8>, pub(crate) server_mac_key: Vec<u8>, pub(crate) client_write_key: Vec<u8>, pub(crate) server_write_key: Vec<u8>, pub(crate) client_write_iv: Vec<u8>, pub(crate) server_write_iv: Vec<u8>, } impl fmt::Display for EncryptionKeys { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut out = "EncryptionKeys:\n".to_string(); out += format!("- master_secret: {:?}\n", self.master_secret).as_str(); out += format!("- client_mackey: {:?}\n", self.client_mac_key).as_str(); out += format!("- server_mackey: {:?}\n", self.server_mac_key).as_str(); out += format!("- client_write_key: {:?}\n", self.client_write_key).as_str(); out += format!("- server_write_key: {:?}\n", self.server_write_key).as_str(); out += format!("- client_write_iv: {:?}\n", self.client_write_iv).as_str(); out += format!("- server_write_iv: {:?}\n", self.server_write_iv).as_str(); write!(f, "{}", out) } } // The premaster secret is formed as follows: if the PSK is N octets // long, concatenate a uint16 with the value N, N zero octets, a second // uint16 with the value N, and the PSK itself. 
// // https://tools.ietf.org/html/rfc4279#section-2 pub(crate) fn prf_psk_pre_master_secret(psk: &[u8]) -> Vec<u8> { let psk_len = psk.len(); let mut out = vec![0u8; 2 + psk_len + 2]; out.extend_from_slice(psk); let be = (psk_len as u16).to_be_bytes(); out[..2].copy_from_slice(&be); out[2 + psk_len..2 + psk_len + 2].copy_from_slice(&be); out } pub(crate) fn prf_pre_master_secret( public_key: &[u8], private_key: &NamedCurvePrivateKey, curve: NamedCurve, ) -> Result<Vec<u8>> { match curve { NamedCurve::P256 => elliptic_curve_pre_master_secret(public_key, private_key, curve), NamedCurve::X25519 => elliptic_curve_pre_master_secret(public_key, private_key, curve), _ => Err(Error::ErrInvalidNamedCurve), } } fn elliptic_curve_pre_master_secret( public_key: &[u8], private_key: &NamedCurvePrivateKey, curve: NamedCurve, ) -> Result<Vec<u8>> { match curve { NamedCurve::P256 => { let pub_key = p256::EncodedPoint::from_bytes(public_key)?; let public = p256::PublicKey::from_sec1_bytes(pub_key.as_ref())?; if let NamedCurvePrivateKey::EphemeralSecretP256(secret) = private_key { return Ok(secret.diffie_hellman(&public).as_bytes().to_vec()); } } NamedCurve::X25519 => { if public_key.len() != 32 { return Err(Error::Other("Public key is not 32 len".into())); } let pub_key: [u8; 32] = public_key.try_into().unwrap(); let public = x25519_dalek::PublicKey::from(pub_key); if let NamedCurvePrivateKey::StaticSecretX25519(secret) = private_key { return Ok(secret.diffie_hellman(&public).as_bytes().to_vec()); } } _ => return Err(Error::ErrInvalidNamedCurve), } Err(Error::ErrNamedCurveAndPrivateKeyMismatch) } // This PRF with the SHA-256 hash function is used for all cipher suites // defined in this document and in TLS documents published prior to this // document when TLS 1.2 is negotiated. New cipher suites MUST explicitly // specify a PRF and, in general, SHOULD use the TLS PRF with SHA-256 or a // stronger standard hash function. // // P_hash(secret, seed) = HMAC_hash(secret, A(1) + seed) + // HMAC_hash(secret, A(2) + seed) + // HMAC_hash(secret, A(3) + seed) + ... // // A() is defined as: // // A(0) = seed // A(i) = HMAC_hash(secret, A(i-1)) // // P_hash can be iterated as many times as necessary to produce the // required quantity of data. For example, if P_SHA256 is being used to // create 80 bytes of data, it will have to be iterated three times // (through A(3)), creating 96 bytes of output data; the last 16 bytes // of the final iteration will then be discarded, leaving 80 bytes of // output data. // // https://tools.ietf.org/html/rfc4346w fn hmac_sha(h: CipherSuiteHash, key: &[u8], data: &[u8]) -> Result<Vec<u8>> { let mut mac = match h { CipherSuiteHash::Sha256 => { HmacSha256::new_varkey(key).map_err(|e| Error::Other(e.to_string()))? 
} }; mac.update(data); let result = mac.finalize(); let code_bytes = result.into_bytes(); Ok(code_bytes.to_vec()) } pub(crate) fn prf_p_hash( secret: &[u8], seed: &[u8], requested_length: usize, h: CipherSuiteHash, ) -> Result<Vec<u8>> { let mut last_round = seed.to_vec(); let mut out = vec![]; let iterations = ((requested_length as f64) / (h.size() as f64)).ceil() as usize; for _ in 0..iterations { last_round = hmac_sha(h, secret, &last_round)?; let mut last_round_seed = last_round.clone(); last_round_seed.extend_from_slice(seed); let with_secret = hmac_sha(h, secret, &last_round_seed)?; out.extend_from_slice(&with_secret); } Ok(out[..requested_length].to_vec()) } pub(crate) fn prf_extended_master_secret( pre_master_secret: &[u8], session_hash: &[u8], h: CipherSuiteHash, ) -> Result<Vec<u8>> { let mut seed = PRF_EXTENDED_MASTER_SECRET_LABEL.as_bytes().to_vec(); seed.extend_from_slice(session_hash); prf_p_hash(pre_master_secret, &seed, 48, h) } pub(crate) fn prf_master_secret( pre_master_secret: &[u8], client_random: &[u8], server_random: &[u8], h: CipherSuiteHash, ) -> Result<Vec<u8>> { let mut seed = PRF_MASTER_SECRET_LABEL.as_bytes().to_vec(); seed.extend_from_slice(client_random); seed.extend_from_slice(server_random); prf_p_hash(pre_master_secret, &seed, 48, h) } pub(crate) fn prf_encryption_keys( master_secret: &[u8], client_random: &[u8], server_random: &[u8], prf_mac_len: usize, prf_key_len: usize, prf_iv_len: usize, h: CipherSuiteHash, ) -> Result<EncryptionKeys> { let mut seed = PRF_KEY_EXPANSION_LABEL.as_bytes().to_vec(); seed.extend_from_slice(server_random); seed.extend_from_slice(client_random); let material = prf_p_hash( master_secret, &seed, (2 * prf_mac_len) + (2 * prf_key_len) + (2 * prf_iv_len), h, )?; let mut key_material = &material[..]; let client_mac_key = key_material[..prf_mac_len].to_vec(); key_material = &key_material[prf_mac_len..]; let server_mac_key = key_material[..prf_mac_len].to_vec(); key_material = &key_material[prf_mac_len..]; let client_write_key = key_material[..prf_key_len].to_vec(); key_material = &key_material[prf_key_len..]; let server_write_key = key_material[..prf_key_len].to_vec(); key_material = &key_material[prf_key_len..]; let client_write_iv = key_material[..prf_iv_len].to_vec(); key_material = &key_material[prf_iv_len..]; let server_write_iv = key_material[..prf_iv_len].to_vec(); Ok(EncryptionKeys { master_secret: master_secret.to_vec(), client_mac_key, server_mac_key, client_write_key, server_write_key, client_write_iv, server_write_iv, }) } pub(crate) fn prf_verify_data( master_secret: &[u8], handshake_bodies: &[u8], label: &str, h: CipherSuiteHash, ) -> Result<Vec<u8>> { let mut hasher = match h { CipherSuiteHash::Sha256 => Sha256::new(), }; hasher.update(handshake_bodies); let result = hasher.finalize(); let mut seed = label.as_bytes().to_vec(); seed.extend_from_slice(&result); prf_p_hash(master_secret, &seed, 12, h) } pub(crate) fn prf_verify_data_client( master_secret: &[u8], handshake_bodies: &[u8], h: CipherSuiteHash, ) -> Result<Vec<u8>> { prf_verify_data( master_secret, handshake_bodies, PRF_VERIFY_DATA_CLIENT_LABEL, h, ) } pub(crate) fn prf_verify_data_server( master_secret: &[u8], handshake_bodies: &[u8], h: CipherSuiteHash, ) -> Result<Vec<u8>> { prf_verify_data( master_secret, handshake_bodies, PRF_VERIFY_DATA_SERVER_LABEL, h, ) } // compute the MAC using HMAC-SHA1 pub(crate) fn prf_mac( epoch: u16, sequence_number: u64, content_type: ContentType, protocol_version: ProtocolVersion, payload: &[u8], key: &[u8], ) -> 
Result<Vec<u8>> { let mut hmac = HmacSha1::new_varkey(key).map_err(|e| Error::Other(e.to_string()))?; let mut msg = vec![0u8; 13]; msg[..2].copy_from_slice(&epoch.to_be_bytes()); msg[2..8].copy_from_slice(&sequence_number.to_be_bytes()[2..]); msg[8] = content_type as u8; msg[9] = protocol_version.major; msg[10] = protocol_version.minor; msg[11..].copy_from_slice(&(payload.len() as u16).to_be_bytes()); hmac.update(&msg); hmac.update(payload); let result = hmac.finalize(); Ok(result.into_bytes().to_vec()) }
32.165049
95
0.648254
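The P_hash comment quoted in the record above spells out the sizing rule, and the arithmetic is easy to check in isolation. A sketch of just that iteration/truncation math (the 32-byte hash length assumes SHA-256, the only variant of CipherSuiteHash shown):

// Sizing logic from the RFC comment: producing 80 bytes with a 32-byte hash
// takes ceil(80 / 32) = 3 rounds (96 bytes), discarding the surplus 16.
fn p_hash_rounds(requested_len: usize, hash_len: usize) -> (usize, usize) {
    let rounds = (requested_len + hash_len - 1) / hash_len; // ceiling division
    let produced = rounds * hash_len;
    (rounds, produced - requested_len) // (iterations, bytes discarded)
}

fn main() {
    assert_eq!(p_hash_rounds(80, 32), (3, 16));
    // The master-secret derivations above always request 48 bytes:
    assert_eq!(p_hash_rounds(48, 32), (2, 16));
}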
484586b941e9dbc9734e07899a7ff36812e8fa6d
4,951
#![allow(dead_code)] use crate::common::configuration::{Block0ConfigurationBuilder, NodeConfigBuilder}; use crate::common::file_utils; use chain_core::mempack; use chain_impl_mockchain::{block::Block, fee::LinearFee, fragment::Fragment}; use jormungandr_lib::{ interfaces::{Block0Configuration, NodeConfig, NodeSecret, TrustedPeer, UTxOInfo}, wallet::Wallet, }; use std::path::PathBuf; #[derive(Debug, Clone)] pub struct JormungandrConfig { pub genesis_block_path: PathBuf, pub genesis_block_hash: String, pub node_config_path: PathBuf, pub secret_model_paths: Vec<PathBuf>, pub block0_configuration: Block0Configuration, pub node_config: NodeConfig, pub secret_models: Vec<NodeSecret>, pub log_file_path: PathBuf, pub rewards_history: bool, } impl JormungandrConfig { pub fn get_node_address(&self) -> String { let rest = &self.node_config.rest; let output = format!("http://{}/api", rest.listen); output } pub fn refresh_node_dynamic_params(&mut self) { self.regenerate_ports(); self.update_node_config(); self.log_file_path = file_utils::get_path_in_temp("log_file.log"); } pub fn update_node_config(&mut self) { self.node_config_path = NodeConfigBuilder::serialize(&self.node_config); } fn regenerate_ports(&mut self) { self.node_config.rest.listen = format!("127.0.0.1:{}", super::get_available_port().to_string()) .parse() .unwrap(); self.node_config.p2p.public_address = format!( "/ip4/127.0.0.1/tcp/{}", super::get_available_port().to_string() ) .parse() .unwrap(); self.node_config.p2p.listen_address = self.node_config.p2p.public_address.clone(); } pub fn fees(&self) -> LinearFee { self.block0_configuration .blockchain_configuration .linear_fees .clone() } pub fn get_p2p_listen_port(&self) -> u16 { let address = self.node_config.p2p.listen_address.clone().to_string(); let tokens: Vec<&str> = address.split("/").collect(); let port_str = tokens .get(4) .expect("cannot extract port from p2p.public_address"); port_str.parse().unwrap() } pub fn new() -> Self { JormungandrConfig::from( Block0ConfigurationBuilder::new().build(), NodeConfigBuilder::new().build(), ) } pub fn as_trusted_peer(&self) -> TrustedPeer { self.node_config.p2p.make_trusted_peer_setting() } pub fn from(block0_configuration: Block0Configuration, node_config: NodeConfig) -> Self { JormungandrConfig { genesis_block_path: PathBuf::from(""), genesis_block_hash: String::from(""), node_config_path: PathBuf::from(""), secret_model_paths: Vec::new(), log_file_path: PathBuf::from(""), block0_configuration: block0_configuration, node_config: node_config, secret_models: Vec::new(), rewards_history: false, } } pub fn block0_utxo(&self) -> Vec<UTxOInfo> { let block0_bytes = std::fs::read(&self.genesis_block_path).expect(&format!( "Failed to load block 0 binary file '{}'", self.genesis_block_path.display() )); mempack::read_from_raw::<Block>(&block0_bytes) .expect(&format!( "Failed to parse block in block 0 file '{}'", self.genesis_block_path.display() )) .contents .iter() .filter_map(|fragment| match fragment { Fragment::Transaction(transaction) => Some((transaction, fragment.hash())), _ => None, }) .map(|(transaction, fragment_id)| { transaction .as_slice() .outputs() .iter() .enumerate() .map(move |(idx, output)| { UTxOInfo::new( fragment_id.into(), idx as u8, output.address.clone().into(), output.value.into(), ) }) }) .flatten() .collect() } pub fn block0_utxo_for_address(&self, wallet: &Wallet) -> UTxOInfo { let utxo = self .block0_utxo() .into_iter() .find(|utxo| *utxo.address() == wallet.address()) .expect(&format!( "No UTxO found in block 0 for 
address '{:?}'", wallet )); println!( "Utxo found for address {}: {:?}", wallet.address().to_string(), &utxo ); utxo } }
32.788079
93
0.551808
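The `get_p2p_listen_port` method above pulls the port out of a multiaddr by splitting on '/'. A standalone sketch of that parsing, with a fallible return instead of the record's expect/unwrap (a hypothetical helper, not the jormungandr API):

// "/ip4/127.0.0.1/tcp/9001".split('/') yields
// ["", "ip4", "127.0.0.1", "tcp", "9001"], so the port sits at index 4.
fn p2p_listen_port(multiaddr: &str) -> Option<u16> {
    multiaddr.split('/').nth(4)?.parse().ok()
}

fn main() {
    assert_eq!(p2p_listen_port("/ip4/127.0.0.1/tcp/9001"), Some(9001));
    assert_eq!(p2p_listen_port("not-a-multiaddr"), None);
}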
87cc1f40f44279a6cb6258065bf806a4756b1c2f
2,473
// Copyright 2020 ChainSafe Systems
// SPDX-License-Identifier: Apache-2.0, MIT

use super::cli::Subcommand;
use super::paramfetch::{get_params_default, SectorSizeOpt};
use fil_types::SectorSize;

/// Converts a human readable string to a `SectorSize`.
fn ram_to_int(size: &str) -> Result<SectorSize, String> {
    // TODO there is no library to do this, but if other sector sizes are supported in future
    // this should probably be changed to parse from string to `SectorSize`
    let mut trimmed = size.trim_end_matches('B');
    trimmed = trimmed.trim_end_matches('b');

    match trimmed {
        "2048" | "2Ki" | "2ki" => Ok(SectorSize::_2KiB),
        "8388608" | "8Mi" | "8mi" => Ok(SectorSize::_8MiB),
        "536870912" | "512Mi" | "512mi" => Ok(SectorSize::_512MiB),
        "34359738368" | "32Gi" | "32gi" => Ok(SectorSize::_32GiB),
        "68719476736" | "64Gi" | "64gi" => Ok(SectorSize::_64GiB),
        _ => Err(format!(
            "Failed to parse: {}. Must be a valid sector size",
            size
        )),
    }
}

/// Process CLI subcommand
pub(super) async fn process(command: Subcommand) {
    match command {
        Subcommand::FetchParams {
            params_size,
            all,
            keys,
            verbose,
        } => {
            let sizes = if all {
                SectorSizeOpt::All
            } else if let Some(size) = params_size {
                let sector_size = ram_to_int(&size).unwrap();
                SectorSizeOpt::Size(sector_size)
            } else if keys {
                SectorSizeOpt::Keys
            } else {
                panic!(
                    "Sector size option must be chosen. Choose between --all, --keys, or <size>"
                );
            };

            get_params_default(sizes, verbose).await.unwrap();
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn ram_str_conversions() {
        assert_eq!(ram_to_int("2048").unwrap(), SectorSize::_2KiB);
        assert_eq!(ram_to_int("2048B").unwrap(), SectorSize::_2KiB);
        assert_eq!(ram_to_int("2kib").unwrap(), SectorSize::_2KiB);
        assert_eq!(ram_to_int("8Mib").unwrap(), SectorSize::_8MiB);
        assert_eq!(ram_to_int("512MiB").unwrap(), SectorSize::_512MiB);
        assert_eq!(ram_to_int("32Gi").unwrap(), SectorSize::_32GiB);
        assert_eq!(ram_to_int("32GiB").unwrap(), SectorSize::_32GiB);
        assert_eq!(ram_to_int("64Gib").unwrap(), SectorSize::_64GiB);
    }
}
34.830986
96
0.577841
1d56f3410d0188700d656cc1550451baeed9f2e1
1,448
use std::net::IpAddr;
use std::{io, time};

pub struct Pinger {
    addr: IpAddr,
}

impl Pinger {
    /// Creates new instance
    ///
    /// `addr` - Destination address for the reachability check (despite the
    /// "ping" naming, this is implemented as a TCP connect to port 53, not an
    /// ICMP packet).
    pub const fn new(addr: IpAddr) -> Self {
        Self { addr }
    }

    #[inline]
    pub fn ping(&self) -> io::Result<time::Duration> {
        self.ping_timeout(time::Duration::from_millis(250))
    }

    pub fn ping_timeout(&self, timeout: time::Duration) -> io::Result<time::Duration> {
        let addr = std::net::SocketAddr::new(self.addr, 53);
        let before = time::Instant::now();
        match std::net::TcpStream::connect_timeout(&addr, timeout) {
            Ok(_) => Ok(time::Instant::now().duration_since(before)),
            Err(error) => {
                rogu::debug!("Ping fail: {}", error);
                Err(error)
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::Pinger;
    use std::net::{IpAddr, Ipv4Addr};

    #[test]
    fn pinger_should_ping_google_dns() {
        let ip = IpAddr::V4(Ipv4Addr::new(8, 8, 8, 8));
        let pinger = Pinger::new(ip);
        let duration = pinger.ping().expect("To ping");
        assert_ne!(duration.as_millis(), 0);
    }

    #[test]
    fn pinger_should_ping_non_existing() {
        let ip = IpAddr::V4(Ipv4Addr::new(8, 8, 8, 9));
        let pinger = Pinger::new(ip);
        pinger.ping().expect_err("Fail to ping");
    }
}
24.542373
87
0.542818
e2957bcbe391aa579ff4f6357b5b1063474c6561
159
use serde::Serialize;

#[derive(Serialize)]
pub struct PDFMetadata {
    pub name: String,
    pub preview_filenames: Vec<String>,
    pub page_count: i32
}
14.454545
39
0.691824
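Usage sketch for the record above, assuming `serde` (with the derive feature) and `serde_json` as dependencies; the struct is repeated so the snippet compiles on its own, and the field values are invented:

use serde::Serialize;

#[derive(Serialize)]
pub struct PDFMetadata {
    pub name: String,
    pub preview_filenames: Vec<String>,
    pub page_count: i32,
}

fn main() {
    let meta = PDFMetadata {
        name: "report.pdf".to_string(),
        preview_filenames: vec!["page-1.png".to_string()],
        page_count: 1,
    };
    // Prints {"name":"report.pdf","preview_filenames":["page-1.png"],"page_count":1}
    println!("{}", serde_json::to_string(&meta).unwrap());
}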
ccb6ce1831257de57311926905b222514f84d238
2,293
use specs::*;

use server::component::event::TimerEvent;
use server::consts::timer::DELAYED_MESSAGE;
use server::protocol::server::ServerMessage;
use server::protocol::ServerMessageType;
use server::types::FutureDispatcher;
use server::utils::*;
use server::*;

use component::*;
use config::*;

use std::time::Duration;

use systems::on_flag::CheckWin;

const MESSAGE_1_MIN: &'static str = "New game starting in 1 minute";
const MESSAGE_30_SECONDS: &'static str = "Game starting in 30 seconds - shuffling teams";
const MESSAGE_10_SECONDS: &'static str = "Game starting in 10 seconds";
const MESSAGE_5_SECONDS: &'static str = "Game starting in 5 seconds";
const MESSAGE_4_SECONDS: &'static str = "Game starting in 4 seconds";
const MESSAGE_3_SECONDS: &'static str = "Game starting in 3 seconds";
const MESSAGE_2_SECONDS: &'static str = "Game starting in 2 seconds";
const MESSAGE_1_SECONDS: &'static str = "Game starting in a second";
const MESSAGE_0_SECONDS: &'static str = "Game starting!";

const MESSAGES: [(u32, u64, &'static str); 9] = [
    (12, 60, MESSAGE_1_MIN),
    (7, 30, MESSAGE_30_SECONDS),
    (7, 10, MESSAGE_10_SECONDS),
    (2, 5, MESSAGE_5_SECONDS),
    (2, 4, MESSAGE_4_SECONDS),
    (2, 3, MESSAGE_3_SECONDS),
    (2, 2, MESSAGE_2_SECONDS),
    (2, 1, MESSAGE_1_SECONDS),
    (3, 0, MESSAGE_0_SECONDS),
];

#[derive(Default)]
pub struct SetupMessages;

#[derive(SystemData)]
pub struct SetupMessagesData<'a> {
    future: ReadExpect<'a, FutureDispatcher>,
}

impl EventHandlerTypeProvider for SetupMessages {
    type Event = GameWinEvent;
}

impl<'a> EventHandler<'a> for SetupMessages {
    type SystemData = SetupMessagesData<'a>;

    fn on_event(&mut self, _: &GameWinEvent, data: &mut Self::SystemData) {
        for (duration, delay, msg) in MESSAGES.iter() {
            data.future.run_delayed(
                *GAME_RESET_TIME - Duration::from_secs(*delay),
                move |inst| {
                    Some(TimerEvent {
                        ty: *DELAYED_MESSAGE,
                        instant: inst,
                        data: Some(Box::new(ServerMessage {
                            ty: ServerMessageType::TimeToGameStart,
                            duration: *duration * 1000,
                            text: msg.to_string(),
                        })),
                    })
                },
            );
        }
    }
}

impl SystemInfo for SetupMessages {
    type Dependencies = CheckWin;

    fn name() -> &'static str {
        concat!(module_path!(), "::", line!())
    }

    fn new() -> Self {
        Self::default()
    }
}
27.297619
89
0.692543
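The MESSAGES table above pairs each text with an on-screen duration and a delay before game reset, and each closure is scheduled at GAME_RESET_TIME minus that delay. A plain-std sketch of just that arithmetic (the 2-minute reset time is an assumed value for illustration, not the game's actual GAME_RESET_TIME):

use std::time::Duration;

fn main() {
    let game_reset_time = Duration::from_secs(120); // assumption for illustration
    let messages = [(60u64, "New game starting in 1 minute"), (0, "Game starting!")];

    for (delay, msg) in messages.iter() {
        // Mirrors `*GAME_RESET_TIME - Duration::from_secs(*delay)` above, so
        // the "1 minute" warning fires 60s in and the final message at 120s.
        let fire_at = game_reset_time - Duration::from_secs(*delay);
        println!("fires at t+{}s: {}", fire_at.as_secs(), msg);
    }
}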
e87b6af321f34ac3f96816bfce15b352dbc07d7c
17,001
#![allow(clippy::not_unsafe_ptr_arg_deref)] extern crate kaiju_core as core; extern crate kaiju_vm_core as vm_core; extern crate libc; #[macro_use] extern crate lazy_static; use core::error::*; use std::collections::HashMap; use std::ffi::CString; use std::mem::{size_of, transmute}; use std::ptr::{copy_nonoverlapping, null, null_mut}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Mutex; use vm_core::processor::{OpAction, Processor}; use vm_core::state::Value; use vm_core::vm::Vm; lazy_static! { static ref HANDLE_GEN: AtomicUsize = AtomicUsize::new(0); static ref VMS: Mutex<HashMap<Handle, Vm>> = Mutex::new(HashMap::new()); static ref PROCESS_OP: Mutex<Option<(usize, KaijuFuncProcessOp)>> = Mutex::new(None); static ref VM: Mutex<Option<&'static mut Vm>> = Mutex::new(None); static ref OP_ACTION: Mutex<OpAction> = Mutex::new(OpAction::None); } type KaijuFuncProcessOp = fn( context: *mut libc::c_void, op: *const libc::c_char, params: *const usize, params_count: usize, targets: *const usize, targets_count: usize, ); type Handle = usize; struct ExternalProcessor {} impl Processor for ExternalProcessor { fn process_op( op: &String, params: &[usize], targets: &[usize], vm: &mut Vm, ) -> SimpleResult<OpAction> { if let Some((context, on_process_op)) = *PROCESS_OP.lock().unwrap() { let csop = CString::new(op.as_str()).unwrap(); { *VM.lock().unwrap() = Some(unsafe { transmute::<&mut Vm, &'static mut Vm>(vm) }); } on_process_op( context as *mut libc::c_void, csop.as_ptr(), params.as_ptr(), params.len(), targets.as_ptr(), targets.len(), ); { *VM.lock().unwrap() = None; } let mut action = OP_ACTION.lock().unwrap(); let a = *action; *action = OpAction::None; Ok(a) } else { Err(SimpleError::new("There is no active processor".to_owned())) } } } #[repr(C)] pub struct KaijuInfoState { pub stack_size: usize, pub memory_size: usize, pub all_size: usize, pub stack_free: usize, pub memory_free: usize, pub all_free: usize, } #[no_mangle] pub extern "C" fn kaiju_start_program( bytes: *const libc::c_uchar, size: usize, entry: *const libc::c_char, memsize: usize, stacksize: usize, error: fn(*mut libc::c_void, *const libc::c_char), error_context: *mut libc::c_void, ) -> Handle { if bytes.is_null() || size == 0 || entry.is_null() || memsize == 0 || stacksize == 0 || (error as *const libc::c_void).is_null() { if !(error as *const libc::c_void).is_null() { let err = CString::new("Some of parameters are zeros or null pointers!").unwrap(); error(error_context, err.as_ptr()); } return 0; } let bytes = bytes_from_raw(bytes, size as usize); match Vm::from_bytes(bytes, stacksize as usize, memsize as usize) { Ok(mut vm) => match vm.start(&string_from_raw_unsized(entry as *const libc::c_uchar)) { Ok(_) => { let handle = { let gen = HANDLE_GEN.load(Ordering::Relaxed); let handle = gen + 1; HANDLE_GEN.store(handle, Ordering::Relaxed); handle }; VMS.lock().unwrap().insert(handle, vm); handle } Err(err) => { let err = CString::new(err.message).unwrap(); error(error_context, err.as_ptr()); 0 } }, Err(err) => { let err = CString::new(err.message).unwrap(); error(error_context, err.as_ptr()); 0 } } } #[no_mangle] pub extern "C" fn kaiju_run_program( bytes: *const libc::c_uchar, size: usize, entry: *const libc::c_char, memsize: usize, stacksize: usize, on_process_op: fn( *mut libc::c_void, *const libc::c_char, *const usize, usize, *const usize, usize, ), processor_context: *mut libc::c_void, error: fn(*mut libc::c_void, *const libc::c_char), error_context: *mut libc::c_void, ) -> bool { if bytes.is_null() || size == 0 
|| entry.is_null() || memsize == 0 || stacksize == 0 || (on_process_op as *const libc::c_void).is_null() || (error as *const libc::c_void).is_null() { if !(error as *const libc::c_void).is_null() { let err = CString::new("Some of parameters are zeros or null pointers!").unwrap(); error(error_context, err.as_ptr()); } return false; } let bytes = bytes_from_raw(bytes, size as usize); match Vm::from_bytes(bytes, stacksize as usize, memsize as usize) { Ok(mut vm) => { { *PROCESS_OP.lock().unwrap() = Some((processor_context as usize, on_process_op)); } let result = match vm .run::<ExternalProcessor>(&string_from_raw_unsized(entry as *const libc::c_uchar)) { Ok(_) => true, Err(err) => { let err = CString::new(err.message).unwrap(); error(error_context, err.as_ptr()); false } }; { *PROCESS_OP.lock().unwrap() = None; } result } Err(err) => { let err = CString::new(err.message).unwrap(); error(error_context, err.as_ptr()); false } } } #[no_mangle] pub extern "C" fn kaiju_resume_program( handle: Handle, on_process_op: fn( *mut libc::c_void, *const libc::c_char, *const usize, usize, *const usize, usize, ), processor_context: *mut libc::c_void, error: fn(*mut libc::c_void, *const libc::c_char), error_context: *mut libc::c_void, ) -> bool { if (on_process_op as *const libc::c_void).is_null() || (error as *const libc::c_void).is_null() { if !(error as *const libc::c_void).is_null() { let err = CString::new("Some of parameters are null pointers!").unwrap(); error(error_context, err.as_ptr()); } return false; } let mut vms = VMS.lock().unwrap(); match vms.get_mut(&handle) { Some(vm) => { if !vm.can_resume() { vms.remove(&handle); return false; } { *PROCESS_OP.lock().unwrap() = Some((processor_context as usize, on_process_op)); } let result = match vm.resume::<ExternalProcessor>() { Ok(_) => true, Err(err) => { let err = CString::new(err.message).unwrap(); error(error_context, err.as_ptr()); false } }; { *PROCESS_OP.lock().unwrap() = None; } result } None => { let err = CString::new(format!("There is no VM with handle: {}", handle)).unwrap(); error(error_context, err.as_ptr()); false } } } #[no_mangle] pub extern "C" fn kaiju_consume_program( handle: Handle, on_process_op: fn( *mut libc::c_void, *const libc::c_char, *const usize, usize, *const usize, usize, ), processor_context: *mut libc::c_void, error: fn(*mut libc::c_void, *const libc::c_char), error_context: *mut libc::c_void, ) -> bool { if (on_process_op as *const libc::c_void).is_null() || (error as *const libc::c_void).is_null() { if !(error as *const libc::c_void).is_null() { let err = CString::new("Some of parameters are null pointers!").unwrap(); error(error_context, err.as_ptr()); } return false; } let mut vms = VMS.lock().unwrap(); match vms.get_mut(&handle) { Some(vm) => { if !vm.can_resume() { vms.remove(&handle); return false; } { *PROCESS_OP.lock().unwrap() = Some((processor_context as usize, on_process_op)); } let result = match vm.consume::<ExternalProcessor>() { Ok(_) => true, Err(err) => { let err = CString::new(err.message).unwrap(); error(error_context, err.as_ptr()); false } }; vms.remove(&handle); { *PROCESS_OP.lock().unwrap() = None; } result } None => { let err = CString::new(format!("There is no VM with handle: {}", handle)).unwrap(); error(error_context, err.as_ptr()); false } } } #[no_mangle] pub extern "C" fn kaiju_cancel_program(handle: Handle) { VMS.lock().unwrap().remove(&handle); } #[no_mangle] pub extern "C" fn kaiju_fork_program( handle: Handle, entry: *const libc::c_char, memsize: usize, stacksize: usize, error: 
fn(*mut libc::c_void, *const libc::c_char), error_context: *mut libc::c_void, ) -> Handle { if entry.is_null() || memsize == 0 || stacksize == 0 || (error as *const libc::c_void).is_null() { if !(error as *const libc::c_void).is_null() { let err = CString::new("Some of parameters are zeros or null pointers!").unwrap(); error(error_context, err.as_ptr()); } return 0; } let mut vms = VMS.lock().unwrap(); match vms.get(&handle) { Some(vm) => match vm.fork_advanced(stacksize as usize, memsize as usize) { Ok(mut vm) => match vm.start(&string_from_raw_unsized(entry as *const libc::c_uchar)) { Ok(_) => { let handle = { let gen = HANDLE_GEN.load(Ordering::Relaxed); let handle = gen + 1; HANDLE_GEN.store(handle, Ordering::Relaxed); handle }; vms.insert(handle, vm); handle } Err(err) => { let err = CString::new(err.message).unwrap(); error(error_context, err.as_ptr()); 0 } }, Err(err) => { let err = CString::new(err.message).unwrap(); error(error_context, err.as_ptr()); 0 } }, None => { let err = CString::new(format!("There is no VM with handle: {}", handle)).unwrap(); error(error_context, err.as_ptr()); 0 } } } #[no_mangle] pub extern "C" fn kaiju_with_program( handle: Handle, on_perform: fn(*mut libc::c_void), perform_context: *mut libc::c_void, error: fn(*mut libc::c_void, *const libc::c_char), error_context: *mut libc::c_void, ) -> bool { if (on_perform as *const libc::c_void).is_null() || (error as *const libc::c_void).is_null() { if !(error as *const libc::c_void).is_null() { let err = CString::new("Some of parameters are null pointers!").unwrap(); error(error_context, err.as_ptr()); } return false; } if VM.lock().unwrap().is_some() { return false; } let mut vms = VMS.lock().unwrap(); match vms.get_mut(&handle) { Some(vm) => { { *VM.lock().unwrap() = Some(unsafe { transmute::<&mut Vm, &'static mut Vm>(vm) }); } on_perform(perform_context); { *VM.lock().unwrap() = None; } true } None => { let err = CString::new(format!("There is no VM with handle: {}", handle)).unwrap(); error(error_context, err.as_ptr()); false } } } #[no_mangle] pub extern "C" fn kaiju_state_size() -> usize { if let Some(ref vm) = *VM.lock().unwrap() { return vm.state().all_size(); } 0 } #[no_mangle] pub extern "C" fn kaiju_state_ptr(address: usize) -> *const libc::c_void { if let Some(ref vm) = *VM.lock().unwrap() { let mem = vm.state().map_all(); if address < mem.len() { return unsafe { mem.as_ptr().add(address) as *const libc::c_void }; } } null() } #[no_mangle] pub extern "C" fn kaiju_state_ptr_mut(address: usize) -> *mut libc::c_void { if let Some(ref mut vm) = *VM.lock().unwrap() { let mem = vm.state_mut().map_all_mut(); if address < mem.len() { return unsafe { mem.as_mut_ptr().add(address) as *mut libc::c_void }; } } null_mut() } #[no_mangle] pub extern "C" fn kaiju_state_info(out_info: *mut KaijuInfoState) -> bool { if let Some(ref vm) = *VM.lock().unwrap() { unsafe { *out_info = KaijuInfoState { stack_size: vm.state().stack_size(), memory_size: vm.state().memory_size(), all_size: vm.state().all_size(), stack_free: vm.state().stack_free(), memory_free: vm.state().memory_free(), all_free: vm.state().all_free(), }; } return true; } false } #[no_mangle] pub extern "C" fn kaiju_state_alloc_stack(size: usize, out_address: *mut usize) -> bool { if let Some(ref mut vm) = *VM.lock().unwrap() { if let Ok(val) = vm.state_mut().alloc_stack_value(size) { unsafe { *out_address = val.address; } return true; } } false } #[no_mangle] pub extern "C" fn kaiju_state_pop_stack(size: usize) -> bool { if let Some(ref mut vm) = 
*VM.lock().unwrap() { let pos = vm.state().stack_pos(); if pos >= size && vm.state_mut().stack_reset(pos - size).is_ok() { return true; } } false } #[no_mangle] pub extern "C" fn kaiju_state_stack_address(out_address: *mut usize) -> bool { if let Some(ref vm) = *VM.lock().unwrap() { unsafe { *out_address = vm.state().stack_pos(); } return true; } false } #[no_mangle] pub extern "C" fn kaiju_state_alloc_memory(size: usize, out_address: *mut usize) -> bool { if let Some(ref mut vm) = *VM.lock().unwrap() { let bs = size_of::<usize>(); let val = vm.state_mut().alloc_memory_value(size + bs); if val.is_err() { return false; } let val = val.unwrap(); if vm.state_mut().store_data(val.address, &size).is_err() { vm.state_mut().dealloc_memory_value(&val).unwrap_or(()); return false; } unsafe { *out_address = val.address + bs; } return true; } false } #[no_mangle] pub extern "C" fn kaiju_state_dealloc_memory(address: usize) -> bool { if let Some(ref mut vm) = *VM.lock().unwrap() { let bs = size_of::<usize>(); let size = vm.state().load_data::<usize>(address - bs); if size.is_err() { return false; } let size = size.unwrap() + bs; if vm .state_mut() .dealloc_memory_value(&Value::new(address - bs, size)) .is_ok() { return true; } } false } #[no_mangle] pub extern "C" fn kaiju_context_go_to(label: *const libc::c_char) -> bool { if let Some(ref vm) = *VM.lock().unwrap() { if let Some(pos) = vm.find_label(&string_from_raw_unsized(label as *const libc::c_uchar)) { *OP_ACTION.lock().unwrap() = OpAction::GoTo(pos); return true; } } false } #[no_mangle] pub extern "C" fn kaiju_context_return() { *OP_ACTION.lock().unwrap() = OpAction::Return; } fn bytes_from_raw(source: *const libc::c_uchar, size: usize) -> Vec<u8> { if source.is_null() || size == 0 { return vec![]; } let mut result = vec![0; size]; let target = result.as_mut_ptr(); unsafe { copy_nonoverlapping(source, target, size) }; result } fn string_from_raw_unsized(mut source: *const libc::c_uchar) -> String { if source.is_null() { return "".to_owned(); } let mut bytes = vec![]; unsafe { while *source != 0 { bytes.push(*source); source = source.add(1); } } let cstring = unsafe { CString::from_vec_unchecked(bytes) }; cstring.into_string().unwrap() }
30.197158
100
0.520205
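The record above hands opaque usize handles across the C boundary and keeps the real Vm objects in a lazy_static map, so callers never see Rust pointers. A minimal sketch of that handle-registry pattern (generic over T; using fetch_add is a small liberty versus the original's separate load/store, which would race between two simultaneous inserts):

use std::collections::HashMap;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;

// Global counter handing out opaque handles, as HANDLE_GEN does above.
static HANDLE_GEN: AtomicUsize = AtomicUsize::new(0);

struct Registry<T> {
    items: Mutex<HashMap<usize, T>>,
}

impl<T> Registry<T> {
    fn new() -> Self {
        Registry { items: Mutex::new(HashMap::new()) }
    }

    fn insert(&self, value: T) -> usize {
        // +1 keeps 0 free as the error/invalid handle, matching the FFI above.
        let handle = HANDLE_GEN.fetch_add(1, Ordering::Relaxed) + 1;
        self.items.lock().unwrap().insert(handle, value);
        handle
    }

    fn remove(&self, handle: usize) -> Option<T> {
        self.items.lock().unwrap().remove(&handle)
    }
}

fn main() {
    let registry: Registry<String> = Registry::new();
    let h = registry.insert("vm state".to_string());
    assert_ne!(h, 0);
    assert_eq!(registry.remove(h).as_deref(), Some("vm state"));
}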
dd25df27b42d2a59dbecac80f2a766fe2c130a96
2,546
#[doc = r"Value read from the register"]
pub struct R {
    bits: u32,
}
#[doc = r"Value to write to the register"]
pub struct W {
    bits: u32,
}
impl super::ADDR2L {
    #[doc = r"Modifies the contents of the register"]
    #[inline(always)]
    pub fn modify<F>(&self, f: F)
    where
        for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
    {
        let bits = self.register.get();
        self.register.set(f(&R { bits }, &mut W { bits }).bits);
    }
    #[doc = r"Reads the contents of the register"]
    #[inline(always)]
    pub fn read(&self) -> R {
        R {
            bits: self.register.get(),
        }
    }
    #[doc = r"Writes to the register"]
    #[inline(always)]
    pub fn write<F>(&self, f: F)
    where
        F: FnOnce(&mut W) -> &mut W,
    {
        self.register.set(
            f(&mut W {
                bits: Self::reset_value(),
            })
            .bits,
        );
    }
    #[doc = r"Reset value of the register"]
    #[inline(always)]
    pub const fn reset_value() -> u32 {
        0
    }
    #[doc = r"Writes the reset value to the register"]
    #[inline(always)]
    pub fn reset(&self) {
        self.register.set(Self::reset_value())
    }
}
#[doc = r"Value of the field"]
pub struct EMAC_ADDR2L_ADDRLOR {
    bits: u32,
}
impl EMAC_ADDR2L_ADDRLOR {
    #[doc = r"Value of the field as raw bits"]
    #[inline(always)]
    pub fn bits(&self) -> u32 {
        self.bits
    }
}
#[doc = r"Proxy"]
pub struct _EMAC_ADDR2L_ADDRLOW<'a> {
    w: &'a mut W,
}
impl<'a> _EMAC_ADDR2L_ADDRLOW<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u32) -> &'a mut W {
        self.w.bits &= !(4294967295 << 0);
        self.w.bits |= ((value as u32) & 4294967295) << 0;
        self.w
    }
}
impl R {
    #[doc = r"Value of the register as raw bits"]
    #[inline(always)]
    pub fn bits(&self) -> u32 {
        self.bits
    }
    #[doc = "Bits 0:31 - MAC Address2 \\[31:0\\]"]
    #[inline(always)]
    pub fn emac_addr2l_addrlo(&self) -> EMAC_ADDR2L_ADDRLOR {
        let bits = ((self.bits >> 0) & 4294967295) as u32;
        EMAC_ADDR2L_ADDRLOR { bits }
    }
}
impl W {
    #[doc = r"Writes raw bits to the register"]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.bits = bits;
        self
    }
    #[doc = "Bits 0:31 - MAC Address2 \\[31:0\\]"]
    #[inline(always)]
    pub fn emac_addr2l_addrlo(&mut self) -> _EMAC_ADDR2L_ADDRLOW {
        _EMAC_ADDR2L_ADDRLOW { w: self }
    }
}
25.46
66
0.534171
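The generated field writer above is the standard clear-then-set bit dance. With a full-width 32-bit field (as in ADDRLO) the mask math degenerates, since clearing the field clears the whole register, so a narrower hypothetical field shows it more clearly:

fn write_field(bits: u32, value: u32, mask: u32, offset: u32) -> u32 {
    // Clear the field, then OR in the masked value — the same two statements
    // as `_EMAC_ADDR2L_ADDRLOW::bits` above, just with a sub-word field.
    (bits & !(mask << offset)) | ((value & mask) << offset)
}

fn main() {
    // Write 0b101 into a hypothetical 3-bit field at bit offset 4.
    let bits = write_field(0xFFFF_FFFF, 0b101, 0b111, 4);
    assert_eq!(bits, 0xFFFF_FFDF);
}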
e27ab0fe11b2aa51c2ade6081779f0924e909abe
19,533
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use super::{ node::{LeafNode, LeafValue, SparseMerkleNode}, AccountStatus, ProofRead, SparseMerkleTree, }; use libra_crypto::{ hash::{CryptoHash, TestOnlyHash, SPARSE_MERKLE_PLACEHOLDER_HASH}, HashValue, }; use libra_types::{account_state_blob::AccountStateBlob, proof::SparseMerkleProof}; use std::{collections::HashMap, sync::Arc}; fn hash_internal(left_child: HashValue, right_child: HashValue) -> HashValue { libra_types::proof::SparseMerkleInternalNode::new(left_child, right_child).hash() } fn hash_leaf(key: HashValue, value_hash: HashValue) -> HashValue { libra_types::proof::SparseMerkleLeafNode::new(key, value_hash).hash() } #[derive(Default)] struct ProofReader(HashMap<HashValue, SparseMerkleProof>); impl ProofReader { fn new(key_with_proof: Vec<(HashValue, SparseMerkleProof)>) -> Self { ProofReader(key_with_proof.into_iter().collect()) } } impl ProofRead for ProofReader { fn get_proof(&self, key: HashValue) -> Option<&SparseMerkleProof> { self.0.get(&key) } } #[test] fn test_construct_subtree_zero_siblings() { let node_hash = HashValue::new([1; HashValue::LENGTH]); let node = SparseMerkleNode::new_subtree(node_hash); let subtree_node = SparseMerkleTree::construct_subtree(std::iter::empty(), std::iter::empty(), Arc::new(node)); let smt = SparseMerkleTree { root: subtree_node }; assert_eq!(smt.root_hash(), node_hash); } #[test] fn test_construct_subtree_three_siblings() { // x // / \ // [4; 32] c y // / \ // z b [3; 32] // / \ // node a [2; 32] let key = b"hello".test_only_hash(); let blob = AccountStateBlob::from(b"world".to_vec()); let leaf_hash = hash_leaf(key, blob.hash()); let node = SparseMerkleNode::new_leaf(key, LeafValue::BlobHash(blob.hash())); let bits = vec![false, false, true]; let a_hash = HashValue::new([2; HashValue::LENGTH]); let b_hash = HashValue::new([3; HashValue::LENGTH]); let c_hash = HashValue::new([4; HashValue::LENGTH]); let siblings = vec![a_hash, b_hash, c_hash] .into_iter() .map(|hash| Arc::new(SparseMerkleNode::new_subtree(hash))); let subtree_node = SparseMerkleTree::construct_subtree(bits.into_iter(), siblings, Arc::new(node)); let smt = SparseMerkleTree { root: subtree_node }; let z_hash = hash_internal(leaf_hash, a_hash); let y_hash = hash_internal(z_hash, b_hash); let root_hash = hash_internal(c_hash, y_hash); assert_eq!(smt.root_hash(), root_hash); } #[test] #[should_panic] fn test_construct_subtree_panic() { let node_hash = HashValue::new([1; HashValue::LENGTH]); let node = SparseMerkleNode::new_subtree(node_hash); let _subtree_node = SparseMerkleTree::construct_subtree( std::iter::once(true), std::iter::empty(), Arc::new(node), ); } #[test] fn test_construct_subtree_with_new_leaf_override_existing_leaf() { let key = b"hello".test_only_hash(); let old_blob = AccountStateBlob::from(b"old_old_old".to_vec()); let new_blob = AccountStateBlob::from(b"new_new_new".to_vec()); let existing_leaf = LeafNode::new(key, LeafValue::BlobHash(old_blob.hash())); let subtree = SparseMerkleTree::construct_subtree_with_new_leaf( key, new_blob.clone(), &existing_leaf, /* distance_from_root_to_existing_leaf = */ 3, ); let smt = SparseMerkleTree { root: subtree }; let new_blob_hash = new_blob.hash(); let root_hash = hash_leaf(key, new_blob_hash); assert_eq!(smt.root_hash(), root_hash); } #[test] fn test_construct_subtree_with_new_leaf_create_extension() { // root root // / \ / \ // o o o o // / \ / \ // o existing_key => o subtree // / \ // y placeholder // / \ // x placeholder // / \ // 
existing_key new_key let existing_key = b"world".test_only_hash(); let existing_blob = AccountStateBlob::from(b"world".to_vec()); let existing_blob_hash = existing_blob.hash(); let new_key = b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa".test_only_hash(); let new_blob = AccountStateBlob::from(b"new_blob!!!!!".to_vec()); assert_eq!(existing_key[0], 0b0100_0010); assert_eq!(new_key[0], 0b0100_1011); let existing_leaf = LeafNode::new(existing_key, LeafValue::BlobHash(existing_blob.hash())); let subtree = SparseMerkleTree::construct_subtree_with_new_leaf( new_key, new_blob.clone(), &existing_leaf, /* distance_from_root_to_existing_leaf = */ 2, ); let smt = SparseMerkleTree { root: subtree }; let new_blob_hash = new_blob.hash(); let existing_leaf_hash = hash_leaf(existing_key, existing_blob_hash); let new_leaf_hash = hash_leaf(new_key, new_blob_hash); let x_hash = hash_internal(existing_leaf_hash, new_leaf_hash); let y_hash = hash_internal(x_hash, *SPARSE_MERKLE_PLACEHOLDER_HASH); let root_hash = hash_internal(y_hash, *SPARSE_MERKLE_PLACEHOLDER_HASH); assert_eq!(smt.root_hash(), root_hash); } #[test] #[should_panic(expected = "Reached an internal node at the bottom of the tree.")] fn test_construct_subtree_at_bottom_found_internal_node() { let left_child = Arc::new(SparseMerkleNode::new_subtree(HashValue::new( [1; HashValue::LENGTH], ))); let right_child = Arc::new(SparseMerkleNode::new_empty()); let current_node = Arc::new(SparseMerkleNode::new_internal(left_child, right_child)); let key = b"hello".test_only_hash(); let new_blob = AccountStateBlob::from(b"new_blob".to_vec()); let remaining_bits = key.iter_bits(); let proof_reader = ProofReader::default(); let _subtree_node = SparseMerkleTree::construct_subtree_at_bottom( current_node, key, new_blob, remaining_bits, &proof_reader, ); } #[test] fn test_construct_subtree_at_bottom_found_leaf_node() { // root root // / \ / \ // o o o o // / \ / \ // o existing_key => o subtree // / \ // y placeholder // / \ // x placeholder // / \ // existing_key new_key let existing_key = b"world".test_only_hash(); let existing_blob = AccountStateBlob::from(b"world".to_vec()); let existing_blob_hash = existing_blob.hash(); let new_key = b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa".test_only_hash(); let new_blob = AccountStateBlob::from(b"new_blob!!!!!".to_vec()); assert_eq!(existing_key[0], 0b0100_0010); assert_eq!(new_key[0], 0b0100_1011); let current_node = Arc::new(SparseMerkleNode::new_leaf( existing_key, LeafValue::BlobHash(existing_blob_hash), )); let remaining_bits = { let mut iter = new_key.iter_bits(); iter.next(); iter.next(); iter }; let leaf = Some((existing_key, existing_blob_hash)); let siblings: Vec<_> = (0..2) .map(|x| HashValue::new([x; HashValue::LENGTH])) .collect(); let proof = SparseMerkleProof::new(leaf, siblings); let proof_reader = ProofReader::new(vec![(new_key, proof)]); let subtree = SparseMerkleTree::construct_subtree_at_bottom( current_node, new_key, new_blob.clone(), remaining_bits, &proof_reader, ) .unwrap(); let smt = SparseMerkleTree { root: subtree }; let existing_leaf_hash = hash_leaf(existing_key, existing_blob_hash); let new_blob_hash = new_blob.hash(); let new_leaf_hash = hash_leaf(new_key, new_blob_hash); let x_hash = hash_internal(existing_leaf_hash, new_leaf_hash); let y_hash = hash_internal(x_hash, *SPARSE_MERKLE_PLACEHOLDER_HASH); let root_hash = hash_internal(y_hash, *SPARSE_MERKLE_PLACEHOLDER_HASH); assert_eq!(smt.root_hash(), root_hash); } #[test] fn test_construct_subtree_at_bottom_found_empty_node() { // root root // / \ / \ // o o o o 
// / \ / \ // o placeholder => o new_key let new_key = b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa".test_only_hash(); let new_blob = AccountStateBlob::from(b"new_blob!!!!!".to_vec()); assert_eq!(new_key[0], 0b0100_1011); let current_node = Arc::new(SparseMerkleNode::new_empty()); let remaining_bits = { let mut iter = new_key.iter_bits(); // Skip first two. iter.next(); iter.next(); iter }; let proof_reader = ProofReader::default(); let subtree = SparseMerkleTree::construct_subtree_at_bottom( current_node, new_key, new_blob.clone(), remaining_bits, &proof_reader, ) .unwrap(); let smt = SparseMerkleTree { root: subtree }; let new_blob_hash = new_blob.hash(); let new_leaf_hash = hash_leaf(new_key, new_blob_hash); assert_eq!(smt.root_hash(), new_leaf_hash); } #[test] fn test_construct_subtree_at_bottom_found_subtree_node() { // root root // / \ / \ // o o o o // / \ / \ // o subtree => o new_subtree // / \ // x sibling [5; 32] (from proof) // / \ // sibling [6; 32] (from proof) new_leaf let new_key = b"aaaaaaaa".test_only_hash(); let new_blob = AccountStateBlob::from(b"new_blob!!!!!".to_vec()); assert_eq!(new_key[0], 0b0101_1111); let current_node = Arc::new(SparseMerkleNode::new_subtree(HashValue::new( [1; HashValue::LENGTH], ))); let remaining_bits = { let mut iter = new_key.iter_bits(); // Skip first two. iter.next(); iter.next(); iter }; let leaf = None; let siblings: Vec<_> = (3..7) .rev() .map(|x| HashValue::new([x; HashValue::LENGTH])) .collect(); let proof = SparseMerkleProof::new(leaf, siblings); let proof_reader = ProofReader::new(vec![(new_key, proof)]); let new_subtree = SparseMerkleTree::construct_subtree_at_bottom( current_node, new_key, new_blob.clone(), remaining_bits, &proof_reader, ) .unwrap(); let smt = SparseMerkleTree { root: new_subtree }; let new_blob_hash = new_blob.hash(); let new_leaf_hash = hash_leaf(new_key, new_blob_hash); let x_hash = hash_internal(HashValue::new([6; HashValue::LENGTH]), new_leaf_hash); let new_subtree_hash = hash_internal(x_hash, HashValue::new([5; HashValue::LENGTH])); assert_eq!(smt.root_hash(), new_subtree_hash); } #[test] fn test_update_256_siblings_in_proof() { // root // / \ // o placeholder // / \ // o placeholder // / \ // . placeholder // . // . 
(256 levels) // o // / \ // key1 key2 let key1 = HashValue::new([0; HashValue::LENGTH]); let key2 = { let mut buf = key1.to_vec(); *buf.last_mut().unwrap() |= 1; HashValue::from_slice(&buf).unwrap() }; let blob1 = AccountStateBlob::from(b"value1".to_vec()); let blob2 = AccountStateBlob::from(b"value2".to_vec()); let value1_hash = blob1.hash(); let value2_hash = blob2.hash(); let leaf1_hash = hash_leaf(key1, value1_hash); let leaf2_hash = hash_leaf(key2, value2_hash); let mut siblings: Vec<_> = std::iter::repeat(*SPARSE_MERKLE_PLACEHOLDER_HASH) .take(255) .collect(); siblings.push(leaf2_hash); siblings.reverse(); let proof_of_key1 = SparseMerkleProof::new(Some((key1, value1_hash)), siblings.clone()); let old_root_hash = siblings.iter().fold(leaf1_hash, |previous_hash, hash| { hash_internal(previous_hash, *hash) }); assert!(proof_of_key1 .verify(old_root_hash, key1, Some(&blob1)) .is_ok()); let new_blob1 = AccountStateBlob::from(b"value1111111111111".to_vec()); let proof_reader = ProofReader::new(vec![(key1, proof_of_key1)]); let smt = SparseMerkleTree::new(old_root_hash); let new_smt = smt .update(vec![(key1, new_blob1.clone())], &proof_reader) .unwrap(); let new_blob1_hash = new_blob1.hash(); let new_leaf1_hash = hash_leaf(key1, new_blob1_hash); let new_root_hash = siblings.iter().fold(new_leaf1_hash, |previous_hash, hash| { hash_internal(previous_hash, *hash) }); assert_eq!(new_smt.root_hash(), new_root_hash); assert_eq!( new_smt.get(key1), AccountStatus::ExistsInScratchPad(new_blob1) ); assert_eq!(new_smt.get(key2), AccountStatus::Unknown); } #[test] fn test_new_subtree() { let root_hash = HashValue::new([1; HashValue::LENGTH]); let smt = SparseMerkleTree::new(root_hash); assert!(smt.root.read_lock().is_subtree()); assert_eq!(smt.root_hash(), root_hash); } #[test] fn test_new_empty() { let root_hash = *SPARSE_MERKLE_PLACEHOLDER_HASH; let smt = SparseMerkleTree::new(root_hash); assert!(smt.root.read_lock().is_empty()); assert_eq!(smt.root_hash(), root_hash); } #[test] fn test_update() { // Before the update, the tree was: // root // / \ // y key3 // / \ // x placeholder // / \ // key1 key2 let key1 = b"aaaaa".test_only_hash(); let key2 = b"bb".test_only_hash(); let key3 = b"cccc".test_only_hash(); assert_eq!(key1[0], 0b0000_0100); assert_eq!(key2[0], 0b0010_0100); assert_eq!(key3[0], 0b1110_0111); let value1 = AccountStateBlob::from(b"value1".to_vec()); let value1_hash = value1.hash(); let value2_hash = AccountStateBlob::from(b"value2".to_vec()).hash(); let value3_hash = AccountStateBlob::from(b"value3".to_vec()).hash(); // A new key at the "placeholder" position. let key4 = b"d".test_only_hash(); assert_eq!(key4[0], 0b0100_1100); let value4 = AccountStateBlob::from(b"value".to_vec()); // Create a proof for this new key. let leaf1_hash = hash_leaf(key1, value1_hash); let leaf2_hash = hash_leaf(key2, value2_hash); let leaf3_hash = hash_leaf(key3, value3_hash); let x_hash = hash_internal(leaf1_hash, leaf2_hash); let y_hash = hash_internal(x_hash, *SPARSE_MERKLE_PLACEHOLDER_HASH); let old_root_hash = hash_internal(y_hash, leaf3_hash); let proof = SparseMerkleProof::new(None, vec![x_hash, leaf3_hash]); assert!(proof.verify(old_root_hash, key4, None).is_ok()); // Create the old tree and update the tree with new value and proof. 
let proof_reader = ProofReader::new(vec![(key4, proof)]); let old_smt = SparseMerkleTree::new(old_root_hash); let smt1 = old_smt .update(vec![(key4, value4.clone())], &proof_reader) .unwrap(); // Now smt1 should look like this: // root // / \ // y key3 (subtree) // / \ // x key4 assert_eq!(smt1.get(key1), AccountStatus::Unknown); assert_eq!(smt1.get(key2), AccountStatus::Unknown); assert_eq!(smt1.get(key3), AccountStatus::Unknown); assert_eq!( smt1.get(key4), AccountStatus::ExistsInScratchPad(value4.clone()) ); let non_existing_key = b"foo".test_only_hash(); assert_eq!(non_existing_key[0], 0b0111_0110); assert_eq!(smt1.get(non_existing_key), AccountStatus::DoesNotExist); // Verify root hash. let value4_hash = value4.hash(); let leaf4_hash = hash_leaf(key4, value4_hash); let y_hash = hash_internal(x_hash, leaf4_hash); let root_hash = hash_internal(y_hash, leaf3_hash); assert_eq!(smt1.root_hash(), root_hash); // Next, we are going to modify key1. Create a proof for key1. let proof = SparseMerkleProof::new( Some((key1, value1_hash)), vec![leaf2_hash, *SPARSE_MERKLE_PLACEHOLDER_HASH, leaf3_hash], ); assert!(proof.verify(old_root_hash, key1, Some(&value1)).is_ok()); let value1 = AccountStateBlob::from(b"value11111".to_vec()); let proof_reader = ProofReader::new(vec![(key1, proof)]); let smt2 = smt1 .update(vec![(key1, value1.clone())], &proof_reader) .unwrap(); // Now the tree looks like: // root // / \ // y key3 (subtree) // / \ // x key4 // / \ // key1 key2 (subtree) assert_eq!( smt2.get(key1), AccountStatus::ExistsInScratchPad(value1.clone()) ); assert_eq!(smt2.get(key2), AccountStatus::Unknown); assert_eq!(smt2.get(key3), AccountStatus::Unknown); assert_eq!(smt2.get(key4), AccountStatus::ExistsInScratchPad(value4)); // Verify root hash. let value1_hash = value1.hash(); let leaf1_hash = hash_leaf(key1, value1_hash); let x_hash = hash_internal(leaf1_hash, leaf2_hash); let y_hash = hash_internal(x_hash, leaf4_hash); let root_hash = hash_internal(y_hash, leaf3_hash); assert_eq!(smt2.root_hash(), root_hash); // We now try to create another branch on top of smt1. let value4 = AccountStateBlob::from(b"new value 4444444444".to_vec()); // key4 already exists in the tree. let proof_reader = ProofReader::default(); let smt22 = smt1 .update(vec![(key4, value4.clone())], &proof_reader) .unwrap(); assert_eq!(smt22.get(key1), AccountStatus::Unknown); assert_eq!(smt22.get(key2), AccountStatus::Unknown); assert_eq!(smt22.get(key3), AccountStatus::Unknown); assert_eq!( smt22.get(key4), AccountStatus::ExistsInScratchPad(value4.clone()) ); // Now prune smt1. smt1.prune(); // For smt2, only key1 should be available since smt2 was constructed by updating smt1 with // key1. assert_eq!(smt2.get(key1), AccountStatus::ExistsInScratchPad(value1)); assert_eq!(smt2.get(key2), AccountStatus::Unknown); assert_eq!(smt2.get(key3), AccountStatus::Unknown); assert_eq!(smt2.get(key4), AccountStatus::Unknown); // For smt22, only key4 should be available since smt22 was constructed by updating smt1 with // key4. assert_eq!(smt22.get(key1), AccountStatus::Unknown); assert_eq!(smt22.get(key2), AccountStatus::Unknown); assert_eq!(smt22.get(key3), AccountStatus::Unknown); assert_eq!(smt22.get(key4), AccountStatus::ExistsInScratchPad(value4)); }
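    // Note on the helpers used throughout these tests: hash_leaf(key, blob_hash) and
    // hash_internal(left, right) are assumed to be thin wrappers over the crate's
    // leaf-node and internal-node hashers defined earlier in this file, and
    // SPARSE_MERKLE_PLACEHOLDER_HASH stands in for the hash of an empty subtree.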
36.716165
100
0.60257
7a0e11c4b2284c906bd926ec32451b0e34f670a5
1,895
use gdnative::export::StaticallyNamed; use gdnative::prelude::*; pub(crate) fn run_tests() -> bool { let mut status = true; status &= test_variant_call_args(); status } pub(crate) fn register(handle: InitHandle) { handle.add_class::<VariantCallArgs>(); } struct VariantCallArgs; impl NativeClass for VariantCallArgs { type Base = Reference; type UserData = user_data::MutexData<VariantCallArgs>; fn init(_owner: TRef<Reference>) -> VariantCallArgs { VariantCallArgs } fn register_properties(_builder: &ClassBuilder<Self>) {} } impl StaticallyNamed for VariantCallArgs { const CLASS_NAME: &'static str = "VariantCallArgs"; } #[methods] impl VariantCallArgs { #[export] fn zero(&mut self, _owner: &Reference) -> i32 { 42 } #[export] fn one(&mut self, _owner: &Reference, a: i32) -> i32 { a * 42 } #[export] fn two(&mut self, _owner: &Reference, a: i32, b: i32) -> i32 { a * 42 + b } #[export] fn three(&mut self, _owner: &Reference, a: i32, b: i32, c: i32) -> i32 { a * 42 + b * c } } crate::godot_itest! { test_variant_call_args { let obj = Instance::<VariantCallArgs, _>::new(); let mut base = obj.into_base().into_shared().to_variant(); assert_eq!(Some(42), call_i64(&mut base, "zero", &[])); assert_eq!(Some(126), call_i64(&mut base, "one", &[Variant::new(3)])); assert_eq!( Some(-10), call_i64(&mut base, "two", &[Variant::new(-1), Variant::new(32)]) ); assert_eq!( Some(-52), call_i64( &mut base, "three", &[Variant::new(-2), Variant::new(4), Variant::new(8),] ) ); }} fn call_i64(variant: &mut Variant, method: &str, args: &[Variant]) -> Option<i64> { let result = unsafe { variant.call(method, args) }; result.unwrap().to() }
22.831325
83
0.587335
916180e36838f5fa29b09726d853e7831baac216
1,139
//! Crate that provides helpers and/or middlewares for Tide
//! related to structured logging with slog.

#![cfg_attr(docsrs, feature(doc_cfg))]
#![warn(
    nonstandard_style,
    rust_2018_idioms,
    future_incompatible,
    missing_debug_implementations
)]

mod per_request_logger;
mod request_logger;
#[cfg(feature = "scope")]
mod set_slog_scope_logger;

pub use per_request_logger::PerRequestLogger;
pub use request_logger::RequestLogger;
#[cfg(feature = "scope")]
pub use set_slog_scope_logger::SetSlogScopeLogger;

use tide_core::Context;

/// An extension to [`Context`] that provides access to a per-request [`slog::Logger`].
pub trait ContextExt {
    /// Returns a [`slog::Logger`] scoped to this request.
    ///
    /// # Panics
    ///
    /// Will panic if no [`PerRequestLogger`] middleware has been used to set up the
    /// request-scoped logger.
    fn logger(&self) -> &slog::Logger;
}

impl<State> ContextExt for Context<State> {
    fn logger(&self) -> &slog::Logger {
        self.extensions()
            .get::<slog::Logger>()
            .expect("PerRequestLogger must be used to populate request logger")
    }
}
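// Hedged usage sketch (the app-construction calls below are assumptions about
// the contemporary tide API, including the PerRequestLogger constructor):
//
//     let root = slog::Logger::root(slog::Discard, slog::o!());
//     let mut app = tide::App::new(());
//     app.middleware(PerRequestLogger::new(root));
//     // later, inside an endpoint:
//     let log = cx.logger();
//     slog::info!(log, "handling request");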
27.119048
98
0.687445
7afe8c0da93873f8d7c55a9bba5b2ce96b707922
1,602
use i2p::sam_options::{
    I2CPOptions, I2CPRouterOptions, I2CPTunnelInboundOptions, I2CPTunnelOutboundOptions,
    SAMOptions,
};
use serde::{Deserialize, Serialize};

#[derive(Clone, Default, Debug, Serialize, Deserialize)]
pub struct Tunnel {
    pub in_length: u8,
    pub in_quantity: u8,
    pub in_backup_quantity: u8,
    pub out_length: u8,
    pub out_quantity: u8,
    pub out_backup_quantity: u8,
    pub name: String,
    pub random_key: Option<String>,
}

impl Tunnel {
    pub fn options(&self) -> SAMOptions {
        let mut sam_options = SAMOptions::default();
        sam_options.i2cp_options = Some(I2CPOptions {
            router_options: Some(I2CPRouterOptions {
                inbound: Some(I2CPTunnelInboundOptions {
                    length: Some(self.in_length),
                    quantity: Some(self.in_quantity),
                    backup_quantity: Some(self.in_backup_quantity),
                    random_key: self.random_key.clone(),
                    ..Default::default()
                }),
                // The outbound tunnel reads the out_* knobs.
                outbound: Some(I2CPTunnelOutboundOptions {
                    length: Some(self.out_length),
                    quantity: Some(self.out_quantity),
                    backup_quantity: Some(self.out_backup_quantity),
                    random_key: self.random_key.clone(),
                    ..Default::default()
                }),
                fast_receive: Some(true),
                should_bundle_reply_info: Some(false),
                ..Default::default()
            }),
            ..Default::default()
        });
        sam_options
    }
}
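// Example grounded in the types above: a 3-hop tunnel with two parallel
// tunnels in each direction; every other knob keeps its Default value.
#[allow(dead_code)]
fn example_tunnel_options() -> SAMOptions {
    let tunnel = Tunnel {
        in_length: 3,
        in_quantity: 2,
        out_length: 3,
        out_quantity: 2,
        name: "example".to_string(),
        ..Default::default()
    };
    tunnel.options()
}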
34.826087
100
0.569288
d54694d7b3c8d04a0536c1f9e0970124e642da64
2,430
use piston_window::*; use quick_xml::de::from_reader; use serde::Deserialize; use std::fs::File; use std::io::BufReader; #[derive(Debug, Clone)] pub struct SpriteSheet { atlas: TextureAtlas, texture: G2dTexture, } #[allow(dead_code)] impl SpriteSheet { pub fn new( assets_folder: &str, sprites: &str, texture_context: &mut G2dTextureContext, ) -> SpriteSheet { let assets = find_folder::Search::ParentsThenKids(3, 3) .for_folder(assets_folder) .expect("Assets folder missing"); let file = File::open(assets.join(sprites)).expect("Missing descriptor for sprites"); let reader = BufReader::new(file); let atlas: TextureAtlas = from_reader(reader).expect("Invalid sprite descriptor format"); let texture_path = assets.join(&atlas.image_path); let texture: G2dTexture = Texture::from_path( texture_context, &texture_path, Flip::None, &TextureSettings::new(), ) .expect("Cannot load sprites as texture"); SpriteSheet { atlas, texture } } pub fn render_sprite(&self, name: &String, pos: [f64; 2], c: Context, g: &mut G2d) { let sub_texture = self.atlas.sub_textures.iter().find(|t| t.name == *name); if let Some(sprite) = sub_texture { let src_rect = [sprite.x, sprite.y, sprite.width, sprite.height]; Image::new().src_rect(src_rect).draw( &self.texture, &c.draw_state, c.transform.trans(pos[0], pos[1]), g, ); } } pub fn sprite_size(&self, name: &String) -> Option<[f64; 2]> { self.find_sub_texture(name).map(|s| [s.width, s.height]) } fn find_sub_texture(&self, name: &String) -> Option<&SubTexture> { self.atlas.sub_textures.iter().find(|t| t.name == *name) } } #[derive(Debug, Deserialize, PartialEq, Clone)] #[serde(rename_all = "camelCase")] struct TextureAtlas { pub image_path: String, #[serde(rename = "SubTexture")] pub sub_textures: Vec<SubTexture>, } #[derive(Debug, Deserialize, PartialEq, Clone)] #[serde(rename_all = "camelCase")] struct SubTexture { pub name: String, pub x: f64, pub y: f64, pub width: f64, pub height: f64, pub frame_x: f64, pub frame_y: f64, pub frame_width: f64, pub frame_height: f64, }
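// Hedged usage sketch (assumes an already-created PistonWindow named `window`;
// create_texture_context is an assumption about the piston_window API):
//
//     let mut ctx = window.create_texture_context();
//     let sheet = SpriteSheet::new("assets", "sprites.xml", &mut ctx);
//     // inside the draw closure:
//     sheet.render_sprite(&"player".to_string(), [16.0, 32.0], c, g);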
28.588235
97
0.601235
fe1b2e051c479f190c375fe6747ddcb4d4380c24
668
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // pretty-expanded FIXME #23616 enum E { V0, V16(u16) } static C: (E, u16, u16) = (E::V0, 0x600D, 0xBAD); pub fn main() { let (_, n, _) = C; assert!(n != 0xBAD); assert_eq!(n, 0x600D); }
31.809524
68
0.684132
ace8b30000a2e9dbe950cbf708adb05533401b2f
640
//use hdk::prelude::*; mod current_games; mod game; mod game_move; mod signal; mod turn_based_game; mod mixin; /*#[hdk_extern] pub fn validate(op:Op) -> ExternResult<ValidateCallbackResult>{ match op{ Ok(ValidateCallbackResult::Valid) } }*/ pub use current_games::{get_my_current_games, remove_my_current_game, remove_current_game}; pub use game::{ create_game, get_game, get_game_state, GameEntry //validate_game_entry, GameEntry, }; pub use game_move::{ create_move, get_game_moves, GameMoveEntry, MoveInfo, //validate_game_move_entry, GameMoveEntry, MoveInfo, }; pub use mixin::*; pub use turn_based_game::*;
21.333333
110
0.746875
21742c70ca53f2d71c1afbabb687a5dfa80ad0ae
12,069
pub mod v1 {
    //! ## Merkle Tree
    //!
    //! #### Algorithm description
    //! - A hash tree.
    //!
    //! #### Use cases
    //! - Data verification, proofs of existence.
    //!
    //! #### Implementation properties
    //! - <font color=Red>×</font> thread-safe
    //! - <font color=Green>√</font> no `unsafe` code

    use ring::digest::{Context, SHA1};
    use std::cell::RefCell;
    use std::collections::HashMap;
    use std::rc::{Rc, Weak};

    pub struct Merkle {
        tree: Option<Rc<RefCell<MerkleTree>>>,
        leaves: HashMap<Vec<u8>, Rc<RefCell<MerkleTree>>>,
    }

    #[derive(Default, Debug)]
    pub struct MerkleTree {
        hash: Vec<u8>,
        parent: Option<Rc<RefCell<MerkleTree>>>,
        brother_left: Option<Weak<RefCell<MerkleTree>>>,
        brother_right: Option<Rc<RefCell<MerkleTree>>>,
    }

    #[derive(Clone, Debug)]
    pub struct Proof {
        prepend: bool,
        hash: Vec<u8>,
    }

    impl Merkle {
        //should be a tail recursion
        fn gen(mut todo: Vec<Rc<RefCell<MerkleTree>>>) -> Vec<Rc<RefCell<MerkleTree>>> {
            if 1 == todo.len() {
                return todo;
            }
            if 0 < todo.len() % 2 {
                todo.push(Rc::new(RefCell::new(MerkleTree {
                    hash: vec![],
                    parent: None,
                    brother_left: None,
                    brother_right: None,
                })));
            }

            let mut res = Vec::with_capacity(todo.len() / 2);
            let mut hashsig;
            let todo = todo.chunks(2);
            for pair in todo {
                hashsig = pair[0].borrow().hash.clone();
                hashsig.extend(pair[1].borrow().hash.iter());
                hashsig = hash(&hashsig);
                res.push(Rc::new(RefCell::new(MerkleTree {
                    hash: hashsig,
                    parent: None,
                    brother_left: None,
                    brother_right: None,
                })));

                pair[0].borrow_mut().parent = Some(Rc::clone(res.last().unwrap()));
                pair[1].borrow_mut().parent = Some(Rc::clone(res.last().unwrap()));
                pair[0].borrow_mut().brother_right = Some(Rc::clone(&pair[1]));
                pair[1].borrow_mut().brother_left = Some(Rc::downgrade(&Rc::clone(&pair[0])));
            }

            Self::gen(res)
        }

        pub fn new(mut leaves: Vec<Vec<u8>>) -> Option<Merkle> {
            let mut res = Merkle {
                tree: None,
                leaves: HashMap::new(),
            };

            if leaves.is_empty() {
                return None;
            } else if 1 == leaves.len() {
                res.tree = Some(Rc::new(RefCell::new(MerkleTree {
                    hash: leaves[0].clone(),
                    parent: None,
                    brother_left: None,
                    brother_right: None,
                })));
                res.leaves.insert(
                    leaves.pop().unwrap(),
                    Rc::clone(&res.tree.as_ref().unwrap()),
                );
                return Some(res);
            }

            //ensure the leaves pair up evenly
            if 0 < leaves.len() % 2 {
                leaves.push(vec![]);
            }

            let todo = leaves
                .into_iter()
                .map(|hash| {
                    let leaf = Rc::new(RefCell::new(MerkleTree {
                        hash: hash.clone(),
                        parent: None,
                        brother_left: None,
                        brother_right: None,
                    }));
                    res.leaves.insert(hash, Rc::clone(&leaf));
                    leaf
                })
                .collect::<Vec<Rc<RefCell<MerkleTree>>>>();

            res.tree = Some(Self::gen(todo).pop().unwrap());
            Some(res)
        }

        fn get_proof(me: Rc<RefCell<MerkleTree>>, res: &mut Vec<Proof>) {
            if me.borrow().parent.is_some() {
                let next;
                if let Some(v) = me.borrow().brother_right.as_ref() {
                    res.push(Proof {
                        prepend: false,
                        hash: v.borrow().hash.clone(),
                    });
                    next = Rc::clone(&me.borrow().parent.as_ref().unwrap());
                } else if let Some(v) = me.borrow().brother_left.as_ref() {
                    res.push(Proof {
                        prepend: true,
                        hash: v.upgrade().unwrap().borrow().hash.clone(),
                    });
                    next = Rc::clone(&me.borrow().parent.as_ref().unwrap());
                } else {
                    panic!("BUG");
                }
                Self::get_proof(next, res);
            } else {
                return;
            }
        }

        ///unsorted merkle tree can ONLY give positive proof
        pub fn proof(&self, hash: Vec<u8>) -> Option<Vec<Proof>> {
            if let Some(v) = self.leaves.get(&hash) {
                let mut res = vec![];
                res.push(Proof {
                    prepend: false,
                    hash: v.borrow().hash.clone(),
                });
                Self::get_proof(Rc::clone(&v), &mut res);
                Some(res)
            } else {
                None
            }
        }

        pub fn calculate_root(
            hash_path: &[Proof],
            hasher: impl Fn(&[u8]) -> Vec<u8>,
        ) -> Option<Vec<u8>> {
            // Check for emptiness before indexing into the path.
            if hash_path.is_empty() {
                return None;
            }
            let res = hash_path[0].clone();
            if 1 == hash_path.len() {
                return Some(res.hash);
            }

            Some(
                hash_path
                    .iter()
                    .skip(1)
                    .fold(res, |mut prev, last| {
                        if last.prepend {
                            let mut h = last.hash.clone();
                            h.append(&mut prev.hash);
                            prev.hash = hasher(&h);
                        } else {
                            prev.hash.extend(last.hash.iter());
                            prev.hash = hasher(&prev.hash);
                        }
                        prev
                    })
                    .hash,
            )
        }
    }

    #[inline]
    fn hash(item: &[u8]) -> Vec<u8> {
        let mut context = Context::new(&SHA1);
        context.update(item);
        context.finish().as_ref().to_vec()
    }

    #[cfg(test)]
    mod test {
        use super::*;
        #[test]
        fn merkle() {
            let mut sample = vec![];
            for i in 0i32..40 {
                sample.push(hash(&i.to_le_bytes()));
            }

            let merkle = Merkle::new(sample.clone()).unwrap();

            //positive proof
            sample.into_iter().for_each(|i| {
                assert_eq!(
                    &merkle.tree.as_ref().unwrap().borrow().hash,
                    &Merkle::calculate_root(&merkle.proof(i).unwrap(), hash).unwrap()
                );
            });
        }
    }
}

pub mod v2 {
    //! ## Merkle Tree
    //!
    //! #### Algorithm description
    //! - A hash tree implemented with nested Vecs.
    //!
    //! #### Use cases
    //! - Data verification, proofs of existence.
    //!
    //! #### Implementation properties
    //! - <font color=Red>×</font> thread-safe
    //! - <font color=Green>√</font> no `unsafe` code

    use ring::digest::{Context, SHA1};
    use std::ops::{Deref, DerefMut};

    type HashSig = Vec<u8>;
    type HashLayer = Vec<HashSig>;

    #[derive(Debug)]
    struct Merkle(Vec<HashLayer>);

    impl Deref for Merkle {
        type Target = Vec<HashLayer>;
        fn deref(&self) -> &Self::Target {
            &self.0
        }
    }

    impl DerefMut for Merkle {
        fn deref_mut(&mut self) -> &mut Self::Target {
            &mut self.0
        }
    }

    #[inline]
    fn hash(item: &[u8]) -> Vec<u8> {
        let mut context = Context::new(&SHA1);
        context.update(item);
        context.finish().as_ref().to_vec()
    }

    #[derive(Clone, Debug)]
    pub struct Proof {
        prepend: bool,
        hash: HashSig,
    }

    impl Merkle {
        pub fn new(leaf_layer: HashLayer) -> Option<Merkle> {
            if leaf_layer.is_empty() {
                return None;
            }

            let mut res = Merkle(vec![leaf_layer]);
            if 1 == res[0].len() {
                return Some(res);
            }

            //for binary_search()
            res[0].sort();

            //leaf_layer pad
            if 0 < res[0].len() % 2 {
                res[0].push(vec![]);
            }

            let mut next_layer;
            let mut toplayer_idx;
            let mut i;
            let mut h;
            while 1 < res[res.len() - 1].len() {
                next_layer = vec![];
                toplayer_idx = res.len() - 1;

                i = 0;
                while i < res[toplayer_idx].len() {
                    h = res[toplayer_idx][i].clone();
                    h.extend(res[toplayer_idx][i + 1].iter());
                    next_layer.push(hash(&h));
                    i += 2;
                }

                if 1 < next_layer.len() && 0 < next_layer.len() % 2 {
                    next_layer.push(vec![]);
                }
                res.push(next_layer);
            }

            Some(res)
        }

        ///unsorted merkle tree can ONLY give positive proof
        pub fn proof(&self, hashsig: Vec<u8>) -> Option<Vec<Proof>> {
            if let Ok(mut idx) = self[0][..].binary_search(&hashsig) {
                let mut res = vec![];
                res.push(Proof {
                    prepend: false,
                    hash: self[0][idx].clone(),
                });

                //exclude the root layer
                for layer in self.iter().take(self.len() - 1) {
                    if 0 == idx % 2 {
                        //self is on the left, so its sibling must be on the right
                        res.push(Proof {
                            prepend: false,
                            hash: layer[idx + 1].clone(),
                        });
                    } else {
                        //self is on the right, so it cannot be the first element
                        //and its sibling must be on the left
                        res.push(Proof {
                            prepend: true,
                            hash: layer[idx - 1].clone(),
                        });
                    }
                    //compute the `parent` index in the layer above (the father layer)
                    idx /= 2;
                }

                Some(res)
            } else {
                None
            }
        }

        pub fn calculate_root(
            hash_path: &[Proof],
            hasher: impl Fn(&[u8]) -> Vec<u8>,
        ) -> Option<Vec<u8>> {
            if hash_path.is_empty() {
                return None;
            }
            let res = hash_path[0].clone();
            if 1 == hash_path.len() {
                return Some(res.hash);
            }

            Some(
                hash_path
                    .iter()
                    .skip(1)
                    .fold(res, |mut prev, last| {
                        if last.prepend {
                            let mut h = last.hash.clone();
                            h.append(&mut prev.hash);
                            prev.hash = hasher(&h);
                        } else {
                            prev.hash.extend(last.hash.iter());
                            prev.hash = hasher(&prev.hash);
                        }
                        prev
                    })
                    .hash,
            )
        }
    }

    #[cfg(test)]
    mod test {
        use super::*;
        #[test]
        fn merkle() {
            let mut sample = vec![];
            for i in 0i32..4 {
                sample.push(hash(&i.to_le_bytes()));
            }

            let
merkle = Merkle::new(sample.clone()).unwrap(); //positive proof sample.into_iter().for_each(|i| { assert_eq!( &merkle[merkle.len() - 1][0], &Merkle::calculate_root(&merkle.proof(i).unwrap(), hash).unwrap() ); }); } } }
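// Worked example of the Proof fold shared by both calculate_root versions:
// for a path [{prepend: false, hash: leaf}, {prepend: false, hash: s1},
// {prepend: true, hash: s2}] the root is hasher(s2 ++ hasher(leaf ++ s1)),
// where ++ is byte concatenation; `prepend` records whether the sibling sat
// to the left of the running hash.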
29.508557
94
0.391582
e27809d36ec443808e3c6df7c8a7934f97b8f2e6
4,114
// Copyright 2019 The druid Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! File open/save dialogs.

use std::path::{Path, PathBuf};

/// Information about a file to be opened or saved.
#[derive(Debug, Clone)]
pub struct FileInfo {
    pub(crate) path: PathBuf,
}

/// Type of file dialog.
#[cfg(not(feature = "x11"))]
pub enum FileDialogType {
    /// File open dialog.
    Open,
    /// File save dialog.
    Save,
}

/// Options for file dialogs.
#[derive(Debug, Clone, Default)]
pub struct FileDialogOptions {
    pub show_hidden: bool,
    pub allowed_types: Option<Vec<FileSpec>>,
    pub default_type: Option<FileSpec>,
    pub select_directories: bool,
    pub multi_selection: bool,
    // we don't want a library user to be able to construct this type directly
    __non_exhaustive: (),
}

/// A description of a filetype, for specifying allowed types in a file dialog.
///
/// # Windows
///
/// On Windows, each instance of this type is converted to a [`COMDLG_FILTERSPEC`]
/// struct.
///
/// [`COMDLG_FILTERSPEC`]: https://docs.microsoft.com/en-ca/windows/win32/api/shtypes/ns-shtypes-comdlg_filterspec
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct FileSpec {
    /// A human readable name, describing this filetype.
    ///
    /// This is used in the Windows file dialog, where the user can select
    /// from a dropdown the type of file they would like to choose.
    ///
    /// This should not include the file extensions; they will be added automatically.
    /// For instance, if we are describing Word documents, the name would be "Word Document",
    /// and the displayed string would be "Word Document (*.doc)".
    pub name: &'static str,
    /// The file extensions used by this file type.
    ///
    /// This should not include the leading '.'.
    pub extensions: &'static [&'static str],
}

impl FileInfo {
    /// The file's path.
    pub fn path(&self) -> &Path {
        &self.path
    }
}

impl FileDialogOptions {
    /// Create a new set of options.
    pub fn new() -> FileDialogOptions {
        FileDialogOptions::default()
    }

    /// Set the 'show hidden files' bit.
    pub fn show_hidden(mut self) -> Self {
        self.show_hidden = true;
        self
    }

    /// Set whether folders should be selectable.
    pub fn select_directories(mut self) -> Self {
        self.select_directories = true;
        self
    }

    /// Set whether multiple files can be selected.
    pub fn multi_selection(mut self) -> Self {
        self.multi_selection = true;
        self
    }

    /// Set the file types the user is allowed to select.
    pub fn allowed_types(mut self, types: Vec<FileSpec>) -> Self {
        self.allowed_types = Some(types);
        self
    }

    /// Set the default file type.
    /// If it's `None` or not present in [`allowed_types`](#method.allowed_types)
    /// then the first entry in [`allowed_types`](#method.allowed_types) will be used as default.
pub fn default_type(mut self, default_type: FileSpec) -> Self { self.default_type = Some(default_type); self } } impl FileSpec { pub const TEXT: FileSpec = FileSpec::new("Text", &["txt"]); pub const JPG: FileSpec = FileSpec::new("Jpeg", &["jpg", "jpeg"]); pub const GIF: FileSpec = FileSpec::new("Gif", &["gif"]); pub const PDF: FileSpec = FileSpec::new("PDF", &["pdf"]); pub const HTML: FileSpec = FileSpec::new("Web Page", &["htm", "html"]); /// Create a new `FileSpec`. pub const fn new(name: &'static str, extensions: &'static [&'static str]) -> Self { FileSpec { name, extensions } } }
31.891473
114
0.652649
1838b257c6c000c59350f7f4ee95d0f38eebcda7
25,501
#[cfg(target_arch = "x86_64")]
use std::borrow::BorrowMut;
#[cfg(target_arch = "riscv64")]
use core::borrow::BorrowMut;

/// All the functions herein are for transposing an 8x8 bit matrix, that
/// is presumed to be a block of a larger matrix of size m x n bytes. Brief
/// description of the functions:
///
/// transpose8vn: Very naive method, directly places one bit at a time.
/// transpose8b64: Basic shifting method that directly places a few bits at a time.
/// transpose8b64c: Compact version of 8b64; uses a for-loop.
/// transpose8bS64: Like 8b64, but uses GLS's bit swapping device.
/// transpose8r64: Basic recursive method (the main point of this comparison study).
/// transpose8rS64: Like 8r64, but uses GLS's bit swapping device.
/// transpose8b32, ..., 8rS32: Above four coded for a 32-bit machine.
/// transpose8rSr32: Like 8rS32 but done in reverse (coarse to fine granularity).

/* This is the very naive method, that directly places one bit at a time.
This may be too naive to include in the book (i.e., maybe we should take this
out if there should be another edition). This is equally suitable (or
unsuitable, if you wish) for a 32- or a 64-bit machine.

Instruction counts for the calculation part:
   62 ANDs
   56 shifts
   56 ORs (or ADDs or XORs)
   --
  174 total (very naive method, placing one bit at a time) */
pub fn transpose8vn<'a>(A: &'a mut [char], m: usize, n: usize, B: &'a mut [char]) -> &'a mut [char] {
    // Load the array into eight one-byte variables.
    let a0 = A[0] as u8;
    let a1 = A[m] as u8;
    let a2 = A[2 * m] as u8;
    let a3 = A[3 * m] as u8;
    let a4 = A[4 * m] as u8;
    let a5 = A[5 * m] as u8;
    let a6 = A[6 * m] as u8;
    let a7 = A[7 * m] as u8;

    let b0 = (a0 & 128) | (a1 & 128) / 2 | (a2 & 128) / 4 | (a3 & 128) / 8
        | (a4 & 128) / 16 | (a5 & 128) / 32 | (a6 & 128) / 64 | (a7) / 128;
    let b1 = (a0 & 64) * 2 | (a1 & 64) | (a2 & 64) / 2 | (a3 & 64) / 4
        | (a4 & 64) / 8 | (a5 & 64) / 16 | (a6 & 64) / 32 | (a7 & 64) / 64;
    let b2 = (a0 & 32) * 4 | (a1 & 32) * 2 | (a2 & 32) | (a3 & 32) / 2
        | (a4 & 32) / 4 | (a5 & 32) / 8 | (a6 & 32) / 16 | (a7 & 32) / 32;
    let b3 = (a0 & 16) * 8 | (a1 & 16) * 4 | (a2 & 16) * 2 | (a3 & 16)
        | (a4 & 16) / 2 | (a5 & 16) / 4 | (a6 & 16) / 8 | (a7 & 16) / 16;
    let b4 = (a0 & 8) * 16 | (a1 & 8) * 8 | (a2 & 8) * 4 | (a3 & 8) * 2
        | (a4 & 8) | (a5 & 8) / 2 | (a6 & 8) / 4 | (a7 & 8) / 8;
    let b5 = (a0 & 4) * 32 | (a1 & 4) * 16 | (a2 & 4) * 8 | (a3 & 4) * 4
        | (a4 & 4) * 2 | (a5 & 4) | (a6 & 4) / 2 | (a7 & 4) / 4;
    let b6 = (a0 & 2) * 64 | (a1 & 2) * 32 | (a2 & 2) * 16 | (a3 & 2) * 8
        | (a4 & 2) * 4 | (a5 & 2) * 2 | (a6 & 2) | (a7 & 2) / 2;
    // The leading term masks the low bit first so the u8 multiply cannot overflow.
    let b7 = (a0 & 1) * 128 | (a1 & 1) * 64 | (a2 & 1) * 32 | (a3 & 1) * 16
        | (a4 & 1) * 8 | (a5 & 1) * 4 | (a6 & 1) * 2 | (a7 & 1);

    B[0] = b0 as char;
    B[n] = b1 as char;
    B[2 * n] = b2 as char;
    B[3 * n] = b3 as char;
    B[4 * n] = b4 as char;
    B[5 * n] = b5 as char;
    B[6 * n] = b6 as char;
    B[7 * n] = b7 as char;
    B
}

/* The above executes in 174 instructions (just the calculation part).
62 ANDs, 56 shifts, and 56 ORs.

transpose8b64 directly places the bits in the target array. It uses 64-bit
quantities, which makes it easy to understand. It can be easily translated
for execution on a 32-bit machine, either by hand or by letting the compiler
do it, if your compiler supports 64-bit integers on a 32-bit machine.

It is based on the observation that the bits in the 64-bit doubleword all
move either 0, 7, 14, 21, 28, 35, 42, or 49 positions to the left or right.
This is illustrated by the diagram below. Each digit, letter, $, or period
represents a single bit in a 64-bit word. A dash represents a 0, resulting
from the shift instructions. Looking at the Input and Output lines, the bit
denoted by the character '0' does not move, '1' moves 7 positions to the
right, '2' moves 14 positions to the right, etc.

Note: Rotate shifts do not help. That is, they do not move any additional
bits to their output positions.

Input x: 01234567 89abcdef ghijklmn opqrstuv wxyzABCD EFGHIJKL MNOPQRST UVWXYZ$.
Output:  08gowEMU 19hpxFNV 2aiqyGOW 3bjrzHPX 4cksAIQY 5dltBJRZ 6emuCKS$ 7fnvDLT.

x:       01234567 89abcdef ghijklmn opqrstuv wxyzABCD EFGHIJKL MNOPQRST UVWXYZ$.
x << 7:  789abcde fghijklm nopqrstu vwxyzABC DEFGHIJK LMNOPQRS TUVWXYZ$ .-------
x << 14: efghijkl mnopqrst uvwxyzAB CDEFGHIJ KLMNOPQR STUVWXYZ $.------ --------
x << 21: lmnopqrs tuvwxyzA BCDEFGHI JKLMNOPQ RSTUVWXY Z$.----- -------- --------
x << 28: stuvwxyz ABCDEFGH IJKLMNOP QRSTUVWX YZ$.---- -------- -------- --------
x << 35: zABCDEFG HIJKLMNO PQRSTUVW XYZ$.--- -------- -------- -------- --------
x << 42: GHIJKLMN OPQRSTUV WXYZ$.-- -------- -------- -------- -------- --------
x << 49: NOPQRSTU VWXYZ$.- -------- -------- -------- -------- -------- --------

x >> 7:  -------0 12345678 9abcdefg hijklmno pqrstuvw xyzABCDE FGHIJKLM NOPQRSTU
x >> 14: -------- ------01 23456789 abcdefgh ijklmnop qrstuvwx yzABCDEF GHIJKLMN
x >> 21: -------- -------- -----012 3456789a bcdefghi jklmnopq rstuvwxy zABCDEFG
x >> 28: -------- -------- -------- ----0123 456789ab cdefghij klmnopqr stuvwxyz
x >> 35: -------- -------- -------- -------- ---01234 56789abc defghijk lmnopqrs
x >> 42: -------- -------- -------- -------- -------- --012345 6789abcd efghijkl
x >> 49: -------- -------- -------- -------- -------- -------- -0123456 789abcde

The function below positions some of the bits with an expression of the form
(x & mask) << s, and some with an expression of the form (x >> s) & mask.
This reduces the number of distinct masks that are required.

Instruction counts for the calculation part, for a 64-bit machine:
   14 shifts
   15 ANDs
   14 ORs (or ADDs or XORs)
    9 Mask generation (4 for the first and 1 for each subsequent one,
      except the two smallest masks can be immediate fields).
   --
   52 total (64-bit machine, direct placement) */
pub fn transpose8b64<'a>(A: &'a mut [char], m: usize, n: usize, B: &'a mut [i32]) -> &'a mut [i32] {
    let mut x: u64 = 0;
    for i in 0..8 {
        // Load 8 bytes from the input array and pack them into x.
        x = x << 8 | A[m * i] as u64;
    }

    let mut y = x & 0x8040201008040201
        | (x & 0x0080402010080402) << 7
        | (x & 0x0000804020100804) << 14
        | (x & 0x0000008040201008) << 21
        | (x & 0x0000000080402010) << 28
        | (x & 0x0000000000804020) << 35
        | (x & 0x0000000000008040) << 42
        | (x & 0x0000000000000080) << 49
        | (x >> 7) & 0x0080402010080402
        | (x >> 14) & 0x0000804020100804
        | (x >> 21) & 0x0000008040201008
        | (x >> 28) & 0x0000000080402010
        | (x >> 35) & 0x0000000000804020
        | (x >> 42) & 0x0000000000008040
        | (x >> 49) & 0x0000000000000080;

    for i in (0..8).rev() {
        // Store the result into output array B, lowest byte first.
        B[n * i] = (y & 0xFF) as i32;
        y = y >> 8;
    }
    B
}

/* This is a compact version of transpose8b64, ca. 75 instructions for the
calculation part. */
pub fn transpose8b64c<'a>(A: &'a mut [char], m: usize, n: usize, B: &'a mut [char]) -> &'a mut [char] {
    let mut x: u64 = 0;
    for i in 0..8 {
        // Load 8 bytes from the input array and pack them into x.
        x = x << 8 | A[m * i] as u64;
    }

    let mut mask: u64 = 0x8040201008040201;
    let mut y: u64 = x & mask;
    // The shift distances 7, 14, ..., 49 are all needed, hence the inclusive range.
    for s in (7..=49).step_by(7) {
        mask = mask >> 8;
        y = y | ((x & mask) << s) | ((x >> s) & mask);
    }

    for i in (0..8).rev() {
        // Store the result into output array B, lowest byte first.
        B[n * i] = (y as u8) as char;
        y = y >> 8;
    }
    B
}

/* This is transpose8b64 but using the GLS method of bit field swapping.

Instruction counts for the calculation part:
    7 ANDs
   21 XORs
   14 shifts
    8 Mask generation (many can be generated from earlier masks)
   --
   50 total (direct placement method for a 64-bit machine, using GLS's bit swapping) */
pub fn transpose8bS64<'a>(A: &'a mut [char], m: usize, n: usize, B: &'a mut [char]) -> &'a mut [char] {
    let mut x: u64 = 0;
    for i in 0..8 {
        // Load 8 bytes from the input array and pack them into x.
        x = x << 8 | A[m * i] as u64;
    }

    let mut t = (x ^ (x >> 7)) & 0x0080402010080402;
    x = x ^ t ^ (t << 7);
    t = (x ^ (x >> 14)) & 0x0000804020100804;
    x = x ^ t ^ (t << 14);
    t = (x ^ (x >> 21)) & 0x0000008040201008;
    x = x ^ t ^ (t << 21);
    t = (x ^ (x >> 28)) & 0x0000000080402010;
    x = x ^ t ^ (t << 28);
    t = (x ^ (x >> 35)) & 0x0000000000804020;
    x = x ^ t ^ (t << 35);
    t = (x ^ (x >> 42)) & 0x0000000000008040;
    x = x ^ t ^ (t << 42);
    t = (x ^ (x >> 49)) & 0x0000000000000080;
    x = x ^ t ^ (t << 49);

    for i in (0..8).rev() {
        // Store the result into output array B, lowest byte first.
        B[n * i] = (x as u8) as char;
        x = x >> 8;
    }
    B
}

/* transpose8r64 is the basic recursive method for a 64-bit machine.
This function positions some of the bits with an expression of the form
(x & mask) << s, and some with an expression of the form (x >> s) & mask.
This reduces the number of distinct masks that are required.

Instruction counts for the calculation part, for a 64-bit machine:
    6 shifts
    9 ANDs
    6 ORs (or ADDs or XORs)
   17 Mask generation
   --
   38 total (64-bit machine, basic recursive method) */
pub fn transpose8r64(A: &mut [i32], m: usize, n: usize, B: &mut [i32]) {
    let mut x: u64 = 0;
    for i in 0..8 {
        // Load 8 bytes from the input array and pack them into x.
        x = x << 8 | A[m * i] as u64;
    }

    x = x & 0xAA55AA55AA55AA55
        | (x & 0x00AA00AA00AA00AA) << 7
        | (x >> 7) & 0x00AA00AA00AA00AA;
    x = x & 0xCCCC3333CCCC3333
        | (x & 0x0000CCCC0000CCCC) << 14
        | (x >> 14) & 0x0000CCCC0000CCCC;
    x = x & 0xF0F0F0F00F0F0F0F
        | (x & 0x00000000F0F0F0F0) << 28
        | (x >> 28) & 0x00000000F0F0F0F0;

    for i in (0..8).rev() {
        // Store the result into output array B, lowest byte first.
        B[n * i] = (x & 0xFF) as i32;
        x = x >> 8;
    }
}

/* This is transpose8r64 but using the GLS method of bit field swapping.

Instruction counts for the calculation part, for a 64-bit machine:
    6 shifts
    3 ANDs
    9 XORs
    8 Mask generation
   --
   26 total (64-bit machine, recursive method with GLS bit swapping) */
pub fn transpose8rS64(A: &mut [i32], m: usize, n: usize, B: &mut [i32]) {
    let mut x: u64 = 0;
    for i in 0..8 {
        // Load 8 bytes from the input array and pack them into x.
        x = x << 8 | A[m * i] as u64;
    }

    let mut t = (x ^ (x >> 7)) & 0x00AA00AA00AA00AA;
    x = x ^ t ^ (t << 7);
    t = (x ^ (x >> 14)) & 0x0000CCCC0000CCCC;
    x = x ^ t ^ (t << 14);
    t = (x ^ (x >> 28)) & 0x00000000F0F0F0F0;
    x = x ^ t ^ (t << 28);

    for i in (0..8).rev() {
        // Store the result into output array B, lowest byte first.
        B[n * i] = (x & 0xFF) as i32;
        x = x >> 8;
    }
}

/* This is transpose8b64 adapted to a 32-bit machine more-or-less mechanically.
Because of the double-length shifts, some of the terms of the form
(x & mask) << n were changed to (x << n) & mask'. Then, for consistency, all
were changed to that form.

Instruction counts for the calculation part, for a 32-bit machine:
   26 shifts
   22 ANDs
   26 ORs (or ADDs or XORs)
   10 Mask generation (many can be generated from earlier masks)
   --
   84 total (32-bit machine, direct placement) */
pub fn transpose8b32(A: &mut [i32], m: usize, n: usize, B: &mut [i32]) {
    // Load the array and pack it into xh and xl.
    let xh: u64 = ((A[0] << 24) | (A[m] << 16) | (A[2 * m] << 8) | A[3 * m]) as u64;
    let xl: u64 = ((A[4 * m] << 24) | (A[5 * m] << 16) | (A[6 * m] << 8) | A[7 * m]) as u64;

    let yh = xh & 0x80402010
        | (xh << 7 | xl >> 25) & 0x40201008
        | (xh << 14 | xl >> 18) & 0x20100804
        | (xh << 21 | xl >> 11) & 0x10080402
        | (xh << 28 | xl >> 4) & 0x08040201
        | (xl << 3) & 0x04020100
        | (xl << 10) & 0x02010000
        | (xl << 17) & 0x01000000
        | (xh >> 7) & 0x00804020
        | (xh >> 14) & 0x00008040
        | (xh >> 21) & 0x00000080;

    let yl = xl & 0x08040201
        | (xl << 7) & 0x04020100
        | (xl << 14) & 0x02010000
        | (xl << 21) & 0x01000000
        | (xh << 25 | xl >> 7) & 0x10080402
        | (xh << 18 | xl >> 14) & 0x20100804
        | (xh << 11 | xl >> 21) & 0x40201008
        | (xh << 4) & 0x80402010
        | (xh >> 3) & 0x00804020
        | (xh >> 10) & 0x00008040
        | (xh >> 17) & 0x00000080;

    B[0] = (yh >> 24) as i32;
    B[n] = (yh >> 16) as i32;
    B[2 * n] = (yh >> 8) as i32;
    B[3 * n] = yh as i32;
    B[4 * n] = (yl >> 24) as i32;
    B[5 * n] = (yl >> 16) as i32;
    B[6 * n] = (yl >> 8) as i32;
    B[7 * n] = yl as i32;
}

/* This is transpose8b32 but using the GLS method of bit field swapping.

Instruction counts for the calculation part, for a 32-bit machine:
   27 shifts
   10 ANDs
    6 ORs (or ADDs or XORs)
   31 XORs
    7 Mask generation (many can be generated from earlier masks)
   --
   81 total (32-bit machine, direct placement with GLS bit swapping) */
pub fn transpose8bS32(A: &mut [i32], m: usize, n: usize, B: &mut [i32]) {
    // Load the array and pack it into xh and xl.
    let mut xh: u64 = ((A[0] << 24) | (A[m] << 16) | (A[2 * m] << 8) | A[3 * m]) as u64;
    let mut xl: u64 = ((A[4 * m] << 24) | (A[5 * m] << 16) | (A[6 * m] << 8) | A[7 * m]) as u64;

    let mut th = (xh ^ (xh >> 7)) & 0x00804020;
    let mut tl = (xl ^ (xl >> 7 | xh << 25)) & 0x10080402;
    xh = xh ^ th ^ (th << 7 | tl >> 25);
    xl = xl ^ tl ^ (tl << 7);

    th = (xh ^ (xh >> 14)) & 0x00008040;
    tl = (xl ^ (xl >> 14 | xh << 18)) & 0x20100804;
    xh = xh ^ th ^ (th << 14 | tl >> 18);
    xl = xl ^ tl ^ (tl << 14);

    th = (xh ^ (xh >> 21)) & 0x00000080;
    tl = (xl ^ (xl >> 21 | xh << 11)) & 0x40201008;
    xh = xh ^ th ^ (th << 21 | tl >> 11);
    xl = xl ^ tl ^ (tl << 21);

    tl = (xl ^ (xh << 4)) & 0x80402010;
    xh = xh ^ (tl >> 4);
    xl = xl ^ tl ^ (tl << 28);

    tl = (xl ^ (xh >> 3)) & 0x00804020;
    xh = xh ^ (tl << 3);
    xl = xl ^ tl;

    tl = (xl ^ (xh >> 10)) & 0x00008040;
    xh = xh ^ (tl << 10);
    xl = xl ^ tl;

    tl = (xl ^ (xh >> 17)) & 0x00000080;
    xh = xh ^ (tl << 17);
    xl = xl ^ tl;

    B[0] = (xh >> 24) as i32;
    B[n] = (xh >> 16) as i32;
    B[2 * n] = (xh >> 8) as i32;
    B[3 * n] = xh as i32;
    B[4 * n] = (xl >> 24) as i32;
    B[5 * n] = (xl >> 16) as i32;
    B[6 * n] = (xl >> 8) as i32;
    B[7 * n] = xl as i32;
}

/* Next is the basic "recursive" method. Decided not to include this in HD.
It's too similar to transpose8rS32, which is a little better (probably).
Instruction counts for the calculation part: 16 ANDs 10 shifts 10 ORs 9 mask generation -- 45 total (recursive method, direct placement at each step) */ pub fn transpose8r32(A: & mut [i32], m: usize, n: usize, mut B: & mut [i32]) { // Load the array and pack it into x and y. let mut x: u64 = ((A[0] << 24) | (A[m] << 16) | (A[2 * m] << 8) | A[3 * m]) as u64; let mut y: u64 = ((A[4 * m] << 24) | (A[5 * m] << 16) | (A[6 * m] << 8) | A[7 * m]) as u64; x = (x & 0xAA55AA55) | ((x & 0x00AA00AA) << 7) | ((x >> 7) & 0x00AA00AA); y = (y & 0xAA55AA55) | ((y & 0x00AA00AA) << 7) | ((y >> 7) & 0x00AA00AA); x = (x & 0xCCCC3333) | ((x & 0x0000CCCC) << 14) | ((x >> 14) & 0x0000CCCC); y = (y & 0xCCCC3333) | ((y & 0x0000CCCC) << 14) | ((y >> 14) & 0x0000CCCC); let t = (x & 0xF0F0F0F0) | ((y >> 4) & 0x0F0F0F0F); y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F); x = t; B[0] = (x >> 24) as i32; B[n] = (x >> 16) as i32; B[2 * n] = (x >> 8) as i32; B[3 * n] = x as i32; B[4 * n] = (y >> 24) as i32; B[5 * n] = (y >> 16) as i32; B[6 * n] = (y >> 8) as i32; B[7 * n] = y as i32; } /* This is transpose8r32 but using the GLS method of bit field swapping. Instruction counts for the calculation part: 8 ANDs 12 XORs 10 shifts 2 ORs 5 mask generation -- 37 total (recursive method using GLS's bit swapping) */ pub fn transpose8rS32(A: & mut [i32], m: usize, n: usize, mut B: & mut [i32],) { // Load the array and pack it into x and y. let mut x = ((A[0] << 24) | (A[m] << 16) | (A[2 * m] << 8) | A[3 * m]) as u64; let mut y = ((A[4 * m] << 24) | (A[5 * m] << 16) | (A[6 * m] << 8) | A[7 * m]) as u64; let mut t = (x ^ (x >> 7)) & 0x00AA00AA; x = x ^ t ^ (t << 7); t = (y ^ (y >> 7)) & 0x00AA00AA; y = y ^ t ^ (t << 7); t = (x ^ (x >> 14)) & 0x0000CCCC; x = x ^ t ^ (t << 14); t = (y ^ (y >> 14)) & 0x0000CCCC; y = y ^ t ^ (t << 14); t = (x & 0xF0F0F0F0) | ((y >> 4) & 0x0F0F0F0F); y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F); x = t; B[0] = (x >> 24) as i32; B[n] = (x >> 16) as i32; B[2 * n] = (x >> 8) as i32; B[3 * n] = x as i32; B[4 * n] = (y >> 24) as i32; B[5 * n] = (y >> 16) as i32; B[6 * n] = (y >> 8) as i32; B[7 * n] = y as i32; } /* This is transpose8rS32 done "backwards" (coarse to fine granularity). Why? Just to show that this works. Instruction counts for the calculation part: 8 ANDs 12 XORs 10 shifts 2 ORs 5 mask generation -- 37 total (recursive method in reverse, using GLS's bit swapping) */ pub fn transpose8rSr32(A: & mut [i32], m: usize, n: usize, mut B: & mut [i32]) { // Load the array and pack it into x and y. 
    let mut x: u64 = ((A[0] << 24) | (A[m] << 16) | (A[2 * m] << 8) | A[3 * m]) as u64;
    let mut y: u64 = ((A[4 * m] << 24) | (A[5 * m] << 16) | (A[6 * m] << 8) | A[7 * m]) as u64;

    let mut t = (x & 0xF0F0F0F0) | ((y >> 4) & 0x0F0F0F0F);
    y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F);
    x = t;

    t = (x ^ (x >> 14)) & 0x0000CCCC;
    x = x ^ t ^ (t << 14);
    t = (y ^ (y >> 14)) & 0x0000CCCC;
    y = y ^ t ^ (t << 14);

    t = (x ^ (x >> 7)) & 0x00AA00AA;
    x = x ^ t ^ (t << 7);
    t = (y ^ (y >> 7)) & 0x00AA00AA;
    y = y ^ t ^ (t << 7);

    B[0] = (x >> 24) as i32;
    B[n] = (x >> 16) as i32;
    B[2 * n] = (x >> 8) as i32;
    B[3 * n] = x as i32;
    B[4 * n] = (y >> 24) as i32;
    B[5 * n] = (y >> 16) as i32;
    B[6 * n] = (y >> 8) as i32;
    B[7 * n] = y as i32;
}

/* Summary of instruction counts and operation counts (instruction counts
less those for mask generation) for all the transpose8 functions above
except for transpose8b64c:

        Instructions      Operations
       64-bit  32-bit   64-bit  32-bit
8vn     174     174      174     174    Very naive method, placing one bit at a time
8b       54      84       43      74    Basic direct placement
8bS      50      81       42      74    Above with GLS bit swapping
8r       38      45       21      36    Basic recursive
8rS      26      37       18      32    Above with GLS bit swapping
8rSr             37               32    8rS in reverse order

Below is the original version from Guy Steele. */
pub fn transpose32a(a: &mut [i64]) {
    let mut j = 16;
    let mut m = 0x0000FFFF;
    loop {
        if j == 0 {
            break;
        }
        let mut k = 0;
        loop {
            if k >= 32 {
                break;
            }
            let t = (a[k] ^ (a[k | j] >> j)) & m;
            a[k] ^= t;
            a[k | j] ^= t << j;
            k = ((k | j) + 1) & !j
        }
        j >>= 1;
        m ^= m << j;
    }
}

/* Below is essentially the same code, but modified to avoid certain C
expressions out of sympathy for readers who are not very familiar with C.
Also modified to use k + j rather than k | j, because + seems more natural
for this program. None of these changes affects the number of instructions
executed. */
pub fn transpose32b(A: &mut [i32]) {
    let mut m = 0x0000FFFF;
    let mut j = 16;
    loop {
        if j == 0 {
            break;
        }
        let mut k = 0;
        loop {
            if k >= 32 {
                break;
            }
            let t = (A[k] ^ (A[k + j] >> j)) & m;
            A[k] = A[k] ^ t;
            A[k + j] = A[k + j] ^ (t << j);
            k = (k + j + 1) & !j
        }
        j = j >> 1;
        m = m ^ (m << j);
    }
}

/* Straight-line version of transpose32a & b. */
macro_rules! rotateright {
    ($x:expr,$k:expr) => {
        ($x >> $k) | ($x << (32 - $k))
    }
}

macro_rules! rotateleft {
    ($x:expr,$k:expr) => {
        ($x << $k) | ($x >> (32 - $k))
    }
}

macro_rules!
swap { ($a0:expr,$a1:expr,$j:expr,$m:expr) => { let t = ($a0 ^ ($a1 >> $j)) & $m; $a0 = $a0 ^ t; $a1 = $a1 ^ (t << $j); } } pub fn transpose32c(mut A: & mut [i32], mut B: & mut [i32]) { let mut a0 = A[0]; let mut a1 = A[1]; let mut a2 = A[2]; let mut a3 = A[3]; let mut a4 = A[4]; let mut a5 = A[5]; let mut a6 = A[6]; let mut a7 = A[7]; let mut a8 = A[8]; let mut a9 = A[9]; let mut a10 = A[10]; let mut a11 = A[11]; let mut a12 = A[12]; let mut a13 = A[13]; let mut a14 = A[14]; let mut a15 = A[15]; let mut a16 = A[16]; let mut a17 = A[17]; let mut a18 = A[18]; let mut a19 = A[19]; let mut a20 = A[20]; let mut a21 = A[21]; let mut a22 = A[22]; let mut a23 = A[23]; let mut a24 = A[24]; let mut a25 = A[25]; let mut a26 = A[26]; let mut a27 = A[27]; let mut a28 = A[28]; let mut a29 = A[29]; let mut a30 = A[30]; let mut a31 = A[31]; let mut m = 0x0000FFFF; swap!(a0, a16, 16, m); swap!(a1, a17, 16, m); swap!(a2, a18, 16, m); swap!(a3, a19, 16, m); swap!(a4, a20, 16, m); swap!(a5, a21, 16, m); swap!(a6, a22, 16, m); swap!(a7, a23, 16, m); swap!(a8, a24, 16, m); swap!(a9, a25, 16, m); swap!(a10, a26, 16, m); swap!(a11, a27, 16, m); swap!(a12, a28, 16, m); swap!(a13, a29, 16, m); swap!(a14, a30, 16, m); swap!(a15, a31, 16, m); m = 0x00FF00FF; swap!(a0, a8, 8, m); swap!(a1, a9, 8, m); swap!(a2, a10, 8, m); swap!(a3, a11, 8, m); swap!(a4, a12, 8, m); swap!(a5, a13, 8, m); swap!(a6, a14, 8, m); swap!(a7, a15, 8, m); swap!(a16, a24, 8, m); swap!(a17, a25, 8, m); swap!(a18, a26, 8, m); swap!(a19, a27, 8, m); swap!(a20, a28, 8, m); swap!(a21, a29, 8, m); swap!(a22, a30, 8, m); swap!(a23, a31, 8, m); m = 0x0F0F0F0F; swap!(a0, a4, 4, m); swap!(a1, a5, 4, m); swap!(a2, a6, 4, m); swap!(a3, a7, 4, m); swap!(a8, a12, 4, m); swap!(a9, a13, 4, m); swap!(a10, a14, 4, m); swap!(a11, a15, 4, m); swap!(a16, a20, 4, m); swap!(a17, a21, 4, m); swap!(a18, a22, 4, m); swap!(a19, a23, 4, m); swap!(a24, a28, 4, m); swap!(a25, a29, 4, m); swap!(a26, a30, 4, m); swap!(a27, a31, 4, m); m = 0x33333333; swap!(a0, a2, 2, m); swap!(a1, a3, 2, m); swap!(a4, a6, 2, m); swap!(a5, a7, 2, m); swap!(a8, a10, 2, m); swap!(a9, a11, 2, m); swap!(a12, a14, 2, m); swap!(a13, a15, 2, m); swap!(a16, a18, 2, m); swap!(a17, a19, 2, m); swap!(a20, a22, 2, m); swap!(a21, a23, 2, m); swap!(a24, a26, 2, m); swap!(a25, a27, 2, m); swap!(a28, a30, 2, m); swap!(a29, a31, 2, m); m = 0x55555555; swap!(a0, a1, 1, m); swap!(a2, a3, 1, m); swap!(a4, a5, 1, m); swap!(a6, a7, 1, m); swap!(a8, a9, 1, m); swap!(a10, a11, 1, m); swap!(a12, a13, 1, m); swap!(a14, a15, 1, m); swap!(a16, a17, 1, m); swap!(a18, a19, 1, m); swap!(a20, a21, 1, m); swap!(a22, a23, 1, m); swap!(a24, a25, 1, m); swap!(a26, a27, 1, m); swap!(a28, a29, 1, m); swap!(a30, a31, 1, m); B[0] = a0; B[1] = a1; B[2] = a2; B[3] = a3; B[4] = a4; B[5] = a5; B[6] = a6; B[7] = a7; B[8] = a8; B[9] = a9; B[10] = a10; B[11] = a11; B[12] = a12; B[13] = a13; B[14] = a14; B[15] = a15; B[16] = a16; B[17] = a17; B[18] = a18; B[19] = a19; B[20] = a20; B[21] = a21; B[22] = a22; B[23] = a23; B[24] = a24; B[25] = a25; B[26] = a26; B[27] = a27; B[28] = a28; B[29] = a29; B[30] = a30; B[31] = a31; } /* Copied from GLS's note. This is the "three shearing transformations" method. The code below takes 1280 ops to do the bit rearrangements (i.e., not counting loop control, loads, stores, and indexing). Not competitive with the other methods. 
*/ pub fn transpose32d(mut a: & mut [i32]) { for k in 0..32 { a[k] = rotateright!(a[k], k); } let mut j = 16; let mut m: u64 = 0xFFFF0000; loop { if j == 0 { break; } for k in 0..j { let mut t = a[k] as u64 & m; a[k] = a[k] ^ t as i32; for q in (k + j..32).step_by(j) { let u = a[q] as u64 & m; a[q] = a[q] ^ u as i32 ^ t as i32; t = u; } a[k] = a[k] ^ t as i32; } j = j >> 1; m ^= m >> j; } for k in 0..32 { a[k] = rotateleft!(a[k], 31 - k); } for k in 0..16 { let t = a[k]; a[k] = a[31 - k]; a[31 - k] = t; } } #[cfg_attr(not(target_arch = "x86_64"),test_case)] #[cfg_attr(not(target_arch = "riscv64"),test)] fn test_transpose(){ let mut a = ['\x00';50]; let mut b = ['\x00';50]; let c = ['\x00';50]; assert_eq!(transpose8b64c(a.borrow_mut(),1,1,b.borrow_mut()),c); }
34.092246
142
0.49998
ff8832aa15f907f316caf73dca56da4eebbeec1c
3,088
use log::{info, warn}; use rust_gpu_tools::Device; use std::collections::HashMap; use std::env; lazy_static::lazy_static! { static ref CORE_COUNTS: HashMap<String, usize> = { let mut core_counts : HashMap<String, usize> = vec![ // AMD ("gfx1010".to_string(), 2560), // This value was chosen to give (approximately) empirically best performance for a Radeon Pro VII. ("gfx906".to_string(), 7400), // NVIDIA ("Quadro RTX 6000".to_string(), 4608), ("Quadro RTX A6000".to_string(), 10752), ("TITAN RTX".to_string(), 4608), ("Tesla V100".to_string(), 5120), ("Tesla P100".to_string(), 3584), ("Tesla T4".to_string(), 2560), ("Quadro M5000".to_string(), 2048), ("GeForce RTX 3090".to_string(), 10496), ("GeForce RTX 3080".to_string(), 8704), ("GeForce RTX 3070".to_string(), 5888), ("GeForce RTX 3060 Ti".to_string(), 4864), ("GeForce RTX 2080 Ti".to_string(), 4352), ("GeForce RTX 2080 SUPER".to_string(), 3072), ("GeForce RTX 2080".to_string(), 2944), ("GeForce RTX 2070 SUPER".to_string(), 2560), ("GeForce GTX 1080 Ti".to_string(), 3584), ("GeForce GTX 1080".to_string(), 2560), ("GeForce GTX 2060".to_string(), 1920), ("GeForce GTX 1660 Ti".to_string(), 1536), ("GeForce GTX 1060".to_string(), 1280), ("GeForce GTX 1650 SUPER".to_string(), 1280), ("GeForce GTX 1650".to_string(), 896), ].into_iter().collect(); if let Ok(var) = env::var("BELLMAN_CUSTOM_GPU") { for card in var.split(',') { let splitted = card.split(':').collect::<Vec<_>>(); if splitted.len() != 2 { panic!("Invalid BELLMAN_CUSTOM_GPU!"); } let name = splitted[0].trim().to_string(); let cores : usize = splitted[1].trim().parse().expect("Invalid BELLMAN_CUSTOM_GPU!"); info!("Adding \"{}\" to GPU list with {} CUDA cores.", name, cores); core_counts.insert(name, cores); } } core_counts }; } const DEFAULT_CORE_COUNT: usize = 2560; pub fn get_core_count(name: &str) -> usize { match CORE_COUNTS.get(name) { Some(&cores) => cores, None => { warn!( "Number of CUDA cores for your device ({}) is unknown! Best performance is \ only achieved when the number of CUDA cores is known! You can find the \ instructions on how to support custom GPUs here: \ https://lotu.sh/en+hardware-mining", name ); DEFAULT_CORE_COUNT } } } pub fn dump_device_list() { for d in Device::all() { info!("Device: {:?}", d); } } #[cfg(any(feature = "cuda", feature = "opencl"))] #[test] pub fn test_list_devices() { let _ = env_logger::try_init(); dump_device_list(); }
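// Usage note: BELLMAN_CUSTOM_GPU is parsed above as comma-separated
// "name:cores" pairs and must be set before CORE_COUNTS is first touched,
// since lazy_static initializes it exactly once. For example (the core count
// here is illustrative, not authoritative):
//
//     BELLMAN_CUSTOM_GPU="GeForce RTX 3050:2560" ./prover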
35.090909
111
0.533355
87ac91f25b7778da0332405bd93f66d7fa71da51
6,230
//! Helpful functionality around the `serde_cbor` crate. use alloc::vec::Vec; use serde::Serialize; #[cfg_attr(tarpaulin, skip)] mod error; pub use error::CborError; /// The result type for the `cbor` module. pub type Result<T> = core::result::Result<T, CborError>; /// Serializes an object into CBOR. pub fn encode(object: impl Serialize) -> Result<Vec<u8>> { serialize(object, 0) } /// Serializes an object into a sequence of CBOR encoded data items. /// /// Only works for objects that serialize to a CBOR array of at most 23 items. pub fn encode_sequence(object: impl Serialize) -> Result<Vec<u8>> { // We serialize something that encodes as a CBOR array. // What we want is just the sequence of items, so we can omit the // first byte (indicating array type and length), and get the items. // That only works as long as we have at most 23 items, after that it // takes an additional byte to indicate the length. serialize(object, 1) } /// Serializes an object, returning its bytes from an offset. fn serialize(object: impl Serialize, offset: usize) -> Result<Vec<u8>> { // Serialize to byte vector let mut v = serde_cbor::to_vec(&object)?; // Return everything starting from the offset Ok(v.drain(offset..).collect()) } /// Deserializes a CBOR encoded object. pub fn decode<'a, T>(bytes: &'a [u8]) -> Result<T> where T: serde::Deserialize<'a>, { Ok(serde_cbor::from_slice(bytes)?) } /// Deserializes a sequence of CBOR encoded data items into an object. /// /// Requires a `Vec<u8>` of length `bytes` + 1 to use as a buffer and only /// works for sequences of at most 23 items. /// /// # Arguments /// * `bytes` - The sequence of CBOR items. /// * `n_items` - The number of items. /// * `tmp_vec` - Buffer used for deserialization. pub fn decode_sequence<'a, T>( bytes: &[u8], n_items: usize, tmp_vec: &'a mut Vec<u8>, ) -> Result<T> where T: serde::Deserialize<'a>, { // We receive a sequence of CBOR items. For parsing we need an array, so // start a CBOR array of the given length. tmp_vec.push(array_byte(n_items)?); // After the start byte, insert the message (sequence of CBOR items) tmp_vec.extend(bytes); // Now we can try to deserialize that Ok(serde_cbor::from_slice(tmp_vec)?) } /// Changes the given CBOR bytes from an array of n elements to a map of n / 2 /// key/value pairs. /// /// Only works for arrays with at most 23 items. pub fn array_to_map(bytes: &mut [u8]) -> Result<()> { // The 5 least significant bits are the number of elements in the array let n = 0b000_11111 & bytes[0]; match n { _ if n > 23 => Err(CborError::TooManyItems), n => { // Change the major type and number of elements accordingly bytes[0] = 0b101_00000 | (n / 2); Ok(()) } } } /// Changes the given CBOR bytes from a map of n key/value pairs to an array /// of n * 2 items. /// /// Only works for arrays with at most 23 items. #[allow(dead_code)] pub fn map_to_array(bytes: &mut [u8]) -> Result<()> { // The 5 least significant bits are the number of key/value pairs let n = 0b000_11111 & bytes[0]; match n { _ if n * 2 > 23 => Err(CborError::TooManyItems), n => { // Change the major type and number of elements accordingly bytes[0] = 0b100_00000 | (n * 2); Ok(()) } } } /// Returns the byte indicating the CBOR array type with the given number of /// elements. fn array_byte(n: usize) -> Result<u8> { match n { _ if n > 23 => Err(CborError::TooManyItems), // The major type for arrays is indicated by the three leftmost bits. // By doing bitwise OR with the number of items, we assign the // remaining bits for the number of elements. 
n => Ok(0b100_00000 | n as u8), } } #[cfg(test)] mod tests { use super::*; #[test] fn array_length() { assert_eq!(0x80, array_byte(0).unwrap()); assert_eq!(0x81, array_byte(1).unwrap()); assert_eq!(0x94, array_byte(20).unwrap()); assert_eq!(0x97, array_byte(23).unwrap()); assert!(array_byte(24).is_err()); } const MAP_0: [u8; 1] = [0xA0]; const ARR_0: [u8; 1] = [0x80]; const MAP_1: [u8; 4] = [0xA1, 0x01, 0x18, 0x2A]; const ARR_2: [u8; 4] = [0x82, 0x01, 0x18, 0x2a]; const MAP_11: [u8; 23] = [ 0xAB, 0x01, 0x01, 0x02, 0x01, 0x03, 0x01, 0x04, 0x01, 0x05, 0x01, 0x06, 0x01, 0x07, 0x01, 0x08, 0x01, 0x09, 0x01, 0x0A, 0x01, 0x0B, 0x01, ]; const ARR_22: [u8; 23] = [ 0x96, 0x01, 0x01, 0x02, 0x01, 0x03, 0x01, 0x04, 0x01, 0x05, 0x01, 0x06, 0x01, 0x07, 0x01, 0x08, 0x01, 0x09, 0x01, 0x0A, 0x01, 0x0B, 0x01, ]; const MAP_12: [u8; 25] = [ 0xAC, 0x01, 0x01, 0x02, 0x01, 0x03, 0x01, 0x04, 0x01, 0x05, 0x01, 0x06, 0x01, 0x07, 0x01, 0x08, 0x01, 0x09, 0x01, 0x0A, 0x01, 0x0B, 0x01, 0x0C, 0x01, ]; const ARR_24: [u8; 26] = [ 0x98, 0x18, 0x01, 0x01, 0x02, 0x01, 0x03, 0x01, 0x04, 0x01, 0x05, 0x01, 0x06, 0x01, 0x07, 0x01, 0x08, 0x01, 0x09, 0x01, 0x0A, 0x01, 0x0B, 0x01, 0x0C, 0x01, ]; #[test] fn transformations() { let mut map_0 = MAP_0.to_vec(); let mut arr_0 = ARR_0.to_vec(); map_to_array(&mut map_0).unwrap(); array_to_map(&mut arr_0).unwrap(); assert_eq!(&ARR_0[..], &map_0[..]); assert_eq!(&MAP_0[..], &arr_0[..]); let mut map_1 = MAP_1.to_vec(); let mut arr_2 = ARR_2.to_vec(); map_to_array(&mut map_1).unwrap(); array_to_map(&mut arr_2).unwrap(); assert_eq!(&ARR_2[..], &map_1[..]); assert_eq!(&MAP_1[..], &arr_2[..]); let mut map_11 = MAP_11.to_vec(); let mut arr_22 = ARR_22.to_vec(); map_to_array(&mut map_11).unwrap(); array_to_map(&mut arr_22).unwrap(); assert_eq!(&ARR_22[..], &map_11[..]); assert_eq!(&MAP_11[..], &arr_22[..]); let mut map_12 = MAP_12.to_vec(); let mut arr_24 = ARR_24.to_vec(); assert!(map_to_array(&mut map_12).is_err()); assert!(array_to_map(&mut arr_24).is_err()); } }
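// Round-trip sketch for the sequence helpers above. A Rust tuple serializes
// as a CBOR array, so it qualifies for encode_sequence; the scratch buffer
// needs room for the items plus the synthetic array-header byte.
#[cfg(test)]
mod sequence_roundtrip {
    use super::*;

    #[test]
    fn two_item_sequence() {
        // Two encoded items, no leading array byte.
        let bytes = encode_sequence((1u8, 2u8)).unwrap();
        let mut buf = Vec::with_capacity(bytes.len() + 1);
        let back: (u8, u8) = decode_sequence(&bytes, 2, &mut buf).unwrap();
        assert_eq!((1, 2), back);
    }
}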
33.315508
78
0.604334
33272fb0e686901aaf5a328cf38e063c2d4c4d65
607
#[cfg(all(not(feature = "neon-sys"), not(feature = "napi")))] compile_error!("The Neon runtime must have at least one of the `neon-sys` or `napi` backends enabled."); use cfg_if::cfg_if; cfg_if! { if #[cfg(feature = "napi")] { pub mod napi; } } cfg_if! { if #[cfg(feature = "neon-sys")] { pub mod nan; // The legacy variant is the default API as long as it's present. pub use crate::nan::*; } else if #[cfg(feature = "napi")] { // The N-API variant is only the default API if the legacy variant is disabled. pub use crate::napi::*; } }
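// Hedged sketch of the Cargo feature wiring this file implies (the feature
// names are grounded in the cfg branches above; the default set is an
// assumption):
//
//     [features]
//     default = ["neon-sys"]
//     neon-sys = []
//     napi = []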
27.590909
104
0.584843
33542e7a15acd81384fb6709d7a2249960d6ecf7
75,583
use crate::config::{
    AnchorPackage, Config, ConfigOverride, Manifest, ProgramDeployment, ProgramWorkspace, WithPath,
};
use anchor_client::Cluster;
use anchor_lang::idl::{IdlAccount, IdlInstruction};
use anchor_lang::{AccountDeserialize, AnchorDeserialize, AnchorSerialize};
use anchor_syn::idl::Idl;
use anyhow::{anyhow, Context, Result};
use clap::Clap;
use flate2::read::GzDecoder;
use flate2::read::ZlibDecoder;
use flate2::write::{GzEncoder, ZlibEncoder};
use flate2::Compression;
use heck::SnakeCase;
use rand::rngs::OsRng;
use reqwest::blocking::multipart::{Form, Part};
use reqwest::blocking::Client;
use serde::{Deserialize, Serialize};
use solana_client::rpc_client::RpcClient;
use solana_client::rpc_config::RpcSendTransactionConfig;
use solana_program::instruction::{AccountMeta, Instruction};
use solana_sdk::account_utils::StateMut;
use solana_sdk::bpf_loader_upgradeable::UpgradeableLoaderState;
use solana_sdk::commitment_config::CommitmentConfig;
use solana_sdk::pubkey::Pubkey;
use solana_sdk::signature::Keypair;
use solana_sdk::signature::Signer;
use solana_sdk::sysvar;
use solana_sdk::transaction::Transaction;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::fs::{self, File};
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use std::process::{Child, Stdio};
use std::string::ToString;
use tar::Archive;

pub mod config;
pub mod template;

// Version of the docker image.
pub const VERSION: &str = env!("CARGO_PKG_VERSION");
pub const DOCKER_BUILDER_VERSION: &str = VERSION;

#[derive(Debug, Clap)]
#[clap(version = VERSION)]
pub struct Opts {
    #[clap(flatten)]
    pub cfg_override: ConfigOverride,
    #[clap(subcommand)]
    pub command: Command,
}

#[derive(Debug, Clap)]
pub enum Command {
    /// Initializes a workspace.
    Init {
        name: String,
        #[clap(short, long)]
        typescript: bool,
    },
    /// Builds the workspace.
    Build {
        /// Output directory for the IDL.
        #[clap(short, long)]
        idl: Option<String>,
        /// True if the build artifact needs to be deterministic and verifiable.
        #[clap(short, long)]
        verifiable: bool,
        #[clap(short, long)]
        program_name: Option<String>,
        /// Version of the Solana toolchain to use. For --verifiable builds
        /// only.
        #[clap(short, long)]
        solana_version: Option<String>,
        /// Arguments to pass to the underlying `cargo build-bpf` command
        #[clap(
            required = false,
            takes_value = true,
            multiple_values = true,
            last = true
        )]
        cargo_args: Vec<String>,
    },
    /// Verifies the on-chain bytecode matches the locally compiled artifact.
    /// Run this command inside a program subdirectory, i.e., in the dir
    /// containing the program's Cargo.toml.
    Verify {
        /// The deployed program to compare against.
        program_id: Pubkey,
        #[clap(short, long)]
        program_name: Option<String>,
        /// Version of the Solana toolchain to use. For --verifiable builds
        /// only.
        #[clap(short, long)]
        solana_version: Option<String>,
        /// Arguments to pass to the underlying `cargo build-bpf` command.
        #[clap(
            required = false,
            takes_value = true,
            multiple_values = true,
            last = true
        )]
        cargo_args: Vec<String>,
    },
    /// Runs integration tests against a local network.
    Test {
        /// Use this flag if you want to run tests against previously deployed
        /// programs.
        #[clap(long)]
        skip_deploy: bool,
        /// Flag to skip starting a local validator, if the configured cluster
        /// url is a localnet.
        #[clap(long)]
        skip_local_validator: bool,
        /// Flag to skip building the program in the workspace; use this to
        /// save time when running tests and the program code has not been altered.
        #[clap(long)]
        skip_build: bool,
        /// Flag to keep the local validator running after tests
        /// to be able to check the transactions.
        #[clap(long)]
        detach: bool,
        #[clap(multiple_values = true)]
        args: Vec<String>,
        /// Arguments to pass to the underlying `cargo build-bpf` command.
        #[clap(
            required = false,
            takes_value = true,
            multiple_values = true,
            last = true
        )]
        cargo_args: Vec<String>,
    },
    /// Creates a new program.
    New { name: String },
    /// Commands for interacting with interface definitions.
    Idl {
        #[clap(subcommand)]
        subcmd: IdlCommand,
    },
    /// Deploys each program in the workspace.
    Deploy {
        #[clap(short, long)]
        program_name: Option<String>,
    },
    /// Runs the deploy migration script.
    Migrate,
    /// Upgrades a single program. The configured wallet must be the upgrade
    /// authority.
    Upgrade {
        /// The program to upgrade.
        #[clap(short, long)]
        program_id: Pubkey,
        /// Filepath to the new program binary.
        program_filepath: String,
    },
    #[cfg(feature = "dev")]
    /// Runs an airdrop loop, continuously funding the configured wallet.
    Airdrop {
        #[clap(short, long)]
        url: Option<String>,
    },
    /// Cluster commands.
    Cluster {
        #[clap(subcommand)]
        subcmd: ClusterCommand,
    },
    /// Starts a node shell with an Anchor client setup according to the local
    /// config.
    Shell,
    /// Runs the script defined by the current workspace's Anchor.toml.
    Run {
        /// The name of the script to run.
        script: String,
    },
    /// Saves an API token from the registry locally.
    Login {
        /// API access token.
        token: String,
    },
    /// Publishes a verified build to the Anchor registry.
    Publish {
        /// The name of the program to publish.
        program: String,
        /// Arguments to pass to the underlying `cargo build-bpf` command.
        #[clap(
            required = false,
            takes_value = true,
            multiple_values = true,
            last = true
        )]
        cargo_args: Vec<String>,
    },
    /// Keypair commands.
    Keys {
        #[clap(subcommand)]
        subcmd: KeysCommand,
    },
}

#[derive(Debug, Clap)]
pub enum KeysCommand {
    List,
}

#[derive(Debug, Clap)]
pub enum IdlCommand {
    /// Initializes a program's IDL account. Can only be run once.
    Init {
        program_id: Pubkey,
        #[clap(short, long)]
        filepath: String,
    },
    /// Writes an IDL into a buffer account. This can be used with SetBuffer
    /// to perform an upgrade.
    WriteBuffer {
        program_id: Pubkey,
        #[clap(short, long)]
        filepath: String,
    },
    /// Sets a new IDL buffer for the program.
    SetBuffer {
        program_id: Pubkey,
        /// Address of the buffer account to set as the idl on the program.
        #[clap(short, long)]
        buffer: Pubkey,
    },
    /// Upgrades the IDL to the new file. An alias for first writing and then
    /// setting the idl buffer account.
    Upgrade {
        program_id: Pubkey,
        #[clap(short, long)]
        filepath: String,
    },
    /// Sets a new authority on the IDL account.
    SetAuthority {
        /// The IDL account buffer to set the authority of. If none is given,
        /// then the canonical IDL account is used.
        address: Option<Pubkey>,
        /// Program to change the IDL authority.
        #[clap(short, long)]
        program_id: Pubkey,
        /// New authority of the IDL account.
        #[clap(short, long)]
        new_authority: Pubkey,
    },
    /// Command to remove the ability to modify the IDL account. This should
    /// likely be used in conjunction with eliminating an "upgrade authority" on
    /// the program.
    EraseAuthority {
        #[clap(short, long)]
        program_id: Pubkey,
    },
    /// Outputs the authority for the IDL account.
    Authority {
        /// The program to view.
        program_id: Pubkey,
    },
    /// Parses an IDL from source.
    Parse {
        /// Path to the program's interface definition.
#[clap(short, long)] file: String, /// Output file for the idl (stdout if not specified). #[clap(short, long)] out: Option<String>, }, /// Fetches an IDL for the given address from a cluster. /// The address can be a program, IDL account, or IDL buffer. Fetch { address: Pubkey, /// Output file for the idl (stdout if not specified). #[clap(short, long)] out: Option<String>, }, } #[derive(Debug, Clap)] pub enum ClusterCommand { /// Prints common cluster urls. List, } pub fn entry(opts: Opts) -> Result<()> { match opts.command { Command::Init { name, typescript } => init(&opts.cfg_override, name, typescript), Command::New { name } => new(&opts.cfg_override, name), Command::Build { idl, verifiable, program_name, solana_version, cargo_args, } => build( &opts.cfg_override, idl, verifiable, program_name, solana_version, None, None, cargo_args, ), Command::Verify { program_id, program_name, solana_version, cargo_args, } => verify( &opts.cfg_override, program_id, program_name, solana_version, cargo_args, ), Command::Deploy { program_name } => deploy(&opts.cfg_override, program_name), Command::Upgrade { program_id, program_filepath, } => upgrade(&opts.cfg_override, program_id, program_filepath), Command::Idl { subcmd } => idl(&opts.cfg_override, subcmd), Command::Migrate => migrate(&opts.cfg_override), Command::Test { skip_deploy, skip_local_validator, skip_build, detach, args, cargo_args, } => test( &opts.cfg_override, skip_deploy, skip_local_validator, skip_build, detach, args, cargo_args, ), #[cfg(feature = "dev")] Command::Airdrop => airdrop(cfg_override), Command::Cluster { subcmd } => cluster(subcmd), Command::Shell => shell(&opts.cfg_override), Command::Run { script } => run(&opts.cfg_override, script), Command::Login { token } => login(&opts.cfg_override, token), Command::Publish { program, cargo_args, } => publish(&opts.cfg_override, program, cargo_args), Command::Keys { subcmd } => keys(&opts.cfg_override, subcmd), } } fn init(cfg_override: &ConfigOverride, name: String, typescript: bool) -> Result<()> { if Config::discover(cfg_override)?.is_some() { return Err(anyhow!("Workspace already initialized")); } fs::create_dir(name.clone())?; std::env::set_current_dir(&name)?; fs::create_dir("app")?; let mut cfg = Config::default(); cfg.scripts.insert( "test".to_owned(), if typescript { "ts-mocha -p ./tsconfig.json -t 1000000 tests/**/*.ts" } else { "mocha -t 1000000 tests/" } .to_owned(), ); let mut localnet = BTreeMap::new(); localnet.insert( name.to_snake_case(), ProgramDeployment { address: template::default_program_id(), path: None, idl: None, }, ); cfg.programs.insert(Cluster::Localnet, localnet); let toml = cfg.to_string(); let mut file = File::create("Anchor.toml")?; file.write_all(toml.as_bytes())?; // Build virtual manifest. let mut virt_manifest = File::create("Cargo.toml")?; virt_manifest.write_all(template::virtual_manifest().as_bytes())?; // Initialize .gitignore file let mut virt_manifest = File::create(".gitignore")?; virt_manifest.write_all(template::git_ignore().as_bytes())?; // Build the program. fs::create_dir("programs")?; new_program(&name)?; // Build the test suite. fs::create_dir("tests")?; // Build the migrations directory. 
fs::create_dir("migrations")?; if typescript { // Build typescript config let mut ts_config = File::create("tsconfig.json")?; ts_config.write_all(template::ts_config().as_bytes())?; let mut ts_package_json = File::create("package.json")?; ts_package_json.write_all(template::ts_package_json().as_bytes())?; let mut deploy = File::create("migrations/deploy.ts")?; deploy.write_all(template::ts_deploy_script().as_bytes())?; let mut mocha = File::create(&format!("tests/{}.ts", name))?; mocha.write_all(template::ts_mocha(&name).as_bytes())?; } else { let mut package_json = File::create("package.json")?; package_json.write_all(template::package_json().as_bytes())?; let mut mocha = File::create(&format!("tests/{}.js", name))?; mocha.write_all(template::mocha(&name).as_bytes())?; let mut deploy = File::create("migrations/deploy.js")?; deploy.write_all(template::deploy_script().as_bytes())?; } // Install node modules. let yarn_result = std::process::Command::new("yarn") .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .output() .map_err(|e| anyhow::format_err!("yarn install failed: {}", e.to_string()))?; if !yarn_result.status.success() { println!("Failed yarn install will attempt to npm install"); std::process::Command::new("npm") .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .output() .map_err(|e| anyhow::format_err!("npm install failed: {}", e.to_string()))?; println!("Failed to install node dependencies") } println!("{} initialized", name); Ok(()) } // Creates a new program crate in the `programs/<name>` directory. fn new(cfg_override: &ConfigOverride, name: String) -> Result<()> { with_workspace(cfg_override, |cfg| { match cfg.path().parent() { None => { println!("Unable to make new program"); } Some(parent) => { std::env::set_current_dir(&parent)?; new_program(&name)?; println!("Created new program."); } }; Ok(()) }) } // Creates a new program crate in the current directory with `name`. fn new_program(name: &str) -> Result<()> { fs::create_dir(&format!("programs/{}", name))?; fs::create_dir(&format!("programs/{}/src/", name))?; let mut cargo_toml = File::create(&format!("programs/{}/Cargo.toml", name))?; cargo_toml.write_all(template::cargo_toml(name).as_bytes())?; let mut xargo_toml = File::create(&format!("programs/{}/Xargo.toml", name))?; xargo_toml.write_all(template::xargo_toml().as_bytes())?; let mut lib_rs = File::create(&format!("programs/{}/src/lib.rs", name))?; lib_rs.write_all(template::lib_rs(name).as_bytes())?; Ok(()) } #[allow(clippy::too_many_arguments)] pub fn build( cfg_override: &ConfigOverride, idl: Option<String>, verifiable: bool, program_name: Option<String>, solana_version: Option<String>, stdout: Option<File>, // Used for the package registry server. stderr: Option<File>, // Used for the package registry server. cargo_args: Vec<String>, ) -> Result<()> { // Change to the workspace member directory, if needed. 
if let Some(program_name) = program_name.as_ref() { cd_member(cfg_override, program_name)?; } let cfg = Config::discover(cfg_override)?.expect("Not in workspace."); let cargo = Manifest::discover()?; let idl_out = match idl { Some(idl) => Some(PathBuf::from(idl)), None => { let cfg_parent = match cfg.path().parent() { None => return Err(anyhow!("Invalid Anchor.toml")), Some(parent) => parent, }; fs::create_dir_all(cfg_parent.join("target/idl"))?; Some(cfg_parent.join("target/idl")) } }; let solana_version = match solana_version.is_some() { true => solana_version, false => cfg.solana_version.clone(), }; match cargo { // No Cargo.toml so build the entire workspace. None => build_all( &cfg, cfg.path(), idl_out, verifiable, solana_version, stdout, stderr, cargo_args, )?, // If the Cargo.toml is at the root, build the entire workspace. Some(cargo) if cargo.path().parent() == cfg.path().parent() => build_all( &cfg, cfg.path(), idl_out, verifiable, solana_version, stdout, stderr, cargo_args, )?, // Cargo.toml represents a single package. Build it. Some(cargo) => build_cwd( &cfg, cargo.path().to_path_buf(), idl_out, verifiable, solana_version, stdout, stderr, cargo_args, )?, } set_workspace_dir_or_exit(); Ok(()) } #[allow(clippy::too_many_arguments)] fn build_all( cfg: &WithPath<Config>, cfg_path: &Path, idl_out: Option<PathBuf>, verifiable: bool, solana_version: Option<String>, stdout: Option<File>, // Used for the package registry server. stderr: Option<File>, // Used for the package registry server. cargo_args: Vec<String>, ) -> Result<()> { let cur_dir = std::env::current_dir()?; let r = match cfg_path.parent() { None => Err(anyhow!("Invalid Anchor.toml at {}", cfg_path.display())), Some(_parent) => { for p in cfg.get_program_list()? { build_cwd( cfg, p.join("Cargo.toml"), idl_out.clone(), verifiable, solana_version.clone(), stdout.as_ref().map(|f| f.try_clone()).transpose()?, stderr.as_ref().map(|f| f.try_clone()).transpose()?, cargo_args.clone(), )?; } Ok(()) } }; std::env::set_current_dir(cur_dir)?; r } // Runs the build command outside of a workspace. #[allow(clippy::too_many_arguments)] fn build_cwd( cfg: &WithPath<Config>, cargo_toml: PathBuf, idl_out: Option<PathBuf>, verifiable: bool, solana_version: Option<String>, stdout: Option<File>, stderr: Option<File>, cargo_args: Vec<String>, ) -> Result<()> { match cargo_toml.parent() { None => return Err(anyhow!("Unable to find parent")), Some(p) => std::env::set_current_dir(&p)?, }; match verifiable { false => _build_cwd(idl_out, cargo_args), true => build_cwd_verifiable(cfg, cargo_toml, solana_version, stdout, stderr), } } // Builds an anchor program in a docker image and copies the build artifacts // into the `target/` directory. fn build_cwd_verifiable( cfg: &WithPath<Config>, cargo_toml: PathBuf, solana_version: Option<String>, stdout: Option<File>, stderr: Option<File>, ) -> Result<()> { // Create output dirs. let workspace_dir = cfg.path().parent().unwrap().canonicalize()?; fs::create_dir_all(workspace_dir.join("target/verifiable"))?; fs::create_dir_all(workspace_dir.join("target/idl"))?; let container_name = "anchor-program"; // Build the binary in docker. let result = docker_build( cfg, container_name, cargo_toml, solana_version, stdout, stderr, ); // Wipe the generated docker-target dir. 
println!("Cleaning up the docker target directory"); let exit = std::process::Command::new("docker") .args(&[ "exec", container_name, "rm", "-rf", "/workdir/docker-target", ]) .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .output() .map_err(|e| anyhow::format_err!("Docker rm docker-target failed: {}", e.to_string()))?; if !exit.status.success() { return Err(anyhow!("Failed to build program")); } // Remove the docker image. println!("Removing the docker image"); let exit = std::process::Command::new("docker") .args(&["rm", "-f", container_name]) .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .output() .map_err(|e| anyhow::format_err!("{}", e.to_string()))?; if !exit.status.success() { println!("Unable to remove docker container"); std::process::exit(exit.status.code().unwrap_or(1)); } // Build the idl. if let Ok(Some(idl)) = extract_idl("src/lib.rs") { println!("Extracting the IDL"); let out_file = workspace_dir.join(format!("target/idl/{}.json", idl.name)); write_idl(&idl, OutFile::File(out_file))?; } result } fn docker_build( cfg: &WithPath<Config>, container_name: &str, cargo_toml: PathBuf, solana_version: Option<String>, stdout: Option<File>, stderr: Option<File>, ) -> Result<()> { let binary_name = Manifest::from_path(&cargo_toml)?.lib_name()?; // Docker vars. let image_name = cfg.docker(); let volume_mount = format!( "{}:/workdir", cfg.path().parent().unwrap().canonicalize()?.display() ); println!("Using image {:?}", image_name); // Start the docker image running detached in the background. println!("Run docker image"); let exit = std::process::Command::new("docker") .args(&[ "run", "-it", "-d", "--name", container_name, "--env", "CARGO_TARGET_DIR=/workdir/docker-target", "-v", &volume_mount, &image_name, "bash", ]) .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .output() .map_err(|e| anyhow::format_err!("Docker build failed: {}", e.to_string()))?; if !exit.status.success() { return Err(anyhow!("Failed to build program")); } // Set the solana version in the container, if given. Otherwise use the // default. if let Some(solana_version) = solana_version { println!("Using solana version: {}", solana_version); // Fetch the installer. let exit = std::process::Command::new("docker") .args(&[ "exec", container_name, "curl", "-sSfL", &format!("https://release.solana.com/v{0}/install", solana_version,), "-o", "solana_installer.sh", ]) .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .output() .map_err(|e| anyhow!("Failed to set solana version: {:?}", e))?; if !exit.status.success() { return Err(anyhow!("Failed to set solana version")); } // Run the installer. let exit = std::process::Command::new("docker") .args(&["exec", container_name, "sh", "solana_installer.sh"]) .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .output() .map_err(|e| anyhow!("Failed to set solana version: {:?}", e))?; if !exit.status.success() { return Err(anyhow!("Failed to set solana version")); } // Remove the installer. 
let exit = std::process::Command::new("docker") .args(&["exec", container_name, "rm", "-f", "solana_installer.sh"]) .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .output() .map_err(|e| anyhow!("Failed to remove installer: {:?}", e))?; if !exit.status.success() { return Err(anyhow!("Failed to remove installer")); } } let manifest_path = pathdiff::diff_paths( cargo_toml.canonicalize()?, cfg.path().parent().unwrap().canonicalize()?, ) .ok_or_else(|| anyhow!("Unable to diff paths"))?; println!( "Building {} manifest: {:?}", binary_name, manifest_path.display().to_string() ); // Execute the build. let exit = std::process::Command::new("docker") .args(&[ "exec", container_name, "cargo", "build-bpf", "--manifest-path", &manifest_path.display().to_string(), ]) .stdout(match stdout { None => Stdio::inherit(), Some(f) => f.into(), }) .stderr(match stderr { None => Stdio::inherit(), Some(f) => f.into(), }) .output() .map_err(|e| anyhow::format_err!("Docker build failed: {}", e.to_string()))?; if !exit.status.success() { return Err(anyhow!("Failed to build program")); } // Copy the binary out of the docker image. println!("Copying out the build artifacts"); let out_file = cfg .path() .parent() .unwrap() .canonicalize()? .join(format!("target/verifiable/{}.so", binary_name)) .display() .to_string(); // This requires the target directory of any built program to be located at // the root of the workspace. let bin_artifact = format!( "{}:/workdir/docker-target/deploy/{}.so", container_name, binary_name ); let exit = std::process::Command::new("docker") .args(&["cp", &bin_artifact, &out_file]) .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .output() .map_err(|e| anyhow::format_err!("{}", e.to_string()))?; if !exit.status.success() { return Err(anyhow!( "Failed to copy binary out of docker. Is the target directory set correctly?" )); } // Done. Ok(()) } fn _build_cwd(idl_out: Option<PathBuf>, cargo_args: Vec<String>) -> Result<()> { let exit = std::process::Command::new("cargo") .arg("build-bpf") .args(cargo_args) .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .output() .map_err(|e| anyhow::format_err!("{}", e.to_string()))?; if !exit.status.success() { std::process::exit(exit.status.code().unwrap_or(1)); } // Always assume idl is located ar src/lib.rs. if let Some(idl) = extract_idl("src/lib.rs")? { let out = match idl_out { None => PathBuf::from(".").join(&idl.name).with_extension("json"), Some(o) => PathBuf::from(&o.join(&idl.name).with_extension("json")), }; write_idl(&idl, OutFile::File(out))?; } Ok(()) } fn verify( cfg_override: &ConfigOverride, program_id: Pubkey, program_name: Option<String>, solana_version: Option<String>, cargo_args: Vec<String>, ) -> Result<()> { // Change to the workspace member directory, if needed. if let Some(program_name) = program_name.as_ref() { cd_member(cfg_override, program_name)?; } // Proceed with the command. let cfg = Config::discover(cfg_override)?.expect("Not in workspace."); let cargo = Manifest::discover()?.ok_or_else(|| anyhow!("Cargo.toml not found"))?; // Build the program we want to verify. let cur_dir = std::env::current_dir()?; build( cfg_override, None, true, None, match solana_version.is_some() { true => solana_version, false => cfg.solana_version.clone(), }, None, None, cargo_args, )?; std::env::set_current_dir(&cur_dir)?; // Verify binary. let binary_name = cargo.lib_name()?; let bin_path = cfg .path() .parent() .ok_or_else(|| anyhow!("Unable to find workspace root"))? 
.join("target/verifiable/") .join(format!("{}.so", binary_name)); let bin_ver = verify_bin(program_id, &bin_path, cfg.provider.cluster.url())?; if !bin_ver.is_verified { println!("Error: Binaries don't match"); std::process::exit(1); } // Verify IDL (only if it's not a buffer account). if let Some(local_idl) = extract_idl("src/lib.rs")? { if bin_ver.state != BinVerificationState::Buffer { let deployed_idl = fetch_idl(cfg_override, program_id)?; if local_idl != deployed_idl { println!("Error: IDLs don't match"); std::process::exit(1); } } } println!("{} is verified.", program_id); Ok(()) } fn cd_member(cfg_override: &ConfigOverride, program_name: &str) -> Result<()> { // Change directories to the given `program_name`, if given. let cfg = Config::discover(cfg_override)?.expect("Not in workspace."); for program in cfg.read_all_programs()? { let cargo_toml = program.path.join("Cargo.toml"); if !cargo_toml.exists() { return Err(anyhow!( "Did not find Cargo.toml at the path: {}", program.path.display() )); } let p_lib_name = Manifest::from_path(&cargo_toml)?.lib_name()?; if program_name == p_lib_name { std::env::set_current_dir(&program.path)?; return Ok(()); } } return Err(anyhow!("{} is not part of the workspace", program_name,)); } pub fn verify_bin(program_id: Pubkey, bin_path: &Path, cluster: &str) -> Result<BinVerification> { let client = RpcClient::new(cluster.to_string()); // Get the deployed build artifacts. let (deployed_bin, state) = { let account = client .get_account_with_commitment(&program_id, CommitmentConfig::default())? .value .map_or(Err(anyhow!("Account not found")), Ok)?; match account.state()? { UpgradeableLoaderState::Program { programdata_address, } => { let account = client .get_account_with_commitment(&programdata_address, CommitmentConfig::default())? .value .map_or(Err(anyhow!("Account not found")), Ok)?; let bin = account.data [UpgradeableLoaderState::programdata_data_offset().unwrap_or(0)..] .to_vec(); if let UpgradeableLoaderState::ProgramData { slot, upgrade_authority_address, } = account.state()? { let state = BinVerificationState::ProgramData { slot, upgrade_authority_address, }; (bin, state) } else { return Err(anyhow!("Expected program data")); } } UpgradeableLoaderState::Buffer { .. } => { let offset = UpgradeableLoaderState::buffer_data_offset().unwrap_or(0); ( account.data[offset..].to_vec(), BinVerificationState::Buffer, ) } _ => return Err(anyhow!("Invalid program id")), } }; let mut local_bin = { let mut f = File::open(bin_path)?; let mut contents = vec![]; f.read_to_end(&mut contents)?; contents }; // The deployed program probably has zero bytes appended. The default is // 2x the binary size in case of an upgrade. if local_bin.len() < deployed_bin.len() { local_bin.append(&mut vec![0; deployed_bin.len() - local_bin.len()]); } // Finally, check the bytes. let is_verified = local_bin == deployed_bin; Ok(BinVerification { state, is_verified }) } #[derive(PartialEq)] pub struct BinVerification { pub state: BinVerificationState, pub is_verified: bool, } #[derive(PartialEq)] pub enum BinVerificationState { Buffer, ProgramData { slot: u64, upgrade_authority_address: Option<Pubkey>, }, } // Fetches an IDL for the given program_id. fn fetch_idl(cfg_override: &ConfigOverride, idl_addr: Pubkey) -> Result<Idl> { let cfg = Config::discover(cfg_override)?.expect("Inside a workspace"); let client = RpcClient::new(cfg.provider.cluster.url().to_string()); let mut account = client .get_account_with_commitment(&idl_addr, CommitmentConfig::processed())? 
.value .map_or(Err(anyhow!("Account not found")), Ok)?; if account.executable { let idl_addr = IdlAccount::address(&idl_addr); account = client .get_account_with_commitment(&idl_addr, CommitmentConfig::processed())? .value .map_or(Err(anyhow!("Account not found")), Ok)?; } // Cut off account discriminator. let mut d: &[u8] = &account.data[8..]; let idl_account: IdlAccount = AnchorDeserialize::deserialize(&mut d)?; let mut z = ZlibDecoder::new(&idl_account.data[..]); let mut s = Vec::new(); z.read_to_end(&mut s)?; serde_json::from_slice(&s[..]).map_err(Into::into) } fn extract_idl(file: &str) -> Result<Option<Idl>> { let file = shellexpand::tilde(file); anchor_syn::idl::file::parse(&*file) } fn idl(cfg_override: &ConfigOverride, subcmd: IdlCommand) -> Result<()> { match subcmd { IdlCommand::Init { program_id, filepath, } => idl_init(cfg_override, program_id, filepath), IdlCommand::WriteBuffer { program_id, filepath, } => idl_write_buffer(cfg_override, program_id, filepath).map(|_| ()), IdlCommand::SetBuffer { program_id, buffer } => { idl_set_buffer(cfg_override, program_id, buffer) } IdlCommand::Upgrade { program_id, filepath, } => idl_upgrade(cfg_override, program_id, filepath), IdlCommand::SetAuthority { program_id, address, new_authority, } => idl_set_authority(cfg_override, program_id, address, new_authority), IdlCommand::EraseAuthority { program_id } => idl_erase_authority(cfg_override, program_id), IdlCommand::Authority { program_id } => idl_authority(cfg_override, program_id), IdlCommand::Parse { file, out } => idl_parse(file, out), IdlCommand::Fetch { address, out } => idl_fetch(cfg_override, address, out), } } fn idl_init(cfg_override: &ConfigOverride, program_id: Pubkey, idl_filepath: String) -> Result<()> { with_workspace(cfg_override, |cfg| { let keypair = cfg.provider.wallet.to_string(); let bytes = std::fs::read(idl_filepath)?; let idl: Idl = serde_json::from_reader(&*bytes)?; let idl_address = create_idl_account(cfg, &keypair, &program_id, &idl)?; println!("Idl account created: {:?}", idl_address); Ok(()) }) } fn idl_write_buffer( cfg_override: &ConfigOverride, program_id: Pubkey, idl_filepath: String, ) -> Result<Pubkey> { with_workspace(cfg_override, |cfg| { let keypair = cfg.provider.wallet.to_string(); let bytes = std::fs::read(idl_filepath)?; let idl: Idl = serde_json::from_reader(&*bytes)?; let idl_buffer = create_idl_buffer(cfg, &keypair, &program_id, &idl)?; idl_write(cfg, &program_id, &idl, idl_buffer)?; println!("Idl buffer created: {:?}", idl_buffer); Ok(idl_buffer) }) } fn idl_set_buffer(cfg_override: &ConfigOverride, program_id: Pubkey, buffer: Pubkey) -> Result<()> { with_workspace(cfg_override, |cfg| { let keypair = solana_sdk::signature::read_keypair_file(&cfg.provider.wallet.to_string()) .map_err(|_| anyhow!("Unable to read keypair file"))?; let client = RpcClient::new(cfg.provider.cluster.url().to_string()); // Instruction to set the buffer onto the IdlAccount. let set_buffer_ix = { let accounts = vec![ AccountMeta::new(buffer, false), AccountMeta::new(IdlAccount::address(&program_id), false), AccountMeta::new(keypair.pubkey(), true), ]; let mut data = anchor_lang::idl::IDL_IX_TAG.to_le_bytes().to_vec(); data.append(&mut IdlInstruction::SetBuffer.try_to_vec()?); Instruction { program_id, accounts, data, } }; // Build the transaction. let (recent_hash, _fee_calc) = client.get_recent_blockhash()?; let tx = Transaction::new_signed_with_payer( &[set_buffer_ix], Some(&keypair.pubkey()), &[&keypair], recent_hash, ); // Send the transaction. 
client.send_and_confirm_transaction_with_spinner_and_config( &tx, CommitmentConfig::confirmed(), RpcSendTransactionConfig { skip_preflight: true, ..RpcSendTransactionConfig::default() }, )?; Ok(()) }) } fn idl_upgrade( cfg_override: &ConfigOverride, program_id: Pubkey, idl_filepath: String, ) -> Result<()> { let buffer = idl_write_buffer(cfg_override, program_id, idl_filepath)?; idl_set_buffer(cfg_override, program_id, buffer) } fn idl_authority(cfg_override: &ConfigOverride, program_id: Pubkey) -> Result<()> { with_workspace(cfg_override, |cfg| { let client = RpcClient::new(cfg.provider.cluster.url().to_string()); let idl_address = { let account = client .get_account_with_commitment(&program_id, CommitmentConfig::processed())? .value .map_or(Err(anyhow!("Account not found")), Ok)?; if account.executable { IdlAccount::address(&program_id) } else { program_id } }; let account = client.get_account(&idl_address)?; let mut data: &[u8] = &account.data; let idl_account: IdlAccount = AccountDeserialize::try_deserialize(&mut data)?; println!("{:?}", idl_account.authority); Ok(()) }) } fn idl_set_authority( cfg_override: &ConfigOverride, program_id: Pubkey, address: Option<Pubkey>, new_authority: Pubkey, ) -> Result<()> { with_workspace(cfg_override, |cfg| { // Misc. let idl_address = match address { None => IdlAccount::address(&program_id), Some(addr) => addr, }; let keypair = solana_sdk::signature::read_keypair_file(&cfg.provider.wallet.to_string()) .map_err(|_| anyhow!("Unable to read keypair file"))?; let client = RpcClient::new(cfg.provider.cluster.url().to_string()); // Instruction data. let data = serialize_idl_ix(anchor_lang::idl::IdlInstruction::SetAuthority { new_authority })?; // Instruction accounts. let accounts = vec![ AccountMeta::new(idl_address, false), AccountMeta::new_readonly(keypair.pubkey(), true), ]; // Instruction. let ix = Instruction { program_id, accounts, data, }; // Send transaction. let (recent_hash, _fee_calc) = client.get_recent_blockhash()?; let tx = Transaction::new_signed_with_payer( &[ix], Some(&keypair.pubkey()), &[&keypair], recent_hash, ); client.send_and_confirm_transaction_with_spinner_and_config( &tx, CommitmentConfig::confirmed(), RpcSendTransactionConfig { skip_preflight: true, ..RpcSendTransactionConfig::default() }, )?; println!("Authority update complete."); Ok(()) }) } fn idl_erase_authority(cfg_override: &ConfigOverride, program_id: Pubkey) -> Result<()> { println!("Are you sure you want to erase the IDL authority: [y/n]"); let stdin = std::io::stdin(); let mut stdin_lines = stdin.lock().lines(); let input = stdin_lines.next().unwrap().unwrap(); if input != "y" { println!("Not erasing."); return Ok(()); } // Program will treat the zero authority as erased. let new_authority = Pubkey::new_from_array([0u8; 32]); idl_set_authority(cfg_override, program_id, None, new_authority)?; Ok(()) } // Write the idl to the account buffer, chopping up the IDL into pieces // and sending multiple transactions in the event the IDL doesn't fit into // a single transaction. fn idl_write(cfg: &Config, program_id: &Pubkey, idl: &Idl, idl_address: Pubkey) -> Result<()> { // Remove the metadata before deploy. let mut idl = idl.clone(); idl.metadata = None; // Misc. let keypair = solana_sdk::signature::read_keypair_file(&cfg.provider.wallet.to_string()) .map_err(|_| anyhow!("Unable to read keypair file"))?; let client = RpcClient::new(cfg.provider.cluster.url().to_string()); // Serialize and compress the idl. 
let idl_data = { let json_bytes = serde_json::to_vec(&idl)?; let mut e = ZlibEncoder::new(Vec::new(), Compression::default()); e.write_all(&json_bytes)?; e.finish()? }; const MAX_WRITE_SIZE: usize = 1000; let mut offset = 0; while offset < idl_data.len() { // Instruction data. let data = { let start = offset; let end = std::cmp::min(offset + MAX_WRITE_SIZE, idl_data.len()); serialize_idl_ix(anchor_lang::idl::IdlInstruction::Write { data: idl_data[start..end].to_vec(), })? }; // Instruction accounts. let accounts = vec![ AccountMeta::new(idl_address, false), AccountMeta::new_readonly(keypair.pubkey(), true), ]; // Instruction. let ix = Instruction { program_id: *program_id, accounts, data, }; // Send transaction. let (recent_hash, _fee_calc) = client.get_recent_blockhash()?; let tx = Transaction::new_signed_with_payer( &[ix], Some(&keypair.pubkey()), &[&keypair], recent_hash, ); client.send_and_confirm_transaction_with_spinner_and_config( &tx, CommitmentConfig::confirmed(), RpcSendTransactionConfig { skip_preflight: true, ..RpcSendTransactionConfig::default() }, )?; offset += MAX_WRITE_SIZE; } Ok(()) } fn idl_parse(file: String, out: Option<String>) -> Result<()> { let idl = extract_idl(&file)?.ok_or_else(|| anyhow!("IDL not parsed"))?; let out = match out { None => OutFile::Stdout, Some(out) => OutFile::File(PathBuf::from(out)), }; write_idl(&idl, out) } fn idl_fetch(cfg_override: &ConfigOverride, address: Pubkey, out: Option<String>) -> Result<()> { let idl = fetch_idl(cfg_override, address)?; let out = match out { None => OutFile::Stdout, Some(out) => OutFile::File(PathBuf::from(out)), }; write_idl(&idl, out) } fn write_idl(idl: &Idl, out: OutFile) -> Result<()> { let idl_json = serde_json::to_string_pretty(idl)?; match out { OutFile::Stdout => println!("{}", idl_json), OutFile::File(out) => std::fs::write(out, idl_json)?, }; Ok(()) } enum OutFile { Stdout, File(PathBuf), } // Builds, deploys, and tests all workspace programs in a single command. fn test( cfg_override: &ConfigOverride, skip_deploy: bool, skip_local_validator: bool, skip_build: bool, detach: bool, extra_args: Vec<String>, cargo_args: Vec<String>, ) -> Result<()> { with_workspace(cfg_override, |cfg| { // Build if needed. if !skip_build { build( cfg_override, None, false, None, None, None, None, cargo_args, )?; } // Run the deploy against the cluster in two cases: // // 1. The cluster is not localnet. // 2. The cluster is localnet, but we're not booting a local validator. // // In either case, skip the deploy if the user specifies. let is_localnet = cfg.provider.cluster == Cluster::Localnet; if (!is_localnet || skip_local_validator) && !skip_deploy { deploy(cfg_override, None)?; } // Start local test validator, if needed. let mut validator_handle = None; if is_localnet && (!skip_local_validator) { let flags = match skip_deploy { true => None, false => Some(genesis_flags(cfg)?), }; validator_handle = Some(start_test_validator(cfg, flags)?); } // Setup log reader. let log_streams = stream_logs(cfg); // Run the tests. 
let test_result: Result<_> = { let cmd = cfg .scripts .get("test") .expect("Not able to find command for `test`") .clone(); let mut args: Vec<&str> = cmd .split(' ') .chain(extra_args.iter().map(|arg| arg.as_str())) .collect(); let program = args.remove(0); std::process::Command::new(program) .args(args) .env("ANCHOR_PROVIDER_URL", cfg.provider.cluster.url()) .env("ANCHOR_WALLET", cfg.provider.wallet.to_string()) .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .output() .map_err(anyhow::Error::from) .context(cmd) }; // Keep validator running if needed. if test_result.is_ok() && detach { println!("Local validator still running. Press Ctrl + C quit."); std::io::stdin().lock().lines().next().unwrap().unwrap(); } // Check all errors and shut down. if let Some(mut child) = validator_handle { if let Err(err) = child.kill() { println!("Failed to kill subprocess {}: {}", child.id(), err); } } for mut child in log_streams? { if let Err(err) = child.kill() { println!("Failed to kill subprocess {}: {}", child.id(), err); } } // Must exist *after* shutting down the validator and log streams. match test_result { Ok(exit) => { if !exit.status.success() { std::process::exit(exit.status.code().unwrap()); } } Err(err) => { println!("Failed to run test: {:#}", err) } } Ok(()) }) } // Returns the solana-test-validator flags to embed the workspace programs // in the genesis block. This allows us to run tests without every deploying. fn genesis_flags(cfg: &WithPath<Config>) -> Result<Vec<String>> { let programs = cfg.programs.get(&Cluster::Localnet); let mut flags = Vec::new(); for mut program in cfg.read_all_programs()? { let binary_path = program.binary_path().display().to_string(); // Use the [programs.cluster] override and fallback to the keypair // files if no override is given. let address = programs .and_then(|m| m.get(&program.lib_name)) .map(|deployment| Ok(deployment.address.to_string())) .unwrap_or_else(|| program.pubkey().map(|p| p.to_string()))?; flags.push("--bpf-program".to_string()); flags.push(address.clone()); flags.push(binary_path); if let Some(mut idl) = program.idl.as_mut() { // Add program address to the IDL. idl.metadata = Some(serde_json::to_value(IdlTestMetadata { address })?); // Persist it. let idl_out = PathBuf::from("target/idl") .join(&idl.name) .with_extension("json"); write_idl(idl, OutFile::File(idl_out))?; } } if let Some(test) = cfg.test.as_ref() { for entry in &test.genesis { flags.push("--bpf-program".to_string()); flags.push(entry.address.clone()); flags.push(entry.program.clone()); } } Ok(flags) } fn stream_logs(config: &WithPath<Config>) -> Result<Vec<std::process::Child>> { let program_logs_dir = ".anchor/program-logs"; if Path::new(program_logs_dir).exists() { std::fs::remove_dir_all(program_logs_dir)?; } fs::create_dir_all(program_logs_dir)?; let mut handles = vec![]; for program in config.read_all_programs()? 
{ let mut file = File::open(&format!("target/idl/{}.json", program.lib_name))?; let mut contents = vec![]; file.read_to_end(&mut contents)?; let idl: Idl = serde_json::from_slice(&contents)?; let metadata = idl .metadata .ok_or_else(|| anyhow!("Program address not found."))?; let metadata: IdlTestMetadata = serde_json::from_value(metadata)?; let log_file = File::create(format!( "{}/{}.{}.log", program_logs_dir, metadata.address, program.lib_name, ))?; let stdio = std::process::Stdio::from(log_file); let child = std::process::Command::new("solana") .arg("logs") .arg(metadata.address) .arg("--url") .arg(config.provider.cluster.url()) .stdout(stdio) .spawn()?; handles.push(child); } if let Some(test) = config.test.as_ref() { for entry in &test.genesis { let log_file = File::create(format!("{}/{}.log", program_logs_dir, entry.address))?; let stdio = std::process::Stdio::from(log_file); let child = std::process::Command::new("solana") .arg("logs") .arg(entry.address.clone()) .arg("--url") .arg(config.provider.cluster.url()) .stdout(stdio) .spawn()?; handles.push(child); } } Ok(handles) } #[derive(Debug, Serialize, Deserialize)] pub struct IdlTestMetadata { address: String, } fn start_test_validator(cfg: &Config, flags: Option<Vec<String>>) -> Result<Child> { fs::create_dir_all(".anchor")?; let test_ledger_filename = ".anchor/test-ledger"; let test_ledger_log_filename = ".anchor/test-ledger-log.txt"; if Path::new(test_ledger_filename).exists() { std::fs::remove_dir_all(test_ledger_filename)?; } if Path::new(test_ledger_log_filename).exists() { std::fs::remove_file(test_ledger_log_filename)?; } // Start a validator for testing. let test_validator_stdout = File::create(test_ledger_log_filename)?; let test_validator_stderr = test_validator_stdout.try_clone()?; let validator_handle = std::process::Command::new("solana-test-validator") .arg("--ledger") .arg(test_ledger_filename) .arg("--mint") .arg(cfg.wallet_kp()?.pubkey().to_string()) .args(flags.unwrap_or_default()) .stdout(Stdio::from(test_validator_stdout)) .stderr(Stdio::from(test_validator_stderr)) .spawn() .map_err(|e| anyhow::format_err!("{}", e.to_string()))?; // Wait for the validator to be ready. let client = RpcClient::new("http://localhost:8899".to_string()); let mut count = 0; let ms_wait = 5000; while count < ms_wait { let r = client.get_recent_blockhash(); if r.is_ok() { break; } std::thread::sleep(std::time::Duration::from_millis(1)); count += 1; } if count == 5000 { println!("Unable to start test validator."); std::process::exit(1); } Ok(validator_handle) } fn deploy(cfg_override: &ConfigOverride, program_str: Option<String>) -> Result<()> { with_workspace(cfg_override, |cfg| { let url = cfg.provider.cluster.url().to_string(); let keypair = cfg.provider.wallet.to_string(); // Deploy the programs. println!("Deploying workspace: {}", url); println!("Upgrade authority: {}", keypair); for mut program in cfg.read_all_programs()? { if let Some(single_prog_str) = &program_str { let program_name = program.path.file_name().unwrap().to_str().unwrap(); if single_prog_str.as_str() != program_name { continue; } } let binary_path = program.binary_path().display().to_string(); println!( "Deploying program {:?}...", program.path.file_name().unwrap().to_str().unwrap() ); println!("Program path: {}...", binary_path); let file = program.keypair_file()?; // Send deploy transactions. 
let exit = std::process::Command::new("solana") .arg("program") .arg("deploy") .arg("--url") .arg(&url) .arg("--keypair") .arg(&keypair) .arg("--program-id") .arg(file.path().display().to_string()) .arg(&binary_path) .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .output() .expect("Must deploy"); if !exit.status.success() { println!("There was a problem deploying: {:?}.", exit); std::process::exit(exit.status.code().unwrap_or(1)); } let program_pubkey = program.pubkey()?; if let Some(mut idl) = program.idl.as_mut() { // Add program address to the IDL. idl.metadata = Some(serde_json::to_value(IdlTestMetadata { address: program_pubkey.to_string(), })?); // Persist it. let idl_out = PathBuf::from("target/idl") .join(&idl.name) .with_extension("json"); write_idl(idl, OutFile::File(idl_out))?; } } println!("Deploy success"); Ok(()) }) } fn upgrade( cfg_override: &ConfigOverride, program_id: Pubkey, program_filepath: String, ) -> Result<()> { let path: PathBuf = program_filepath.parse().unwrap(); let program_filepath = path.canonicalize()?.display().to_string(); with_workspace(cfg_override, |cfg| { let exit = std::process::Command::new("solana") .arg("program") .arg("deploy") .arg("--url") .arg(cfg.provider.cluster.url()) .arg("--keypair") .arg(&cfg.provider.wallet.to_string()) .arg("--program-id") .arg(program_id.to_string()) .arg(&program_filepath) .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .output() .expect("Must deploy"); if !exit.status.success() { println!("There was a problem deploying: {:?}.", exit); std::process::exit(exit.status.code().unwrap_or(1)); } Ok(()) }) } fn create_idl_account( cfg: &Config, keypair_path: &str, program_id: &Pubkey, idl: &Idl, ) -> Result<Pubkey> { // Misc. let idl_address = IdlAccount::address(program_id); let keypair = solana_sdk::signature::read_keypair_file(keypair_path) .map_err(|_| anyhow!("Unable to read keypair file"))?; let client = RpcClient::new(cfg.provider.cluster.url().to_string()); let idl_data = serialize_idl(idl)?; // Run `Create instruction. { let data = serialize_idl_ix(anchor_lang::idl::IdlInstruction::Create { data_len: (idl_data.len() as u64) * 2, // Double for future growth. })?; let program_signer = Pubkey::find_program_address(&[], program_id).0; let accounts = vec![ AccountMeta::new_readonly(keypair.pubkey(), true), AccountMeta::new(idl_address, false), AccountMeta::new_readonly(program_signer, false), AccountMeta::new_readonly(solana_program::system_program::ID, false), AccountMeta::new_readonly(*program_id, false), AccountMeta::new_readonly(solana_program::sysvar::rent::ID, false), ]; let ix = Instruction { program_id: *program_id, accounts, data, }; let (recent_hash, _fee_calc) = client.get_recent_blockhash()?; let tx = Transaction::new_signed_with_payer( &[ix], Some(&keypair.pubkey()), &[&keypair], recent_hash, ); client.send_and_confirm_transaction_with_spinner_and_config( &tx, CommitmentConfig::confirmed(), RpcSendTransactionConfig { skip_preflight: true, ..RpcSendTransactionConfig::default() }, )?; } // Write directly to the IDL account buffer. 
idl_write(cfg, program_id, idl, IdlAccount::address(program_id))?; Ok(idl_address) } fn create_idl_buffer( cfg: &Config, keypair_path: &str, program_id: &Pubkey, idl: &Idl, ) -> Result<Pubkey> { let keypair = solana_sdk::signature::read_keypair_file(keypair_path) .map_err(|_| anyhow!("Unable to read keypair file"))?; let client = RpcClient::new(cfg.provider.cluster.url().to_string()); let buffer = Keypair::generate(&mut OsRng); // Creates the new buffer account with the system program. let create_account_ix = { let space = 8 + 32 + 4 + serialize_idl(idl)?.len() as usize; let lamports = client.get_minimum_balance_for_rent_exemption(space)?; solana_sdk::system_instruction::create_account( &keypair.pubkey(), &buffer.pubkey(), lamports, space as u64, program_id, ) }; // Program instruction to create the buffer. let create_buffer_ix = { let accounts = vec![ AccountMeta::new(buffer.pubkey(), false), AccountMeta::new_readonly(keypair.pubkey(), true), AccountMeta::new_readonly(sysvar::rent::ID, false), ]; let mut data = anchor_lang::idl::IDL_IX_TAG.to_le_bytes().to_vec(); data.append(&mut IdlInstruction::CreateBuffer.try_to_vec()?); Instruction { program_id: *program_id, accounts, data, } }; // Build the transaction. let (recent_hash, _fee_calc) = client.get_recent_blockhash()?; let tx = Transaction::new_signed_with_payer( &[create_account_ix, create_buffer_ix], Some(&keypair.pubkey()), &[&keypair, &buffer], recent_hash, ); // Send the transaction. client.send_and_confirm_transaction_with_spinner_and_config( &tx, CommitmentConfig::confirmed(), RpcSendTransactionConfig { skip_preflight: true, ..RpcSendTransactionConfig::default() }, )?; Ok(buffer.pubkey()) } // Serialize and compress the idl. fn serialize_idl(idl: &Idl) -> Result<Vec<u8>> { let json_bytes = serde_json::to_vec(idl)?; let mut e = ZlibEncoder::new(Vec::new(), Compression::default()); e.write_all(&json_bytes)?; e.finish().map_err(Into::into) } fn serialize_idl_ix(ix_inner: anchor_lang::idl::IdlInstruction) -> Result<Vec<u8>> { let mut data = anchor_lang::idl::IDL_IX_TAG.to_le_bytes().to_vec(); data.append(&mut ix_inner.try_to_vec()?); Ok(data) } fn migrate(cfg_override: &ConfigOverride) -> Result<()> { with_workspace(cfg_override, |cfg| { println!("Running migration deploy script"); let url = cfg.provider.cluster.url().to_string(); let cur_dir = std::env::current_dir()?; let use_ts = Path::new("tsconfig.json").exists() && Path::new("migrations/deploy.ts").exists(); if !Path::new(".anchor").exists() { fs::create_dir(".anchor")?; } std::env::set_current_dir(".anchor")?; let exit = if use_ts { let module_path = cur_dir.join("migrations/deploy.ts"); let deploy_script_host_str = template::deploy_ts_script_host(&url, &module_path.display().to_string()); std::fs::write("deploy.ts", deploy_script_host_str)?; std::process::Command::new("ts-node") .arg("deploy.ts") .env("ANCHOR_WALLET", cfg.provider.wallet.to_string()) .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .output()? } else { let module_path = cur_dir.join("migrations/deploy.js"); let deploy_script_host_str = template::deploy_js_script_host(&url, &module_path.display().to_string()); std::fs::write("deploy.js", deploy_script_host_str)?; std::process::Command::new("node") .arg("deploy.js") .env("ANCHOR_WALLET", cfg.provider.wallet.to_string()) .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .output()? 
}; if !exit.status.success() { println!("Deploy failed."); std::process::exit(exit.status.code().unwrap()); } println!("Deploy complete."); Ok(()) }) } fn set_workspace_dir_or_exit() { let d = match Config::discover(&ConfigOverride::default()) { Err(_) => { println!("Not in anchor workspace."); std::process::exit(1); } Ok(d) => d, }; match d { None => { println!("Not in anchor workspace."); std::process::exit(1); } Some(cfg) => { match cfg.path().parent() { None => { println!("Unable to make new program"); } Some(parent) => { if std::env::set_current_dir(&parent).is_err() { println!("Not in anchor workspace."); std::process::exit(1); } } }; } } } #[cfg(feature = "dev")] fn airdrop(cfg_override: &ConfigOverride) -> Result<()> { let url = cfg_override .cluster .unwrap_or_else(|| "https://api.devnet.solana.com".to_string()); loop { let exit = std::process::Command::new("solana") .arg("airdrop") .arg("10") .arg("--url") .arg(&url) .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .output() .expect("Must airdrop"); if !exit.status.success() { println!("There was a problem airdropping: {:?}.", exit); std::process::exit(exit.status.code().unwrap_or(1)); } std::thread::sleep(std::time::Duration::from_millis(10000)); } } fn cluster(_cmd: ClusterCommand) -> Result<()> { println!("Cluster Endpoints:\n"); println!("* Mainnet - https://solana-api.projectserum.com"); println!("* Mainnet - https://api.mainnet-beta.solana.com"); println!("* Devnet - https://api.devnet.solana.com"); println!("* Testnet - https://api.testnet.solana.com"); Ok(()) } fn shell(cfg_override: &ConfigOverride) -> Result<()> { with_workspace(cfg_override, |cfg| { let programs = { // Create idl map from all workspace programs. let mut idls: HashMap<String, Idl> = cfg .read_all_programs()? .iter() .filter(|program| program.idl.is_some()) .map(|program| { ( program.idl.as_ref().unwrap().name.clone(), program.idl.clone().unwrap(), ) }) .collect(); // Insert all manually specified idls into the idl map. if let Some(programs) = cfg.programs.get(&cfg.provider.cluster) { let _ = programs .iter() .map(|(name, pd)| { if let Some(idl_fp) = &pd.idl { let file_str = std::fs::read_to_string(idl_fp).expect("Unable to read IDL file"); let idl = serde_json::from_str(&file_str).expect("Idl not readable"); idls.insert(name.clone(), idl); } }) .collect::<Vec<_>>(); } // Finalize program list with all programs with IDLs. 
match cfg.programs.get(&cfg.provider.cluster) { None => Vec::new(), Some(programs) => programs .iter() .filter_map(|(name, program_deployment)| { Some(ProgramWorkspace { name: name.to_string(), program_id: program_deployment.address, idl: match idls.get(name) { None => return None, Some(idl) => idl.clone(), }, }) }) .collect::<Vec<ProgramWorkspace>>(), } }; let js_code = template::node_shell( cfg.provider.cluster.url(), &cfg.provider.wallet.to_string(), programs, )?; let mut child = std::process::Command::new("node") .args(&["-e", &js_code, "-i", "--experimental-repl-await"]) .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .spawn() .map_err(|e| anyhow::format_err!("{}", e.to_string()))?; if !child.wait()?.success() { println!("Error running node shell"); return Ok(()); } Ok(()) }) } fn run(cfg_override: &ConfigOverride, script: String) -> Result<()> { with_workspace(cfg_override, |cfg| { let script = cfg .scripts .get(&script) .ok_or_else(|| anyhow!("Unable to find script"))?; let exit = std::process::Command::new("bash") .arg("-c") .arg(&script) .env("ANCHOR_PROVIDER_URL", cfg.provider.cluster.url()) .env("ANCHOR_WALLET", cfg.provider.wallet.to_string()) .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .output() .unwrap(); if !exit.status.success() { std::process::exit(exit.status.code().unwrap_or(1)); } Ok(()) }) } fn login(_cfg_override: &ConfigOverride, token: String) -> Result<()> { let dir = shellexpand::tilde("~/.config/anchor"); if !Path::new(&dir.to_string()).exists() { fs::create_dir(dir.to_string())?; } std::env::set_current_dir(dir.to_string())?; // Freely overwrite the entire file since it's not used for anything else. let mut file = File::create("credentials")?; file.write_all(template::credentials(&token).as_bytes())?; Ok(()) } fn publish( cfg_override: &ConfigOverride, program_name: String, cargo_args: Vec<String>, ) -> Result<()> { // Discover the various workspace configs. let cfg = Config::discover(cfg_override)?.expect("Not in workspace."); let program = cfg .get_program(&program_name)? .ok_or_else(|| anyhow!("Workspace member not found"))?; let program_cargo_lock = pathdiff::diff_paths( program.path().join("Cargo.lock"), cfg.path().parent().unwrap(), ) .ok_or_else(|| anyhow!("Unable to diff Cargo.lock path"))?; let cargo_lock = Path::new("Cargo.lock"); // There must be a Cargo.lock if !program_cargo_lock.exists() && !cargo_lock.exists() { return Err(anyhow!("Cargo.lock must exist for a verifiable build")); } println!("Publishing will make your code public. Are you sure? Enter (yes)/no:"); let answer = std::io::stdin().lock().lines().next().unwrap().unwrap(); if answer != "yes" { println!("Aborting"); return Ok(()); } let anchor_package = AnchorPackage::from(program_name.clone(), &cfg)?; let anchor_package_bytes = serde_json::to_vec(&anchor_package)?; // Set directory to top of the workspace. let workspace_dir = cfg.path().parent().unwrap(); std::env::set_current_dir(workspace_dir)?; // Create the workspace tarball. let dot_anchor = workspace_dir.join(".anchor"); fs::create_dir_all(&dot_anchor)?; let tarball_filename = dot_anchor.join(format!("{}.tar.gz", program_name)); let tar_gz = File::create(&tarball_filename)?; let enc = GzEncoder::new(tar_gz, Compression::default()); let mut tar = tar::Builder::new(enc); // Files that will always be included if they exist. 
println!("PACKING: Anchor.toml"); tar.append_path("Anchor.toml")?; if cargo_lock.exists() { println!("PACKING: Cargo.lock"); tar.append_path(cargo_lock)?; } if Path::new("Cargo.toml").exists() { println!("PACKING: Cargo.toml"); tar.append_path("Cargo.toml")?; } if Path::new("LICENSE").exists() { println!("PACKING: LICENSE"); tar.append_path("LICENSE")?; } if Path::new("README.md").exists() { println!("PACKING: README.md"); tar.append_path("README.md")?; } // All workspace programs. for path in cfg.get_program_list()? { let mut dirs = walkdir::WalkDir::new(&path) .into_iter() .filter_entry(|e| !is_hidden(e)); // Skip the parent dir. let _ = dirs.next().unwrap()?; for entry in dirs { let e = entry.map_err(|e| anyhow!("{:?}", e))?; let e = pathdiff::diff_paths(e.path(), cfg.path().parent().unwrap()) .ok_or_else(|| anyhow!("Unable to diff paths"))?; let path_str = e.display().to_string(); // Skip target dir. if !path_str.contains("target/") && !path_str.contains("/target") { // Only add the file if it's not empty. let metadata = std::fs::File::open(&e)?.metadata()?; if metadata.len() > 0 { println!("PACKING: {}", e.display().to_string()); if e.is_dir() { tar.append_dir_all(&e, &e)?; } else { tar.append_path(&e)?; } } } } } // Tar pack complete. tar.into_inner()?; // Create tmp directory for workspace. let ws_dir = dot_anchor.join("workspace"); if Path::exists(&ws_dir) { fs::remove_dir_all(&ws_dir)?; } fs::create_dir_all(&ws_dir)?; // Unpack the archive into the new workspace directory. std::env::set_current_dir(&ws_dir)?; unpack_archive(&tarball_filename)?; // Build the program before sending it to the server. build( cfg_override, None, true, Some(program_name), cfg.solana_version.clone(), None, None, cargo_args, )?; // Success. Now we can finally upload to the server without worrying // about a build failure. // Upload the tarball to the server. let token = registry_api_token(cfg_override)?; let form = Form::new() .part("manifest", Part::bytes(anchor_package_bytes)) .part("workspace", { let file = File::open(&tarball_filename)?; Part::reader(file) }); let client = Client::new(); let resp = client .post(&format!("{}/api/v0/build", cfg.registry.url)) .bearer_auth(token) .multipart(form) .send()?; if resp.status() == 200 { println!("Build triggered"); } else { println!( "{:?}", resp.text().unwrap_or_else(|_| "Server error".to_string()) ); } Ok(()) } // Unpacks the tarball into the current directory. fn unpack_archive(tar_path: impl AsRef<Path>) -> Result<()> { let tar = GzDecoder::new(std::fs::File::open(tar_path)?); let mut archive = Archive::new(tar); archive.unpack(".")?; archive.into_inner(); Ok(()) } fn registry_api_token(_cfg_override: &ConfigOverride) -> Result<String> { #[derive(Debug, Deserialize)] struct Registry { token: String, } #[derive(Debug, Deserialize)] struct Credentials { registry: Registry, } let filename = shellexpand::tilde("~/.config/anchor/credentials"); let mut file = File::open(filename.to_string())?; let mut contents = String::new(); file.read_to_string(&mut contents)?; let credentials_toml: Credentials = toml::from_str(&contents)?; Ok(credentials_toml.registry.token) } fn keys(cfg_override: &ConfigOverride, cmd: KeysCommand) -> Result<()> { match cmd { KeysCommand::List => keys_list(cfg_override), } } fn keys_list(cfg_override: &ConfigOverride) -> Result<()> { let cfg = Config::discover(cfg_override)?.expect("Not in workspace."); for program in cfg.read_all_programs()? 
{ let pubkey = program.pubkey()?; println!("{}: {}", program.lib_name, pubkey.to_string()); } Ok(()) } // with_workspace ensures the current working directory is always the top level // workspace directory, i.e., where the `Anchor.toml` file is located, before // and after the closure invocation. // // The closure passed into this function must never change the working directory // to be outside the workspace. Doing so will have undefined behavior. fn with_workspace<R>(cfg_override: &ConfigOverride, f: impl FnOnce(&WithPath<Config>) -> R) -> R { set_workspace_dir_or_exit(); let cfg = Config::discover(cfg_override) .expect("Previously set the workspace dir") .expect("Anchor.toml must always exist"); let r = f(&cfg); set_workspace_dir_or_exit(); r } fn is_hidden(entry: &walkdir::DirEntry) -> bool { entry .file_name() .to_str() .map(|s| s == "." || s.starts_with('.') || s == "target") .unwrap_or(false) }
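// --- Added sketch (not part of the file above): a minimal round-trip check
// for the zlib compression scheme that `serialize_idl` writes and `fetch_idl`
// reads back. It uses only `flate2` APIs already imported by this module; the
// JSON payload is a made-up stand-in for a real IDL.
#[cfg(test)]
mod idl_compression_sketch {
    use flate2::read::ZlibDecoder;
    use flate2::write::ZlibEncoder;
    use flate2::Compression;
    use std::io::{Read, Write};

    #[test]
    fn zlib_round_trip() {
        let json = br#"{"version":"0.0.0","name":"demo","instructions":[]}"#;

        // Compress the way `serialize_idl` does.
        let mut e = ZlibEncoder::new(Vec::new(), Compression::default());
        e.write_all(json).unwrap();
        let compressed = e.finish().unwrap();

        // Decompress the way `fetch_idl` does.
        let mut z = ZlibDecoder::new(&compressed[..]);
        let mut out = Vec::new();
        z.read_to_end(&mut out).unwrap();

        assert_eq!(out, json);
    }
}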
33.005677
100
0.560443
080b5c4f3cb5a14622317142c33cf236f41ace65
1,330
mod with_atom_left;
mod with_big_integer_left;
mod with_empty_list_left;
mod with_external_pid_left;
mod with_float_left;
mod with_heap_binary_left;
mod with_list_left;
mod with_local_pid_left;
mod with_local_reference_left;
mod with_map_left;
mod with_small_integer_left;
mod with_subbinary_left;
mod with_tuple_left;

use proptest::prop_assert_eq;
use proptest::test_runner::{Config, TestRunner};

use liblumen_alloc::erts::process::Process;
use liblumen_alloc::erts::term::prelude::*;

use crate::erlang::is_greater_than_or_equal_2::native;
use crate::test::{external_arc_node, strategy};
use crate::test::{with_process, with_process_arc};

#[test]
fn with_same_left_and_right_returns_true() {
    with_process_arc(|arc_process| {
        TestRunner::new(Config::with_source_file(file!()))
            .run(&strategy::term(arc_process.clone()), |operand| {
                prop_assert_eq!(native(operand, operand), true.into());

                Ok(())
            })
            .unwrap();
    });
}

fn is_greater_than_or_equal<L, R>(left: L, right: R, expected: bool)
where
    L: FnOnce(&Process) -> Term,
    R: FnOnce(Term, &Process) -> Term,
{
    with_process(|process| {
        let left = left(&process);
        let right = right(left, &process);

        assert_eq!(native(left, right), expected.into());
    });
}
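// --- Added sketch: how the per-type submodules above are expected to call the
// `is_greater_than_or_equal` helper. The `process.integer(...)` constructor is
// an assumption and may not match the real liblumen API, so the sketch is left
// commented out rather than compiled.
//
// #[test]
// fn with_greater_small_integer_left_returns_true() {
//     is_greater_than_or_equal(
//         |process| process.integer(1).unwrap(),
//         |_, process| process.integer(0).unwrap(),
//         true,
//     );
// }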
26.6
71
0.693985
dba422ca5c63adb061568d481412b78884c80956
496
#[derive(Debug)]
pub struct PerilGroup {
    pub id: String,
    pub desc: String,
    pub peril_ids: Vec<String>,
}

impl PerilGroup {
    pub fn new(id: &str, desc: &str, group_ids: Vec<&str>) -> PerilGroup {
        PerilGroup {
            id: String::from(id),
            desc: String::from(desc),
            peril_ids: group_ids.into_iter().map(String::from).collect(),
        }
    }
}
20.666667
74
0.524194
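For illustration, constructing a group from borrowed identifiers; the IDs and description here are hypothetical, not taken from any real peril taxonomy:

fn main() {
    let group = PerilGroup::new("WIND", "Wind-related perils", vec!["WS", "TORN", "HAIL"]);
    assert_eq!(group.peril_ids.len(), 3);
    println!("{:?}", group);
}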
5031395c2498e74e6c975939a8918e38b22dcb95
2,641
//! Delimiter bindings to provide byte offsets for all stages.
use super::conf::Configuration;

/// Delimiters struct to store the input/output separators
/// for all stages of a MapReduce lifecycle. Once created,
/// this structure should be considered immutable.
#[derive(Debug)]
pub struct Delimiters {
    input: Vec<u8>,
    output: Vec<u8>,
}

impl Delimiters {
    /// Creates a new `Delimiters` from a job `Configuration`.
    pub fn new(conf: &Configuration) -> Self {
        // check to see if this is map/reduce stage
        let stage = match conf.get("mapreduce.task.ismap") {
            Some(val) if val == "true" => "map",
            _ => "reduce",
        };

        // fetch the input/output separators for the current stage
        let input_key = format!("stream.{}.input.field.separator", stage);
        let output_key = format!("stream.{}.output.field.separator", stage);

        Self {
            // separators are optional, so default to a tab
            input: conf.get(&input_key).unwrap_or("\t").as_bytes().to_vec(),
            output: conf.get(&output_key).unwrap_or("\t").as_bytes().to_vec(),
        }
    }

    /// Returns a reference to the input delimiter.
    #[inline]
    pub fn input(&self) -> &[u8] {
        &self.input
    }

    /// Returns a reference to the output delimiter.
    #[inline]
    pub fn output(&self) -> &[u8] {
        &self.output
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_map_delimiters_creation() {
        let env = vec![
            ("mapreduce.task.ismap", "true"),
            ("stream.map.input.field.separator", ":"),
            ("stream.map.output.field.separator", "|"),
        ];
        let conf = Configuration::with_env(env.into_iter());
        let delim = Delimiters::new(&conf);

        assert_eq!(delim.input(), b":");
        assert_eq!(delim.output(), b"|");
    }

    #[test]
    fn test_reduce_delimiters_creation() {
        let env = vec![
            ("mapreduce.task.ismap", "false"),
            ("stream.reduce.input.field.separator", ":"),
            ("stream.reduce.output.field.separator", "|"),
        ];
        let conf = Configuration::with_env(env.into_iter());
        let delim = Delimiters::new(&conf);

        assert_eq!(delim.input(), b":");
        assert_eq!(delim.output(), b"|");
    }

    #[test]
    fn test_delimiter_defaults() {
        let env = Vec::<(String, String)>::new();
        let conf = Configuration::with_env(env.into_iter());
        let delim = Delimiters::new(&conf);

        assert_eq!(delim.input(), b"\t");
        assert_eq!(delim.output(), b"\t");
    }
}
29.021978
78
0.568345
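Outside the test module, typical use is to build the `Delimiters` once from the job configuration and hold onto the byte slices. A short sketch reusing the same `Configuration::with_env` constructor the record's own tests rely on; the keys and values are illustrative:

let env = vec![
    ("mapreduce.task.ismap", "true"),
    ("stream.map.output.field.separator", "|"),
];
let conf = Configuration::with_env(env.into_iter());
let delim = Delimiters::new(&conf);

// the unset input separator falls back to the tab default
assert_eq!(delim.input(), b"\t");
assert_eq!(delim.output(), b"|");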
28a4c9b8c2cdf08d83751d1a380a2c588c022596
626
mod certificate_signing_request; pub use self::certificate_signing_request::CertificateSigningRequest; #[cfg(feature = "api")] pub use self::certificate_signing_request::{ReadCertificateSigningRequestOptional, ReadCertificateSigningRequestResponse}; mod certificate_signing_request_condition; pub use self::certificate_signing_request_condition::CertificateSigningRequestCondition; mod certificate_signing_request_spec; pub use self::certificate_signing_request_spec::CertificateSigningRequestSpec; mod certificate_signing_request_status; pub use self::certificate_signing_request_status::CertificateSigningRequestStatus;
44.714286
146
0.886581
016d2b88fd8cf5b5a0c25bcf62c3aa2d270688cc
1,928
#![allow(unused_imports)]
use super::*;
use wasm_bindgen::prelude::*;
#[wasm_bindgen]
extern "C" {
    # [wasm_bindgen (extends = :: js_sys :: Object , js_name = AesKeyGenParams)]
    #[derive(Debug, Clone, PartialEq, Eq)]
    #[doc = "The `AesKeyGenParams` dictionary."]
    #[doc = ""]
    #[doc = "*This API requires the following crate features to be activated: `AesKeyGenParams`*"]
    pub type AesKeyGenParams;
}
impl AesKeyGenParams {
    #[doc = "Construct a new `AesKeyGenParams`."]
    #[doc = ""]
    #[doc = "*This API requires the following crate features to be activated: `AesKeyGenParams`*"]
    pub fn new(name: &str, length: u16) -> Self {
        #[allow(unused_mut)]
        let mut ret: Self = ::wasm_bindgen::JsCast::unchecked_into(::js_sys::Object::new());
        ret.name(name);
        ret.length(length);
        ret
    }
    #[doc = "Change the `name` field of this object."]
    #[doc = ""]
    #[doc = "*This API requires the following crate features to be activated: `AesKeyGenParams`*"]
    pub fn name(&mut self, val: &str) -> &mut Self {
        use wasm_bindgen::JsValue;
        let r = ::js_sys::Reflect::set(self.as_ref(), &JsValue::from("name"), &JsValue::from(val));
        debug_assert!(
            r.is_ok(),
            "setting properties should never fail on our dictionary objects"
        );
        let _ = r;
        self
    }
    #[doc = "Change the `length` field of this object."]
    #[doc = ""]
    #[doc = "*This API requires the following crate features to be activated: `AesKeyGenParams`*"]
    pub fn length(&mut self, val: u16) -> &mut Self {
        use wasm_bindgen::JsValue;
        let r = ::js_sys::Reflect::set(self.as_ref(), &JsValue::from("length"), &JsValue::from(val));
        debug_assert!(
            r.is_ok(),
            "setting properties should never fail on our dictionary objects"
        );
        let _ = r;
        self
    }
}
37.076923
99
0.587656
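As a usage sketch, the dictionary is built once and can be retargeted through the chaining setters. This assumes a wasm32 target with the `AesKeyGenParams` crate feature enabled, and "AES-GCM" is just one plausible WebCrypto algorithm name:

let mut params = AesKeyGenParams::new("AES-GCM", 256);
// shrink the key length after construction via the builder-style setter
params.length(128);
// `params` can now be passed wherever SubtleCrypto expects an algorithm object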
feccd3bad8bde9e4b08b5862fca41a39fdb325b8
2,457
// Copyright Cryptape Technologies LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use cita_types::Address;
use std::collections::HashMap;

#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub struct PeersInfo {
    pub amount: u32,
    pub peers: Option<HashMap<Address, String>>,
    #[serde(rename = "errorMessage")]
    pub error_message: Option<String>,
}

#[cfg(test)]
mod tests {
    use super::PeersInfo;
    use cita_types::Address;
    use serde_json;
    use std::collections::HashMap;

    #[test]
    fn peers_info_serialization_without_error_msg() {
        let addr1 = Address::random();
        let addr2 = Address::random();
        let addr3 = Address::random();

        let value = json!({
            "amount": 3,
            "peers": {
                format!("0x{:x}", addr1).to_string(): "12.123.14.53",
                format!("0x{:x}", addr2).to_string(): "32.52.64.32",
                format!("0x{:x}", addr3).to_string(): "67.68.32.21",
            },
            "errorMessage": serde_json::Value::Null,
        });

        let mut peers = HashMap::new();
        peers.insert(addr1, "12.123.14.53".to_owned());
        peers.insert(addr2, "32.52.64.32".to_owned());
        peers.insert(addr3, "67.68.32.21".to_owned());
        let peers_info = PeersInfo {
            amount: 3,
            peers: Some(peers),
            error_message: None,
        };

        assert_eq!(serde_json::to_value(peers_info).unwrap(), value);
    }

    #[test]
    fn peers_info_serialization_with_error_msg() {
        let value = json!({
            "amount": 0,
            "peers": serde_json::Value::Null,
            "errorMessage": "Disabled interface",
        });

        let peers_info = PeersInfo {
            amount: 0,
            peers: None,
            error_message: Some("Disabled interface".to_owned()),
        };

        assert_eq!(serde_json::to_value(peers_info).unwrap(), value);
    }
}
30.333333
75
0.599105
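Since the struct derives `Deserialize` as well, the same wire format parses back. A small sketch mirroring the error-message case from the tests above:

let json = r#"{"amount": 0, "peers": null, "errorMessage": "Disabled interface"}"#;
let info: PeersInfo = serde_json::from_str(json).unwrap();
assert_eq!(info.amount, 0);
assert!(info.peers.is_none());
assert_eq!(info.error_message.as_deref(), Some("Disabled interface"));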
75344e9a91c4fafebadb54e7356c0a129f4679fa
1,898
//! Core I/O traits and combinators when working with Tokio.
//!
//! A description of the high-level I/O combinators can be [found online] in
//! addition to a description of the [low level details].
//!
//! [found online]: https://tokio.rs/docs/getting-started/core/
//! [low level details]: https://tokio.rs/docs/going-deeper-tokio/core-low-level/

#![deny(missing_docs, missing_debug_implementations, warnings)]
#![doc(html_root_url = "https://docs.rs/tokio-io/0.1")]

#[macro_use]
extern crate log;
#[macro_use]
extern crate futures;
extern crate bytes;

use std::io as std_io;

use futures::{Future, Stream};

/// A convenience typedef around a `Future` whose error component is `io::Error`
pub type IoFuture<T> = Box<Future<Item = T, Error = std_io::Error> + Send>;

/// A convenience typedef around a `Stream` whose error component is `io::Error`
pub type IoStream<T> = Box<Stream<Item = T, Error = std_io::Error> + Send>;

/// A convenience macro for working with `io::Result<T>` from the `Read` and
/// `Write` traits.
///
/// This macro takes `io::Result<T>` as input, and returns `T` as the output. If
/// the input type is of the `Err` variant, then `Poll::NotReady` is returned if
/// it indicates `WouldBlock` or otherwise `Err` is returned.
#[macro_export]
macro_rules! try_nb {
    ($e:expr) => (match $e {
        Ok(t) => t,
        Err(ref e) if e.kind() == ::std::io::ErrorKind::WouldBlock => {
            return Ok(::futures::Async::NotReady)
        }
        Err(e) => return Err(e.into()),
    })
}

pub mod io;
pub mod codec;

mod allow_std;
mod async_read;
mod async_write;
mod framed;
mod framed_read;
mod framed_write;
mod length_delimited;
mod lines;
mod split;
mod window;

pub use self::async_read::AsyncRead;
pub use self::async_write::AsyncWrite;

fn _assert_objects() {
    fn _assert<T>() {}
    _assert::<Box<AsyncRead>>();
    _assert::<Box<AsyncWrite>>();
}
27.911765
81
0.672813
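A sketch of how `try_nb!` is meant to sit inside a poll-style function against the futures 0.1 API this crate targets, assuming the macro is in scope via `#[macro_use] extern crate tokio_io`; the function and its error message are made up for illustration:

use std::io::{self, Read};
use futures::{Async, Poll};

// Pulls a single byte from a non-blocking reader; `WouldBlock` becomes `NotReady`.
fn poll_read_byte<R: Read>(r: &mut R) -> Poll<u8, io::Error> {
    let mut buf = [0u8; 1];
    let n = try_nb!(r.read(&mut buf));
    if n == 0 {
        return Err(io::Error::new(io::ErrorKind::UnexpectedEof, "early eof"));
    }
    Ok(Async::Ready(buf[0]))
}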
29b891da38851505ca736bd5e1d4164ea0ee42d0
2,728
use std::io::Read; use std::str::FromStr; use crate::reader::*; use xml::reader::{EventReader, XmlEvent}; use super::{Paragraph, Table}; impl FromXML for Header { fn from_xml<R: Read>(reader: R) -> Result<Self, ReaderError> { let mut parser = EventReader::new(reader); let mut header = Self::default(); loop { let e = parser.next(); match e { Ok(XmlEvent::StartElement { attributes, name, .. }) => { let e = XMLElement::from_str(&name.local_name).unwrap(); match e { XMLElement::Paragraph => { let p = Paragraph::read(&mut parser, &attributes)?; header = header.add_paragraph(p); continue; } XMLElement::Table => { let t = Table::read(&mut parser, &attributes)?; header = header.add_table(t); continue; } _ => {} } } Ok(XmlEvent::EndDocument) => break, Err(_) => return Err(ReaderError::XMLReadError), _ => {} } } Ok(header) } } #[test] fn test_header_from_xml() { let xml = r#"<?xml version="1.0" encoding="UTF-8" standalone="yes"?> <w:hdr xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:o="urn:schemas-microsoft-com:office:office" xmlns:v="urn:schemas-microsoft-com:vml" xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main" xmlns:w10="urn:schemas-microsoft-com:office:word" xmlns:wp="http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing" xmlns:wps="http://schemas.microsoft.com/office/word/2010/wordprocessingShape" xmlns:wpg="http://schemas.microsoft.com/office/word/2010/wordprocessingGroup" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:wp14="http://schemas.microsoft.com/office/word/2010/wordprocessingDrawing" xmlns:w14="http://schemas.microsoft.com/office/word/2010/wordml" mc:Ignorable="w14 wp14"> <w:p w14:paraId="12345678"> <w:pPr> <w:rPr /> </w:pPr> <w:r> <w:rPr /> <w:t xml:space="preserve">Hello Header</w:t> </w:r> </w:p> </w:hdr>"#; let h = Header::from_xml(xml.as_bytes()).unwrap(); let expected = Header::new().add_paragraph(Paragraph::new().add_run(Run::new().add_text("Hello Header"))); assert_eq!(h, expected) }
37.888889
99
0.536657
01da64620e08f20f998dc22d961f2d29bfad6297
3,727
// preludes use chrono::prelude::*; use opencv::prelude::*; // opencv imports use opencv::core; use opencv::core::Point; use opencv::core::Rect; use opencv::core::Scalar; use opencv::imgcodecs::imwrite; use opencv::imgproc::put_text; use opencv::imgproc::rectangle; use opencv::imgproc::FONT_HERSHEY_PLAIN; use opencv::imgproc::LINE_AA; use opencv::types::VectorOfi32; use opencv::videoio::VideoCapture; use opencv::videoio::CAP_ANY; use opencv::videoio::CAP_PROP_FRAME_WIDTH; // clap imports use clap::{App, Arg}; // standard imports use std::env; use std::fs::create_dir_all; use std::thread; use std::time::Duration; fn capture(interval: Duration, output_dir: &str) -> opencv::Result<()> { let mut cam = VideoCapture::new(0, CAP_ANY)?; let mut first_frame: bool = true; loop { // read frame data let mut frame = core::Mat::default()?; cam.read(&mut frame)?; // skip the first frame if first_frame { first_frame = false; // give the camera some time to figure out its life thread::sleep(Duration::from_secs(1)); continue; } // ensure the frame actually contains data if frame.size()?.width == 0 { continue; } // set up the output file name let now: DateTime<Local> = Local::now(); let filename = format!("{}/{}.jpg", output_dir, now.timestamp_nanos()); let timestamp = now.to_rfc2822(); let width: i32 = cam.get(CAP_PROP_FRAME_WIDTH).unwrap() as i32; if frame.size()?.width > 0 { // add timestamp text // TODO: adapt text color to time of day (so that black text doesn't get lost in background) rectangle( &mut frame, Rect::new(0, 0, width, 20), Scalar::all(0.0), -1, LINE_AA, 0, )?; put_text( &mut frame, timestamp.as_str(), Point::new(10, 15), FONT_HERSHEY_PLAIN, 1.0, Scalar::all(255.0), 1, LINE_AA, false, )?; println!("printing {}", filename); let params = VectorOfi32::new(); imwrite(filename.as_str(), &frame, &params)?; } thread::sleep(interval); } } fn main() { let current_dir = env::current_dir().unwrap(); let output_dir_default = format!("{}/chronocam-output", current_dir.display()); let matches = App::new("Chronocam") .version("0.1.0") .author("Nick Wood <[email protected]>") .about("A simple security timelapse camera") .arg( Arg::with_name("interval") .short("i") .long("interval") .value_name("DELAY_SECONDS") .help("Specifies delay (in seconds) between photos") .default_value("3") .takes_value(true), ) .arg( Arg::with_name("output") .short("o") .long("output") .value_name("OUTPUT_DIRNAME") .help("The name of the output directory") .default_value(output_dir_default.as_str()) .takes_value(true), ) .get_matches(); let interval = matches.value_of("interval").unwrap(); let output = matches.value_of("output").unwrap(); let interval: u64 = interval.parse::<u64>().unwrap(); // make space for output create_dir_all(output).unwrap(); // start capturing capture(Duration::from_secs(interval), output).unwrap(); }
29.816
104
0.541723
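Given the clap definition above, a typical invocation looks like this (the output path is illustrative):

chronocam --interval 5 --output /tmp/timelapse

which captures a timestamped frame roughly every five seconds into the given directory, after the initial one-second warm-up frame is skipped.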
d9ab29335491654686c87148aea026440fa24b0e
4,550
use crate::trace::{ compressed_reader::{CompressedReader, CompressedReaderState}, trace_reader::{resolve_trace_name, TraceReaderBackend}, trace_stream::{substreams_data, Substream, TraceStream}, }; use capnp::{message, message::ReaderOptions, serialize, serialize_packed::read_message}; use std::{ collections::HashMap, io::Read, ops::{Deref, DerefMut}, path::Path, }; impl TraceReaderBackend for TraceReaderFileBackend { fn rewind(&mut self) { for w in self.readers.values_mut() { w.rewind(); } self.global_time = 0; } fn uncompressed_bytes(&self) -> u64 { let mut total: u64 = 0; for w in self.readers.values() { total += w.uncompressed_bytes().unwrap(); } total } fn compressed_bytes(&self) -> u64 { let mut total: u64 = 0; for w in self.readers.values() { total += w.compressed_bytes().unwrap(); } total } fn make_clone(&self) -> Box<dyn TraceReaderBackend> { Box::new(self.clone()) } fn read_message( &mut self, substream: Substream, ) -> Result<message::Reader<serialize::OwnedSegments>, Box<dyn std::error::Error>> { let mut stream = self.reader_mut(substream); match read_message(&mut stream, ReaderOptions::new()) { Ok(res) => Ok(res), Err(e) => Err(Box::new(e)), } } fn read_data_exact( &mut self, substream: Substream, size: usize, ) -> Result<Vec<u8>, Box<dyn std::error::Error>> { let mut buf = vec![0u8; size]; match self.reader_mut(substream).read_exact(&mut buf) { Ok(()) => Ok(buf), Err(e) => Err(Box::new(e)), } } fn at_end(&self, substream: Substream) -> bool { self.reader(substream).at_end() } fn skip( &mut self, substream: Substream, size: usize, ) -> Result<(), Box<dyn std::error::Error>> { match self.reader_mut(substream).skip(size) { Ok(()) => Ok(()), Err(e) => Err(Box::new(e)), } } fn discard_state(&mut self, substream: Substream) { let cr = self.reader_mut(substream); cr.saved_state.take(); } fn save_state(&mut self, substream: Substream) { let cr = self.reader_mut(substream); debug_assert!(cr.saved_state.is_none()); let state = CompressedReaderState { saved_fd_offset: cr.fd_offset, saved_buffer: cr.buffer.clone(), saved_buffer_read_pos: cr.buffer_read_pos, }; cr.saved_state = Some(state); } fn restore_state(&mut self, substream: Substream) { let state = self.reader_mut(substream).saved_state.take().unwrap(); let cr = self.reader_mut(substream); if state.saved_fd_offset < cr.fd_offset { cr.eof = false; } cr.fd_offset = state.saved_fd_offset; cr.buffer = state.saved_buffer; cr.buffer_read_pos = state.saved_buffer_read_pos; } } impl Deref for TraceReaderFileBackend { type Target = TraceStream; fn deref(&self) -> &Self::Target { &self.trace_stream } } impl DerefMut for TraceReaderFileBackend { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.trace_stream } } #[derive(Clone)] pub struct TraceReaderFileBackend { trace_stream: TraceStream, readers: HashMap<Substream, CompressedReader>, } impl TraceReaderFileBackend { pub fn new<T: AsRef<Path>>(maybe_dir: Option<T>) -> TraceReaderFileBackend { // Set the global time at 0, so that when we tick it for the first // event, it matches the initial global time at recording, 1. 
// We don't know bind_to_cpu right now, will calculate it later and set it let trace_stream = TraceStream::new(&resolve_trace_name(maybe_dir), 0, None); let mut readers: HashMap<Substream, CompressedReader> = HashMap::new(); for s in substreams_data() { readers.insert( s.substream, CompressedReader::new(&trace_stream.path(s.substream)), ); } TraceReaderFileBackend { trace_stream, readers, } } fn reader(&self, s: Substream) -> &CompressedReader { self.readers.get(&s).unwrap() } fn reader_mut(&mut self, s: Substream) -> &mut CompressedReader { self.readers.get_mut(&s).unwrap() } }
28.797468
88
0.587253
1e36f5287a77d9ebb8fe8fef617b5ecf8b6afb6c
669
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![feature(box_syntax)]

pub fn main() {
    let mut i: Box<_> = box 1;
    // Should be a copy
    let mut j;
    j = i.clone();
    *i = 2;
    *j = 3;
    assert_eq!(*i, 2);
    assert_eq!(*j, 3);
}
29.086957
68
0.660688
03bec52bec17e458fec0aa488becdf72fa719be9
25,684
// Copyright (c) 2020 rust-mysql-simple contributors // // Licensed under the Apache License, Version 2.0 // <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT // license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. All files in the project carrying such notice may not be copied, // modified, or distributed except according to those terms. use std::{ collections::VecDeque, fmt, ops::Deref, sync::{ atomic::{AtomicUsize, Ordering}, Arc, Condvar, Mutex, }, time::Duration as StdDuration, }; use crate::{ conn::query_result::{Binary, Text}, prelude::*, time::{Duration, SteadyTime}, Conn, DriverError, Error, LocalInfileHandler, Opts, Params, QueryResult, Result, Statement, Transaction, TxOpts, }; #[derive(Debug)] struct InnerPool { opts: Opts, pool: VecDeque<Conn>, } impl InnerPool { fn new(min: usize, max: usize, opts: Opts) -> Result<InnerPool> { if min > max || max == 0 { return Err(Error::DriverError(DriverError::InvalidPoolConstraints)); } let mut pool = InnerPool { opts, pool: VecDeque::with_capacity(max), }; for _ in 0..min { pool.new_conn()?; } Ok(pool) } fn new_conn(&mut self) -> Result<()> { match Conn::new(self.opts.clone()) { Ok(conn) => { self.pool.push_back(conn); Ok(()) } Err(err) => Err(err), } } } /// `Pool` serves to provide you with a [`PooledConn`](struct.PooledConn.html)'s. /// However you can prepare statements directly on `Pool` without /// invoking [`Pool::get_conn`](struct.Pool.html#method.get_conn). /// /// `Pool` will hold at least `min` connections and will create as many as `max` /// connections with possible overhead of one connection per alive thread. /// /// Example of multithreaded `Pool` usage: /// /// ```rust /// # mysql::doctest_wrapper!(__result, { /// # use mysql::*; /// # use mysql::prelude::*; /// # let mut conn = Conn::new(get_opts())?; /// let opts = get_opts(); /// let pool = Pool::new(opts).unwrap(); /// let mut threads = Vec::new(); /// /// for _ in 0..100 { /// let pool = pool.clone(); /// threads.push(std::thread::spawn(move || { /// let mut conn = pool.get_conn().unwrap(); /// let result: u8 = conn.query_first("SELECT 1").unwrap().unwrap(); /// assert_eq!(result, 1_u8); /// })); /// } /// /// for t in threads.into_iter() { /// assert!(t.join().is_ok()); /// } /// # }); /// ``` /// /// For more info on how to work with mysql connection please look at /// [`PooledConn`](struct.PooledConn.html) documentation. #[derive(Clone)] pub struct Pool { inner: Arc<(Mutex<InnerPool>, Condvar)>, min: Arc<AtomicUsize>, max: Arc<AtomicUsize>, count: Arc<AtomicUsize>, check_health: bool, use_cache: bool, } impl Pool { /// Will return connection taken from a pool. /// /// Will verify and fix it via `Conn::ping` and `Conn::reset` if `call_ping` is `true`. /// Will try to get concrete connection if `id` is `Some(_)`. 
/// Will wait til timeout if `timeout_ms` is `Some(_)` fn _get_conn<T: AsRef<str>>( &self, stmt: Option<T>, timeout_ms: Option<u32>, call_ping: bool, ) -> Result<PooledConn> { let times = if let Some(timeout_ms) = timeout_ms { Some(( SteadyTime::now(), Duration::milliseconds(timeout_ms.into()), StdDuration::from_millis(timeout_ms.into()), )) } else { None }; let &(ref inner_pool, ref condvar) = &*self.inner; let conn = if self.use_cache { if let Some(query) = stmt { let mut id = None; let mut pool = inner_pool.lock()?; for (i, conn) in pool.pool.iter().rev().enumerate() { if conn.has_stmt(query.as_ref()) { id = Some(i); break; } } id.and_then(|id| pool.pool.swap_remove_back(id)) } else { None } } else { None }; let mut conn = if let Some(conn) = conn { conn } else { let out_conn; let mut pool = inner_pool.lock()?; loop { if let Some(conn) = pool.pool.pop_front() { drop(pool); out_conn = Some(conn); break; } else if self.count.load(Ordering::Relaxed) < self.max.load(Ordering::Relaxed) { pool.new_conn()?; self.count.fetch_add(1, Ordering::SeqCst); } else { pool = if let Some((start, timeout, std_timeout)) = times { if SteadyTime::now() - start > timeout { return Err(DriverError::Timeout.into()); } condvar.wait_timeout(pool, std_timeout)?.0 } else { condvar.wait(pool)? } } } out_conn.unwrap() }; if call_ping && self.check_health && !conn.ping() { conn.reset()?; } Ok(PooledConn { pool: self.clone(), conn: Some(conn), }) } /// Creates new pool with `min = 10` and `max = 100`. pub fn new<T: Into<Opts>>(opts: T) -> Result<Pool> { Pool::new_manual(10, 100, opts) } /// Same as `new` but you can set `min` and `max`. pub fn new_manual<T: Into<Opts>>(min: usize, max: usize, opts: T) -> Result<Pool> { let pool = InnerPool::new(min, max, opts.into())?; Ok(Pool { inner: Arc::new((Mutex::new(pool), Condvar::new())), min: Arc::new(AtomicUsize::new(min)), max: Arc::new(AtomicUsize::new(max)), count: Arc::new(AtomicUsize::new(min)), use_cache: true, check_health: true, }) } /// A way to turn off searching for cached statement (on by default). /// /// If turned on, then calls to `Pool::{prepare, prep_exec, first_exec}` will search for cached /// statement through all connections in the pool. Useless if the value of the `stmt_cache_size` /// option is 0. pub fn use_cache(&mut self, use_cache: bool) { self.use_cache = use_cache; } /// A way to turn off connection health check on each call to `get_conn` and `prepare` /// (`prep_exec` is not affected) (on by default). pub fn check_health(&mut self, check_health: bool) { self.check_health = check_health; } /// Gives you a [`PooledConn`](struct.PooledConn.html). /// /// `Pool` will check that connection is alive via /// [`Conn::ping`](struct.Conn.html#method.ping) and will /// call [`Conn::reset`](struct.Conn.html#method.reset) if /// necessary. pub fn get_conn(&self) -> Result<PooledConn> { self._get_conn(None::<String>, None, true) } /// Will try to get connection for a duration of `timeout_ms` milliseconds. /// /// # Failure /// This function will return `Error::DriverError(DriverError::Timeout)` if timeout was /// reached while waiting for new connection to become available. pub fn try_get_conn(&self, timeout_ms: u32) -> Result<PooledConn> { self._get_conn(None::<String>, Some(timeout_ms), true) } /// Shortcut for `pool.get_conn()?.start_transaction(..)`. 
pub fn start_transaction(&self, tx_opts: TxOpts) -> Result<Transaction<'static>> { let conn = self._get_conn(None::<String>, None, false)?; let result = conn.pooled_start_transaction(tx_opts); match result { Ok(trans) => Ok(trans), Err(ref e) if e.is_connectivity_error() => { let conn = self._get_conn(None::<String>, None, true)?; conn.pooled_start_transaction(tx_opts) } Err(e) => Err(e), } } } impl fmt::Debug for Pool { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "Pool {{ min: {}, max: {}, count: {} }}", self.min.load(Ordering::Relaxed), self.max.load(Ordering::Relaxed), self.count.load(Ordering::Relaxed) ) } } /// Pooled mysql connection which will return to the pool on `drop`. /// /// You should prefer using `prepare` or `prep_exec` instead of `query` where possible, except /// cases when statement has no params and when it has no return values or return values which /// evaluates to `Value::Bytes`. /// /// `query` is a part of mysql text protocol, so under the hood you will always receive /// `Value::Bytes` as a result and `from_value` will need to parse it if you want, for example, `i64` /// /// ```rust /// # mysql::doctest_wrapper!(__result, { /// # use mysql::*; /// # use mysql::prelude::*; /// # let mut conn = Conn::new(get_opts())?; /// let pool = Pool::new(get_opts()).unwrap(); /// let mut conn = pool.get_conn().unwrap(); /// /// conn.query_first("SELECT 42").map(|result: Option<Value>| { /// let result = result.unwrap(); /// assert_eq!(result, Value::Bytes(b"42".to_vec())); /// assert_eq!(from_value::<i64>(result), 42i64); /// }).unwrap(); /// conn.exec_iter("SELECT 42", ()).map(|mut result| { /// let cell = result.next().unwrap().unwrap().take(0).unwrap(); /// assert_eq!(cell, Value::Int(42i64)); /// assert_eq!(from_value::<i64>(cell), 42i64); /// }).unwrap(); /// # }); /// ``` /// /// For more info on how to work with query results please look at /// [`QueryResult`](../struct.QueryResult.html) documentation. #[derive(Debug)] pub struct PooledConn { pool: Pool, conn: Option<Conn>, } impl Deref for PooledConn { type Target = Conn; fn deref(&self) -> &Self::Target { self.conn.as_ref().expect("deref after drop") } } impl Drop for PooledConn { fn drop(&mut self) { if self.pool.count.load(Ordering::Relaxed) > self.pool.max.load(Ordering::Relaxed) || self.conn.is_none() { self.pool.count.fetch_sub(1, Ordering::SeqCst); } else { self.conn.as_mut().unwrap().set_local_infile_handler(None); let mut pool = (self.pool.inner).0.lock().unwrap(); pool.pool.push_back(self.conn.take().unwrap()); drop(pool); (self.pool.inner).1.notify_one(); } } } impl PooledConn { /// Redirects to /// [`Conn#start_transaction`](struct.Conn.html#method.start_transaction) pub fn start_transaction(&mut self, tx_opts: TxOpts) -> Result<Transaction> { self.conn.as_mut().unwrap().start_transaction(tx_opts) } /// Gives mutable reference to the wrapped /// [`Conn`](struct.Conn.html). pub fn as_mut(&mut self) -> &mut Conn { self.conn.as_mut().unwrap() } /// Gives reference to the wrapped /// [`Conn`](struct.Conn.html). pub fn as_ref(&self) -> &Conn { self.conn.as_ref().unwrap() } /// Unwraps wrapped [`Conn`](struct.Conn.html). pub fn unwrap(mut self) -> Conn { self.conn.take().unwrap() } fn pooled_start_transaction(mut self, tx_opts: TxOpts) -> Result<Transaction<'static>> { self.as_mut()._start_transaction(tx_opts)?; Ok(Transaction::new(self.into())) } /// A way to override default local infile handler for this pooled connection. 
Destructor will /// restore original handler before returning connection to a pool. /// See [`Conn::set_local_infile_handler`](struct.Conn.html#method.set_local_infile_handler). pub fn set_local_infile_handler(&mut self, handler: Option<LocalInfileHandler>) { self.conn .as_mut() .unwrap() .set_local_infile_handler(handler); } } impl Queryable for PooledConn { fn query_iter<T: AsRef<str>>(&mut self, query: T) -> Result<QueryResult<'_, '_, '_, Text>> { self.conn.as_mut().unwrap().query_iter(query) } fn prep<T: AsRef<str>>(&mut self, query: T) -> Result<Statement> { self.conn.as_mut().unwrap().prep(query) } fn close(&mut self, stmt: Statement) -> Result<()> { self.conn.as_mut().unwrap().close(stmt) } fn exec_iter<S, P>(&mut self, stmt: S, params: P) -> Result<QueryResult<'_, '_, '_, Binary>> where S: AsStatement, P: Into<Params>, { self.conn.as_mut().unwrap().exec_iter(stmt, params) } } #[cfg(test)] #[allow(non_snake_case)] mod test { mod pool { use std::{thread, time::Duration}; use crate::{ from_value, prelude::*, test_misc::get_opts, DriverError, Error, OptsBuilder, Pool, TxOpts, }; #[test] fn multiple_pools_should_work() { let pool = Pool::new(get_opts()).unwrap(); pool.get_conn() .unwrap() .exec_drop("DROP DATABASE IF EXISTS A", ()) .unwrap(); pool.get_conn() .unwrap() .exec_drop("CREATE DATABASE A", ()) .unwrap(); pool.get_conn() .unwrap() .exec_drop("DROP TABLE IF EXISTS A.a", ()) .unwrap(); pool.get_conn() .unwrap() .exec_drop("CREATE TABLE IF NOT EXISTS A.a (id INT)", ()) .unwrap(); pool.get_conn() .unwrap() .exec_drop("INSERT INTO A.a VALUES (1)", ()) .unwrap(); let opts = OptsBuilder::from_opts(get_opts()).db_name(Some("A")); let pool2 = Pool::new(opts).unwrap(); let count: u8 = pool2 .get_conn() .unwrap() .exec_first("SELECT COUNT(*) FROM a", ()) .unwrap() .unwrap(); assert_eq!(1, count); pool.get_conn() .unwrap() .exec_drop("DROP DATABASE A", ()) .unwrap(); } struct A { pool: Pool, x: u32, } impl A { fn add(&mut self) { self.x += 1; } } #[test] fn should_fix_connectivity_errors_on_prepare() { let pool = Pool::new_manual(2, 2, get_opts()).unwrap(); let mut conn = pool.get_conn().unwrap(); let id: u32 = pool .get_conn() .unwrap() .exec_first("SELECT CONNECTION_ID();", ()) .unwrap() .unwrap(); conn.exec_drop("KILL CONNECTION ?", (id,)).unwrap(); thread::sleep(Duration::from_millis(250)); pool.get_conn() .unwrap() .prep("SHOW FULL PROCESSLIST") .unwrap(); } #[test] fn should_fix_connectivity_errors_on_prep_exec() { let pool = Pool::new_manual(2, 2, get_opts()).unwrap(); let mut conn = pool.get_conn().unwrap(); let id: u32 = pool .get_conn() .unwrap() .exec_first("SELECT CONNECTION_ID();", ()) .unwrap() .unwrap(); conn.exec_drop("KILL CONNECTION ?", (id,)).unwrap(); thread::sleep(Duration::from_millis(250)); pool.get_conn() .unwrap() .exec_drop("SHOW FULL PROCESSLIST", ()) .unwrap(); } #[test] fn should_fix_connectivity_errors_on_start_transaction() { let pool = Pool::new_manual(2, 2, get_opts()).unwrap(); let mut conn = pool.get_conn().unwrap(); let id: u32 = pool .get_conn() .unwrap() .exec_first("SELECT CONNECTION_ID();", ()) .unwrap() .unwrap(); conn.exec_drop("KILL CONNECTION ?", (id,)).unwrap(); thread::sleep(Duration::from_millis(250)); pool.start_transaction(TxOpts::default()).unwrap(); } #[test] fn should_execute_queryes_on_PooledConn() { let pool = Pool::new(get_opts()).unwrap(); let mut threads = Vec::new(); for _ in 0usize..10 { let pool = pool.clone(); threads.push(thread::spawn(move || { let conn = pool.get_conn(); assert!(conn.is_ok()); let mut conn = conn.unwrap(); 
conn.query_drop("SELECT 1").unwrap(); })); } for t in threads.into_iter() { assert!(t.join().is_ok()); } } #[test] fn should_timeout_if_no_connections_available() { let pool = Pool::new_manual(0, 1, get_opts()).unwrap(); let conn1 = pool.try_get_conn(357).unwrap(); let conn2 = pool.try_get_conn(357); assert!(conn2.is_err()); match conn2 { Err(Error::DriverError(DriverError::Timeout)) => assert!(true), _ => assert!(false), } drop(conn1); assert!(pool.try_get_conn(357).is_ok()); } #[test] fn should_execute_statements_on_PooledConn() { let pool = Pool::new(get_opts()).unwrap(); let mut threads = Vec::new(); for _ in 0usize..10 { let pool = pool.clone(); threads.push(thread::spawn(move || { let mut conn = pool.get_conn().unwrap(); let stmt = conn.prep("SELECT 1").unwrap(); conn.exec_drop(&stmt, ()).unwrap(); })); } for t in threads.into_iter() { assert!(t.join().is_ok()); } let pool = Pool::new(get_opts()).unwrap(); let mut threads = Vec::new(); for _ in 0usize..10 { let pool = pool.clone(); threads.push(thread::spawn(move || { let mut conn = pool.get_conn().unwrap(); conn.exec_drop("SELECT ?", (1,)).unwrap(); })); } for t in threads.into_iter() { assert!(t.join().is_ok()); } } #[test] #[allow(unused_variables)] fn should_start_transaction_on_Pool() { let pool = Pool::new_manual(1, 10, get_opts()).unwrap(); pool.get_conn() .unwrap() .query_drop("CREATE TEMPORARY TABLE mysql.tbl(a INT)") .unwrap(); pool.start_transaction(TxOpts::default()) .and_then(|mut t| { t.query_drop("INSERT INTO mysql.tbl(a) VALUES(1)").unwrap(); t.query_drop("INSERT INTO mysql.tbl(a) VALUES(2)").unwrap(); t.commit() }) .unwrap(); assert_eq!( pool.get_conn() .unwrap() .query_first::<u8, _>("SELECT COUNT(a) FROM mysql.tbl") .unwrap() .unwrap(), 2_u8 ); pool.start_transaction(TxOpts::default()) .and_then(|mut t| { t.query_drop("INSERT INTO mysql.tbl(a) VALUES(1)").unwrap(); t.query_drop("INSERT INTO mysql.tbl(a) VALUES(2)").unwrap(); t.rollback() }) .unwrap(); assert_eq!( pool.get_conn() .unwrap() .query_first::<u8, _>("SELECT COUNT(a) FROM mysql.tbl") .unwrap() .unwrap(), 2_u8 ); pool.start_transaction(TxOpts::default()) .and_then(|mut t| { t.query_drop("INSERT INTO mysql.tbl(a) VALUES(1)").unwrap(); t.query_drop("INSERT INTO mysql.tbl(a) VALUES(2)").unwrap(); Ok(()) }) .unwrap(); assert_eq!( pool.get_conn() .unwrap() .query_first::<u8, _>("SELECT COUNT(a) FROM mysql.tbl") .unwrap() .unwrap(), 2_u8 ); let mut a = A { pool, x: 0 }; let transaction = a.pool.start_transaction(TxOpts::default()).unwrap(); a.add(); } #[test] fn should_start_transaction_on_PooledConn() { let pool = Pool::new(get_opts()).unwrap(); let mut conn = pool.get_conn().unwrap(); conn.query_drop("CREATE TEMPORARY TABLE mysql.tbl(a INT)") .unwrap(); conn.start_transaction(TxOpts::default()) .and_then(|mut t| { t.query_drop("INSERT INTO mysql.tbl(a) VALUES(1)").unwrap(); t.query_drop("INSERT INTO mysql.tbl(a) VALUES(2)").unwrap(); t.commit() }) .unwrap(); for x in conn.query_iter("SELECT COUNT(a) FROM mysql.tbl").unwrap() { let mut x = x.unwrap(); assert_eq!(from_value::<u8>(x.take(0).unwrap()), 2u8); } conn.start_transaction(TxOpts::default()) .and_then(|mut t| { t.query_drop("INSERT INTO mysql.tbl(a) VALUES(1)").unwrap(); t.query_drop("INSERT INTO mysql.tbl(a) VALUES(2)").unwrap(); t.rollback() }) .unwrap(); for x in conn.query_iter("SELECT COUNT(a) FROM mysql.tbl").unwrap() { let mut x = x.unwrap(); assert_eq!(from_value::<u8>(x.take(0).unwrap()), 2u8); } conn.start_transaction(TxOpts::default()) .and_then(|mut t| { t.query_drop("INSERT INTO mysql.tbl(a) 
VALUES(1)").unwrap(); t.query_drop("INSERT INTO mysql.tbl(a) VALUES(2)").unwrap(); Ok(()) }) .unwrap(); for x in conn.query_iter("SELECT COUNT(a) FROM mysql.tbl").unwrap() { let mut x = x.unwrap(); assert_eq!(from_value::<u8>(x.take(0).unwrap()), 2u8); } } #[cfg(feature = "nightly")] mod bench { use test; use std::thread; use crate::{test_misc::get_opts, Pool}; #[bench] fn many_prepares(bencher: &mut test::Bencher) { let pool = Pool::new(get_opts()).unwrap(); bencher.iter(|| { pool.prepare("SELECT 1").unwrap(); }); } #[bench] fn many_prepexecs(bencher: &mut test::Bencher) { let pool = Pool::new(get_opts()).unwrap(); bencher.iter(|| { pool.prep_exec("SELECT 1", ()).unwrap(); }); } #[bench] fn many_prepares_threaded(bencher: &mut test::Bencher) { let pool = Pool::new(get_opts()).unwrap(); bencher.iter(|| { let mut threads = Vec::new(); for _ in 0..4 { let pool = pool.clone(); threads.push(thread::spawn(move || { for _ in 0..250 { test::black_box( pool.prep_exec( "SELECT 1, 'hello world', 123.321, ?, ?, ?", ("hello", "world", 65536), ) .unwrap(), ); } })); } for t in threads { t.join().unwrap(); } }); } #[bench] fn many_prepares_threaded_no_cache(bencher: &mut test::Bencher) { let mut pool = Pool::new(get_opts()).unwrap(); pool.use_cache(false); bencher.iter(|| { let mut threads = Vec::new(); for _ in 0..4 { let pool = pool.clone(); threads.push(thread::spawn(move || { for _ in 0..250 { test::black_box( pool.prep_exec( "SELECT 1, 'hello world', 123.321, ?, ?, ?", ("hello", "world", 65536), ) .unwrap(), ); } })); } for t in threads { t.join().unwrap(); } }); } } } }
34.802168
101
0.48427
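One pattern the test suite above exercises is worth calling out: bounding the pool size and handling exhaustion through `try_get_conn`. A sketch; the `get_opts()` helper is assumed to exist (as it does in the crate's tests) and the 250 ms timeout is arbitrary:

use mysql::{DriverError, Error, Pool};

let pool = Pool::new_manual(0, 1, get_opts()).unwrap();
let held = pool.get_conn().unwrap(); // occupies the only slot
match pool.try_get_conn(250) {
    Err(Error::DriverError(DriverError::Timeout)) => {
        // pool exhausted: back off or surface the error
    }
    other => drop(other), // a free connection or a different failure
}
drop(held); // returning `held` to the pool unblocks later waiters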
d9cc353c66af6655164f59a5f90179e40d93d9a6
19,991
use std::error; use std::ffi::{self, CStr}; use std::fmt; use std::io; use std::str; use curl_sys; /// An error returned from various "easy" operations. /// /// This structure wraps a `CURLcode`. #[derive(Clone, PartialEq)] pub struct Error { code: curl_sys::CURLcode, extra: Option<Box<str>>, } impl Error { /// Creates a new error from the underlying code returned by libcurl. pub fn new(code: curl_sys::CURLcode) -> Error { Error { code: code, extra: None, } } /// Stores some extra information about this error inside this error. /// /// This is typically used with `take_error_buf` on the easy handles to /// couple the extra `CURLOPT_ERRORBUFFER` information with an `Error` being /// returned. pub fn set_extra(&mut self, extra: String) { self.extra = Some(extra.into()); } /// Returns whether this error corresponds to CURLE_UNSUPPORTED_PROTOCOL. pub fn is_unsupported_protocol(&self) -> bool { self.code == curl_sys::CURLE_UNSUPPORTED_PROTOCOL } /// Returns whether this error corresponds to CURLE_FAILED_INIT. pub fn is_failed_init(&self) -> bool { self.code == curl_sys::CURLE_FAILED_INIT } /// Returns whether this error corresponds to CURLE_URL_MALFORMAT. pub fn is_url_malformed(&self) -> bool { self.code == curl_sys::CURLE_URL_MALFORMAT } // /// Returns whether this error corresponds to CURLE_NOT_BUILT_IN. // pub fn is_not_built_in(&self) -> bool { // self.code == curl_sys::CURLE_NOT_BUILT_IN // } /// Returns whether this error corresponds to CURLE_COULDNT_RESOLVE_PROXY. pub fn is_couldnt_resolve_proxy(&self) -> bool { self.code == curl_sys::CURLE_COULDNT_RESOLVE_PROXY } /// Returns whether this error corresponds to CURLE_COULDNT_RESOLVE_HOST. pub fn is_couldnt_resolve_host(&self) -> bool { self.code == curl_sys::CURLE_COULDNT_RESOLVE_HOST } /// Returns whether this error corresponds to CURLE_COULDNT_CONNECT. pub fn is_couldnt_connect(&self) -> bool { self.code == curl_sys::CURLE_COULDNT_CONNECT } /// Returns whether this error corresponds to CURLE_REMOTE_ACCESS_DENIED. pub fn is_remote_access_denied(&self) -> bool { self.code == curl_sys::CURLE_REMOTE_ACCESS_DENIED } /// Returns whether this error corresponds to CURLE_PARTIAL_FILE. pub fn is_partial_file(&self) -> bool { self.code == curl_sys::CURLE_PARTIAL_FILE } /// Returns whether this error corresponds to CURLE_QUOTE_ERROR. pub fn is_quote_error(&self) -> bool { self.code == curl_sys::CURLE_QUOTE_ERROR } /// Returns whether this error corresponds to CURLE_HTTP_RETURNED_ERROR. pub fn is_http_returned_error(&self) -> bool { self.code == curl_sys::CURLE_HTTP_RETURNED_ERROR } /// Returns whether this error corresponds to CURLE_READ_ERROR. pub fn is_read_error(&self) -> bool { self.code == curl_sys::CURLE_READ_ERROR } /// Returns whether this error corresponds to CURLE_WRITE_ERROR. pub fn is_write_error(&self) -> bool { self.code == curl_sys::CURLE_WRITE_ERROR } /// Returns whether this error corresponds to CURLE_UPLOAD_FAILED. pub fn is_upload_failed(&self) -> bool { self.code == curl_sys::CURLE_UPLOAD_FAILED } /// Returns whether this error corresponds to CURLE_OUT_OF_MEMORY. pub fn is_out_of_memory(&self) -> bool { self.code == curl_sys::CURLE_OUT_OF_MEMORY } /// Returns whether this error corresponds to CURLE_OPERATION_TIMEDOUT. pub fn is_operation_timedout(&self) -> bool { self.code == curl_sys::CURLE_OPERATION_TIMEDOUT } /// Returns whether this error corresponds to CURLE_RANGE_ERROR. pub fn is_range_error(&self) -> bool { self.code == curl_sys::CURLE_RANGE_ERROR } /// Returns whether this error corresponds to CURLE_HTTP_POST_ERROR. 
pub fn is_http_post_error(&self) -> bool { self.code == curl_sys::CURLE_HTTP_POST_ERROR } /// Returns whether this error corresponds to CURLE_SSL_CONNECT_ERROR. pub fn is_ssl_connect_error(&self) -> bool { self.code == curl_sys::CURLE_SSL_CONNECT_ERROR } /// Returns whether this error corresponds to CURLE_BAD_DOWNLOAD_RESUME. pub fn is_bad_download_resume(&self) -> bool { self.code == curl_sys::CURLE_BAD_DOWNLOAD_RESUME } /// Returns whether this error corresponds to CURLE_FILE_COULDNT_READ_FILE. pub fn is_file_couldnt_read_file(&self) -> bool { self.code == curl_sys::CURLE_FILE_COULDNT_READ_FILE } /// Returns whether this error corresponds to CURLE_FUNCTION_NOT_FOUND. pub fn is_function_not_found(&self) -> bool { self.code == curl_sys::CURLE_FUNCTION_NOT_FOUND } /// Returns whether this error corresponds to CURLE_ABORTED_BY_CALLBACK. pub fn is_aborted_by_callback(&self) -> bool { self.code == curl_sys::CURLE_ABORTED_BY_CALLBACK } /// Returns whether this error corresponds to CURLE_BAD_FUNCTION_ARGUMENT. pub fn is_bad_function_argument(&self) -> bool { self.code == curl_sys::CURLE_BAD_FUNCTION_ARGUMENT } /// Returns whether this error corresponds to CURLE_INTERFACE_FAILED. pub fn is_interface_failed(&self) -> bool { self.code == curl_sys::CURLE_INTERFACE_FAILED } /// Returns whether this error corresponds to CURLE_TOO_MANY_REDIRECTS. pub fn is_too_many_redirects(&self) -> bool { self.code == curl_sys::CURLE_TOO_MANY_REDIRECTS } /// Returns whether this error corresponds to CURLE_UNKNOWN_OPTION. pub fn is_unknown_option(&self) -> bool { self.code == curl_sys::CURLE_UNKNOWN_OPTION } /// Returns whether this error corresponds to CURLE_PEER_FAILED_VERIFICATION. pub fn is_peer_failed_verification(&self) -> bool { self.code == curl_sys::CURLE_PEER_FAILED_VERIFICATION } /// Returns whether this error corresponds to CURLE_GOT_NOTHING. pub fn is_got_nothing(&self) -> bool { self.code == curl_sys::CURLE_GOT_NOTHING } /// Returns whether this error corresponds to CURLE_SSL_ENGINE_NOTFOUND. pub fn is_ssl_engine_notfound(&self) -> bool { self.code == curl_sys::CURLE_SSL_ENGINE_NOTFOUND } /// Returns whether this error corresponds to CURLE_SSL_ENGINE_SETFAILED. pub fn is_ssl_engine_setfailed(&self) -> bool { self.code == curl_sys::CURLE_SSL_ENGINE_SETFAILED } /// Returns whether this error corresponds to CURLE_SEND_ERROR. pub fn is_send_error(&self) -> bool { self.code == curl_sys::CURLE_SEND_ERROR } /// Returns whether this error corresponds to CURLE_RECV_ERROR. pub fn is_recv_error(&self) -> bool { self.code == curl_sys::CURLE_RECV_ERROR } /// Returns whether this error corresponds to CURLE_SSL_CERTPROBLEM. pub fn is_ssl_certproblem(&self) -> bool { self.code == curl_sys::CURLE_SSL_CERTPROBLEM } /// Returns whether this error corresponds to CURLE_SSL_CIPHER. pub fn is_ssl_cipher(&self) -> bool { self.code == curl_sys::CURLE_SSL_CIPHER } /// Returns whether this error corresponds to CURLE_SSL_CACERT. pub fn is_ssl_cacert(&self) -> bool { self.code == curl_sys::CURLE_SSL_CACERT } /// Returns whether this error corresponds to CURLE_BAD_CONTENT_ENCODING. pub fn is_bad_content_encoding(&self) -> bool { self.code == curl_sys::CURLE_BAD_CONTENT_ENCODING } /// Returns whether this error corresponds to CURLE_FILESIZE_EXCEEDED. pub fn is_filesize_exceeded(&self) -> bool { self.code == curl_sys::CURLE_FILESIZE_EXCEEDED } /// Returns whether this error corresponds to CURLE_USE_SSL_FAILED. 
pub fn is_use_ssl_failed(&self) -> bool { self.code == curl_sys::CURLE_USE_SSL_FAILED } /// Returns whether this error corresponds to CURLE_SEND_FAIL_REWIND. pub fn is_send_fail_rewind(&self) -> bool { self.code == curl_sys::CURLE_SEND_FAIL_REWIND } /// Returns whether this error corresponds to CURLE_SSL_ENGINE_INITFAILED. pub fn is_ssl_engine_initfailed(&self) -> bool { self.code == curl_sys::CURLE_SSL_ENGINE_INITFAILED } /// Returns whether this error corresponds to CURLE_LOGIN_DENIED. pub fn is_login_denied(&self) -> bool { self.code == curl_sys::CURLE_LOGIN_DENIED } /// Returns whether this error corresponds to CURLE_CONV_FAILED. pub fn is_conv_failed(&self) -> bool { self.code == curl_sys::CURLE_CONV_FAILED } /// Returns whether this error corresponds to CURLE_CONV_REQD. pub fn is_conv_required(&self) -> bool { self.code == curl_sys::CURLE_CONV_REQD } /// Returns whether this error corresponds to CURLE_SSL_CACERT_BADFILE. pub fn is_ssl_cacert_badfile(&self) -> bool { self.code == curl_sys::CURLE_SSL_CACERT_BADFILE } /// Returns whether this error corresponds to CURLE_SSL_CRL_BADFILE. pub fn is_ssl_crl_badfile(&self) -> bool { self.code == curl_sys::CURLE_SSL_CRL_BADFILE } /// Returns whether this error corresponds to CURLE_SSL_SHUTDOWN_FAILED. pub fn is_ssl_shutdown_failed(&self) -> bool { self.code == curl_sys::CURLE_SSL_SHUTDOWN_FAILED } /// Returns whether this error corresponds to CURLE_AGAIN. pub fn is_again(&self) -> bool { self.code == curl_sys::CURLE_AGAIN } /// Returns whether this error corresponds to CURLE_SSL_ISSUER_ERROR. pub fn is_ssl_issuer_error(&self) -> bool { self.code == curl_sys::CURLE_SSL_ISSUER_ERROR } /// Returns whether this error corresponds to CURLE_CHUNK_FAILED. pub fn is_chunk_failed(&self) -> bool { self.code == curl_sys::CURLE_CHUNK_FAILED } /// Returns whether this error corresponds to CURLE_HTTP2. pub fn is_http2_error(&self) -> bool { self.code == curl_sys::CURLE_HTTP2 } /// Returns whether this error corresponds to CURLE_HTTP2_STREAM. pub fn is_http2_stream_error(&self) -> bool { self.code == curl_sys::CURLE_HTTP2_STREAM } // /// Returns whether this error corresponds to CURLE_NO_CONNECTION_AVAILABLE. // pub fn is_no_connection_available(&self) -> bool { // self.code == curl_sys::CURLE_NO_CONNECTION_AVAILABLE // } /// Returns the value of the underlying error corresponding to libcurl. pub fn code(&self) -> curl_sys::CURLcode { self.code } /// Returns the extra description of this error, if any is available. pub fn extra_description(&self) -> Option<&str> { self.extra.as_ref().map(|s| &**s) } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let desc = error::Error::description(self); match self.extra { Some(ref s) => write!(f, "[{}] {} ({})", self.code(), desc, s), None => write!(f, "[{}] {}", self.code(), desc), } } } impl fmt::Debug for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Error") .field("description", &error::Error::description(self)) .field("code", &self.code) .field("extra", &self.extra) .finish() } } impl error::Error for Error { fn description(&self) -> &str { unsafe { let s = curl_sys::curl_easy_strerror(self.code); assert!(!s.is_null()); str::from_utf8(CStr::from_ptr(s).to_bytes()).unwrap() } } } /// An error returned from "share" operations. /// /// This structure wraps a `CURLSHcode`. #[derive(Clone, PartialEq)] pub struct ShareError { code: curl_sys::CURLSHcode, } impl ShareError { /// Creates a new error from the underlying code returned by libcurl. 
pub fn new(code: curl_sys::CURLSHcode) -> ShareError { ShareError { code: code } } /// Returns whether this error corresponds to CURLSHE_BAD_OPTION. pub fn is_bad_option(&self) -> bool { self.code == curl_sys::CURLSHE_BAD_OPTION } /// Returns whether this error corresponds to CURLSHE_IN_USE. pub fn is_in_use(&self) -> bool { self.code == curl_sys::CURLSHE_IN_USE } /// Returns whether this error corresponds to CURLSHE_INVALID. pub fn is_invalid(&self) -> bool { self.code == curl_sys::CURLSHE_INVALID } /// Returns whether this error corresponds to CURLSHE_NOMEM. pub fn is_nomem(&self) -> bool { self.code == curl_sys::CURLSHE_NOMEM } // /// Returns whether this error corresponds to CURLSHE_NOT_BUILT_IN. // pub fn is_not_built_in(&self) -> bool { // self.code == curl_sys::CURLSHE_NOT_BUILT_IN // } /// Returns the value of the underlying error corresponding to libcurl. pub fn code(&self) -> curl_sys::CURLSHcode { self.code } } impl fmt::Display for ShareError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { error::Error::description(self).fmt(f) } } impl fmt::Debug for ShareError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "ShareError {{ description: {:?}, code: {} }}", error::Error::description(self), self.code ) } } impl error::Error for ShareError { fn description(&self) -> &str { unsafe { let s = curl_sys::curl_share_strerror(self.code); assert!(!s.is_null()); str::from_utf8(CStr::from_ptr(s).to_bytes()).unwrap() } } } /// An error from "multi" operations. /// /// THis structure wraps a `CURLMcode`. #[derive(Clone, PartialEq)] pub struct MultiError { code: curl_sys::CURLMcode, } impl MultiError { /// Creates a new error from the underlying code returned by libcurl. pub fn new(code: curl_sys::CURLMcode) -> MultiError { MultiError { code: code } } /// Returns whether this error corresponds to CURLM_BAD_HANDLE. pub fn is_bad_handle(&self) -> bool { self.code == curl_sys::CURLM_BAD_HANDLE } /// Returns whether this error corresponds to CURLM_BAD_EASY_HANDLE. pub fn is_bad_easy_handle(&self) -> bool { self.code == curl_sys::CURLM_BAD_EASY_HANDLE } /// Returns whether this error corresponds to CURLM_OUT_OF_MEMORY. pub fn is_out_of_memory(&self) -> bool { self.code == curl_sys::CURLM_OUT_OF_MEMORY } /// Returns whether this error corresponds to CURLM_INTERNAL_ERROR. pub fn is_internal_error(&self) -> bool { self.code == curl_sys::CURLM_INTERNAL_ERROR } /// Returns whether this error corresponds to CURLM_BAD_SOCKET. pub fn is_bad_socket(&self) -> bool { self.code == curl_sys::CURLM_BAD_SOCKET } /// Returns whether this error corresponds to CURLM_UNKNOWN_OPTION. pub fn is_unknown_option(&self) -> bool { self.code == curl_sys::CURLM_UNKNOWN_OPTION } /// Returns whether this error corresponds to CURLM_CALL_MULTI_PERFORM. pub fn is_call_perform(&self) -> bool { self.code == curl_sys::CURLM_CALL_MULTI_PERFORM } // /// Returns whether this error corresponds to CURLM_ADDED_ALREADY. // pub fn is_added_already(&self) -> bool { // self.code == curl_sys::CURLM_ADDED_ALREADY // } /// Returns the value of the underlying error corresponding to libcurl. 
pub fn code(&self) -> curl_sys::CURLMcode { self.code } } impl fmt::Display for MultiError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { error::Error::description(self).fmt(f) } } impl fmt::Debug for MultiError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "MultiError {{ description: {:?}, code: {} }}", error::Error::description(self), self.code ) } } impl error::Error for MultiError { fn description(&self) -> &str { unsafe { let s = curl_sys::curl_multi_strerror(self.code); assert!(!s.is_null()); str::from_utf8(CStr::from_ptr(s).to_bytes()).unwrap() } } } /// An error from "form add" operations. /// /// THis structure wraps a `CURLFORMcode`. #[derive(Clone, PartialEq)] pub struct FormError { code: curl_sys::CURLFORMcode, } impl FormError { /// Creates a new error from the underlying code returned by libcurl. pub fn new(code: curl_sys::CURLFORMcode) -> FormError { FormError { code: code } } /// Returns whether this error corresponds to CURL_FORMADD_MEMORY. pub fn is_memory(&self) -> bool { self.code == curl_sys::CURL_FORMADD_MEMORY } /// Returns whether this error corresponds to CURL_FORMADD_OPTION_TWICE. pub fn is_option_twice(&self) -> bool { self.code == curl_sys::CURL_FORMADD_OPTION_TWICE } /// Returns whether this error corresponds to CURL_FORMADD_NULL. pub fn is_null(&self) -> bool { self.code == curl_sys::CURL_FORMADD_NULL } /// Returns whether this error corresponds to CURL_FORMADD_UNKNOWN_OPTION. pub fn is_unknown_option(&self) -> bool { self.code == curl_sys::CURL_FORMADD_UNKNOWN_OPTION } /// Returns whether this error corresponds to CURL_FORMADD_INCOMPLETE. pub fn is_incomplete(&self) -> bool { self.code == curl_sys::CURL_FORMADD_INCOMPLETE } /// Returns whether this error corresponds to CURL_FORMADD_ILLEGAL_ARRAY. pub fn is_illegal_array(&self) -> bool { self.code == curl_sys::CURL_FORMADD_ILLEGAL_ARRAY } /// Returns whether this error corresponds to CURL_FORMADD_DISABLED. pub fn is_disabled(&self) -> bool { self.code == curl_sys::CURL_FORMADD_DISABLED } /// Returns the value of the underlying error corresponding to libcurl. 
pub fn code(&self) -> curl_sys::CURLFORMcode { self.code } } impl fmt::Display for FormError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { error::Error::description(self).fmt(f) } } impl fmt::Debug for FormError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "FormError {{ description: {:?}, code: {} }}", error::Error::description(self), self.code ) } } impl error::Error for FormError { fn description(&self) -> &str { match self.code { curl_sys::CURL_FORMADD_MEMORY => "allocation failure", curl_sys::CURL_FORMADD_OPTION_TWICE => "one option passed twice", curl_sys::CURL_FORMADD_NULL => "null pointer given for string", curl_sys::CURL_FORMADD_UNKNOWN_OPTION => "unknown option", curl_sys::CURL_FORMADD_INCOMPLETE => "form information not complete", curl_sys::CURL_FORMADD_ILLEGAL_ARRAY => "illegal array in option", curl_sys::CURL_FORMADD_DISABLED => { "libcurl does not have support for this option compiled in" } _ => "unknown form error", } } } impl From<ffi::NulError> for Error { fn from(_: ffi::NulError) -> Error { Error { code: curl_sys::CURLE_CONV_FAILED, extra: None, } } } impl From<Error> for io::Error { fn from(e: Error) -> io::Error { io::Error::new(io::ErrorKind::Other, e) } } impl From<ShareError> for io::Error { fn from(e: ShareError) -> io::Error { io::Error::new(io::ErrorKind::Other, e) } } impl From<MultiError> for io::Error { fn from(e: MultiError) -> io::Error { io::Error::new(io::ErrorKind::Other, e) } } impl From<FormError> for io::Error { fn from(e: FormError) -> io::Error { io::Error::new(io::ErrorKind::Other, e) } }
32.139871
83
0.651543
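The predicate methods above exist so callers can branch on failure classes without matching raw `CURLcode`s. A sketch using the `curl` crate's easy API; the retry policy is illustrative:

use curl::easy::Easy;

fn fetch(url: &str) -> Result<(), curl::Error> {
    let mut easy = Easy::new();
    easy.url(url)?;
    if let Err(e) = easy.perform() {
        if e.is_couldnt_connect() || e.is_operation_timedout() {
            // likely transient: log any CURLOPT_ERRORBUFFER detail before retrying later
            eprintln!("retryable curl failure: {:?}", e.extra_description());
        }
        return Err(e);
    }
    Ok(())
}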
d68661f87e92312fc7e7b489862c655e7234d9e4
5,107
//! RTC driver

use kernel::common::cells::OptionalCell;
use kernel::common::regs::{ReadOnly, ReadWrite};
use kernel::common::StaticRef;
use kernel::hil::time::{self, Alarm, Frequency, Time};

#[repr(C)]
struct RtcRegisters {
    ctl: ReadWrite<u32, Control::Register>,
    // Event flags
    evflags: ReadWrite<u32, EvFlags::Register>,
    // Integer part
    sec: ReadWrite<u32>,
    // Fractional part (1/32kHz parts of a second)
    subsec: ReadOnly<u32>,
    _subsec_inc: ReadOnly<u32>,
    channel_ctl: ReadWrite<u32, ChannelControl::Register>,
    _channel0_cmp: ReadOnly<u32>,
    channel1_cmp: ReadWrite<u32>,
    _channel2_cmp: ReadOnly<u32>,
    _channel2_cmp_inc: ReadOnly<u32>,
    _channel1_capture: ReadOnly<u32>,
    // A read request to the sync register will not return
    // until all outstanding writes have properly propagated to the RTC domain
    sync: ReadOnly<u32>,
}

register_bitfields![
    u32,
    Control [
        COMB_EV_MASK OFFSET(16) NUMBITS(3) [
            NoEvent = 0b00,
            Channel0 = 0b01,
            Channel1 = 0b10,
            Channel2 = 0b11
        ],
        RESET OFFSET(7) NUMBITS(1) [],
        RTC_UPD_EN OFFSET(1) NUMBITS(1) [],
        ENABLE OFFSET(0) NUMBITS(1) []
    ],
    EvFlags [
        CH2 OFFSET(16) NUMBITS(1) [],
        CH1 OFFSET(8) NUMBITS(1) [],
        CH0 OFFSET(0) NUMBITS(1) []
    ],
    ChannelControl [
        CH2_CONT_EN OFFSET(18) NUMBITS(1) [],
        CH2_EN OFFSET(16) NUMBITS(1) [],
        CH1_CAPT_EN OFFSET(9) NUMBITS(1) [],
        CH1_EN OFFSET(8) NUMBITS(1) [],
        CH0_EN OFFSET(0) NUMBITS(1) []
    ]
];

const RTC_BASE: StaticRef<RtcRegisters> =
    unsafe { StaticRef::new(0x40092000 as *const RtcRegisters) };

pub struct Rtc {
    registers: StaticRef<RtcRegisters>,
    callback: OptionalCell<&'static time::Client>,
}

pub static mut RTC: Rtc = Rtc::new();

impl Rtc {
    const fn new() -> Rtc {
        Rtc {
            registers: RTC_BASE,
            callback: OptionalCell::empty(),
        }
    }

    pub fn start(&self) {
        let regs = &*self.registers;
        regs.ctl.write(Control::ENABLE::SET);
        regs.sync.get();
    }

    pub fn stop(&self) {
        let regs = &*self.registers;
        regs.ctl.write(Control::ENABLE::CLEAR);
        regs.sync.get();
    }

    // This method is used by the RAT to sync the radio and MCU clocks
    // when changing power modes
    pub fn sync(&self) {
        let regs = &*self.registers;
        regs.sync.get();
    }

    fn read_counter(&self) -> u32 {
        let regs = &*self.registers;

        /* SEC can change during the SUBSEC read, so we need to be certain
           that the SUBSEC we read belongs to the correct SEC counterpart. */
        let mut current_sec: u32 = 0;
        let mut current_subsec: u32 = 0;
        let mut after_subsec_read: u32 = 1;
        while current_sec != after_subsec_read {
            current_sec = regs.sec.get();
            current_subsec = regs.subsec.get();
            after_subsec_read = regs.sec.get();
        }

        return (current_sec << 16) | (current_subsec >> 16);
    }

    pub fn is_running(&self) -> bool {
        let regs = &*self.registers;
        regs.channel_ctl.read(ChannelControl::CH1_EN) != 0
    }

    pub fn handle_interrupt(&self) {
        let regs = &*self.registers;
        // Event flag is cleared when you set it
        regs.evflags.write(EvFlags::CH1::SET);
        regs.ctl.modify(Control::COMB_EV_MASK::NoEvent);
        regs.channel_ctl.modify(ChannelControl::CH1_EN::CLEAR);
        regs.sync.get();

        self.callback.map(|cb| cb.fired());
    }

    pub fn set_client(&self, client: &'static time::Client) {
        self.callback.set(client);
    }

    pub fn set_upd_en(&self, value: bool) {
        let regs = &*self.registers;
        if value {
            regs.ctl.set(regs.ctl.get() | 0x02);
        } else {
            regs.ctl.set(regs.ctl.get() & !0x02);
        }
    }
}

pub struct RtcFreq(());

impl Frequency for RtcFreq {
    // The RTC frequency is tuned: there are exactly 0xFFFF (64 kHz)
    // subsec increments to reach a second, which yields the correct
    // `tics` to set the comparator.
    fn frequency() -> u32 {
        0xFFFF
    }
}

impl Time for Rtc {
    type Frequency = RtcFreq;

    fn disable(&self) {
        let regs = &*self.registers;
        regs.ctl.modify(Control::COMB_EV_MASK::NoEvent);
        regs.channel_ctl.modify(ChannelControl::CH1_EN::CLEAR);
        regs.sync.get();
    }

    fn is_armed(&self) -> bool {
        self.is_running()
    }
}

impl Alarm for Rtc {
    fn now(&self) -> u32 {
        self.read_counter()
    }

    fn set_alarm(&self, tics: u32) {
        let regs = &*self.registers;
        regs.ctl.modify(Control::COMB_EV_MASK::Channel1);
        regs.channel1_cmp.set(tics);
        regs.channel_ctl.modify(ChannelControl::CH1_EN::SET);
        regs.sync.get();
    }

    fn get_alarm(&self) -> u32 {
        let regs = &*self.registers;
        regs.channel1_cmp.get()
    }
}
25.923858
96
0.58273
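The packing in `read_counter` (whole seconds in the high 16 bits, the top half of the 32-bit SUBSEC fraction in the low 16 bits) is what makes `frequency() == 0xFFFF` line up with the counter value. A host-side sketch of that fixed-point arithmetic, illustrative only and not kernel code:

// Seconds occupy the high 16 bits; the low 16 bits are the top half
// of the 32-bit SUBSEC fraction, i.e. roughly 1/0xFFFF-second steps.
fn pack(sec: u32, subsec: u32) -> u32 {
    (sec << 16) | (subsec >> 16)
}

// Convert tics to milliseconds at the declared 0xFFFF Hz rate,
// widening to u64 so the multiplication cannot overflow.
fn tics_to_ms(tics: u32) -> u64 {
    (tics as u64 * 1000) / 0xFFFF
}

fn main() {
    // 1.5 s: SEC = 1, SUBSEC = 0x8000_0000 (half the fraction range).
    let tics = pack(1, 0x8000_0000);
    assert_eq!(tics, 0x1_8000);
    assert_eq!(tics_to_ms(tics), 1500);
}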
76625d3f918cd355b84305a1862c13c71b013f44
7,207
import dvec::{dvec, extensions};

export filename;
export filemap;
export span;
export file_substr;
export fss_none;
export fss_internal;
export fss_external;
export codemap;
export expn_info;
export expn_info_;
export expanded_from;
export new_filemap;
export new_filemap_w_substr;
export mk_substr_filename;
export lookup_char_pos;
export lookup_char_pos_adj;
export adjust_span;
export span_to_str;
export span_to_filename;
export span_to_lines;
export file_lines;
export get_line;
export next_line;
export span_to_snippet;
export loc;
export get_filemap;
export new_codemap;

type filename = ~str;

type file_pos = {ch: uint, byte: uint};

/* A codemap is a thing that maps uints to file/line/column positions
 * in a crate. This is to make it possible to represent the positions
 * with single-word things, rather than passing records all over the
 * compiler.
 */

enum file_substr {
    fss_none,
    fss_internal(span),
    fss_external({filename: ~str, line: uint, col: uint})
}

type filemap =
    @{name: filename, substr: file_substr, src: @~str,
      start_pos: file_pos, mut lines: ~[file_pos]};

type codemap = @{files: dvec<filemap>};

type loc = {file: filemap, line: uint, col: uint};

fn new_codemap() -> codemap { @{files: dvec()} }

fn new_filemap_w_substr(+filename: filename, +substr: file_substr,
                        src: @~str,
                        start_pos_ch: uint, start_pos_byte: uint)
    -> filemap {
    ret @{name: filename, substr: substr, src: src,
          start_pos: {ch: start_pos_ch, byte: start_pos_byte},
          mut lines: ~[{ch: start_pos_ch, byte: start_pos_byte}]};
}

fn new_filemap(+filename: filename, src: @~str,
               start_pos_ch: uint, start_pos_byte: uint) -> filemap {
    ret new_filemap_w_substr(filename, fss_none, src,
                             start_pos_ch, start_pos_byte);
}

fn mk_substr_filename(cm: codemap, sp: span) -> ~str {
    let pos = lookup_char_pos(cm, sp.lo);
    ret #fmt("<%s:%u:%u>", pos.file.name, pos.line, pos.col);
}

fn next_line(file: filemap, chpos: uint, byte_pos: uint) {
    vec::push(file.lines, {ch: chpos, byte: byte_pos + file.start_pos.byte});
}

type lookup_fn = pure fn(file_pos) -> uint;

fn lookup_line(map: codemap, pos: uint, lookup: lookup_fn)
    -> {fm: filemap, line: uint} {
    let len = map.files.len();
    let mut a = 0u;
    let mut b = len;
    while b - a > 1u {
        let m = (a + b) / 2u;
        if lookup(map.files[m].start_pos) > pos { b = m; } else { a = m; }
    }
    if (a >= len) {
        fail #fmt("position %u does not resolve to a source location", pos)
    }
    let f = map.files[a];
    a = 0u;
    b = vec::len(f.lines);
    while b - a > 1u {
        let m = (a + b) / 2u;
        if lookup(f.lines[m]) > pos { b = m; } else { a = m; }
    }
    ret {fm: f, line: a};
}

fn lookup_pos(map: codemap, pos: uint, lookup: lookup_fn) -> loc {
    let {fm: f, line: a} = lookup_line(map, pos, lookup);
    ret {file: f, line: a + 1u, col: pos - lookup(f.lines[a])};
}

fn lookup_char_pos(map: codemap, pos: uint) -> loc {
    pure fn lookup(pos: file_pos) -> uint { ret pos.ch; }
    ret lookup_pos(map, pos, lookup);
}

fn lookup_byte_pos(map: codemap, pos: uint) -> loc {
    pure fn lookup(pos: file_pos) -> uint { ret pos.byte; }
    ret lookup_pos(map, pos, lookup);
}

fn lookup_char_pos_adj(map: codemap, pos: uint)
    -> {filename: ~str, line: uint, col: uint, file: option<filemap>} {
    let loc = lookup_char_pos(map, pos);
    alt (loc.file.substr) {
      fss_none {
        {filename: /* FIXME (#2543) */ copy loc.file.name,
         line: loc.line,
         col: loc.col,
         file: some(loc.file)}
      }
      fss_internal(sp) {
        lookup_char_pos_adj(map, sp.lo + (pos - loc.file.start_pos.ch))
      }
      fss_external(eloc) {
        {filename: /* FIXME (#2543) */ copy eloc.filename,
         line: eloc.line + loc.line - 1u,
         col: if loc.line == 1u {eloc.col + loc.col} else {loc.col},
         file: none}
      }
    }
}

fn adjust_span(map: codemap, sp: span) -> span {
    pure fn lookup(pos: file_pos) -> uint { ret pos.ch; }
    let line = lookup_line(map, sp.lo, lookup);
    alt (line.fm.substr) {
      fss_none {sp}
      fss_internal(s) {
        adjust_span(map, {lo: s.lo + (sp.lo - line.fm.start_pos.ch),
                          hi: s.lo + (sp.hi - line.fm.start_pos.ch),
                          expn_info: sp.expn_info})
      }
      fss_external(_) {sp}
    }
}

enum expn_info_ {
    expanded_from({call_site: span,
                   callie: {name: ~str, span: option<span>}})
}

type expn_info = option<@expn_info_>;

type span = {lo: uint, hi: uint, expn_info: expn_info};

fn span_to_str_no_adj(sp: span, cm: codemap) -> ~str {
    let lo = lookup_char_pos(cm, sp.lo);
    let hi = lookup_char_pos(cm, sp.hi);
    ret #fmt("%s:%u:%u: %u:%u", lo.file.name,
             lo.line, lo.col, hi.line, hi.col)
}

fn span_to_str(sp: span, cm: codemap) -> ~str {
    let lo = lookup_char_pos_adj(cm, sp.lo);
    let hi = lookup_char_pos_adj(cm, sp.hi);
    ret #fmt("%s:%u:%u: %u:%u", lo.filename,
             lo.line, lo.col, hi.line, hi.col)
}

type file_lines = {file: filemap, lines: ~[uint]};

fn span_to_filename(sp: span, cm: codemap::codemap) -> filename {
    let lo = lookup_char_pos(cm, sp.lo);
    ret /* FIXME (#2543) */ copy lo.file.name;
}

fn span_to_lines(sp: span, cm: codemap::codemap) -> @file_lines {
    let lo = lookup_char_pos(cm, sp.lo);
    let hi = lookup_char_pos(cm, sp.hi);
    let mut lines = ~[];
    for uint::range(lo.line - 1u, hi.line as uint) |i| {
        vec::push(lines, i);
    };
    ret @{file: lo.file, lines: lines};
}

fn get_line(fm: filemap, line: int) -> ~str unsafe {
    let begin: uint = fm.lines[line].byte - fm.start_pos.byte;
    let end = alt str::find_char_from(*fm.src, '\n', begin) {
      some(e) { e }
      none { str::len(*fm.src) }
    };
    str::slice(*fm.src, begin, end)
}

fn lookup_byte_offset(cm: codemap::codemap, chpos: uint)
    -> {fm: filemap, pos: uint} {
    pure fn lookup(pos: file_pos) -> uint { ret pos.ch; }
    let {fm, line} = lookup_line(cm, chpos, lookup);
    let line_offset = fm.lines[line].byte - fm.start_pos.byte;
    let col = chpos - fm.lines[line].ch;
    let col_offset = str::count_bytes(*fm.src, line_offset, col);
    {fm: fm, pos: line_offset + col_offset}
}

fn span_to_snippet(sp: span, cm: codemap::codemap) -> ~str {
    let begin = lookup_byte_offset(cm, sp.lo);
    let end = lookup_byte_offset(cm, sp.hi);
    assert begin.fm == end.fm;
    ret str::slice(*begin.fm.src, begin.pos, end.pos);
}

fn get_snippet(cm: codemap::codemap, fidx: uint, lo: uint, hi: uint) -> ~str {
    let fm = cm.files[fidx];
    ret str::slice(*fm.src, lo, hi)
}

fn get_filemap(cm: codemap, filename: ~str) -> filemap {
    for cm.files.each |fm| {
        if fm.name == filename { ret fm; }
    }
    // XXX (jdm): the following triggers a mismatched type bug
    // (or expected function, found _|_)
    fail; // ("asking for " + filename + " which we don't know about");
}

//
// Local Variables:
// mode: rust
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// End:
//
29.416327
77
0.611766
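`lookup_line` in the file above is a hand-rolled binary search over per-file line-start offsets, written in pre-1.0 Rust syntax (`ret`, `alt`, `#fmt`). A sketch of the same idea in modern Rust, using `slice::partition_point`; the names here are illustrative, not the compiler's:

// Binary-search a sorted list of line-start byte offsets to map a
// position to a (1-based line, 0-based column) pair.
fn lookup(line_starts: &[usize], pos: usize) -> (usize, usize) {
    // partition_point returns how many starts are <= pos, so the
    // containing line is the one just before that boundary.
    let line = line_starts.partition_point(|&s| s <= pos) - 1;
    (line + 1, pos - line_starts[line])
}

fn main() {
    let src = "fn main() {\n    println!(\"hi\");\n}\n";
    // Offset 0 plus one entry for each byte following a '\n'.
    let mut starts = vec![0];
    starts.extend(src.match_indices('\n').map(|(i, _)| i + 1));
    assert_eq!(lookup(&starts, 0), (1, 0)); // start of line 1
    assert_eq!(lookup(&starts, 16), (2, 4)); // 'p' on line 2, col 4
}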
0e7cbd26d119b6987d0551756c687db6a508a600
1,424
use std::collections::HashMap;

use crate::{string::ObjString, value::Value};

#[derive(Debug)]
pub struct Table {
    pub entries: HashMap<u64, (*const ObjString, Value)>,
}

impl Table {
    pub fn new() -> Self {
        Table {
            entries: HashMap::new(),
        }
    }

    pub fn set(&mut self, key: *const ObjString, value: Value) -> bool {
        // println!("table.set({}, {})", hash(key), value);
        self.entries.insert(hash(key), (key, value)).is_none()
    }

    pub fn get(&self, key: *const ObjString) -> Option<Value> {
        let value = self.entries.get(&hash(key)).map(|(_, v)| *v);
        // println!("table.get({}) = {:?}", hash(key), value);
        value
    }

    pub fn add_all(&mut self, other: &Table) {
        self.entries.extend(other.entries.iter());
    }

    pub fn delete(&mut self, key: *const ObjString) -> bool {
        self.entries.remove(&hash(key)).is_some()
    }

    pub fn remove_white_strings(&mut self) {
        let mut to_remove = Vec::new();
        for (h, (_, v)) in &self.entries {
            if v.is_obj() && !v.as_obj().is_marked {
                to_remove.push(*h);
            }
        }
        for hash in to_remove {
            self.entries.remove(&hash);
        }
    }
}

fn hash(key: *const ObjString) -> u64 {
    let s = unsafe { key.as_ref() }.expect("non-null string key");
    s.hash()
}
24.982456
72
0.518961
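`Table` keys its map by a precomputed 64-bit hash and keeps the raw key pointer alongside the value, which is sound only because the VM interns every `ObjString`. A standalone sketch of the same pattern, with an FNV-1a hash standing in for `ObjString::hash()`; the key and value types here are made up for illustration:

use std::collections::HashMap;

// FNV-1a over the string bytes, a stand-in for the interned-string hash.
fn fnv1a(s: &str) -> u64 {
    let mut h: u64 = 0xcbf29ce484222325;
    for b in s.bytes() {
        h ^= b as u64;
        h = h.wrapping_mul(0x100000001b3);
    }
    h
}

fn main() {
    // Key by the hash, store the original key beside the value.
    let mut table: HashMap<u64, (&str, f64)> = HashMap::new();
    // `insert` returns the previous entry, so "was this key new?"
    // is `is_none()`, mirroring `Table::set`.
    let fresh = table.insert(fnv1a("pi"), ("pi", 3.14)).is_none();
    assert!(fresh);
    let value = table.get(&fnv1a("pi")).map(|&(_, v)| v);
    assert_eq!(value, Some(3.14));
}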
6abd29f359bdac75aca889555f75095beb707686
802
#![no_std]
#![no_main]
#![feature(custom_test_frameworks)]
// The test harness generates a `main` function that calls `crate::test_runner`.
#![test_runner(blog_os::test_runner)]
// Rename the generated test `main` so we can call it from `_start()`.
#![reexport_test_harness_main = "test_main"]

use core::panic::PanicInfo;

use blog_os::println;

#[no_mangle]
pub extern "C" fn _start() -> ! {
    println!("Hello World{}", "!");

    #[cfg(test)]
    test_main();

    loop {}
}

/// This function is called on panic.
// our existing panic handler
#[cfg(not(test))] // new attribute
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
    println!("{}", info);
    loop {}
}

// our panic handler in test mode
#[cfg(test)]
#[panic_handler]
fn panic(info: &PanicInfo) -> ! {
    blog_os::test_panic_handler(info)
}

#[test_case] // a single test
fn test_assertion() {
    assert_eq!(1, 1);
}
19.095238
79
0.649626
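The `custom_test_frameworks` wiring above is nightly-only: the harness collects every `#[test_case]` item into a slice and hands it to `blog_os::test_runner`. A hosted, stable-Rust stand-in that shows the shape of that contract; the runner body is illustrative, and the real one lives in the `blog_os` library crate:

fn test_assertion() {
    assert_eq!(1, 1);
}

// What a minimal runner typically looks like: iterate the collected
// test functions and call each in turn.
fn test_runner(tests: &[&dyn Fn()]) {
    println!("Running {} tests", tests.len());
    for test in tests {
        test();
        println!("[ok]");
    }
}

fn main() {
    // On nightly, `#[test_case]` plus `#[test_runner]` generate this call.
    let tests: &[&dyn Fn()] = &[&test_assertion];
    test_runner(tests);
}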