column             type           min    max
hexsha             string length  40     40
size               int64          4      1.05M
content            string length  4      1.05M
avg_line_length    float64        1.33   100
max_line_length    int64          1      1k
alphanum_fraction  float64        0.25   1

hexsha: e5084b852ea54bda9f5fe37b79f14b534d8440c5
size: 4,607
content:
// Copyright (c) The Diem Core Contributors
// Copyright (c) The Move Contributors
// SPDX-License-Identifier: Apache-2.0

use anyhow::{bail, Result};
use move_command_line_common::files::MOVE_ERROR_DESC_EXTENSION;
use move_core_types::{
    account_address::AccountAddress,
    errmap::{ErrorDescription, ErrorMapping},
    identifier::Identifier,
    language_storage::ModuleId,
};
use move_model::{
    ast::Value,
    model::{GlobalEnv, ModuleEnv, NamedConstantEnv},
    symbol::Symbol,
};
use serde::{Deserialize, Serialize};
use std::{convert::TryFrom, rc::Rc};

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ErrmapOptions {
    /// The constant prefix that determines if a constant is an error or not
    pub error_prefix: String,
    /// The module ID of the error category module
    pub error_category_module: ModuleId,
    /// In which file to store the output
    pub output_file: String,
}

impl Default for ErrmapOptions {
    fn default() -> Self {
        Self {
            error_prefix: "E".to_string(),
            error_category_module: ModuleId::new(
                AccountAddress::from_hex_literal("0x1").unwrap(),
                Identifier::new("Errors").unwrap(),
            ),
            output_file: MOVE_ERROR_DESC_EXTENSION.to_string(),
        }
    }
}

pub struct ErrmapGen<'env> {
    /// Options for error map generation
    options: &'env ErrmapOptions,
    /// Input definitions
    env: &'env GlobalEnv,
    /// Output error mapping
    output: ErrorMapping,
}

impl<'env> ErrmapGen<'env> {
    pub fn new(env: &'env GlobalEnv, options: &'env ErrmapOptions) -> Self {
        Self {
            options,
            env,
            output: ErrorMapping::default(),
        }
    }

    pub fn save_result(&self) {
        self.output.to_file(&self.options.output_file);
    }

    pub fn gen(&mut self) {
        for module in self.env.get_modules() {
            if !module.is_script_module() {
                self.build_error_map(&module).unwrap()
            }
        }
    }

    fn build_error_map(&mut self, module: &ModuleEnv<'_>) -> Result<()> {
        let module_id = self.get_module_id_for_name(module);
        if module_id == self.options.error_category_module {
            self.build_error_categories(module)?
        } else {
            self.build_error_map_for_module(&module_id, module)?
        }
        Ok(())
    }

    fn build_error_categories(&mut self, module: &ModuleEnv<'_>) -> Result<()> {
        for named_constant in module.get_named_constants() {
            let name = self.name_string(named_constant.get_name());
            let error_category = self.get_abort_code(&named_constant)?;
            self.output.add_error_category(
                error_category,
                ErrorDescription {
                    code_name: name.to_string(),
                    code_description: named_constant.get_doc().to_string(),
                },
            )?
        }
        Ok(())
    }

    fn build_error_map_for_module(
        &mut self,
        module_id: &ModuleId,
        module: &ModuleEnv<'_>,
    ) -> Result<()> {
        for named_constant in module.get_named_constants() {
            let name = self.name_string(named_constant.get_name());
            if name.starts_with(&self.options.error_prefix) {
                let abort_code = self.get_abort_code(&named_constant)?;
                self.output.add_module_error(
                    module_id.clone(),
                    abort_code,
                    ErrorDescription {
                        code_name: name.to_string(),
                        code_description: named_constant.get_doc().to_string(),
                    },
                )?
            }
        }
        Ok(())
    }

    fn get_abort_code(&self, constant: &NamedConstantEnv<'_>) -> Result<u64> {
        match constant.get_value() {
            Value::Number(big_int) => u64::try_from(big_int).map_err(|err| err.into()),
            x => bail!(
                "Invalid abort code constant {} found for code {}",
                x,
                self.name_string(constant.get_name())
            ),
        }
    }

    fn get_module_id_for_name(&self, module: &ModuleEnv<'_>) -> ModuleId {
        let name = module.get_name();
        let addr = AccountAddress::from_hex_literal(&format!("0x{:x}", name.addr())).unwrap();
        let name = Identifier::new(self.name_string(name.name()).to_string()).unwrap();
        ModuleId::new(addr, name)
    }

    fn name_string(&self, symbol: Symbol) -> Rc<String> {
        self.env.symbol_pool().string(symbol)
    }
}
avg_line_length: 32.216783
max_line_length: 94
alphanum_fraction: 0.580204
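The record above is driven entirely by `ErrmapOptions`. As a reading aid, here is a minimal sketch of configuring and running the generator with non-default settings; the `0x2` address, `MyErrors` module name, and output path are hypothetical, and the sketch assumes the same `move_core_types`/`move_model` APIs the file itself imports:

fn example_options() -> ErrmapOptions {
    use move_core_types::{
        account_address::AccountAddress, identifier::Identifier, language_storage::ModuleId,
    };
    ErrmapOptions {
        // Hypothetical values: constants prefixed "E" are treated as errors,
        // and error categories live in MyErrors at address 0x2.
        error_prefix: "E".to_string(),
        error_category_module: ModuleId::new(
            AccountAddress::from_hex_literal("0x2").unwrap(),
            Identifier::new("MyErrors").unwrap(),
        ),
        output_file: "errors.errmap".to_string(),
    }
}

// With a move_model::model::GlobalEnv in hand (construction elided):
// let options = example_options();
// let mut gen = ErrmapGen::new(&env, &options);
// gen.gen();
// gen.save_result();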

hexsha: 615a2a36cf2d42f38159eadb2e47442c5cde48c3
size: 2,410
#[doc = "Reader of register INTENCLR"] pub type R = crate::R<u8, super::INTENCLR>; #[doc = "Writer for register INTENCLR"] pub type W = crate::W<u8, super::INTENCLR>; #[doc = "Register INTENCLR `reset()`'s with value 0"] impl crate::ResetValue for super::INTENCLR { type Type = u8; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Reader of field `SINGLEE`"] pub type SINGLEE_R = crate::R<bool, bool>; #[doc = "Write proxy for field `SINGLEE`"] pub struct SINGLEE_W<'a> { w: &'a mut W, } impl<'a> SINGLEE_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | ((value as u8) & 0x01); self.w } } #[doc = "Reader of field `DUALE`"] pub type DUALE_R = crate::R<bool, bool>; #[doc = "Write proxy for field `DUALE`"] pub struct DUALE_W<'a> { w: &'a mut W, } impl<'a> DUALE_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u8) & 0x01) << 1); self.w } } impl R { #[doc = "Bit 0 - Single Bit ECC Error Interrupt Enable Clear"] #[inline(always)] pub fn singlee(&self) -> SINGLEE_R { SINGLEE_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - Dual Bit ECC Error Interrupt Enable Clear"] #[inline(always)] pub fn duale(&self) -> DUALE_R { DUALE_R::new(((self.bits >> 1) & 0x01) != 0) } } impl W { #[doc = "Bit 0 - Single Bit ECC Error Interrupt Enable Clear"] #[inline(always)] pub fn singlee(&mut self) -> SINGLEE_W { SINGLEE_W { w: self } } #[doc = "Bit 1 - Dual Bit ECC Error Interrupt Enable Clear"] #[inline(always)] pub fn duale(&mut self) -> DUALE_W { DUALE_W { w: self } } }
avg_line_length: 28.352941
max_line_length: 83
alphanum_fraction: 0.554357
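The write proxies in this svd2rust-generated record reduce to plain read-modify-write bit masking on the register's `bits` value. A standalone sketch of the same arithmetic, outside any device crate (the helper names are made up for illustration):

// Mirrors DUALE_W::bit: clear bit 1, then OR in the new value shifted into place.
fn write_duale(bits: u8, value: bool) -> u8 {
    (bits & !(0x01 << 1)) | (((value as u8) & 0x01) << 1)
}

// Mirrors R::duale: shift bit 1 down and test it.
fn read_duale(bits: u8) -> bool {
    ((bits >> 1) & 0x01) != 0
}

fn main() {
    let bits = write_duale(0x00, true);
    assert_eq!(bits, 0x02);
    assert!(read_duale(bits));
    assert!(!read_duale(write_duale(bits, false)));
}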

hexsha: ac692086f26367b4472dc98ce3b7ad4993935c45
size: 55,908
content:
// Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use {
    crate::{
        capability::{CapabilityProvider, CapabilitySource},
        model::{
            addable_directory::AddableDirectoryWithResult,
            component::{StartReason, WeakComponentInstance},
            dir_tree::{DirTree, DirTreeCapability},
            error::ModelError,
            hooks::{Event, EventPayload, EventType, Hook, HooksRegistration, RuntimeInfo},
            lifecycle_controller::LifecycleController,
            lifecycle_controller_factory::LifecycleControllerFactory,
            routing_fns::{route_expose_fn, route_use_fn},
        },
    },
    ::routing::capability_source::InternalCapability,
    async_trait::async_trait,
    cm_moniker::InstancedAbsoluteMoniker,
    cm_rust::{CapabilityPath, ComponentDecl},
    cm_task_scope::TaskScope,
    cm_util::channel,
    config_encoder::ConfigFields,
    fidl::endpoints::{ProtocolMarker, ServerEnd},
    fidl_fuchsia_io::{DirectoryProxy, NodeMarker, CLONE_FLAG_SAME_RIGHTS, MODE_TYPE_DIRECTORY},
    fidl_fuchsia_sys2::LifecycleControllerMarker,
    fuchsia_trace as trace, fuchsia_zircon as zx,
    futures::lock::Mutex,
    moniker::{AbsoluteMonikerBase, ChildMonikerBase},
    std::{
        collections::HashMap,
        convert::TryFrom,
        path::PathBuf,
        sync::{Arc, Weak},
    },
    vfs::{
        directory::entry::DirectoryEntry, directory::immutable::simple as pfs,
        execution_scope::ExecutionScope, file::vmo::asynchronous::read_only_static,
        path::Path as pfsPath, remote::remote_dir,
    },
};

// Declare simple directory type for brevity
type Directory = Arc<pfs::Simple>;

struct HubCapabilityProvider {
    instanced_moniker: InstancedAbsoluteMoniker,
    hub: Arc<Hub>,
}

impl HubCapabilityProvider {
    pub fn new(instanced_moniker: InstancedAbsoluteMoniker, hub: Arc<Hub>) -> Self {
        HubCapabilityProvider { instanced_moniker, hub }
    }
}

#[async_trait]
impl CapabilityProvider for HubCapabilityProvider {
    async fn open(
        self: Box<Self>,
        _task_scope: TaskScope,
        flags: u32,
        open_mode: u32,
        relative_path: PathBuf,
        server_end: &mut zx::Channel,
    ) -> Result<(), ModelError> {
        log::info!("Opening the /hub capability provider: {:?}", relative_path);
        let mut relative_path = relative_path
            .to_str()
            .ok_or_else(|| ModelError::path_is_not_utf8(relative_path.clone()))?
            .to_string();
        relative_path.push('/');
        let dir_path = pfsPath::validate_and_split(relative_path.clone()).map_err(|_| {
            ModelError::open_directory_error(
                self.instanced_moniker.to_absolute_moniker(),
                relative_path,
            )
        })?;
        self.hub.open(&self.instanced_moniker, flags, open_mode, dir_path, server_end).await?;
        Ok(())
    }
}

/// Hub state on an instance of a component.
struct Instance {
    /// Whether the `directory` has a subdirectory named `exec`
    pub has_execution_directory: bool,
    /// Whether the `directory` has a subdirectory named `resolved`
    pub has_resolved_directory: bool,
    pub directory: Directory,
    pub children_directory: Directory,
}

/// The Hub is a directory tree representing the component topology. Through the Hub,
/// debugging and instrumentation tools can query information about component instances
/// on the system, such as their component URLs, execution state and so on.
pub struct Hub {
    instances: Mutex<HashMap<InstancedAbsoluteMoniker, Instance>>,
    scope: ExecutionScope,
    lifecycle_controller_factory: LifecycleControllerFactory,
}

impl Hub {
    /// Create a new Hub given a `component_url` for the root component and a
    /// `lifecycle_controller_factory` which can create scoped LifecycleController services.
    pub fn new(
        component_url: String,
        lifecycle_controller_factory: LifecycleControllerFactory,
    ) -> Result<Self, ModelError> {
        let mut instance_map = HashMap::new();
        let instanced_moniker = InstancedAbsoluteMoniker::root();
        let lifecycle_controller =
            lifecycle_controller_factory.create(&instanced_moniker.to_absolute_moniker());
        Hub::add_instance_if_necessary(
            lifecycle_controller,
            &instanced_moniker,
            component_url,
            &mut instance_map,
        )?
        .expect("Did not create directory.");
        Ok(Hub {
            instances: Mutex::new(instance_map),
            scope: ExecutionScope::new(),
            lifecycle_controller_factory,
        })
    }

    pub async fn open_root(
        &self,
        flags: u32,
        mut server_end: zx::Channel,
    ) -> Result<(), ModelError> {
        let root_moniker = InstancedAbsoluteMoniker::root();
        self.open(&root_moniker, flags, MODE_TYPE_DIRECTORY, pfsPath::dot(), &mut server_end)
            .await?;
        Ok(())
    }

    pub fn hooks(self: &Arc<Self>) -> Vec<HooksRegistration> {
        vec![HooksRegistration::new(
            "Hub",
            vec![
                EventType::CapabilityRouted,
                EventType::Discovered,
                EventType::Purged,
                EventType::Destroyed,
                EventType::Started,
                EventType::Resolved,
                EventType::Stopped,
            ],
            Arc::downgrade(self) as Weak<dyn Hook>,
        )]
    }

    pub async fn open(
        &self,
        instanced_moniker: &InstancedAbsoluteMoniker,
        flags: u32,
        open_mode: u32,
        relative_path: pfsPath,
        server_end: &mut zx::Channel,
    ) -> Result<(), ModelError> {
        let instance_map = self.instances.lock().await;
        let instance = instance_map.get(&instanced_moniker).ok_or(
            ModelError::open_directory_error(
                instanced_moniker.to_absolute_moniker(),
                relative_path.clone().into_string(),
            ),
        )?;
        let server_end = channel::take_channel(server_end);
        instance.directory.clone().open(
            self.scope.clone(),
            flags,
            open_mode,
            relative_path,
            ServerEnd::<NodeMarker>::new(server_end),
        );
        Ok(())
    }

    fn add_instance_if_necessary(
        lifecycle_controller: LifecycleController,
        instanced_moniker: &InstancedAbsoluteMoniker,
        component_url: String,
        instance_map: &mut HashMap<InstancedAbsoluteMoniker, Instance>,
    ) -> Result<Option<Directory>, ModelError> {
        trace::duration!("component_manager", "hub:add_instance_if_necessary");
        if instance_map.contains_key(&instanced_moniker) {
            return Ok(None);
        }

        let instance = pfs::simple();

        // Add a 'url' file.
        instance.add_node(
            "url",
            read_only_static(component_url.clone().into_bytes()),
            &instanced_moniker,
        )?;

        // Add an 'id' file.
        // For consistency's sake, the Hub assumes that the root instance also
        // has ID 0, like any other static instance.
        let id = if let Some(child_moniker) = instanced_moniker.leaf() {
            child_moniker.instance()
        } else {
            0
        };
        let component_type = if id > 0 { "dynamic" } else { "static" };
        instance.add_node(
            "id",
            read_only_static(id.to_string().into_bytes()),
            &instanced_moniker,
        )?;

        // Add a 'component_type' file.
        instance.add_node(
            "component_type",
            read_only_static(component_type.to_string().into_bytes()),
            &instanced_moniker,
        )?;

        // Add a children directory.
        let children = pfs::simple();
        instance.add_node("children", children.clone(), &instanced_moniker)?;

        Self::add_debug_directory(lifecycle_controller, instance.clone(), instanced_moniker)?;

        instance_map.insert(
            instanced_moniker.clone(),
            Instance {
                has_execution_directory: false,
                has_resolved_directory: false,
                directory: instance.clone(),
                children_directory: children.clone(),
            },
        );

        Ok(Some(instance))
    }

    async fn add_instance_to_parent_if_necessary<'a>(
        lifecycle_controller: LifecycleController,
        instanced_moniker: &'a InstancedAbsoluteMoniker,
        component_url: String,
        mut instance_map: &'a mut HashMap<InstancedAbsoluteMoniker, Instance>,
    ) -> Result<(), ModelError> {
        let controlled = match Hub::add_instance_if_necessary(
            lifecycle_controller,
            &instanced_moniker,
            component_url,
            &mut instance_map,
        )? {
            Some(c) => c,
            None => return Ok(()),
        };

        if let (Some(leaf), Some(parent_moniker)) =
            (instanced_moniker.leaf(), instanced_moniker.parent())
        {
            trace::duration!("component_manager", "hub:add_instance_to_parent");
            match instance_map.get_mut(&parent_moniker) {
                Some(instance) => {
                    let child_moniker = leaf.to_child_moniker();
                    instance.children_directory.add_node(
                        child_moniker.as_str(),
                        controlled.clone(),
                        &instanced_moniker,
                    )?;
                }
                None => {
                    // TODO(fxbug.dev/89503): Investigate event ordering between
                    // parent and child, so that we can guarantee the parent is
                    // in the instance_map.
                    log::warn!(
                        "Parent {} not found: could not add {} to children directory.",
                        parent_moniker,
                        instanced_moniker
                    );
                }
            };
        }
        Ok(())
    }

    fn add_resolved_url_file(
        directory: Directory,
        resolved_url: String,
        instanced_moniker: &InstancedAbsoluteMoniker,
    ) -> Result<(), ModelError> {
        directory.add_node(
            "resolved_url",
            read_only_static(resolved_url.into_bytes()),
            &instanced_moniker,
        )?;
        Ok(())
    }

    fn add_config(
        directory: Directory,
        config: &ConfigFields,
        instanced_moniker: &InstancedAbsoluteMoniker,
    ) -> Result<(), ModelError> {
        let config_dir = pfs::simple();
        for field in &config.fields {
            let value = format!("{}", field.value);
            config_dir.add_node(
                &field.key,
                read_only_static(value.into_bytes()),
                &instanced_moniker,
            )?;
        }
        directory.add_node("config", config_dir, &instanced_moniker)?;
        Ok(())
    }

    fn add_use_directory(
        directory: Directory,
        component_decl: ComponentDecl,
        target_moniker: &InstancedAbsoluteMoniker,
        target: WeakComponentInstance,
    ) -> Result<(), ModelError> {
        let tree = DirTree::build_from_uses(route_use_fn, target, component_decl);
        let mut use_dir = pfs::simple();
        tree.install(target_moniker, &mut use_dir)?;
        directory.add_node("use", use_dir, target_moniker)?;
        Ok(())
    }

    fn add_in_directory(
        execution_directory: Directory,
        component_decl: ComponentDecl,
        package_dir: Option<DirectoryProxy>,
        target_moniker: &InstancedAbsoluteMoniker,
        target: WeakComponentInstance,
    ) -> Result<(), ModelError> {
        let tree = DirTree::build_from_uses(route_use_fn, target, component_decl);
        let mut in_dir = pfs::simple();
        tree.install(target_moniker, &mut in_dir)?;
        if let Some(pkg_dir) = package_dir {
            in_dir.add_node("pkg", remote_dir(pkg_dir), target_moniker)?;
        }
        execution_directory.add_node("in", in_dir, target_moniker)?;
        Ok(())
    }

    fn add_debug_directory(
        lifecycle_controller: LifecycleController,
        parent_directory: Directory,
        target_moniker: &InstancedAbsoluteMoniker,
    ) -> Result<(), ModelError> {
        trace::duration!("component_manager", "hub:add_debug_directory");

        let mut debug_dir = pfs::simple();
        let lifecycle_controller_path =
            CapabilityPath::try_from(format!("/{}", LifecycleControllerMarker::NAME).as_str())
                .unwrap();
        let capabilities =
            vec![DirTreeCapability::new(
                lifecycle_controller_path,
                Box::new(
                    move |scope: ExecutionScope,
                          _flags: u32,
                          _mode: u32,
                          _relative_path: pfsPath,
                          server_end: ServerEnd<NodeMarker>| {
                        log::info!("Connecting fuchsia.sys2.LifecycleController");
                        let lifecycle_controller = lifecycle_controller.clone();
                        let server_end =
                            ServerEnd::<LifecycleControllerMarker>::new(server_end.into_channel());
                        let lifecycle_controller_stream = server_end.into_stream().unwrap();
                        scope.spawn(async move {
                            lifecycle_controller.serve(lifecycle_controller_stream).await;
                        });
                    },
                ),
            )];
        let tree = DirTree::build_from_capabilities(capabilities);
        tree.install(target_moniker, &mut debug_dir)?;
        parent_directory.add_node("debug", debug_dir, target_moniker)?;
        Ok(())
    }

    fn add_expose_directory(
        directory: Directory,
        component_decl: ComponentDecl,
        target_moniker: &InstancedAbsoluteMoniker,
        target: WeakComponentInstance,
    ) -> Result<(), ModelError> {
        trace::duration!("component_manager", "hub:add_expose_directory");
        let tree = DirTree::build_from_exposes(route_expose_fn, target, component_decl);
        let mut expose_dir = pfs::simple();
        tree.install(target_moniker, &mut expose_dir)?;
        directory.add_node("expose", expose_dir, target_moniker)?;
        Ok(())
    }

    fn add_out_directory(
        execution_directory: Directory,
        outgoing_dir: Option<DirectoryProxy>,
        target_moniker: &InstancedAbsoluteMoniker,
    ) -> Result<(), ModelError> {
        trace::duration!("component_manager", "hub:add_out_directory");
        if let Some(out_dir) = outgoing_dir {
            execution_directory.add_node("out", remote_dir(out_dir), target_moniker)?;
        }
        Ok(())
    }

    fn add_runtime_directory(
        execution_directory: Directory,
        runtime_dir: Option<DirectoryProxy>,
        instanced_moniker: &InstancedAbsoluteMoniker,
    ) -> Result<(), ModelError> {
        trace::duration!("component_manager", "hub:add_runtime_directory");
        if let Some(runtime_dir) = runtime_dir {
            execution_directory.add_node("runtime", remote_dir(runtime_dir), instanced_moniker)?;
        }
        Ok(())
    }

    fn add_start_reason_file(
        execution_directory: Directory,
        start_reason: &StartReason,
        instanced_moniker: &InstancedAbsoluteMoniker,
    ) -> Result<(), ModelError> {
        let start_reason = format!("{}", start_reason);
        execution_directory.add_node(
            "start_reason",
            read_only_static(start_reason.into_bytes()),
            instanced_moniker,
        )?;
        Ok(())
    }

    fn add_instance_id_file(
        directory: Directory,
        target_moniker: &InstancedAbsoluteMoniker,
        target: WeakComponentInstance,
    ) -> Result<(), ModelError> {
        trace::duration!("component_manager", "hub:add_instance_id_file");
        if let Some(instance_id) = target.upgrade()?.instance_id() {
            directory.add_node(
                "instance_id",
                read_only_static(instance_id.to_string().into_bytes()),
                &target_moniker,
            )?;
        };
        Ok(())
    }

    async fn on_resolved_async<'a>(
        &self,
        target_moniker: &InstancedAbsoluteMoniker,
        target: &WeakComponentInstance,
        resolved_url: String,
        component_decl: &'a ComponentDecl,
        config: &Option<ConfigFields>,
    ) -> Result<(), ModelError> {
        let mut instance_map = self.instances.lock().await;
        let instance = instance_map
            .get_mut(target_moniker)
            .ok_or(ModelError::instance_not_found(target_moniker.to_absolute_moniker()))?;

        // The resolved directory must not already exist.
        assert!(!instance.has_resolved_directory);
        let resolved_directory = pfs::simple();

        Self::add_resolved_url_file(
            resolved_directory.clone(),
            resolved_url.clone(),
            target_moniker,
        )?;

        Self::add_use_directory(
            resolved_directory.clone(),
            component_decl.clone(),
            target_moniker,
            target.clone(),
        )?;

        Self::add_expose_directory(
            resolved_directory.clone(),
            component_decl.clone(),
            target_moniker,
            target.clone(),
        )?;

        Self::add_instance_id_file(resolved_directory.clone(), target_moniker, target.clone())?;

        if let Some(config) = config {
            Self::add_config(resolved_directory.clone(), config, target_moniker)?;
        }

        instance.directory.add_node("resolved", resolved_directory, &target_moniker)?;
        instance.has_resolved_directory = true;

        Ok(())
    }

    async fn on_started_async<'a>(
        &'a self,
        target_moniker: &InstancedAbsoluteMoniker,
        target: &WeakComponentInstance,
        runtime: &RuntimeInfo,
        component_decl: &'a ComponentDecl,
        start_reason: &StartReason,
    ) -> Result<(), ModelError> {
        trace::duration!("component_manager", "hub:on_start_instance_async");
        let mut instance_map = self.instances.lock().await;
        let instance = instance_map
            .get_mut(target_moniker)
            .ok_or(ModelError::instance_not_found(target_moniker.to_absolute_moniker()))?;

        // Don't create an execution directory if it already exists
        if instance.has_execution_directory {
            return Ok(());
        }

        trace::duration!("component_manager", "hub:create_execution");

        let execution_directory = pfs::simple();

        Self::add_resolved_url_file(
            execution_directory.clone(),
            runtime.resolved_url.clone(),
            target_moniker,
        )?;

        Self::add_in_directory(
            execution_directory.clone(),
            component_decl.clone(),
            Self::clone_dir(runtime.package_dir.as_ref()),
            target_moniker,
            target.clone(),
        )?;

        Self::add_expose_directory(
            execution_directory.clone(),
            component_decl.clone(),
            target_moniker,
            target.clone(),
        )?;

        Self::add_out_directory(
            execution_directory.clone(),
            Self::clone_dir(runtime.outgoing_dir.as_ref()),
            target_moniker,
        )?;

        Self::add_runtime_directory(
            execution_directory.clone(),
            Self::clone_dir(runtime.runtime_dir.as_ref()),
            &target_moniker,
        )?;

        Self::add_start_reason_file(execution_directory.clone(), start_reason, &target_moniker)?;

        instance.directory.add_node("exec", execution_directory, &target_moniker)?;
        instance.has_execution_directory = true;

        Ok(())
    }

    async fn on_discovered_async(
        &self,
        target_moniker: &InstancedAbsoluteMoniker,
        component_url: String,
    ) -> Result<(), ModelError> {
        trace::duration!("component_manager", "hub:on_discovered_async");

        let lifecycle_controller =
            self.lifecycle_controller_factory.create(&target_moniker.to_absolute_moniker());

        let mut instance_map = self.instances.lock().await;

        Self::add_instance_to_parent_if_necessary(
            lifecycle_controller,
            target_moniker,
            component_url,
            &mut instance_map,
        )
        .await?;
        Ok(())
    }

    async fn on_purged_async(
        &self,
        target_moniker: &InstancedAbsoluteMoniker,
    ) -> Result<(), ModelError> {
        trace::duration!("component_manager", "hub:on_purged_async");
        let mut instance_map = self.instances.lock().await;
        instance_map
            .remove(&target_moniker)
            .ok_or(ModelError::instance_not_found(target_moniker.to_absolute_moniker()))?;
        Ok(())
    }

    async fn on_stopped_async(
        &self,
        target_moniker: &InstancedAbsoluteMoniker,
    ) -> Result<(), ModelError> {
        trace::duration!("component_manager", "hub:on_stopped_async");
        let mut instance_map = self.instances.lock().await;
        let mut instance = instance_map
            .get_mut(target_moniker)
            .ok_or(ModelError::instance_not_found(target_moniker.to_absolute_moniker()))?;
        instance.directory.remove_node("exec")?.ok_or_else(|| {
log::warn!("exec directory for instance {} was already removed", target_moniker); ModelError::remove_entry_error("exec") })?; instance.has_execution_directory = false; Ok(()) } async fn on_destroyed_async( &self, target_moniker: &InstancedAbsoluteMoniker, ) -> Result<(), ModelError> { trace::duration!("component_manager", "hub:on_destroyed_async"); let parent_moniker = target_moniker.parent().expect("A root component cannot be destroyed"); let mut instance_map = self.instances.lock().await; let parent_instance = match instance_map.get_mut(&parent_moniker) { Some(i) => i, // Evidently this a duplicate dispatch of Destroyed. None => return Ok(()), }; let instanced_child = target_moniker.leaf().expect("A root component cannot be destroyed"); // In the children directory, the child's instance id is not used let child_entry = instanced_child.to_child_moniker().to_string(); // TODO: It's possible for the Destroyed event to be dispatched twice if there // are two concurrent `DestroyChild` operations. In such cases we should probably cause // this update to no-op instead of returning an error. parent_instance.children_directory.remove_node(&child_entry)?.ok_or_else(|| { log::warn!( "child directory {} in parent instance {} was already removed", child_entry, parent_moniker ); ModelError::remove_entry_error(child_entry) })?; Ok(()) } /// Given a `CapabilitySource`, determine if it is a framework-provided /// hub capability. If so, update the given `capability_provider` to /// provide a hub directory. async fn on_capability_routed_async( self: Arc<Self>, source: CapabilitySource, capability_provider: Arc<Mutex<Option<Box<dyn CapabilityProvider>>>>, ) -> Result<(), ModelError> { trace::duration!("component_manager", "hub:on_capability_routed_async"); // If this is a scoped framework directory capability, then check the source path if let CapabilitySource::Framework { capability: InternalCapability::Directory(source_name), component, } = source { if source_name.str() != "hub" { return Ok(()); } // Set the capability provider, if not already set. let mut capability_provider = capability_provider.lock().await; if capability_provider.is_none() { *capability_provider = Some(Box::new(HubCapabilityProvider::new( component.instanced_moniker.clone(), self.clone(), ))) } } Ok(()) } // TODO(fsamuel): We should probably preserve the original error messages // instead of dropping them. fn clone_dir(dir: Option<&DirectoryProxy>) -> Option<DirectoryProxy> { dir.and_then(|d| io_util::clone_directory(d, CLONE_FLAG_SAME_RIGHTS).ok()) } } #[async_trait] impl Hook for Hub { async fn on(self: Arc<Self>, event: &Event) -> Result<(), ModelError> { let target_moniker = event .target_moniker .unwrap_instance_moniker_or(ModelError::UnexpectedComponentManagerMoniker)?; match &event.result { Ok(EventPayload::CapabilityRouted { source, capability_provider }) => { self.on_capability_routed_async(source.clone(), capability_provider.clone()) .await?; } Ok(EventPayload::Purged) => { self.on_purged_async(target_moniker).await?; } Ok(EventPayload::Discovered) => { self.on_discovered_async(target_moniker, event.component_url.to_string()).await?; } Ok(EventPayload::Destroyed) => { self.on_destroyed_async(target_moniker).await?; } Ok(EventPayload::Started { component, runtime, component_decl, start_reason }) => { self.on_started_async( target_moniker, component, &runtime, &component_decl, start_reason, ) .await?; } Ok(EventPayload::Resolved { component, resolved_url, decl, config, .. 
            }) => {
                self.on_resolved_async(
                    target_moniker,
                    component,
                    resolved_url.clone(),
                    &decl,
                    &config,
                )
                .await?;
            }
            Ok(EventPayload::Stopped { .. }) => {
                self.on_stopped_async(target_moniker).await?;
            }
            _ => {}
        };
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use {
        super::*,
        crate::{
            builtin_environment::BuiltinEnvironment,
            model::{
                component::StartReason,
                model::Model,
                starter::Starter,
                testing::{
                    test_helpers::{
                        component_decl_with_test_runner, dir_contains, list_directory,
                        list_directory_recursive, read_file, TestEnvironmentBuilder,
                        TestModelResult,
                    },
                    test_hook::HubInjectionTestHook,
                },
            },
        },
        cm_rust::{
            self, CapabilityName, CapabilityPath, ComponentDecl, ConfigChecksum, ConfigDecl,
            ConfigField, ConfigNestedValueType, ConfigValueSource, ConfigValueType,
            DependencyType, DirectoryDecl, EventMode, EventSubscription, ExposeDecl,
            ExposeDirectoryDecl, ExposeProtocolDecl, ExposeSource, ExposeTarget, ListValue,
            ProtocolDecl, SingleValue, UseDecl, UseDirectoryDecl, UseEventDecl,
            UseEventStreamDeprecatedDecl, UseProtocolDecl, UseSource, Value, ValueSpec,
            ValuesData,
        },
        cm_rust_testing::ComponentDeclBuilder,
        fidl::endpoints::ServerEnd,
        fidl_fuchsia_io::{
            DirectoryMarker, DirectoryProxy, MODE_TYPE_DIRECTORY, OPEN_RIGHT_READABLE,
            OPEN_RIGHT_WRITABLE,
        },
        moniker::AbsoluteMoniker,
        routing_test_helpers::component_id_index::make_index_file,
        std::{convert::TryFrom, path::Path},
        vfs::{
            directory::entry::DirectoryEntry, execution_scope::ExecutionScope,
            file::vmo::asynchronous::read_only_static, path::Path as pfsPath, pseudo_directory,
        },
    };

    /// Hosts an out directory with a 'foo' file.
    fn foo_out_dir_fn() -> Box<dyn Fn(ServerEnd<DirectoryMarker>) + Send + Sync> {
        Box::new(move |server_end: ServerEnd<DirectoryMarker>| {
            let out_dir = pseudo_directory!(
                "foo" => read_only_static(b"bar"),
                "test" => pseudo_directory!(
                    "aaa" => read_only_static(b"bbb"),
                ),
            );
            out_dir.clone().open(
                ExecutionScope::new(),
                OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE,
                MODE_TYPE_DIRECTORY,
                pfsPath::dot(),
                ServerEnd::new(server_end.into_channel()),
            );
        })
    }

    /// Hosts a runtime directory with a 'bleep' file.
    fn bleep_runtime_dir_fn() -> Box<dyn Fn(ServerEnd<DirectoryMarker>) + Send + Sync> {
        Box::new(move |server_end: ServerEnd<DirectoryMarker>| {
            let pseudo_dir = pseudo_directory!(
                "bleep" => read_only_static(b"blah"),
            );
            pseudo_dir.clone().open(
                ExecutionScope::new(),
                OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE,
                MODE_TYPE_DIRECTORY,
                pfsPath::dot(),
                ServerEnd::new(server_end.into_channel()),
            );
        })
    }

    type DirectoryCallback = Box<dyn Fn(ServerEnd<DirectoryMarker>) + Send + Sync>;

    struct ComponentDescriptor {
        pub name: &'static str,
        pub decl: ComponentDecl,
        pub config: Option<(&'static str, ValuesData)>,
        pub host_fn: Option<DirectoryCallback>,
        pub runtime_host_fn: Option<DirectoryCallback>,
    }

    async fn start_component_manager_with_hub(
        root_component_url: String,
        components: Vec<ComponentDescriptor>,
    ) -> (Arc<Model>, Arc<Mutex<BuiltinEnvironment>>, DirectoryProxy) {
        start_component_manager_with_options(root_component_url, components, vec![], None).await
    }

    async fn start_component_manager_with_options(
        root_component_url: String,
        components: Vec<ComponentDescriptor>,
        additional_hooks: Vec<HooksRegistration>,
        index_file_path: Option<String>,
    ) -> (Arc<Model>, Arc<Mutex<BuiltinEnvironment>>, DirectoryProxy) {
        let resolved_root_component_url = format!("{}_resolved", root_component_url);
        let decls = components.iter().map(|c| (c.name, c.decl.clone())).collect();
        let configs = components.iter().filter_map(|c| c.config.clone()).collect();

        let TestModelResult { model, builtin_environment, mock_runner, .. } =
            TestEnvironmentBuilder::new()
                .set_components(decls)
                .set_config_values(configs)
                .set_component_id_index_path(index_file_path)
                .build()
                .await;

        for component in components.into_iter() {
            if let Some(host_fn) = component.host_fn {
                mock_runner.add_host_fn(&resolved_root_component_url, host_fn);
            }
            if let Some(runtime_host_fn) = component.runtime_host_fn {
                mock_runner.add_runtime_host_fn(&resolved_root_component_url, runtime_host_fn);
            }
        }

        let hub_proxy = builtin_environment
            .lock()
            .await
            .bind_service_fs_for_hub()
            .await
            .expect("unable to bind service_fs");
        model.root().hooks.install(additional_hooks).await;

        let root_moniker = AbsoluteMoniker::root();
        model.start_instance(&root_moniker, &StartReason::Root).await.unwrap();

        (model, builtin_environment, hub_proxy)
    }

    #[fuchsia::test]
    async fn hub_basic() {
        let root_component_url = "test:///root".to_string();
        let (_model, _builtin_environment, hub_proxy) = start_component_manager_with_hub(
            root_component_url.clone(),
            vec![
                ComponentDescriptor {
                    name: "root",
                    decl: ComponentDeclBuilder::new().add_lazy_child("a").build(),
                    config: None,
                    host_fn: None,
                    runtime_host_fn: None,
                },
                ComponentDescriptor {
                    name: "a",
                    decl: component_decl_with_test_runner(),
                    config: None,
                    host_fn: None,
                    runtime_host_fn: None,
                },
            ],
        )
        .await;

        assert_eq!(root_component_url, read_file(&hub_proxy, "url").await);
        assert_eq!(
            format!("{}_resolved", root_component_url),
            read_file(&hub_proxy, "exec/resolved_url").await
        );
        assert_eq!(
            format!("{}", StartReason::Root),
            read_file(&hub_proxy, "exec/start_reason").await
        );

        // Verify IDs
        assert_eq!("0", read_file(&hub_proxy, "id").await);
        assert_eq!("0", read_file(&hub_proxy, "children/a/id").await);

        // Verify Component Type
        assert_eq!("static", read_file(&hub_proxy, "component_type").await);
        assert_eq!("static", read_file(&hub_proxy, "children/a/component_type").await);

        assert_eq!("test:///a", read_file(&hub_proxy, "children/a/url").await);
    }

    #[fuchsia::test]
    async fn hub_out_directory() {
        let root_component_url = "test:///root".to_string();
        let
        (_model, _builtin_environment, hub_proxy) = start_component_manager_with_hub(
            root_component_url.clone(),
            vec![ComponentDescriptor {
                name: "root",
                decl: ComponentDeclBuilder::new().add_lazy_child("a").build(),
                config: None,
                host_fn: Some(foo_out_dir_fn()),
                runtime_host_fn: None,
            }],
        )
        .await;

        assert!(dir_contains(&hub_proxy, "exec", "out").await);
        assert!(dir_contains(&hub_proxy, "exec/out", "foo").await);
        assert!(dir_contains(&hub_proxy, "exec/out/test", "aaa").await);
        assert_eq!("bar", read_file(&hub_proxy, "exec/out/foo").await);
        assert_eq!("bbb", read_file(&hub_proxy, "exec/out/test/aaa").await);
    }

    #[fuchsia::test]
    async fn hub_runtime_directory() {
        let root_component_url = "test:///root".to_string();
        let (_model, _builtin_environment, hub_proxy) = start_component_manager_with_hub(
            root_component_url.clone(),
            vec![ComponentDescriptor {
                name: "root",
                decl: ComponentDeclBuilder::new().add_lazy_child("a").build(),
                config: None,
                host_fn: None,
                runtime_host_fn: Some(bleep_runtime_dir_fn()),
            }],
        )
        .await;

        assert_eq!("blah", read_file(&hub_proxy, "exec/runtime/bleep").await);
    }

    #[fuchsia::test]
    async fn hub_test_hook_interception() {
        let root_component_url = "test:///root".to_string();
        let hub_injection_test_hook = Arc::new(HubInjectionTestHook::new());
        let (_model, _builtin_environment, hub_proxy) = start_component_manager_with_options(
            root_component_url.clone(),
            vec![ComponentDescriptor {
                name: "root",
                decl: ComponentDeclBuilder::new()
                    .add_lazy_child("a")
                    .use_(UseDecl::Directory(UseDirectoryDecl {
                        dependency_type: DependencyType::Strong,
                        source: UseSource::Framework,
                        source_name: "hub".into(),
                        target_path: CapabilityPath::try_from("/hub").unwrap(),
                        rights: *routing::rights::READ_RIGHTS,
                        subdir: None,
                    }))
                    .build(),
                config: None,
                host_fn: None,
                runtime_host_fn: None,
            }],
            hub_injection_test_hook.hooks(),
            None,
        )
        .await;

        let in_dir = io_util::open_directory(
            &hub_proxy,
            &Path::new("exec/in"),
            OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE,
        )
        .expect("Failed to open directory");
        assert_eq!(vec!["hub"], list_directory(&in_dir).await);

        let scoped_hub_dir_proxy = io_util::open_directory(
            &hub_proxy,
            &Path::new("exec/in/hub"),
            OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE,
        )
        .expect("Failed to open directory");
        // There are no out or runtime directories because there is no program running.
assert_eq!(vec!["old_hub"], list_directory(&scoped_hub_dir_proxy).await); let old_hub_dir_proxy = io_util::open_directory( &hub_proxy, &Path::new("exec/in/hub/old_hub/exec"), OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE, ) .expect("Failed to open directory"); assert_eq!( vec!["expose", "in", "out", "resolved_url", "runtime", "start_reason"], list_directory(&old_hub_dir_proxy).await ); } #[fuchsia::test] async fn hub_config_dir_in_resolved() { let root_component_url = "test:///root".to_string(); let checksum = ConfigChecksum::Sha256([ 0x07, 0xA8, 0xE6, 0x85, 0xC8, 0x79, 0xA9, 0x79, 0xC3, 0x26, 0x17, 0xDC, 0x4E, 0x74, 0x65, 0x7F, 0xF1, 0xF7, 0x73, 0xE7, 0x12, 0xEE, 0x51, 0xFD, 0xF6, 0x57, 0x43, 0x07, 0xA7, 0xAF, 0x2E, 0x64, ]); let (_model, _builtin_environment, hub_proxy) = start_component_manager_with_hub( root_component_url.clone(), vec![ComponentDescriptor { name: "root", decl: ComponentDeclBuilder::new() .add_config(ConfigDecl { fields: vec![ ConfigField { key: "logging".to_string(), type_: ConfigValueType::Bool, }, ConfigField { key: "verbosity".to_string(), type_: ConfigValueType::String { max_size: 10 }, }, ConfigField { key: "tags".to_string(), type_: ConfigValueType::Vector { max_count: 10, nested_type: ConfigNestedValueType::String { max_size: 20 }, }, }, ], checksum: checksum.clone(), value_source: ConfigValueSource::PackagePath("meta/root.cvf".into()), }) .build(), config: Some(( "meta/root.cvf", ValuesData { values: vec![ ValueSpec { value: Value::Single(SingleValue::Flag(true)) }, ValueSpec { value: Value::Single(SingleValue::Text("DEBUG".to_string())), }, ValueSpec { value: Value::List(ListValue::TextList(vec![ "foo".into(), "bar".into(), ])), }, ], checksum, }, )), host_fn: None, runtime_host_fn: None, }], ) .await; let config_dir = io_util::open_directory( &hub_proxy, &Path::new("resolved/config"), OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE, ) .expect("Failed to open directory"); assert_eq!(vec!["logging", "tags", "verbosity"], list_directory(&config_dir).await); assert_eq!("true", read_file(&config_dir, "logging").await); assert_eq!("\"DEBUG\"", read_file(&config_dir, "verbosity").await); assert_eq!("[\"foo\", \"bar\"]", read_file(&config_dir, "tags").await); } #[fuchsia::test] async fn hub_resolved_directory() { let root_component_url = "test:///root".to_string(); let (_model, _builtin_environment, hub_proxy) = start_component_manager_with_hub( root_component_url.clone(), vec![ComponentDescriptor { name: "root", decl: ComponentDeclBuilder::new() .add_lazy_child("a") .use_(UseDecl::Directory(UseDirectoryDecl { dependency_type: DependencyType::Strong, source: UseSource::Framework, source_name: "hub".into(), target_path: CapabilityPath::try_from("/hub").unwrap(), rights: *routing::rights::READ_RIGHTS, subdir: Some("resolved".into()), })) .build(), config: None, host_fn: None, runtime_host_fn: None, }], ) .await; let resolved_dir = io_util::open_directory( &hub_proxy, &Path::new("resolved"), OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE, ) .expect("Failed to open directory"); assert_eq!(vec!["expose", "resolved_url", "use"], list_directory(&resolved_dir).await); assert_eq!( format!("{}_resolved", root_component_url), read_file(&hub_proxy, "resolved/resolved_url").await ); } #[fuchsia::test] async fn hub_use_directory_in_resolved() { let root_component_url = "test:///root".to_string(); let (_model, _builtin_environment, hub_proxy) = start_component_manager_with_hub( root_component_url.clone(), vec![ComponentDescriptor { name: "root", decl: ComponentDeclBuilder::new() 
.add_lazy_child("a") .use_(UseDecl::Directory(UseDirectoryDecl { dependency_type: DependencyType::Strong, source: UseSource::Framework, source_name: "hub".into(), target_path: CapabilityPath::try_from("/hub").unwrap(), rights: *routing::rights::READ_RIGHTS, subdir: None, })) .build(), config: None, host_fn: None, runtime_host_fn: None, }], ) .await; let use_dir = io_util::open_directory( &hub_proxy, &Path::new("resolved/use"), OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE, ) .expect("Failed to open directory"); assert_eq!(vec!["hub"], list_directory(&use_dir).await); let hub_dir = io_util::open_directory( &use_dir, &Path::new("hub"), OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE, ) .expect("Failed to open directory"); assert_eq!( vec!["children", "component_type", "debug", "exec", "id", "resolved", "url"], list_directory(&hub_dir).await ); } #[fuchsia::test] async fn hub_instance_id_in_resolved() { // Create index. let iid = format!("1234{}", "5".repeat(60)); let index_file = make_index_file(component_id_index::Index { instances: vec![component_id_index::InstanceIdEntry { instance_id: Some(iid.clone()), appmgr_moniker: None, moniker: Some(AbsoluteMoniker::parse_str("/a").unwrap()), }], ..component_id_index::Index::default() }) .unwrap(); let root_component_url = "test:///root".to_string(); let (model, _builtin_environment, hub_proxy) = start_component_manager_with_options( root_component_url.clone(), vec![ ComponentDescriptor { name: "root", decl: ComponentDeclBuilder::new() .add_lazy_child("a") .use_(UseDecl::Directory(UseDirectoryDecl { dependency_type: DependencyType::Strong, source: UseSource::Framework, source_name: "hub".into(), target_path: CapabilityPath::try_from("/hub").unwrap(), rights: *routing::rights::READ_RIGHTS, subdir: Some("resolved".into()), })) .build(), config: None, host_fn: None, runtime_host_fn: None, }, ComponentDescriptor { name: "a", decl: component_decl_with_test_runner(), config: None, host_fn: None, runtime_host_fn: None, }, ], vec![], index_file.path().to_str().map(str::to_string), ) .await; // Starting will resolve the component and cause the instance id to be written. model .start_instance(&AbsoluteMoniker::parse_str("/a").unwrap(), &StartReason::Debug) .await .unwrap(); let resolved_dir = io_util::open_directory( &hub_proxy, &Path::new("children/a/resolved"), OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE, ) .expect("Failed to open directory"); // Confirm that the instance_id is read and written to file in resolved directory. assert_eq!(iid, read_file(&resolved_dir, "instance_id").await); } #[fuchsia::test] // TODO(b/65870): change function name to hub_expose_directory after the expose directory // is removed from exec and the original hub_expose_directory test is deleted. 
    async fn hub_expose_directory_in_resolved() {
        let root_component_url = "test:///root".to_string();
        let (_model, _builtin_environment, hub_proxy) = start_component_manager_with_hub(
            root_component_url.clone(),
            vec![ComponentDescriptor {
                name: "root",
                decl: ComponentDeclBuilder::new()
                    .add_lazy_child("a")
                    .protocol(ProtocolDecl {
                        name: "foo".into(),
                        source_path: Some("/svc/foo".parse().unwrap()),
                    })
                    .directory(DirectoryDecl {
                        name: "baz".into(),
                        source_path: Some("/data".parse().unwrap()),
                        rights: *routing::rights::READ_RIGHTS,
                    })
                    .expose(ExposeDecl::Protocol(ExposeProtocolDecl {
                        source: ExposeSource::Self_,
                        source_name: "foo".into(),
                        target_name: "bar".into(),
                        target: ExposeTarget::Parent,
                    }))
                    .expose(ExposeDecl::Directory(ExposeDirectoryDecl {
                        source: ExposeSource::Self_,
                        source_name: "baz".into(),
                        target_name: "hippo".into(),
                        target: ExposeTarget::Parent,
                        rights: None,
                        subdir: None,
                    }))
                    .build(),
                config: None,
                host_fn: None,
                runtime_host_fn: None,
            }],
        )
        .await;

        let expose_dir = io_util::open_directory(
            &hub_proxy,
            &Path::new("resolved/expose"),
            OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE,
        )
        .expect("Failed to open directory");
        assert_eq!(vec!["bar", "hippo"], list_directory_recursive(&expose_dir).await);
    }

    #[fuchsia::test]
    async fn hub_in_directory() {
        let root_component_url = "test:///root".to_string();
        let (_model, _builtin_environment, hub_proxy) = start_component_manager_with_hub(
            root_component_url.clone(),
            vec![ComponentDescriptor {
                name: "root",
                decl: ComponentDeclBuilder::new()
                    .add_lazy_child("a")
                    .use_(UseDecl::Directory(UseDirectoryDecl {
                        source: UseSource::Framework,
                        source_name: "hub".into(),
                        target_path: CapabilityPath::try_from("/hub").unwrap(),
                        rights: *routing::rights::READ_RIGHTS,
                        subdir: Some("exec".into()),
                        dependency_type: DependencyType::Strong,
                    }))
                    .use_(UseDecl::Protocol(UseProtocolDecl {
                        source: UseSource::Parent,
                        source_name: "baz-svc".into(),
                        target_path: CapabilityPath::try_from("/svc/hippo").unwrap(),
                        dependency_type: DependencyType::Strong,
                    }))
                    .use_(UseDecl::Directory(UseDirectoryDecl {
                        source: UseSource::Parent,
                        source_name: "foo-dir".into(),
                        target_path: CapabilityPath::try_from("/data/bar").unwrap(),
                        rights: *routing::rights::READ_RIGHTS | *routing::rights::WRITE_RIGHTS,
                        subdir: None,
                        dependency_type: DependencyType::Strong,
                    }))
                    .build(),
                config: None,
                host_fn: None,
                runtime_host_fn: None,
            }],
        )
        .await;

        let in_dir = io_util::open_directory(
            &hub_proxy,
            &Path::new("exec/in"),
            OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE,
        )
        .expect("Failed to open directory");
        assert_eq!(vec!["data", "hub", "svc"], list_directory(&in_dir).await);

        let scoped_hub_dir_proxy = io_util::open_directory(
            &hub_proxy,
            &Path::new("exec/in/hub"),
            OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE,
        )
        .expect("Failed to open directory");
        assert_eq!(
            vec!["expose", "in", "out", "resolved_url", "runtime", "start_reason"],
            list_directory(&scoped_hub_dir_proxy).await
        );
    }

    #[fuchsia::test]
    async fn hub_no_event_stream_in_incoming_directory() {
        let root_component_url = "test:///root".to_string();
        let (_model, _builtin_environment, hub_proxy) = start_component_manager_with_hub(
            root_component_url.clone(),
            vec![ComponentDescriptor {
                name: "root",
                decl: ComponentDeclBuilder::new()
                    .add_lazy_child("a")
                    .use_(UseDecl::Event(UseEventDecl {
                        dependency_type: DependencyType::Strong,
                        source: UseSource::Framework,
                        source_name: "started".into(),
                        target_name: "started".into(),
                        filter: None,
                        mode: cm_rust::EventMode::Async,
                    }))
                    .use_(UseDecl::EventStreamDeprecated(UseEventStreamDeprecatedDecl {
                        name:
                            CapabilityName::try_from("EventStream").unwrap(),
                        subscriptions: vec![EventSubscription {
                            event_name: "started".to_string(),
                            mode: EventMode::Async,
                        }],
                    }))
                    .build(),
                config: None,
                host_fn: None,
                runtime_host_fn: None,
            }],
        )
        .await;

        let in_dir = io_util::open_directory(
            &hub_proxy,
            &Path::new("exec/in"),
            OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE,
        )
        .expect("Failed to open directory");
        assert_eq!(0, list_directory(&in_dir).await.len());
    }

    #[fuchsia::test]
    async fn hub_expose_directory() {
        let root_component_url = "test:///root".to_string();
        let (_model, _builtin_environment, hub_proxy) = start_component_manager_with_hub(
            root_component_url.clone(),
            vec![ComponentDescriptor {
                name: "root",
                decl: ComponentDeclBuilder::new()
                    .add_lazy_child("a")
                    .protocol(ProtocolDecl {
                        name: "foo".into(),
                        source_path: Some("/svc/foo".parse().unwrap()),
                    })
                    .directory(DirectoryDecl {
                        name: "baz".into(),
                        source_path: Some("/data".parse().unwrap()),
                        rights: *routing::rights::READ_RIGHTS,
                    })
                    .expose(ExposeDecl::Protocol(ExposeProtocolDecl {
                        source: ExposeSource::Self_,
                        source_name: "foo".into(),
                        target_name: "bar".into(),
                        target: ExposeTarget::Parent,
                    }))
                    .expose(ExposeDecl::Directory(ExposeDirectoryDecl {
                        source: ExposeSource::Self_,
                        source_name: "baz".into(),
                        target_name: "hippo".into(),
                        target: ExposeTarget::Parent,
                        rights: None,
                        subdir: None,
                    }))
                    .build(),
                config: None,
                host_fn: None,
                runtime_host_fn: None,
            }],
        )
        .await;

        let expose_dir = io_util::open_directory(
            &hub_proxy,
            &Path::new("exec/expose"),
            OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE,
        )
        .expect("Failed to open directory");
        assert_eq!(vec!["bar", "hippo"], list_directory_recursive(&expose_dir).await);
    }

    #[fuchsia::test]
    async fn hub_debug_directory() {
        let root_component_url = "test:///root".to_string();
        let (_model, _builtin_environment, hub_proxy) = start_component_manager_with_hub(
            root_component_url.clone(),
            vec![ComponentDescriptor {
                name: "root",
                decl: ComponentDeclBuilder::new().add_lazy_child("a").build(),
                config: None,
                host_fn: None,
                runtime_host_fn: Some(bleep_runtime_dir_fn()),
            }],
        )
        .await;

        let debug_svc_dir = io_util::open_directory(
            &hub_proxy,
            &Path::new("debug"),
            OPEN_RIGHT_READABLE | OPEN_RIGHT_WRITABLE,
        )
        .expect("Failed to open directory");
        assert_eq!(
            vec!["fuchsia.sys2.LifecycleController"],
            list_directory_recursive(&debug_svc_dir).await
        );
    }
}
avg_line_length: 37.903729
max_line_length: 100
alphanum_fraction: 0.549707
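Pulling together the directory-building calls in the file above (and the listings its own tests assert on), each component instance in the hub gets a tree of roughly this shape; entries under exec/ and resolved/ appear only once the instance is started or resolved, and instance_id/config only when present:

<instance>/
    url
    id
    component_type
    children/<child>/...                      (same layout, recursively)
    debug/fuchsia.sys2.LifecycleController
    exec/                                     (present once started)
        resolved_url   in/   expose/   out/   runtime/   start_reason
    resolved/                                 (present once resolved)
        resolved_url   use/   expose/   instance_id   config/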

hexsha: 761630da376d92783b5dac33fe947d3d002a8027
size: 19,666
content:
// Copyright 2014-2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![allow(clippy::float_cmp)]

use crate::rustc::hir::def::Def;
use crate::rustc::hir::*;
use crate::rustc::lint::LateContext;
use crate::rustc::ty::subst::{Subst, Substs};
use crate::rustc::ty::{self, Instance, Ty, TyCtxt};
use crate::rustc::{bug, span_bug};
use crate::syntax::ast::{FloatTy, LitKind};
use crate::syntax::ptr::P;
use crate::utils::{clip, sext, unsext};
use std::cmp::Ordering::{self, Equal};
use std::cmp::PartialOrd;
use std::convert::TryInto;
use std::hash::{Hash, Hasher};
use std::mem;
use std::rc::Rc;

/// A `LitKind`-like enum to fold constant `Expr`s into.
#[derive(Debug, Clone)]
pub enum Constant {
    /// a String "abc"
    Str(String),
    /// a Binary String b"abc"
    Binary(Rc<Vec<u8>>),
    /// a single char 'a'
    Char(char),
    /// an integer's bit representation
    Int(u128),
    /// an f32
    F32(f32),
    /// an f64
    F64(f64),
    /// true or false
    Bool(bool),
    /// an array of constants
    Vec(Vec<Constant>),
    /// also an array, but with only one constant, repeated N times
    Repeat(Box<Constant>, u64),
    /// a tuple of constants
    Tuple(Vec<Constant>),
}

impl PartialEq for Constant {
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            (&Constant::Str(ref ls), &Constant::Str(ref rs)) => ls == rs,
            (&Constant::Binary(ref l), &Constant::Binary(ref r)) => l == r,
            (&Constant::Char(l), &Constant::Char(r)) => l == r,
            (&Constant::Int(l), &Constant::Int(r)) => l == r,
            (&Constant::F64(l), &Constant::F64(r)) => {
                // we want `Fw32 == FwAny` and `FwAny == Fw64`, by transitivity we must have
                // `Fw32 == Fw64` so don’t compare them
                // mem::transmute is required to catch non-matching 0.0, -0.0, and NaNs
                unsafe { mem::transmute::<f64, u64>(l) == mem::transmute::<f64, u64>(r) }
            },
            (&Constant::F32(l), &Constant::F32(r)) => {
                // we want `Fw32 == FwAny` and `FwAny == Fw64`, by transitivity we must have
                // `Fw32 == Fw64` so don’t compare them
                // mem::transmute is required to catch non-matching 0.0, -0.0, and NaNs
                unsafe {
                    mem::transmute::<f64, u64>(f64::from(l))
                        == mem::transmute::<f64, u64>(f64::from(r))
                }
            },
            (&Constant::Bool(l), &Constant::Bool(r)) => l == r,
            (&Constant::Vec(ref l), &Constant::Vec(ref r))
            | (&Constant::Tuple(ref l), &Constant::Tuple(ref r)) => l == r,
            (&Constant::Repeat(ref lv, ref ls), &Constant::Repeat(ref rv, ref rs)) => {
                ls == rs && lv == rv
            },
            _ => false, // TODO: Are there inter-type equalities?
        }
    }
}

impl Hash for Constant {
    fn hash<H>(&self, state: &mut H)
    where
        H: Hasher,
    {
        match *self {
            Constant::Str(ref s) => {
                s.hash(state);
            },
            Constant::Binary(ref b) => {
                b.hash(state);
            },
            Constant::Char(c) => {
                c.hash(state);
            },
            Constant::Int(i) => {
                i.hash(state);
            },
            Constant::F32(f) => {
                unsafe { mem::transmute::<f64, u64>(f64::from(f)) }.hash(state);
            },
            Constant::F64(f) => {
                unsafe { mem::transmute::<f64, u64>(f) }.hash(state);
            },
            Constant::Bool(b) => {
                b.hash(state);
            },
            Constant::Vec(ref v) | Constant::Tuple(ref v) => {
                v.hash(state);
            },
            Constant::Repeat(ref c, l) => {
                c.hash(state);
                l.hash(state);
            },
        }
    }
}

impl Constant {
    pub fn partial_cmp(
        tcx: TyCtxt<'_, '_, '_>,
        cmp_type: ty::Ty<'_>,
        left: &Self,
        right: &Self,
    ) -> Option<Ordering> {
        match (left, right) {
            (&Constant::Str(ref ls), &Constant::Str(ref rs)) => Some(ls.cmp(rs)),
            (&Constant::Char(ref l), &Constant::Char(ref r)) => Some(l.cmp(r)),
            (&Constant::Int(l), &Constant::Int(r)) => {
                if let ty::Int(int_ty) = cmp_type.sty {
                    Some(sext(tcx, l, int_ty).cmp(&sext(tcx, r, int_ty)))
                } else {
                    Some(l.cmp(&r))
                }
            },
            (&Constant::F64(l), &Constant::F64(r)) => l.partial_cmp(&r),
            (&Constant::F32(l), &Constant::F32(r)) => l.partial_cmp(&r),
            (&Constant::Bool(ref l), &Constant::Bool(ref r)) => Some(l.cmp(r)),
            (&Constant::Tuple(ref l), &Constant::Tuple(ref r))
            | (&Constant::Vec(ref l), &Constant::Vec(ref r)) => l
                .iter()
                .zip(r.iter())
                .map(|(li, ri)| Self::partial_cmp(tcx, cmp_type, li, ri))
                .find(|r| r.map_or(true, |o| o != Ordering::Equal))
                .unwrap_or_else(|| Some(l.len().cmp(&r.len()))),
            (&Constant::Repeat(ref lv, ref ls), &Constant::Repeat(ref rv, ref rs)) => {
                match Self::partial_cmp(tcx, cmp_type, lv, rv) {
                    Some(Equal) => Some(ls.cmp(rs)),
                    x => x,
                }
            },
            _ => None, // TODO: Are there any useful inter-type orderings?
        }
    }
}

/// parse a `LitKind` to a `Constant`
pub fn lit_to_constant<'tcx>(lit: &LitKind, ty: Ty<'tcx>) -> Constant {
    use crate::syntax::ast::*;

    match *lit {
        LitKind::Str(ref is, _) => Constant::Str(is.to_string()),
        LitKind::Byte(b) => Constant::Int(u128::from(b)),
        LitKind::ByteStr(ref s) => Constant::Binary(Rc::clone(s)),
        LitKind::Char(c) => Constant::Char(c),
        LitKind::Int(n, _) => Constant::Int(n),
        LitKind::Float(ref is, _) | LitKind::FloatUnsuffixed(ref is) => match ty.sty {
            ty::Float(FloatTy::F32) => Constant::F32(is.as_str().parse().unwrap()),
            ty::Float(FloatTy::F64) => Constant::F64(is.as_str().parse().unwrap()),
            _ => bug!(),
        },
        LitKind::Bool(b) => Constant::Bool(b),
    }
}

pub fn constant<'c, 'cc>(
    lcx: &LateContext<'c, 'cc>,
    tables: &'c ty::TypeckTables<'cc>,
    e: &Expr,
) -> Option<(Constant, bool)> {
    let mut cx = ConstEvalLateContext {
        tcx: lcx.tcx,
        tables,
        param_env: lcx.param_env,
        needed_resolution: false,
        substs: lcx.tcx.intern_substs(&[]),
    };
    cx.expr(e).map(|cst| (cst, cx.needed_resolution))
}

pub fn constant_simple<'c, 'cc>(
    lcx: &LateContext<'c, 'cc>,
    tables: &'c ty::TypeckTables<'cc>,
    e: &Expr,
) -> Option<Constant> {
    constant(lcx, tables, e).and_then(|(cst, res)| if res { None } else { Some(cst) })
}

/// Creates a `ConstEvalLateContext` from the given `LateContext` and `TypeckTables`
pub fn constant_context<'c, 'cc>(
    lcx: &LateContext<'c, 'cc>,
    tables: &'c ty::TypeckTables<'cc>,
) -> ConstEvalLateContext<'c, 'cc> {
    ConstEvalLateContext {
        tcx: lcx.tcx,
        tables,
        param_env: lcx.param_env,
        needed_resolution: false,
        substs: lcx.tcx.intern_substs(&[]),
    }
}

pub struct ConstEvalLateContext<'a, 'tcx: 'a> {
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    tables: &'a ty::TypeckTables<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    needed_resolution: bool,
    substs: &'tcx Substs<'tcx>,
}

impl<'c, 'cc> ConstEvalLateContext<'c, 'cc> {
    /// simple constant folding: Insert an expression, get a constant or none.
    pub fn expr(&mut self, e: &Expr) -> Option<Constant> {
        match e.node {
            ExprKind::Path(ref qpath) => self.fetch_path(qpath, e.hir_id),
            ExprKind::Block(ref block, _) => self.block(block),
            ExprKind::If(ref cond, ref then, ref otherwise) => {
                self.ifthenelse(cond, then, otherwise)
            },
            ExprKind::Lit(ref lit) => Some(lit_to_constant(&lit.node, self.tables.expr_ty(e))),
            ExprKind::Array(ref vec) => self.multi(vec).map(Constant::Vec),
            ExprKind::Tup(ref tup) => self.multi(tup).map(Constant::Tuple),
            ExprKind::Repeat(ref value, _) => {
                let n = match self.tables.expr_ty(e).sty {
                    ty::Array(_, n) => n.assert_usize(self.tcx).expect("array length"),
                    _ => span_bug!(e.span, "typeck error"),
                };
                self.expr(value).map(|v| Constant::Repeat(Box::new(v), n))
            },
            ExprKind::Unary(op, ref operand) => self.expr(operand).and_then(|o| match op {
                UnNot => self.constant_not(&o, self.tables.expr_ty(e)),
                UnNeg => self.constant_negate(&o, self.tables.expr_ty(e)),
                UnDeref => Some(o),
            }),
            ExprKind::Binary(op, ref left, ref right) => self.binop(op, left, right),
            // TODO: add other expressions
            _ => None,
        }
    }

    #[allow(clippy::cast_possible_wrap)]
    fn constant_not(&self, o: &Constant, ty: ty::Ty<'_>) -> Option<Constant> {
        use self::Constant::*;
        match *o {
            Bool(b) => Some(Bool(!b)),
            Int(value) => {
                let value = !value;
                match ty.sty {
                    ty::Int(ity) => Some(Int(unsext(self.tcx, value as i128, ity))),
                    ty::Uint(ity) => Some(Int(clip(self.tcx, value, ity))),
                    _ => None,
                }
            },
            _ => None,
        }
    }

    fn constant_negate(&self, o: &Constant, ty: ty::Ty<'_>) -> Option<Constant> {
        use self::Constant::*;
        match *o {
            Int(value) => {
                let ity = match ty.sty {
                    ty::Int(ity) => ity,
                    _ => return None,
                };
                // sign extend
                let value = sext(self.tcx, value, ity);
                let value = value.checked_neg()?;
                // clear unused bits
                Some(Int(unsext(self.tcx, value, ity)))
            },
            F32(f) => Some(F32(-f)),
            F64(f) => Some(F64(-f)),
            _ => None,
        }
    }

    /// create `Some(Vec![..])` of all constants, unless there is any
    /// non-constant part
    fn multi(&mut self, vec: &[Expr]) -> Option<Vec<Constant>> {
        vec.iter().map(|elem| self.expr(elem)).collect::<Option<_>>()
    }

    /// lookup a possibly constant expression from a ExprKind::Path
    fn fetch_path(&mut self, qpath: &QPath, id: HirId) -> Option<Constant> {
        use crate::rustc::mir::interpret::GlobalId;

        let def = self.tables.qpath_def(qpath, id);
        match def {
            Def::Const(def_id) | Def::AssociatedConst(def_id) => {
                let substs = self.tables.node_substs(id);
                let substs = if self.substs.is_empty() {
                    substs
                } else {
                    substs.subst(self.tcx, self.substs)
                };
                let instance = Instance::resolve(self.tcx, self.param_env, def_id, substs)?;
                let gid = GlobalId {
                    instance,
                    promoted: None,
                };
                let result = self.tcx.const_eval(self.param_env.and(gid)).ok()?;
                let ret = miri_to_const(self.tcx, result);
                if ret.is_some() {
                    self.needed_resolution = true;
                }
                return ret;
            },
            _ => {},
        }
        None
    }

    /// A block can only yield a constant if it only has one constant expression
    fn block(&mut self, block: &Block) -> Option<Constant> {
        if block.stmts.is_empty() {
            block.expr.as_ref().and_then(|b| self.expr(b))
        } else {
            None
        }
    }

    fn ifthenelse(
        &mut self,
        cond: &Expr,
        then: &P<Expr>,
        otherwise: &Option<P<Expr>>,
    ) -> Option<Constant> {
        if let Some(Constant::Bool(b)) = self.expr(cond) {
            if b {
                self.expr(&**then)
            } else {
                otherwise.as_ref().and_then(|expr| self.expr(expr))
            }
        } else {
            None
        }
    }

    fn binop(&mut self, op: BinOp, left: &Expr, right: &Expr) -> Option<Constant> {
        let l = self.expr(left)?;
        let r = self.expr(right);
        match (l, r) {
            (Constant::Int(l), Some(Constant::Int(r))) => match self.tables.expr_ty(left).sty {
                ty::Int(ity) => {
                    let l = sext(self.tcx, l, ity);
                    let r = sext(self.tcx, r, ity);
                    let zext = |n: i128| Constant::Int(unsext(self.tcx, n, ity));
                    match op.node {
                        BinOpKind::Add => l.checked_add(r).map(zext),
                        BinOpKind::Sub => l.checked_sub(r).map(zext),
                        BinOpKind::Mul => l.checked_mul(r).map(zext),
                        BinOpKind::Div if r != 0 => l.checked_div(r).map(zext),
                        BinOpKind::Rem if r != 0 => l.checked_rem(r).map(zext),
                        BinOpKind::Shr => {
                            l.checked_shr(r.try_into().expect("invalid shift")).map(zext)
                        },
                        BinOpKind::Shl => {
                            l.checked_shl(r.try_into().expect("invalid shift")).map(zext)
                        },
                        BinOpKind::BitXor => Some(zext(l ^ r)),
                        BinOpKind::BitOr => Some(zext(l | r)),
                        BinOpKind::BitAnd => Some(zext(l & r)),
                        BinOpKind::Eq => Some(Constant::Bool(l == r)),
                        BinOpKind::Ne => Some(Constant::Bool(l != r)),
                        BinOpKind::Lt => Some(Constant::Bool(l < r)),
                        BinOpKind::Le => Some(Constant::Bool(l <= r)),
                        BinOpKind::Ge => Some(Constant::Bool(l >= r)),
                        BinOpKind::Gt => Some(Constant::Bool(l > r)),
                        _ => None,
                    }
                },
                ty::Uint(_) => match op.node {
                    BinOpKind::Add => l.checked_add(r).map(Constant::Int),
                    BinOpKind::Sub => l.checked_sub(r).map(Constant::Int),
                    BinOpKind::Mul => l.checked_mul(r).map(Constant::Int),
                    BinOpKind::Div => l.checked_div(r).map(Constant::Int),
                    BinOpKind::Rem => l.checked_rem(r).map(Constant::Int),
                    BinOpKind::Shr => {
                        l.checked_shr(r.try_into().expect("shift too large")).map(Constant::Int)
                    },
                    BinOpKind::Shl => {
                        l.checked_shl(r.try_into().expect("shift too large")).map(Constant::Int)
                    },
                    BinOpKind::BitXor => Some(Constant::Int(l ^ r)),
                    BinOpKind::BitOr => Some(Constant::Int(l | r)),
                    BinOpKind::BitAnd => Some(Constant::Int(l & r)),
                    BinOpKind::Eq => Some(Constant::Bool(l == r)),
                    BinOpKind::Ne => Some(Constant::Bool(l != r)),
                    BinOpKind::Lt => Some(Constant::Bool(l < r)),
                    BinOpKind::Le => Some(Constant::Bool(l <= r)),
                    BinOpKind::Ge => Some(Constant::Bool(l >= r)),
                    BinOpKind::Gt => Some(Constant::Bool(l > r)),
                    _ => None,
                },
                _ => None,
            },
            (Constant::F32(l), Some(Constant::F32(r))) => match op.node {
                BinOpKind::Add => Some(Constant::F32(l + r)),
                BinOpKind::Sub => Some(Constant::F32(l - r)),
                BinOpKind::Mul => Some(Constant::F32(l * r)),
                BinOpKind::Div => Some(Constant::F32(l / r)),
                BinOpKind::Rem => Some(Constant::F32(l % r)),
                BinOpKind::Eq => Some(Constant::Bool(l == r)),
                BinOpKind::Ne => Some(Constant::Bool(l != r)),
                BinOpKind::Lt => Some(Constant::Bool(l < r)),
                BinOpKind::Le => Some(Constant::Bool(l <= r)),
                BinOpKind::Ge => Some(Constant::Bool(l >= r)),
                BinOpKind::Gt => Some(Constant::Bool(l > r)),
                _ => None,
            },
            (Constant::F64(l), Some(Constant::F64(r))) => match op.node {
                BinOpKind::Add => Some(Constant::F64(l + r)),
                BinOpKind::Sub => Some(Constant::F64(l - r)),
                BinOpKind::Mul => Some(Constant::F64(l * r)),
                BinOpKind::Div => Some(Constant::F64(l / r)),
                BinOpKind::Rem => Some(Constant::F64(l % r)),
                BinOpKind::Eq => Some(Constant::Bool(l == r)),
                BinOpKind::Ne => Some(Constant::Bool(l != r)),
                BinOpKind::Lt => Some(Constant::Bool(l < r)),
                BinOpKind::Le => Some(Constant::Bool(l <= r)),
                BinOpKind::Ge => Some(Constant::Bool(l >= r)),
                BinOpKind::Gt => Some(Constant::Bool(l > r)),
                _ => None,
            },
            (l, r) => match (op.node, l, r) {
                (BinOpKind::And, Constant::Bool(false), _) => Some(Constant::Bool(false)),
                (BinOpKind::Or, Constant::Bool(true), _) => Some(Constant::Bool(true)),
                (BinOpKind::And, Constant::Bool(true), Some(r))
                | (BinOpKind::Or, Constant::Bool(false), Some(r)) => Some(r),
                (BinOpKind::BitXor, Constant::Bool(l), Some(Constant::Bool(r))) => {
                    Some(Constant::Bool(l ^ r))
                },
                (BinOpKind::BitAnd, Constant::Bool(l),
Some(Constant::Bool(r))) => Some(Constant::Bool(l & r)), (BinOpKind::BitOr, Constant::Bool(l), Some(Constant::Bool(r))) => Some(Constant::Bool(l | r)), _ => None, }, } } } pub fn miri_to_const<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, result: &ty::Const<'tcx>) -> Option<Constant> { use crate::rustc::mir::interpret::{ConstValue, Scalar}; match result.val { ConstValue::Scalar(Scalar::Bits { bits: b, .. }) => match result.ty.sty { ty::Bool => Some(Constant::Bool(b == 1)), ty::Uint(_) | ty::Int(_) => Some(Constant::Int(b)), ty::Float(FloatTy::F32) => Some(Constant::F32(f32::from_bits( b.try_into().expect("invalid f32 bit representation"), ))), ty::Float(FloatTy::F64) => Some(Constant::F64(f64::from_bits( b.try_into().expect("invalid f64 bit representation"), ))), // FIXME: implement other conversion _ => None, }, ConstValue::ScalarPair(Scalar::Ptr(ptr), Scalar::Bits { bits: n, .. }) => match result.ty.sty { ty::Ref(_, tam, _) => match tam.sty { ty::Str => { let alloc = tcx.alloc_map.lock().unwrap_memory(ptr.alloc_id); let offset = ptr.offset.bytes().try_into().expect("too-large pointer offset"); let n = n as usize; String::from_utf8(alloc.bytes[offset..(offset + n)].to_owned()) .ok() .map(Constant::Str) }, _ => None, }, _ => None, }, // FIXME: implement other conversions _ => None, } }
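// Minimal standalone sketch of the sign-extend / truncate round-trip used by
// `constant_negate` above. `sext8`/`unsext8` are hypothetical fixed 8-bit
// stand-ins for the width-generic `sext`/`unsext` helpers; the real helpers
// take the target type's width as a parameter.
#[cfg(test)]
mod sext_roundtrip_sketch {
    fn sext8(bits: u128) -> i128 {
        bits as u8 as i8 as i128
    }
    fn unsext8(value: i128) -> u128 {
        value as i8 as u8 as u128
    }

    #[test]
    fn negate_minus_one() {
        let raw = 0xFF_u128; // the constant `-1i8`, stored as raw bits
        // sign-extend, negate with overflow checking, then mask back down
        let negated = sext8(raw).checked_neg().expect("-(-1) does not overflow");
        assert_eq!(unsext8(negated), 1);
    }
}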
42.201717
118
0.489322
fe3a8954a8301a872405fec70d6973596591ca38
1,569
use std::path::{Path, PathBuf};

/// Copy the content of the elm/ dir into
/// ~/.elm/0.19.1/packages/mpizenberg/elm-test-runner/4.0.6/
fn main() {
    println!("Hello from build.rs");
    let mut copy_options = fs_extra::dir::CopyOptions::new();
    copy_options.content_only = true;
    let installed_dir = elm_home()
        .join("0.19.1")
        .join("packages")
        .join("mpizenberg")
        .join("elm-test-runner")
        .join("4.0.6");
    let elm_stuff = Path::new("elm").join("elm-stuff");
    std::fs::remove_dir_all(&elm_stuff)
        .unwrap_or_else(|_| println!("Error removing elm/elm-stuff"));
    std::fs::remove_dir_all(&installed_dir)
        .unwrap_or_else(|_| println!("Error removing elm-test-runner package in ~/.elm/"));
    std::fs::create_dir_all(&installed_dir)
        .unwrap_or_else(|_| println!("Error creating elm-test-runner package dir in ~/.elm/"));
    fs_extra::dir::copy("elm", &installed_dir, &copy_options).unwrap_or_else(|_| {
        println!("Error copying elm-test-runner package in ~/.elm/");
        0
    });
}

pub fn elm_home() -> PathBuf {
    match std::env::var_os("ELM_HOME") {
        None => default_elm_home(),
        Some(os_string) => os_string.into(),
    }
}

#[cfg(target_family = "unix")]
fn default_elm_home() -> PathBuf {
    dirs_next::home_dir()
        .expect("Unknown home directory")
        .join(".elm")
}

#[cfg(target_family = "windows")]
fn default_elm_home() -> PathBuf {
    dirs_next::data_dir()
        .expect("Unknown data directory")
        .join("elm")
}
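// Minimal sketch of the ELM_HOME precedence implemented by `elm_home` above:
// when the variable is set it wins over the per-platform default. This is an
// illustrative test, assuming it runs in a process where mutating the
// environment is safe (build scripts do not normally carry tests).
#[cfg(test)]
mod elm_home_sketch {
    use super::elm_home;
    use std::path::PathBuf;

    #[test]
    fn env_var_overrides_default() {
        std::env::set_var("ELM_HOME", "/tmp/elm-home");
        assert_eq!(elm_home(), PathBuf::from("/tmp/elm-home"));
        std::env::remove_var("ELM_HOME");
    }
}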
32.020408
95
0.611217
0e0f6346b29974c14b723c3a4d651433b720c325
2,437
use super::*;
use crate::mgfw::log;

#[derive(Debug, Copy, Clone)]
pub struct Color {
    pub r: f32,
    pub g: f32,
    pub b: f32,
    pub a: f32,
}

struct Color16 {
    pub r: i16,
    pub g: i16,
    pub b: i16,
    pub a: i16,
}

pub struct ColorComponentManager {
    data: *mut Color16,
    // WARNING: Anything below this line is not in cache!
}

const COLOR_SCALE: i16 = 255;
const COLOR_SCALE_F: f32 = 1.0 / COLOR_SCALE as f32;

#[allow(dead_code)]
impl ColorComponentManager {
    pub fn new(mgr: &mut CacheManager) -> ColorComponentManager {
        log(format!("Constructing ColorComponentManager"));
        let sz_bytes = std::mem::size_of::<Color16>() * ENTITY_SZ;
        let data = mgr.allocate(sz_bytes) as *mut Color16;

        // default init colors to opaque white
        for i in 0..ENTITY_SZ {
            let p = unsafe { &mut *(data.offset(i as isize)) };
            p.r = COLOR_SCALE;
            p.g = COLOR_SCALE;
            p.b = COLOR_SCALE;
            p.a = COLOR_SCALE;
        }

        ColorComponentManager { data }
    }

    pub fn set_color(&self, idx: usize, color: Color) {
        self.set_color_rgba(idx, color.r, color.g, color.b, color.a);
    }

    pub fn set_color_rgba(&self, idx: usize, r: f32, g: f32, b: f32, a: f32) {
        let clr = self.get_data_ref_mut(idx);
        clr.r = (r / COLOR_SCALE_F) as i16;
        clr.g = (g / COLOR_SCALE_F) as i16;
        clr.b = (b / COLOR_SCALE_F) as i16;
        clr.a = (a / COLOR_SCALE_F) as i16;
    }

    pub fn get_color(&self, idx: usize) -> Color {
        let clr = self.get_data_ref(idx);
        convert(clr)
    }

    pub fn set_alpha(&self, idx: usize, alpha: f32) {
        let clr = self.get_data_ref_mut(idx);
        clr.a = (alpha / COLOR_SCALE_F) as i16;
    }

    pub fn get_alpha(&self, idx: usize) -> f32 {
        self.get_data_ref(idx).a as f32 * COLOR_SCALE_F
    }

    fn get_data_ref_mut(&self, idx: usize) -> &mut Color16 {
        assert!(idx < ENTITY_SZ);
        unsafe { &mut *(self.data.offset(idx as isize)) }
    }

    fn get_data_ref(&self, idx: usize) -> &Color16 {
        assert!(idx < ENTITY_SZ);
        unsafe { &*(self.data.offset(idx as isize)) }
    }
}

fn convert(color: &Color16) -> Color {
    Color {
        r: color.r as f32 * COLOR_SCALE_F,
        g: color.g as f32 * COLOR_SCALE_F,
        b: color.b as f32 * COLOR_SCALE_F,
        a: color.a as f32 * COLOR_SCALE_F,
    }
}
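// Quantization sketch: channels are stored as 0..=255 in `Color16`, so a
// round trip through `set_color_rgba`/`get_color` is only exact to within
// 1/255. Standalone arithmetic check mirroring the conversions above.
#[cfg(test)]
mod quantization_sketch {
    use super::COLOR_SCALE_F;

    #[test]
    fn roundtrip_within_one_step() {
        let stored = (0.4f32 / COLOR_SCALE_F) as i16; // quantize: ~0.4 * 255 = 102
        let back = stored as f32 * COLOR_SCALE_F; // dequantize: 102 / 255 ≈ 0.4
        assert!((back - 0.4).abs() < COLOR_SCALE_F);
    }
}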
26.78022
78
0.576939
1c5766c37773f5398b0e706d97c63a3576854280
3,729
extern crate pairlock;
use pairlock::{PairLock,TryUpdateError};

use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::ptr;

#[test]
fn basic() {
    let r = PairLock::new(1, 0);
    assert_eq!(r.view(|v| *v ), 1);
    assert_eq!(r.view(|v| *v ), 1);
    let mut updater = r.update();
    assert_eq!(*updater, 0);
    *updater = 2;
    drop(updater);
    assert_eq!(r.view(|v| *v ), 2);
    let mut updater = r.try_update().unwrap();
    assert_eq!(*updater, 1);
    *updater = 3;
    drop(updater);
    assert_eq!(r.view(|v| *v ), 3);
    let prev = r.set(4);
    assert_eq!(prev, 2);
}

#[test]
fn basic_clone() {
    let pl = PairLock::with_default(vec![1]);
    assert_eq!(pl.get_clone(), vec![1]);
    let default = pl.set(vec![2,3]);
    assert_eq!(default, Vec::default());
    assert_eq!(pl.get_clone(), vec![2,3]);
}

#[test]
fn basic_copy() {
    let pl = PairLock::with_default("one");
    assert_eq!(pl.read(), "one");
    pl.set("another");
    assert_eq!(pl.read(), "another");
}

#[test]
fn basic_arc() {
    let pl = PairLock::new_arc(0);
    assert_eq!(*pl.get(), 0);
    pl.set(Arc::new(1));
    assert_eq!(*pl.get(), 1);
}

#[test]
fn exclusive() {
    let mut pl = PairLock::new(1, 0);
    assert_eq!(*pl.get_mut_active(), 1);
    assert_eq!(*pl.get_mut_inactive(), 0);
    assert_eq!(pl.get_mut_both(), (&mut 1, &mut 0));
    *pl.update() = 2;
    assert_eq!(*pl.get_mut_active(), 2);
    assert_eq!(*pl.get_mut_inactive(), 1);
    assert_eq!(pl.get_mut_both(), (&mut 2, &mut 1));
    assert_eq!(pl.into_inner(), (2,1));
}

#[test]
fn singlethreaded_locking() {
    let r = PairLock::new((),());
    assert!(r.try_update().is_ok());
    r.view(|_| r.view(|_| assert!(r.try_update().is_ok()) ) );
    r.view(|_| {
        assert!(r.try_update().is_ok());
        assert_eq!(r.try_update(), Err(TryUpdateError::InactiveReads));
    });
    r.view(|_| {
        let _u = r.update();
        assert_eq!(r.try_update(), Err(TryUpdateError::OtherUpdate));
    });
}

#[test]
fn drop_runs() {
    static DROPS: AtomicUsize = AtomicUsize::new(0);
    struct Foo;
    impl Drop for Foo {
        fn drop(&mut self) {
            DROPS.fetch_add(1, Ordering::SeqCst);
        }
    }
    // ... once when both slots point to the same arc
    drop(PairLock::with_clone(Arc::new(Foo)));
    assert_eq!(DROPS.load(Ordering::SeqCst), 1);
    // ... twice when pointing to two different
    drop(PairLock::new(Arc::new(Foo), Arc::new(Foo)));
    assert_eq!(DROPS.load(Ordering::SeqCst), 3);
    // ... when the last reference drops
    let pl = PairLock::new_arc(Foo);
    let a = pl.get();
    pl.set(Arc::new(Foo));
    let b = pl.get();
    drop(pl);
    assert_eq!(DROPS.load(Ordering::SeqCst), 3);
    drop(a);
    drop(b);
    assert_eq!(DROPS.load(Ordering::SeqCst), 5);
}

#[test]
fn debug_fmt() {
    #[derive(Clone,Copy, Debug)]
    #[allow(unused)]
    struct Foo{bar:&'static str}
    let pl = PairLock::new(Foo{bar:"baz"}, Foo{bar:"quux"});
    assert_eq!(format!("{:?}", pl), format!("PairLock({:?}, _)", pl.read()));
    assert_eq!(
        format!("{:#?}", pl),
        "PairLock(\n    Foo {\n        bar: \"baz\",\n    },\n    _,\n)"
    );
}

#[test]
fn default() {
    assert_eq!(PairLock::<bool>::default().read(), bool::default());
}

#[test]
fn pointers() {
    let t1 = Arc::new(true);
    let t1_ptr = &*t1 as *const bool;
    let c = PairLock::with_clone(t1.clone());
    assert!(ptr::eq(&*c.get(), t1_ptr));
    assert!(ptr::eq(&*c.get(), t1_ptr));
    c.set(t1);
    let t2 = Arc::new(true);
    let t2_ptr = &*t2 as *const bool;
    assert!(!ptr::eq(t2_ptr, t1_ptr));
    assert!(ptr::eq(&*c.get(), t1_ptr));
    c.set(t2);
    assert!(ptr::eq(&*c.get(), t2_ptr));
}
25.717241
77
0.561813
2363b40e4cc543e92f3cc98619260667e806fce2
12,654
use crate::error::Error;
use crate::key;
use crate::msgs::enums::{SignatureAlgorithm, SignatureScheme};

use ring::signature::{self, EcdsaKeyPair, Ed25519KeyPair, RsaKeyPair};

use std::convert::TryFrom;
use std::sync::Arc;

/// An abstract signing key.
pub trait SigningKey: Send + Sync {
    /// Choose a `SignatureScheme` from those offered.
    ///
    /// Expresses the choice by returning something that implements `Signer`,
    /// using the chosen scheme.
    fn choose_scheme(&self, offered: &[SignatureScheme]) -> Option<Box<dyn Signer>>;

    /// What kind of key we have.
    fn algorithm(&self) -> SignatureAlgorithm;
}

/// A thing that can sign a message.
pub trait Signer: Send + Sync {
    /// Signs `message` using the selected scheme.
    fn sign(&self, message: &[u8]) -> Result<Vec<u8>, Error>;

    /// Reveals which scheme will be used when you call `sign()`.
    fn scheme(&self) -> SignatureScheme;
}

/// A packaged-together certificate chain, matching `SigningKey` and
/// optional stapled OCSP response and/or SCT list.
#[derive(Clone)]
pub struct CertifiedKey {
    /// The certificate chain.
    pub cert: Vec<key::Certificate>,

    /// The certified key.
    pub key: Arc<dyn SigningKey>,

    /// An optional OCSP response from the certificate issuer,
    /// attesting to its continued validity.
    pub ocsp: Option<Vec<u8>>,

    /// An optional collection of SCTs from CT logs, proving the
    /// certificate is included on those logs. This must be
    /// a `SignedCertificateTimestampList` encoding; see RFC6962.
    pub sct_list: Option<Vec<u8>>,
}

impl CertifiedKey {
    /// Make a new CertifiedKey, with the given chain and key.
    ///
    /// The cert chain must not be empty. The first certificate in the chain
    /// must be the end-entity certificate.
    pub fn new(cert: Vec<key::Certificate>, key: Arc<dyn SigningKey>) -> Self {
        Self {
            cert,
            key,
            ocsp: None,
            sct_list: None,
        }
    }

    /// The end-entity certificate.
    pub fn end_entity_cert(&self) -> Result<&key::Certificate, SignError> {
        self.cert.get(0).ok_or(SignError(()))
    }

    /// Check the certificate chain for validity:
    /// - it should be a non-empty list
    /// - the first certificate should be parsable as an x509v3 certificate
    /// - the first certificate should quote the given server name
    ///   (if provided)
    ///
    /// These checks are not security-sensitive. They are the
    /// *server* attempting to detect accidental misconfiguration.
    pub(crate) fn cross_check_end_entity_cert(
        &self,
        name: Option<webpki::DnsNameRef>,
    ) -> Result<(), Error> {
        // Always reject an empty certificate chain.
        let end_entity_cert = self.end_entity_cert().map_err(|SignError(())| {
            Error::General("No end-entity certificate in certificate chain".to_string())
        })?;

        // Reject syntactically-invalid end-entity certificates.
        let end_entity_cert =
            webpki::EndEntityCert::try_from(end_entity_cert.as_ref()).map_err(|_| {
                Error::General(
                    "End-entity certificate in certificate \
                     chain is syntactically invalid"
                        .to_string(),
                )
            })?;

        if let Some(name) = name {
            // If SNI was offered then the certificate must be valid for
            // that hostname. Note that this doesn't fully validate that the
            // certificate is valid; it only validates that the name is one
            // that the certificate is valid for, if the certificate is
            // valid.
            if end_entity_cert
                .verify_is_valid_for_dns_name(name)
                .is_err()
            {
                return Err(Error::General(
                    "The server certificate is not \
                     valid for the given name"
                        .to_string(),
                ));
            }
        }

        Ok(())
    }
}

/// Parse `der` as any supported key encoding/type, returning
/// the first which works.
pub fn any_supported_type(der: &key::PrivateKey) -> Result<Arc<dyn SigningKey>, SignError> {
    if let Ok(rsa) = RsaSigningKey::new(der) {
        Ok(Arc::new(rsa))
    } else if let Ok(ecdsa) = any_ecdsa_type(der) {
        Ok(ecdsa)
    } else {
        any_eddsa_type(der)
    }
}

/// Parse `der` as any ECDSA key type, returning the first which works.
pub fn any_ecdsa_type(der: &key::PrivateKey) -> Result<Arc<dyn SigningKey>, SignError> {
    if let Ok(ecdsa_p256) = EcdsaSigningKey::new(
        der,
        SignatureScheme::ECDSA_NISTP256_SHA256,
        &signature::ECDSA_P256_SHA256_ASN1_SIGNING,
    ) {
        return Ok(Arc::new(ecdsa_p256));
    }

    if let Ok(ecdsa_p384) = EcdsaSigningKey::new(
        der,
        SignatureScheme::ECDSA_NISTP384_SHA384,
        &signature::ECDSA_P384_SHA384_ASN1_SIGNING,
    ) {
        return Ok(Arc::new(ecdsa_p384));
    }

    Err(SignError(()))
}

/// Parse `der` as any EdDSA key type, returning the first which works.
pub fn any_eddsa_type(der: &key::PrivateKey) -> Result<Arc<dyn SigningKey>, SignError> {
    if let Ok(ed25519) = Ed25519SigningKey::new(der, SignatureScheme::ED25519) {
        return Ok(Arc::new(ed25519));
    }

    // TODO: Add support for Ed448
    Err(SignError(()))
}

/// A `SigningKey` for RSA-PKCS1 or RSA-PSS
pub struct RsaSigningKey {
    key: Arc<RsaKeyPair>,
}

static ALL_RSA_SCHEMES: &[SignatureScheme] = &[
    SignatureScheme::RSA_PSS_SHA512,
    SignatureScheme::RSA_PSS_SHA384,
    SignatureScheme::RSA_PSS_SHA256,
    SignatureScheme::RSA_PKCS1_SHA512,
    SignatureScheme::RSA_PKCS1_SHA384,
    SignatureScheme::RSA_PKCS1_SHA256,
];

impl RsaSigningKey {
    /// Make a new `RsaSigningKey` from a DER encoding, in either
    /// PKCS#1 or PKCS#8 format.
    pub fn new(der: &key::PrivateKey) -> Result<Self, SignError> {
        RsaKeyPair::from_der(&der.0)
            .or_else(|_| RsaKeyPair::from_pkcs8(&der.0))
            .map(|s| Self { key: Arc::new(s) })
            .map_err(|_| SignError(()))
    }
}

impl SigningKey for RsaSigningKey {
    fn choose_scheme(&self, offered: &[SignatureScheme]) -> Option<Box<dyn Signer>> {
        ALL_RSA_SCHEMES
            .iter()
            .find(|scheme| offered.contains(scheme))
            .map(|scheme| RsaSigner::new(Arc::clone(&self.key), *scheme))
    }

    fn algorithm(&self) -> SignatureAlgorithm {
        SignatureAlgorithm::RSA
    }
}

#[allow(clippy::upper_case_acronyms)]
#[doc(hidden)]
#[deprecated(since = "0.20.0", note = "Use RsaSigningKey")]
pub type RSASigningKey = RsaSigningKey;

struct RsaSigner {
    key: Arc<RsaKeyPair>,
    scheme: SignatureScheme,
    encoding: &'static dyn signature::RsaEncoding,
}

impl RsaSigner {
    fn new(key: Arc<RsaKeyPair>, scheme: SignatureScheme) -> Box<dyn Signer> {
        let encoding: &dyn signature::RsaEncoding = match scheme {
            SignatureScheme::RSA_PKCS1_SHA256 => &signature::RSA_PKCS1_SHA256,
            SignatureScheme::RSA_PKCS1_SHA384 => &signature::RSA_PKCS1_SHA384,
            SignatureScheme::RSA_PKCS1_SHA512 => &signature::RSA_PKCS1_SHA512,
            SignatureScheme::RSA_PSS_SHA256 => &signature::RSA_PSS_SHA256,
            SignatureScheme::RSA_PSS_SHA384 => &signature::RSA_PSS_SHA384,
            SignatureScheme::RSA_PSS_SHA512 => &signature::RSA_PSS_SHA512,
            _ => unreachable!(),
        };

        Box::new(Self {
            key,
            scheme,
            encoding,
        })
    }
}

impl Signer for RsaSigner {
    fn sign(&self, message: &[u8]) -> Result<Vec<u8>, Error> {
        let mut sig = vec![0; self.key.public_modulus_len()];

        let rng = ring::rand::SystemRandom::new();
        self.key
            .sign(self.encoding, &rng, message, &mut sig)
            .map(|_| sig)
            .map_err(|_| Error::General("signing failed".to_string()))
    }

    fn scheme(&self) -> SignatureScheme {
        self.scheme
    }
}

/// A SigningKey that uses exactly one TLS-level SignatureScheme
/// and one ring-level signature::SigningAlgorithm.
///
/// Compare this to `RsaSigningKey`, which for a particular key is
/// willing to sign with several algorithms. This is quite poor
/// cryptography practice, but is necessary because a given RSA key
/// is expected to work in TLS1.2 (PKCS#1 signatures) and TLS1.3
/// (PSS signatures) -- nobody is willing to obtain certificates for
/// different protocol versions.
///
/// Currently this is only implemented for ECDSA keys.
struct EcdsaSigningKey {
    key: Arc<EcdsaKeyPair>,
    scheme: SignatureScheme,
}

impl EcdsaSigningKey {
    /// Make a new `EcdsaSigningKey` from a DER encoding in PKCS#8 format,
    /// expecting a key usable with precisely the given signature scheme.
    fn new(
        der: &key::PrivateKey,
        scheme: SignatureScheme,
        sigalg: &'static signature::EcdsaSigningAlgorithm,
    ) -> Result<Self, ()> {
        EcdsaKeyPair::from_pkcs8(sigalg, &der.0)
            .map(|kp| Self {
                key: Arc::new(kp),
                scheme,
            })
            .map_err(|_| ())
    }
}

impl SigningKey for EcdsaSigningKey {
    fn choose_scheme(&self, offered: &[SignatureScheme]) -> Option<Box<dyn Signer>> {
        if offered.contains(&self.scheme) {
            Some(Box::new(EcdsaSigner {
                key: Arc::clone(&self.key),
                scheme: self.scheme,
            }))
        } else {
            None
        }
    }

    fn algorithm(&self) -> SignatureAlgorithm {
        use crate::msgs::handshake::DecomposedSignatureScheme;
        self.scheme.sign()
    }
}

struct EcdsaSigner {
    key: Arc<EcdsaKeyPair>,
    scheme: SignatureScheme,
}

impl Signer for EcdsaSigner {
    fn sign(&self, message: &[u8]) -> Result<Vec<u8>, Error> {
        let rng = ring::rand::SystemRandom::new();
        self.key
            .sign(&rng, message)
            .map_err(|_| Error::General("signing failed".into()))
            .map(|sig| sig.as_ref().into())
    }

    fn scheme(&self) -> SignatureScheme {
        self.scheme
    }
}

/// A SigningKey that uses exactly one TLS-level SignatureScheme
/// and one ring-level signature::SigningAlgorithm.
///
/// Compare this to `RsaSigningKey`, which for a particular key is
/// willing to sign with several algorithms. This is quite poor
/// cryptography practice, but is necessary because a given RSA key
/// is expected to work in TLS1.2 (PKCS#1 signatures) and TLS1.3
/// (PSS signatures) -- nobody is willing to obtain certificates for
/// different protocol versions.
///
/// Currently this is only implemented for Ed25519 keys.
struct Ed25519SigningKey {
    key: Arc<Ed25519KeyPair>,
    scheme: SignatureScheme,
}

impl Ed25519SigningKey {
    /// Make a new `Ed25519SigningKey` from a DER encoding in PKCS#8 format,
    /// expecting a key usable with precisely the given signature scheme.
    fn new(der: &key::PrivateKey, scheme: SignatureScheme) -> Result<Self, SignError> {
        Ed25519KeyPair::from_pkcs8_maybe_unchecked(&der.0)
            .map(|kp| Self {
                key: Arc::new(kp),
                scheme,
            })
            .map_err(|_| SignError(()))
    }
}

impl SigningKey for Ed25519SigningKey {
    fn choose_scheme(&self, offered: &[SignatureScheme]) -> Option<Box<dyn Signer>> {
        if offered.contains(&self.scheme) {
            Some(Box::new(Ed25519Signer {
                key: Arc::clone(&self.key),
                scheme: self.scheme,
            }))
        } else {
            None
        }
    }

    fn algorithm(&self) -> SignatureAlgorithm {
        use crate::msgs::handshake::DecomposedSignatureScheme;
        self.scheme.sign()
    }
}

struct Ed25519Signer {
    key: Arc<Ed25519KeyPair>,
    scheme: SignatureScheme,
}

impl Signer for Ed25519Signer {
    fn sign(&self, message: &[u8]) -> Result<Vec<u8>, Error> {
        Ok(self.key.sign(message).as_ref().into())
    }

    fn scheme(&self) -> SignatureScheme {
        self.scheme
    }
}

/// The set of schemes we support for signatures and
/// that are allowed for TLS1.3.
pub fn supported_sign_tls13() -> &'static [SignatureScheme] {
    &[
        SignatureScheme::ECDSA_NISTP384_SHA384,
        SignatureScheme::ECDSA_NISTP256_SHA256,
        SignatureScheme::RSA_PSS_SHA512,
        SignatureScheme::RSA_PSS_SHA384,
        SignatureScheme::RSA_PSS_SHA256,
        SignatureScheme::ED25519,
    ]
}

/// Errors while signing
#[derive(Debug)]
pub struct SignError(());
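// Hypothetical caller sketch (not part of this module, and not rustls API
// documentation): load a DER-encoded key with `any_supported_type`, then let
// the key pick a signer for the peer's offered schemes. `der_bytes` is
// assumed to hold a valid PKCS#8 or PKCS#1 encoding.
#[cfg(test)]
mod signer_sketch {
    use super::*;

    #[allow(dead_code)]
    fn sign_with_offered(der_bytes: Vec<u8>, message: &[u8]) -> Result<Vec<u8>, Error> {
        let key = any_supported_type(&key::PrivateKey(der_bytes))
            .map_err(|_| Error::General("unsupported key encoding".to_string()))?;
        let signer = key
            .choose_scheme(&[SignatureScheme::ED25519, SignatureScheme::RSA_PSS_SHA256])
            .ok_or_else(|| Error::General("no mutually supported scheme".to_string()))?;
        signer.sign(message)
    }
}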
31.79397
92
0.617038
79e3d8d884875599e1730e6f88821d71c886651e
18,927
//! Utilities for building HTTP endpoints in a library-agnostic manner

pub mod graphiql;
pub mod playground;

use serde::{
    de,
    ser::{self, SerializeMap},
    Deserialize, Serialize,
};

use crate::{
    ast::InputValue,
    executor::{ExecutionError, ValuesStream},
    value::{DefaultScalarValue, ScalarValue},
    FieldError, GraphQLError, GraphQLSubscriptionType, GraphQLType, GraphQLTypeAsync, RootNode,
    Value, Variables,
};

/// The expected structure of the decoded JSON document for either POST or GET requests.
///
/// For POST, you can use Serde to deserialize the incoming JSON data directly
/// into this struct - it derives Deserialize for exactly this reason.
///
/// For GET, you will need to parse the query string and extract "query",
/// "operationName", and "variables" manually.
#[derive(Deserialize, Clone, Serialize, PartialEq, Debug)]
pub struct GraphQLRequest<S = DefaultScalarValue>
where
    S: ScalarValue,
{
    query: String,
    #[serde(rename = "operationName")]
    operation_name: Option<String>,
    #[serde(bound(deserialize = "InputValue<S>: Deserialize<'de> + Serialize"))]
    variables: Option<InputValue<S>>,
}

impl<S> GraphQLRequest<S>
where
    S: ScalarValue,
{
    /// Returns the `operation_name` associated with this request.
    pub fn operation_name(&self) -> Option<&str> {
        self.operation_name.as_ref().map(|oper_name| &**oper_name)
    }

    fn variables(&self) -> Variables<S> {
        self.variables
            .as_ref()
            .and_then(|iv| {
                iv.to_object_value().map(|o| {
                    o.into_iter()
                        .map(|(k, v)| (k.to_owned(), v.clone()))
                        .collect()
                })
            })
            .unwrap_or_default()
    }

    /// Construct a new GraphQL request from parts
    pub fn new(
        query: String,
        operation_name: Option<String>,
        variables: Option<InputValue<S>>,
    ) -> Self {
        GraphQLRequest {
            query,
            operation_name,
            variables,
        }
    }

    /// Execute a GraphQL request synchronously using the specified schema and context
    ///
    /// This is a simple wrapper around the `execute_sync` function exposed at the
    /// top level of this crate.
    pub fn execute_sync<'a, CtxT, QueryT, MutationT, SubscriptionT>(
        &'a self,
        root_node: &'a RootNode<QueryT, MutationT, SubscriptionT, S>,
        context: &CtxT,
    ) -> GraphQLResponse<'a, S>
    where
        S: ScalarValue,
        QueryT: GraphQLType<S, Context = CtxT>,
        MutationT: GraphQLType<S, Context = CtxT>,
        SubscriptionT: GraphQLType<S, Context = CtxT>,
    {
        GraphQLResponse(crate::execute_sync(
            &self.query,
            self.operation_name(),
            root_node,
            &self.variables(),
            context,
        ))
    }

    /// Execute a GraphQL request using the specified schema and context
    ///
    /// This is a simple wrapper around the `execute` function exposed at the
    /// top level of this crate.
    pub async fn execute<'a, CtxT, QueryT, MutationT, SubscriptionT>(
        &'a self,
        root_node: &'a RootNode<'a, QueryT, MutationT, SubscriptionT, S>,
        context: &'a CtxT,
    ) -> GraphQLResponse<'a, S>
    where
        S: ScalarValue + Send + Sync,
        QueryT: crate::GraphQLTypeAsync<S, Context = CtxT> + Send + Sync,
        QueryT::TypeInfo: Send + Sync,
        MutationT: crate::GraphQLTypeAsync<S, Context = CtxT> + Send + Sync,
        MutationT::TypeInfo: Send + Sync,
        SubscriptionT: GraphQLType<S, Context = CtxT> + Send + Sync,
        SubscriptionT::TypeInfo: Send + Sync,
        CtxT: Send + Sync,
    {
        let op = self.operation_name();
        let vars = &self.variables();
        let res = crate::execute(&self.query, op, root_node, vars, context).await;
        GraphQLResponse(res)
    }
}

/// Resolve a GraphQL subscription into `Value<ValuesStream<S>>` using the
/// specified schema and context.
/// This is a wrapper around the `resolve_into_stream` function exposed at the top
/// level of this crate.
pub async fn resolve_into_stream<'req, 'rn, 'ctx, 'a, CtxT, QueryT, MutationT, SubscriptionT, S>(
    req: &'req GraphQLRequest<S>,
    root_node: &'rn RootNode<'a, QueryT, MutationT, SubscriptionT, S>,
    context: &'ctx CtxT,
) -> Result<(Value<ValuesStream<'a, S>>, Vec<ExecutionError<S>>), GraphQLError<'a>>
where
    'req: 'a,
    'rn: 'a,
    'ctx: 'a,
    S: ScalarValue + Send + Sync + 'static,
    QueryT: GraphQLTypeAsync<S, Context = CtxT> + Send + Sync,
    QueryT::TypeInfo: Send + Sync,
    MutationT: GraphQLTypeAsync<S, Context = CtxT> + Send + Sync,
    MutationT::TypeInfo: Send + Sync,
    SubscriptionT: GraphQLSubscriptionType<S, Context = CtxT> + Send + Sync,
    SubscriptionT::TypeInfo: Send + Sync,
    CtxT: Send + Sync,
{
    let op = req.operation_name();
    let vars = req.variables();

    crate::resolve_into_stream(&req.query, op, root_node, &vars, context).await
}

/// Simple wrapper around the result from executing a GraphQL query
///
/// This struct implements Serialize, so you can simply serialize this
/// to JSON and send it over the wire. Use the `is_ok` method to determine
/// whether to send a 200 or 400 HTTP status code.
#[derive(Debug)]
pub struct GraphQLResponse<'a, S = DefaultScalarValue>(
    Result<(Value<S>, Vec<ExecutionError<S>>), GraphQLError<'a>>,
);

impl<'a, S> GraphQLResponse<'a, S>
where
    S: ScalarValue,
{
    /// Constructs new `GraphQLResponse` using the given result
    pub fn from_result(r: Result<(Value<S>, Vec<ExecutionError<S>>), GraphQLError<'a>>) -> Self {
        Self(r)
    }

    /// Constructs an error response outside of the normal execution flow
    pub fn error(error: FieldError<S>) -> Self {
        GraphQLResponse(Ok((Value::null(), vec![ExecutionError::at_origin(error)])))
    }

    /// Was the request successful or not?
    ///
    /// Note that there still might be errors in the response even though it's
    /// considered OK. This is by design in GraphQL.
    pub fn is_ok(&self) -> bool {
        self.0.is_ok()
    }
}

impl<'a, T> Serialize for GraphQLResponse<'a, T>
where
    T: Serialize + ScalarValue,
    Value<T>: Serialize,
    ExecutionError<T>: Serialize,
    GraphQLError<'a>: Serialize,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: ser::Serializer,
    {
        match self.0 {
            Ok((ref res, ref err)) => {
                let mut map = serializer.serialize_map(None)?;

                map.serialize_key("data")?;
                map.serialize_value(res)?;

                if !err.is_empty() {
                    map.serialize_key("errors")?;
                    map.serialize_value(err)?;
                }

                map.end()
            }
            Err(ref err) => {
                let mut map = serializer.serialize_map(Some(1))?;
                map.serialize_key("errors")?;
                map.serialize_value(err)?;
                map.end()
            }
        }
    }
}

/// Simple wrapper around GraphQLRequest to allow the handling of Batch requests.
#[derive(Debug, Deserialize, PartialEq)]
#[serde(untagged)]
#[serde(bound = "InputValue<S>: Deserialize<'de>")]
pub enum GraphQLBatchRequest<S = DefaultScalarValue>
where
    S: ScalarValue,
{
    /// A single operation request.
    Single(GraphQLRequest<S>),

    /// A batch operation request.
    ///
    /// An empty batch is considered an invalid value, so it cannot be deserialized.
    #[serde(deserialize_with = "deserialize_non_empty_vec")]
    Batch(Vec<GraphQLRequest<S>>),
}

fn deserialize_non_empty_vec<'de, D, T>(deserializer: D) -> Result<Vec<T>, D::Error>
where
    D: de::Deserializer<'de>,
    T: Deserialize<'de>,
{
    use de::Error as _;

    let v = Vec::<T>::deserialize(deserializer)?;
    if v.is_empty() {
        Err(D::Error::invalid_length(0, &"a positive integer"))
    } else {
        Ok(v)
    }
}

impl<S> GraphQLBatchRequest<S>
where
    S: ScalarValue,
{
    /// Execute a GraphQL batch request synchronously using the specified schema and context
    ///
    /// This is a simple wrapper around the `execute_sync` function exposed in GraphQLRequest.
    pub fn execute_sync<'a, CtxT, QueryT, MutationT, SubscriptionT>(
        &'a self,
        root_node: &'a crate::RootNode<QueryT, MutationT, SubscriptionT, S>,
        context: &CtxT,
    ) -> GraphQLBatchResponse<'a, S>
    where
        QueryT: crate::GraphQLType<S, Context = CtxT>,
        MutationT: crate::GraphQLType<S, Context = CtxT>,
        SubscriptionT: crate::GraphQLType<S, Context = CtxT>,
    {
        match *self {
            Self::Single(ref req) => {
                GraphQLBatchResponse::Single(req.execute_sync(root_node, context))
            }
            Self::Batch(ref reqs) => GraphQLBatchResponse::Batch(
                reqs.iter()
                    .map(|req| req.execute_sync(root_node, context))
                    .collect(),
            ),
        }
    }

    /// Executes a GraphQL request using the specified schema and context
    ///
    /// This is a simple wrapper around the `execute` function exposed in
    /// GraphQLRequest
    pub async fn execute<'a, CtxT, QueryT, MutationT, SubscriptionT>(
        &'a self,
        root_node: &'a crate::RootNode<'a, QueryT, MutationT, SubscriptionT, S>,
        context: &'a CtxT,
    ) -> GraphQLBatchResponse<'a, S>
    where
        QueryT: crate::GraphQLTypeAsync<S, Context = CtxT> + Send + Sync,
        QueryT::TypeInfo: Send + Sync,
        MutationT: crate::GraphQLTypeAsync<S, Context = CtxT> + Send + Sync,
        MutationT::TypeInfo: Send + Sync,
        SubscriptionT: crate::GraphQLSubscriptionType<S, Context = CtxT> + Send + Sync,
        SubscriptionT::TypeInfo: Send + Sync,
        CtxT: Send + Sync,
        S: Send + Sync,
    {
        match *self {
            Self::Single(ref req) => {
                let resp = req.execute(root_node, context).await;
                GraphQLBatchResponse::Single(resp)
            }
            Self::Batch(ref reqs) => {
                let resps = futures::future::join_all(
                    reqs.iter().map(|req| req.execute(root_node, context)),
                )
                .await;
                GraphQLBatchResponse::Batch(resps)
            }
        }
    }

    /// The operation names of the request.
    pub fn operation_names(&self) -> Vec<Option<&str>> {
        match self {
            Self::Single(req) => vec![req.operation_name()],
            Self::Batch(reqs) => reqs.iter().map(|req| req.operation_name()).collect(),
        }
    }
}

/// Simple wrapper around the result (GraphQLResponse) from executing a GraphQLBatchRequest
///
/// This struct implements Serialize, so you can simply serialize this
/// to JSON and send it over the wire. Use the `is_ok` method to determine
/// whether to send a 200 or 400 HTTP status code.
#[derive(Serialize)]
#[serde(untagged)]
pub enum GraphQLBatchResponse<'a, S = DefaultScalarValue>
where
    S: ScalarValue,
{
    /// Result of a single operation in a GraphQL request.
    Single(GraphQLResponse<'a, S>),
    /// Result of a batch operation in a GraphQL request.
    Batch(Vec<GraphQLResponse<'a, S>>),
}

impl<'a, S> GraphQLBatchResponse<'a, S>
where
    S: ScalarValue,
{
    /// Returns whether all the `GraphQLResponse`s in this operation are ok;
    /// you can use it to determine whether to send a 200 or 400 HTTP status code.
    pub fn is_ok(&self) -> bool {
        match self {
            Self::Single(resp) => resp.is_ok(),
            Self::Batch(resps) => resps.iter().all(GraphQLResponse::is_ok),
        }
    }
}

#[cfg(any(test, feature = "expose-test-schema"))]
#[allow(missing_docs)]
pub mod tests {
    use serde_json::{self, Value as Json};

    /// Normalized response content we expect to get back from
    /// the http framework integration we are testing.
    #[derive(Debug)]
    pub struct TestResponse {
        pub status_code: i32,
        pub body: Option<String>,
        pub content_type: String,
    }

    /// Normalized way to make requests to the http framework
    /// integration we are testing.
    pub trait HTTPIntegration {
        fn get(&self, url: &str) -> TestResponse;
        fn post(&self, url: &str, body: &str) -> TestResponse;
    }

    #[allow(missing_docs)]
    pub fn run_http_test_suite<T: HTTPIntegration>(integration: &T) {
        println!("Running HTTP Test suite for integration");

        println!(" - test_simple_get");
        test_simple_get(integration);

        println!(" - test_encoded_get");
        test_encoded_get(integration);

        println!(" - test_get_with_variables");
        test_get_with_variables(integration);

        println!(" - test_simple_post");
        test_simple_post(integration);

        println!(" - test_batched_post");
        test_batched_post(integration);

        println!(" - test_empty_batched_post");
        test_empty_batched_post(integration);

        println!(" - test_invalid_json");
        test_invalid_json(integration);

        println!(" - test_invalid_field");
        test_invalid_field(integration);

        println!(" - test_duplicate_keys");
        test_duplicate_keys(integration);
    }

    fn unwrap_json_response(response: &TestResponse) -> Json {
        serde_json::from_str::<Json>(
            response
                .body
                .as_ref()
                .expect("No data returned from request"),
        )
        .expect("Could not parse JSON object")
    }

    fn test_simple_get<T: HTTPIntegration>(integration: &T) {
        // {hero{name}}
        let response = integration.get("/?query=%7Bhero%7Bname%7D%7D");

        assert_eq!(response.status_code, 200);
        assert_eq!(response.content_type.as_str(), "application/json");

        assert_eq!(
            unwrap_json_response(&response),
            serde_json::from_str::<Json>(r#"{"data": {"hero": {"name": "R2-D2"}}}"#)
                .expect("Invalid JSON constant in test")
        );
    }

    fn test_encoded_get<T: HTTPIntegration>(integration: &T) {
        // query { human(id: "1000") { id, name, appearsIn, homePlanet } }
        let response = integration.get(
            "/?query=query%20%7B%20human(id%3A%20%221000%22)%20%7B%20id%2C%20name%2C%20appearsIn%2C%20homePlanet%20%7D%20%7D");

        assert_eq!(response.status_code, 200);
        assert_eq!(response.content_type.as_str(), "application/json");

        assert_eq!(
            unwrap_json_response(&response),
            serde_json::from_str::<Json>(
                r#"{
                    "data": {
                        "human": {
                            "appearsIn": [
                                "NEW_HOPE",
                                "EMPIRE",
                                "JEDI"
                            ],
                            "homePlanet": "Tatooine",
                            "name": "Luke Skywalker",
                            "id": "1000"
                        }
                    }
                }"#
            )
            .expect("Invalid JSON constant in test")
        );
    }

    fn test_get_with_variables<T: HTTPIntegration>(integration: &T) {
        // query($id: String!) { human(id: $id) { id, name, appearsIn, homePlanet } }
        // with variables = { "id": "1000" }
        let response = integration.get(
            "/?query=query(%24id%3A%20String!)%20%7B%20human(id%3A%20%24id)%20%7B%20id%2C%20name%2C%20appearsIn%2C%20homePlanet%20%7D%20%7D&variables=%7B%20%22id%22%3A%20%221000%22%20%7D");

        assert_eq!(response.status_code, 200);
        assert_eq!(response.content_type, "application/json");

        assert_eq!(
            unwrap_json_response(&response),
            serde_json::from_str::<Json>(
                r#"{
                    "data": {
                        "human": {
                            "appearsIn": [
                                "NEW_HOPE",
                                "EMPIRE",
                                "JEDI"
                            ],
                            "homePlanet": "Tatooine",
                            "name": "Luke Skywalker",
                            "id": "1000"
                        }
                    }
                }"#
            )
            .expect("Invalid JSON constant in test")
        );
    }

    fn test_simple_post<T: HTTPIntegration>(integration: &T) {
        let response = integration.post("/", r#"{"query": "{hero{name}}"}"#);

        assert_eq!(response.status_code, 200);
        assert_eq!(response.content_type, "application/json");

        assert_eq!(
            unwrap_json_response(&response),
            serde_json::from_str::<Json>(r#"{"data": {"hero": {"name": "R2-D2"}}}"#)
                .expect("Invalid JSON constant in test")
        );
    }

    fn test_batched_post<T: HTTPIntegration>(integration: &T) {
        let response = integration.post(
            "/",
            r#"[{"query": "{hero{name}}"}, {"query": "{hero{name}}"}]"#,
        );

        assert_eq!(response.status_code, 200);
        assert_eq!(response.content_type, "application/json");

        assert_eq!(
            unwrap_json_response(&response),
            serde_json::from_str::<Json>(
                r#"[{"data": {"hero": {"name": "R2-D2"}}}, {"data": {"hero": {"name": "R2-D2"}}}]"#
            )
            .expect("Invalid JSON constant in test")
        );
    }

    fn test_empty_batched_post<T: HTTPIntegration>(integration: &T) {
        let response = integration.post("/", "[]");
        assert_eq!(response.status_code, 400);
    }

    fn test_invalid_json<T: HTTPIntegration>(integration: &T) {
        let response = integration.get("/?query=blah");
        assert_eq!(response.status_code, 400);
        let response = integration.post("/", r#"blah"#);
        assert_eq!(response.status_code, 400);
    }

    fn test_invalid_field<T: HTTPIntegration>(integration: &T) {
        // {hero{blah}}
        let response = integration.get("/?query=%7Bhero%7Bblah%7D%7D");
        assert_eq!(response.status_code, 400);
        let response = integration.post("/", r#"{"query": "{hero{blah}}"}"#);
        assert_eq!(response.status_code, 400);
    }

    fn test_duplicate_keys<T: HTTPIntegration>(integration: &T) {
        // {hero{name}}
        let response = integration.get("/?query=%7B%22query%22%3A%20%22%7Bhero%7Bname%7D%7D%22%2C%20%22query%22%3A%20%22%7Bhero%7Bname%7D%7D%22%7D");
        assert_eq!(response.status_code, 400);
        let response = integration.post(
            "/",
            r#"
            {"query": "{hero{name}}", "query": "{hero{name}}"}
            "#,
        );
        assert_eq!(response.status_code, 400);
    }
}
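// Shape sketch for `GraphQLBatchRequest`'s untagged deserialization: a JSON
// object parses as `Single`, a non-empty JSON array as `Batch`, and `[]` is
// rejected by `deserialize_non_empty_vec`. Assumes the `serde_json`
// dev-dependency already used by the tests above.
#[cfg(test)]
mod batch_shape_sketch {
    use super::GraphQLBatchRequest;
    use crate::value::DefaultScalarValue;

    #[test]
    fn object_array_and_empty_array() {
        type Req = GraphQLBatchRequest<DefaultScalarValue>;
        assert!(serde_json::from_str::<Req>(r#"{"query": "{hero{name}}"}"#).is_ok());
        assert!(serde_json::from_str::<Req>(r#"[{"query": "{hero{name}}"}]"#).is_ok());
        assert!(serde_json::from_str::<Req>("[]").is_err());
    }
}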
33.919355
189
0.573995
cc6f8f2185dd382e314992ffe5ee3a134ce422d6
18,594
// DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY: // // ucd-generate property-values tmp/ucd-11.0.0/ --include gc,script,scx,age // // ucd-generate is available on crates.io. pub const PROPERTY_VALUES: &'static [(&'static str, &'static [(&'static str, &'static str)])] = &[ ("Age", &[("1.1", "V1_1"), ("10.0", "V10_0"), ("11.0", "V11_0"), ("2.0", "V2_0"), ("2.1", "V2_1"), ("3.0", "V3_0"), ("3.1", "V3_1"), ("3.2", "V3_2"), ("4.0", "V4_0"), ("4.1", "V4_1"), ("5.0", "V5_0"), ("5.1", "V5_1"), ("5.2", "V5_2"), ("6.0", "V6_0"), ("6.1", "V6_1"), ("6.2", "V6_2"), ("6.3", "V6_3"), ("7.0", "V7_0"), ("8.0", "V8_0"), ("9.0", "V9_0"), ("na", "Unassigned"), ("unassigned", "Unassigned"), ("v100", "V10_0"), ("v11", "V1_1"), ("v110", "V11_0"), ("v20", "V2_0"), ("v21", "V2_1"), ("v30", "V3_0"), ("v31", "V3_1"), ("v32", "V3_2"), ("v40", "V4_0"), ("v41", "V4_1"), ("v50", "V5_0"), ("v51", "V5_1"), ("v52", "V5_2"), ("v60", "V6_0"), ("v61", "V6_1"), ("v62", "V6_2"), ("v63", "V6_3"), ("v70", "V7_0"), ("v80", "V8_0"), ("v90", "V9_0"), ]), ("General_Category", &[("c", "Other"), ("casedletter", "Cased_Letter"), ("cc", "Control"), ("cf", "Format"), ("closepunctuation", "Close_Punctuation"), ("cn", "Unassigned"), ("cntrl", "Control"), ("co", "Private_Use"), ("combiningmark", "Mark"), ("connectorpunctuation", "Connector_Punctuation"), ("control", "Control"), ("cs", "Surrogate"), ("currencysymbol", "Currency_Symbol"), ("dashpunctuation", "Dash_Punctuation"), ("decimalnumber", "Decimal_Number"), ("digit", "Decimal_Number"), ("enclosingmark", "Enclosing_Mark"), ("finalpunctuation", "Final_Punctuation"), ("format", "Format"), ("initialpunctuation", "Initial_Punctuation"), ("l", "Letter"), ("lc", "Cased_Letter"), ("letter", "Letter"), ("letternumber", "Letter_Number"), ("lineseparator", "Line_Separator"), ("ll", "Lowercase_Letter"), ("lm", "Modifier_Letter"), ("lo", "Other_Letter"), ("lowercaseletter", "Lowercase_Letter"), ("lt", "Titlecase_Letter"), ("lu", "Uppercase_Letter"), ("m", "Mark"), ("mark", "Mark"), ("mathsymbol", "Math_Symbol"), ("mc", "Spacing_Mark"), ("me", "Enclosing_Mark"), ("mn", "Nonspacing_Mark"), ("modifierletter", "Modifier_Letter"), ("modifiersymbol", "Modifier_Symbol"), ("n", "Number"), ("nd", "Decimal_Number"), ("nl", "Letter_Number"), ("no", "Other_Number"), ("nonspacingmark", "Nonspacing_Mark"), ("number", "Number"), ("openpunctuation", "Open_Punctuation"), ("other", "Other"), ("otherletter", "Other_Letter"), ("othernumber", "Other_Number"), ("otherpunctuation", "Other_Punctuation"), ("othersymbol", "Other_Symbol"), ("p", "Punctuation"), ("paragraphseparator", "Paragraph_Separator"), ("pc", "Connector_Punctuation"), ("pd", "Dash_Punctuation"), ("pe", "Close_Punctuation"), ("pf", "Final_Punctuation"), ("pi", "Initial_Punctuation"), ("po", "Other_Punctuation"), ("privateuse", "Private_Use"), ("ps", "Open_Punctuation"), ("punct", "Punctuation"), ("punctuation", "Punctuation"), ("s", "Symbol"), ("sc", "Currency_Symbol"), ("separator", "Separator"), ("sk", "Modifier_Symbol"), ("sm", "Math_Symbol"), ("so", "Other_Symbol"), ("spaceseparator", "Space_Separator"), ("spacingmark", "Spacing_Mark"), ("surrogate", "Surrogate"), ("symbol", "Symbol"), ("titlecaseletter", "Titlecase_Letter"), ("unassigned", "Unassigned"), ("uppercaseletter", "Uppercase_Letter"), ("z", "Separator"), ("zl", "Line_Separator"), ("zp", "Paragraph_Separator"), ("zs", "Space_Separator"), ]), ("Script", &[("adlam", "Adlam"), ("adlm", "Adlam"), ("aghb", "Caucasian_Albanian"), ("ahom", "Ahom"), ("anatolianhieroglyphs", 
"Anatolian_Hieroglyphs"), ("arab", "Arabic"), ("arabic", "Arabic"), ("armenian", "Armenian"), ("armi", "Imperial_Aramaic"), ("armn", "Armenian"), ("avestan", "Avestan"), ("avst", "Avestan"), ("bali", "Balinese"), ("balinese", "Balinese"), ("bamu", "Bamum"), ("bamum", "Bamum"), ("bass", "Bassa_Vah"), ("bassavah", "Bassa_Vah"), ("batak", "Batak"), ("batk", "Batak"), ("beng", "Bengali"), ("bengali", "Bengali"), ("bhaiksuki", "Bhaiksuki"), ("bhks", "Bhaiksuki"), ("bopo", "Bopomofo"), ("bopomofo", "Bopomofo"), ("brah", "Brahmi"), ("brahmi", "Brahmi"), ("brai", "Braille"), ("braille", "Braille"), ("bugi", "Buginese"), ("buginese", "Buginese"), ("buhd", "Buhid"), ("buhid", "Buhid"), ("cakm", "Chakma"), ("canadianaboriginal", "Canadian_Aboriginal"), ("cans", "Canadian_Aboriginal"), ("cari", "Carian"), ("carian", "Carian"), ("caucasianalbanian", "Caucasian_Albanian"), ("chakma", "Chakma"), ("cham", "Cham"), ("cher", "Cherokee"), ("cherokee", "Cherokee"), ("common", "Common"), ("copt", "Coptic"), ("coptic", "Coptic"), ("cprt", "Cypriot"), ("cuneiform", "Cuneiform"), ("cypriot", "Cypriot"), ("cyrillic", "Cyrillic"), ("cyrl", "Cyrillic"), ("deseret", "Deseret"), ("deva", "Devanagari"), ("devanagari", "Devanagari"), ("dogr", "Dogra"), ("dogra", "Dogra"), ("dsrt", "Deseret"), ("dupl", "Duployan"), ("duployan", "Duployan"), ("egyp", "Egyptian_Hieroglyphs"), ("egyptianhieroglyphs", "Egyptian_Hieroglyphs"), ("elba", "Elbasan"), ("elbasan", "Elbasan"), ("ethi", "Ethiopic"), ("ethiopic", "Ethiopic"), ("geor", "Georgian"), ("georgian", "Georgian"), ("glag", "Glagolitic"), ("glagolitic", "Glagolitic"), ("gong", "Gunjala_Gondi"), ("gonm", "Masaram_Gondi"), ("goth", "Gothic"), ("gothic", "Gothic"), ("gran", "Grantha"), ("grantha", "Grantha"), ("greek", "Greek"), ("grek", "Greek"), ("gujarati", "Gujarati"), ("gujr", "Gujarati"), ("gunjalagondi", "Gunjala_Gondi"), ("gurmukhi", "Gurmukhi"), ("guru", "Gurmukhi"), ("han", "Han"), ("hang", "Hangul"), ("hangul", "Hangul"), ("hani", "Han"), ("hanifirohingya", "Hanifi_Rohingya"), ("hano", "Hanunoo"), ("hanunoo", "Hanunoo"), ("hatr", "Hatran"), ("hatran", "Hatran"), ("hebr", "Hebrew"), ("hebrew", "Hebrew"), ("hira", "Hiragana"), ("hiragana", "Hiragana"), ("hluw", "Anatolian_Hieroglyphs"), ("hmng", "Pahawh_Hmong"), ("hrkt", "Katakana_Or_Hiragana"), ("hung", "Old_Hungarian"), ("imperialaramaic", "Imperial_Aramaic"), ("inherited", "Inherited"), ("inscriptionalpahlavi", "Inscriptional_Pahlavi"), ("inscriptionalparthian", "Inscriptional_Parthian"), ("ital", "Old_Italic"), ("java", "Javanese"), ("javanese", "Javanese"), ("kaithi", "Kaithi"), ("kali", "Kayah_Li"), ("kana", "Katakana"), ("kannada", "Kannada"), ("katakana", "Katakana"), ("katakanaorhiragana", "Katakana_Or_Hiragana"), ("kayahli", "Kayah_Li"), ("khar", "Kharoshthi"), ("kharoshthi", "Kharoshthi"), ("khmer", "Khmer"), ("khmr", "Khmer"), ("khoj", "Khojki"), ("khojki", "Khojki"), ("khudawadi", "Khudawadi"), ("knda", "Kannada"), ("kthi", "Kaithi"), ("lana", "Tai_Tham"), ("lao", "Lao"), ("laoo", "Lao"), ("latin", "Latin"), ("latn", "Latin"), ("lepc", "Lepcha"), ("lepcha", "Lepcha"), ("limb", "Limbu"), ("limbu", "Limbu"), ("lina", "Linear_A"), ("linb", "Linear_B"), ("lineara", "Linear_A"), ("linearb", "Linear_B"), ("lisu", "Lisu"), ("lyci", "Lycian"), ("lycian", "Lycian"), ("lydi", "Lydian"), ("lydian", "Lydian"), ("mahajani", "Mahajani"), ("mahj", "Mahajani"), ("maka", "Makasar"), ("makasar", "Makasar"), ("malayalam", "Malayalam"), ("mand", "Mandaic"), ("mandaic", "Mandaic"), ("mani", "Manichaean"), ("manichaean", 
"Manichaean"), ("marc", "Marchen"), ("marchen", "Marchen"), ("masaramgondi", "Masaram_Gondi"), ("medefaidrin", "Medefaidrin"), ("medf", "Medefaidrin"), ("meeteimayek", "Meetei_Mayek"), ("mend", "Mende_Kikakui"), ("mendekikakui", "Mende_Kikakui"), ("merc", "Meroitic_Cursive"), ("mero", "Meroitic_Hieroglyphs"), ("meroiticcursive", "Meroitic_Cursive"), ("meroitichieroglyphs", "Meroitic_Hieroglyphs"), ("miao", "Miao"), ("mlym", "Malayalam"), ("modi", "Modi"), ("mong", "Mongolian"), ("mongolian", "Mongolian"), ("mro", "Mro"), ("mroo", "Mro"), ("mtei", "Meetei_Mayek"), ("mult", "Multani"), ("multani", "Multani"), ("myanmar", "Myanmar"), ("mymr", "Myanmar"), ("nabataean", "Nabataean"), ("narb", "Old_North_Arabian"), ("nbat", "Nabataean"), ("newa", "Newa"), ("newtailue", "New_Tai_Lue"), ("nko", "Nko"), ("nkoo", "Nko"), ("nshu", "Nushu"), ("nushu", "Nushu"), ("ogam", "Ogham"), ("ogham", "Ogham"), ("olchiki", "Ol_Chiki"), ("olck", "Ol_Chiki"), ("oldhungarian", "Old_Hungarian"), ("olditalic", "Old_Italic"), ("oldnortharabian", "Old_North_Arabian"), ("oldpermic", "Old_Permic"), ("oldpersian", "Old_Persian"), ("oldsogdian", "Old_Sogdian"), ("oldsoutharabian", "Old_South_Arabian"), ("oldturkic", "Old_Turkic"), ("oriya", "Oriya"), ("orkh", "Old_Turkic"), ("orya", "Oriya"), ("osage", "Osage"), ("osge", "Osage"), ("osma", "Osmanya"), ("osmanya", "Osmanya"), ("pahawhhmong", "Pahawh_Hmong"), ("palm", "Palmyrene"), ("palmyrene", "Palmyrene"), ("pauc", "Pau_Cin_Hau"), ("paucinhau", "Pau_Cin_Hau"), ("perm", "Old_Permic"), ("phag", "Phags_Pa"), ("phagspa", "Phags_Pa"), ("phli", "Inscriptional_Pahlavi"), ("phlp", "Psalter_Pahlavi"), ("phnx", "Phoenician"), ("phoenician", "Phoenician"), ("plrd", "Miao"), ("prti", "Inscriptional_Parthian"), ("psalterpahlavi", "Psalter_Pahlavi"), ("qaac", "Coptic"), ("qaai", "Inherited"), ("rejang", "Rejang"), ("rjng", "Rejang"), ("rohg", "Hanifi_Rohingya"), ("runic", "Runic"), ("runr", "Runic"), ("samaritan", "Samaritan"), ("samr", "Samaritan"), ("sarb", "Old_South_Arabian"), ("saur", "Saurashtra"), ("saurashtra", "Saurashtra"), ("sgnw", "SignWriting"), ("sharada", "Sharada"), ("shavian", "Shavian"), ("shaw", "Shavian"), ("shrd", "Sharada"), ("sidd", "Siddham"), ("siddham", "Siddham"), ("signwriting", "SignWriting"), ("sind", "Khudawadi"), ("sinh", "Sinhala"), ("sinhala", "Sinhala"), ("sogd", "Sogdian"), ("sogdian", "Sogdian"), ("sogo", "Old_Sogdian"), ("sora", "Sora_Sompeng"), ("sorasompeng", "Sora_Sompeng"), ("soyo", "Soyombo"), ("soyombo", "Soyombo"), ("sund", "Sundanese"), ("sundanese", "Sundanese"), ("sylo", "Syloti_Nagri"), ("sylotinagri", "Syloti_Nagri"), ("syrc", "Syriac"), ("syriac", "Syriac"), ("tagalog", "Tagalog"), ("tagb", "Tagbanwa"), ("tagbanwa", "Tagbanwa"), ("taile", "Tai_Le"), ("taitham", "Tai_Tham"), ("taiviet", "Tai_Viet"), ("takr", "Takri"), ("takri", "Takri"), ("tale", "Tai_Le"), ("talu", "New_Tai_Lue"), ("tamil", "Tamil"), ("taml", "Tamil"), ("tang", "Tangut"), ("tangut", "Tangut"), ("tavt", "Tai_Viet"), ("telu", "Telugu"), ("telugu", "Telugu"), ("tfng", "Tifinagh"), ("tglg", "Tagalog"), ("thaa", "Thaana"), ("thaana", "Thaana"), ("thai", "Thai"), ("tibetan", "Tibetan"), ("tibt", "Tibetan"), ("tifinagh", "Tifinagh"), ("tirh", "Tirhuta"), ("tirhuta", "Tirhuta"), ("ugar", "Ugaritic"), ("ugaritic", "Ugaritic"), ("unknown", "Unknown"), ("vai", "Vai"), ("vaii", "Vai"), ("wara", "Warang_Citi"), ("warangciti", "Warang_Citi"), ("xpeo", "Old_Persian"), ("xsux", "Cuneiform"), ("yi", "Yi"), ("yiii", "Yi"), ("zanabazarsquare", "Zanabazar_Square"), ("zanb", 
"Zanabazar_Square"), ("zinh", "Inherited"), ("zyyy", "Common"), ("zzzz", "Unknown"), ]), ("Script_Extensions", &[("adlam", "Adlam"), ("adlm", "Adlam"), ("aghb", "Caucasian_Albanian"), ("ahom", "Ahom"), ("anatolianhieroglyphs", "Anatolian_Hieroglyphs"), ("arab", "Arabic"), ("arabic", "Arabic"), ("armenian", "Armenian"), ("armi", "Imperial_Aramaic"), ("armn", "Armenian"), ("avestan", "Avestan"), ("avst", "Avestan"), ("bali", "Balinese"), ("balinese", "Balinese"), ("bamu", "Bamum"), ("bamum", "Bamum"), ("bass", "Bassa_Vah"), ("bassavah", "Bassa_Vah"), ("batak", "Batak"), ("batk", "Batak"), ("beng", "Bengali"), ("bengali", "Bengali"), ("bhaiksuki", "Bhaiksuki"), ("bhks", "Bhaiksuki"), ("bopo", "Bopomofo"), ("bopomofo", "Bopomofo"), ("brah", "Brahmi"), ("brahmi", "Brahmi"), ("brai", "Braille"), ("braille", "Braille"), ("bugi", "Buginese"), ("buginese", "Buginese"), ("buhd", "Buhid"), ("buhid", "Buhid"), ("cakm", "Chakma"), ("canadianaboriginal", "Canadian_Aboriginal"), ("cans", "Canadian_Aboriginal"), ("cari", "Carian"), ("carian", "Carian"), ("caucasianalbanian", "Caucasian_Albanian"), ("chakma", "Chakma"), ("cham", "Cham"), ("cher", "Cherokee"), ("cherokee", "Cherokee"), ("common", "Common"), ("copt", "Coptic"), ("coptic", "Coptic"), ("cprt", "Cypriot"), ("cuneiform", "Cuneiform"), ("cypriot", "Cypriot"), ("cyrillic", "Cyrillic"), ("cyrl", "Cyrillic"), ("deseret", "Deseret"), ("deva", "Devanagari"), ("devanagari", "Devanagari"), ("dogr", "Dogra"), ("dogra", "Dogra"), ("dsrt", "Deseret"), ("dupl", "Duployan"), ("duployan", "Duployan"), ("egyp", "Egyptian_Hieroglyphs"), ("egyptianhieroglyphs", "Egyptian_Hieroglyphs"), ("elba", "Elbasan"), ("elbasan", "Elbasan"), ("ethi", "Ethiopic"), ("ethiopic", "Ethiopic"), ("geor", "Georgian"), ("georgian", "Georgian"), ("glag", "Glagolitic"), ("glagolitic", "Glagolitic"), ("gong", "Gunjala_Gondi"), ("gonm", "Masaram_Gondi"), ("goth", "Gothic"), ("gothic", "Gothic"), ("gran", "Grantha"), ("grantha", "Grantha"), ("greek", "Greek"), ("grek", "Greek"), ("gujarati", "Gujarati"), ("gujr", "Gujarati"), ("gunjalagondi", "Gunjala_Gondi"), ("gurmukhi", "Gurmukhi"), ("guru", "Gurmukhi"), ("han", "Han"), ("hang", "Hangul"), ("hangul", "Hangul"), ("hani", "Han"), ("hanifirohingya", "Hanifi_Rohingya"), ("hano", "Hanunoo"), ("hanunoo", "Hanunoo"), ("hatr", "Hatran"), ("hatran", "Hatran"), ("hebr", "Hebrew"), ("hebrew", "Hebrew"), ("hira", "Hiragana"), ("hiragana", "Hiragana"), ("hluw", "Anatolian_Hieroglyphs"), ("hmng", "Pahawh_Hmong"), ("hrkt", "Katakana_Or_Hiragana"), ("hung", "Old_Hungarian"), ("imperialaramaic", "Imperial_Aramaic"), ("inherited", "Inherited"), ("inscriptionalpahlavi", "Inscriptional_Pahlavi"), ("inscriptionalparthian", "Inscriptional_Parthian"), ("ital", "Old_Italic"), ("java", "Javanese"), ("javanese", "Javanese"), ("kaithi", "Kaithi"), ("kali", "Kayah_Li"), ("kana", "Katakana"), ("kannada", "Kannada"), ("katakana", "Katakana"), ("katakanaorhiragana", "Katakana_Or_Hiragana"), ("kayahli", "Kayah_Li"), ("khar", "Kharoshthi"), ("kharoshthi", "Kharoshthi"), ("khmer", "Khmer"), ("khmr", "Khmer"), ("khoj", "Khojki"), ("khojki", "Khojki"), ("khudawadi", "Khudawadi"), ("knda", "Kannada"), ("kthi", "Kaithi"), ("lana", "Tai_Tham"), ("lao", "Lao"), ("laoo", "Lao"), ("latin", "Latin"), ("latn", "Latin"), ("lepc", "Lepcha"), ("lepcha", "Lepcha"), ("limb", "Limbu"), ("limbu", "Limbu"), ("lina", "Linear_A"), ("linb", "Linear_B"), ("lineara", "Linear_A"), ("linearb", "Linear_B"), ("lisu", "Lisu"), ("lyci", "Lycian"), ("lycian", "Lycian"), ("lydi", "Lydian"), 
("lydian", "Lydian"), ("mahajani", "Mahajani"), ("mahj", "Mahajani"), ("maka", "Makasar"), ("makasar", "Makasar"), ("malayalam", "Malayalam"), ("mand", "Mandaic"), ("mandaic", "Mandaic"), ("mani", "Manichaean"), ("manichaean", "Manichaean"), ("marc", "Marchen"), ("marchen", "Marchen"), ("masaramgondi", "Masaram_Gondi"), ("medefaidrin", "Medefaidrin"), ("medf", "Medefaidrin"), ("meeteimayek", "Meetei_Mayek"), ("mend", "Mende_Kikakui"), ("mendekikakui", "Mende_Kikakui"), ("merc", "Meroitic_Cursive"), ("mero", "Meroitic_Hieroglyphs"), ("meroiticcursive", "Meroitic_Cursive"), ("meroitichieroglyphs", "Meroitic_Hieroglyphs"), ("miao", "Miao"), ("mlym", "Malayalam"), ("modi", "Modi"), ("mong", "Mongolian"), ("mongolian", "Mongolian"), ("mro", "Mro"), ("mroo", "Mro"), ("mtei", "Meetei_Mayek"), ("mult", "Multani"), ("multani", "Multani"), ("myanmar", "Myanmar"), ("mymr", "Myanmar"), ("nabataean", "Nabataean"), ("narb", "Old_North_Arabian"), ("nbat", "Nabataean"), ("newa", "Newa"), ("newtailue", "New_Tai_Lue"), ("nko", "Nko"), ("nkoo", "Nko"), ("nshu", "Nushu"), ("nushu", "Nushu"), ("ogam", "Ogham"), ("ogham", "Ogham"), ("olchiki", "Ol_Chiki"), ("olck", "Ol_Chiki"), ("oldhungarian", "Old_Hungarian"), ("olditalic", "Old_Italic"), ("oldnortharabian", "Old_North_Arabian"), ("oldpermic", "Old_Permic"), ("oldpersian", "Old_Persian"), ("oldsogdian", "Old_Sogdian"), ("oldsoutharabian", "Old_South_Arabian"), ("oldturkic", "Old_Turkic"), ("oriya", "Oriya"), ("orkh", "Old_Turkic"), ("orya", "Oriya"), ("osage", "Osage"), ("osge", "Osage"), ("osma", "Osmanya"), ("osmanya", "Osmanya"), ("pahawhhmong", "Pahawh_Hmong"), ("palm", "Palmyrene"), ("palmyrene", "Palmyrene"), ("pauc", "Pau_Cin_Hau"), ("paucinhau", "Pau_Cin_Hau"), ("perm", "Old_Permic"), ("phag", "Phags_Pa"), ("phagspa", "Phags_Pa"), ("phli", "Inscriptional_Pahlavi"), ("phlp", "Psalter_Pahlavi"), ("phnx", "Phoenician"), ("phoenician", "Phoenician"), ("plrd", "Miao"), ("prti", "Inscriptional_Parthian"), ("psalterpahlavi", "Psalter_Pahlavi"), ("qaac", "Coptic"), ("qaai", "Inherited"), ("rejang", "Rejang"), ("rjng", "Rejang"), ("rohg", "Hanifi_Rohingya"), ("runic", "Runic"), ("runr", "Runic"), ("samaritan", "Samaritan"), ("samr", "Samaritan"), ("sarb", "Old_South_Arabian"), ("saur", "Saurashtra"), ("saurashtra", "Saurashtra"), ("sgnw", "SignWriting"), ("sharada", "Sharada"), ("shavian", "Shavian"), ("shaw", "Shavian"), ("shrd", "Sharada"), ("sidd", "Siddham"), ("siddham", "Siddham"), ("signwriting", "SignWriting"), ("sind", "Khudawadi"), ("sinh", "Sinhala"), ("sinhala", "Sinhala"), ("sogd", "Sogdian"), ("sogdian", "Sogdian"), ("sogo", "Old_Sogdian"), ("sora", "Sora_Sompeng"), ("sorasompeng", "Sora_Sompeng"), ("soyo", "Soyombo"), ("soyombo", "Soyombo"), ("sund", "Sundanese"), ("sundanese", "Sundanese"), ("sylo", "Syloti_Nagri"), ("sylotinagri", "Syloti_Nagri"), ("syrc", "Syriac"), ("syriac", "Syriac"), ("tagalog", "Tagalog"), ("tagb", "Tagbanwa"), ("tagbanwa", "Tagbanwa"), ("taile", "Tai_Le"), ("taitham", "Tai_Tham"), ("taiviet", "Tai_Viet"), ("takr", "Takri"), ("takri", "Takri"), ("tale", "Tai_Le"), ("talu", "New_Tai_Lue"), ("tamil", "Tamil"), ("taml", "Tamil"), ("tang", "Tangut"), ("tangut", "Tangut"), ("tavt", "Tai_Viet"), ("telu", "Telugu"), ("telugu", "Telugu"), ("tfng", "Tifinagh"), ("tglg", "Tagalog"), ("thaa", "Thaana"), ("thaana", "Thaana"), ("thai", "Thai"), ("tibetan", "Tibetan"), ("tibt", "Tibetan"), ("tifinagh", "Tifinagh"), ("tirh", "Tirhuta"), ("tirhuta", "Tirhuta"), ("ugar", "Ugaritic"), ("ugaritic", "Ugaritic"), ("unknown", "Unknown"), 
("vai", "Vai"), ("vaii", "Vai"), ("wara", "Warang_Citi"), ("warangciti", "Warang_Citi"), ("xpeo", "Old_Persian"), ("xsux", "Cuneiform"), ("yi", "Yi"), ("yiii", "Yi"), ("zanabazarsquare", "Zanabazar_Square"), ("zanb", "Zanabazar_Square"), ("zinh", "Inherited"), ("zyyy", "Common"), ("zzzz", "Unknown"), ]), ];
64.117241
98
0.577444
0a56f4fad6d15ee69b5bedb31a46912862c7f28b
3,811
//! Platform-dependent platform abstraction.
//!
//! The `std::sys` module is the abstracted interface through which
//! `std` talks to the underlying operating system. It has different
//! implementations for different operating system families, today
//! just Unix and Windows, and initial support for Redox.
//!
//! The centralization of platform-specific code in this module is
//! enforced by the "platform abstraction layer" tidy script in
//! `tools/tidy/src/pal.rs`.
//!
//! This module is closely related to the platform-independent system
//! integration code in `std::sys_common`. See that module's
//! documentation for details.
//!
//! In the future it would be desirable for the independent
//! implementations of this module to be extracted to their own crates
//! that `std` can link to, thus enabling their implementation
//! out-of-tree via crate replacement. Though due to the complex
//! inter-dependencies within `std` that will be a challenging goal to
//! achieve.

#![allow(missing_debug_implementations)]

cfg_if! {
    if #[cfg(unix)] {
        mod unix;
        pub use self::unix::*;
    } else if #[cfg(windows)] {
        mod windows;
        pub use self::windows::*;
    } else if #[cfg(target_os = "cloudabi")] {
        mod cloudabi;
        pub use self::cloudabi::*;
    } else if #[cfg(target_os = "redox")] {
        mod redox;
        pub use self::redox::*;
    } else if #[cfg(target_arch = "wasm32")] {
        mod wasm;
        pub use self::wasm::*;
    } else if #[cfg(all(target_vendor = "fortanix", target_env = "sgx"))] {
        mod sgx;
        pub use self::sgx::*;
    } else {
        compile_error!("libstd doesn't compile for this platform yet");
    }
}

// Import essential modules from both platforms when documenting. These are
// then later used in the `std::os` module when documenting, for example,
// Windows when we're compiling for Linux.

#[cfg(rustdoc)]
cfg_if! {
    if #[cfg(any(unix, target_os = "redox"))] {
        // On unix we'll document what's already available
        #[stable(feature = "rust1", since = "1.0.0")]
        pub use self::ext as unix_ext;
    } else if #[cfg(any(target_os = "cloudabi",
                        target_arch = "wasm32",
                        all(target_vendor = "fortanix", target_env = "sgx")))] {
        // On CloudABI and wasm right now the module below doesn't compile
        // (missing things in `libc` which is empty) so just omit everything
        // with an empty module
        #[unstable(issue = "0", feature = "std_internals")]
        #[allow(missing_docs)]
        pub mod unix_ext {}
    } else {
        // On other platforms like Windows document the bare bones of unix
        use os::linux as platform;
        #[path = "unix/ext/mod.rs"]
        pub mod unix_ext;
    }
}

#[cfg(rustdoc)]
cfg_if! {
    if #[cfg(windows)] {
        // On windows we'll just be documenting what's already available
        #[allow(missing_docs)]
        #[stable(feature = "rust1", since = "1.0.0")]
        pub use self::ext as windows_ext;
    } else if #[cfg(any(target_os = "cloudabi",
                        target_arch = "wasm32",
                        all(target_vendor = "fortanix", target_env = "sgx")))] {
        // On CloudABI and wasm right now the shim below doesn't compile, so
        // just omit it
        #[unstable(issue = "0", feature = "std_internals")]
        #[allow(missing_docs)]
        pub mod windows_ext {}
    } else {
        // On all other platforms (aka linux/osx/etc) then pull in a "minimal"
        // amount of windows goop which ends up compiling
        #[macro_use]
        #[path = "windows/compat.rs"]
        mod compat;

        #[path = "windows/c.rs"]
        mod c;

        #[path = "windows/ext/mod.rs"]
        pub mod windows_ext;
    }
}
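// Minimal sketch of the `cfg_if!` dispatch pattern used above, shown outside
// libstd using the `cfg-if` crate from crates.io (a hypothetical standalone
// example, kept in comment form since it doesn't belong in this module):
//
// cfg_if::cfg_if! {
//     if #[cfg(unix)] {
//         fn platform_name() -> &'static str { "unix" }
//     } else if #[cfg(windows)] {
//         fn platform_name() -> &'static str { "windows" }
//     } else {
//         compile_error!("unsupported platform");
//     }
// }
//
// Exactly one branch survives compilation, so callers of `platform_name`
// never pay a runtime dispatch cost.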
36.295238
80
0.607977
0e77cad9e206cb95de0c9239d567429fa4f7b348
174
pub enum List<T> {
    Nil,
    Cons(T, List<T>)
}
// error: illegal recursive enum type;
// wrap the inner value in a box
// to make it representable [E0072]
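// The fix the E0072 message suggests: box the recursive field so the enum
// has a finite, known size (the box adds one pointer-sized indirection).
// `BoxedList` is an illustrative name, kept distinct from the erroneous
// definition above.
pub enum BoxedList<T> {
    Nil,
    Cons(T, Box<BoxedList<T>>),
}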
19.333333
42
0.591954
e8fda1ba76a4564dfda96842a968beb7e311851c
4,074
use crate::worldgen::voronoi::VoronoiGrid;
use crate::worldgen::{BiomeGenerator, ChunkBiomes};
use feather_core::{Biome, ChunkPosition};
use num_traits::FromPrimitive;
use rand::{Rng, SeedableRng};
use rand_xorshift::XorShiftRng;
use strum::EnumCount;

/// Biome grid generator based on distorted Voronoi noise.
#[derive(Default)]
pub struct DistortedVoronoiBiomeGenerator;

impl BiomeGenerator for DistortedVoronoiBiomeGenerator {
    fn generate_for_chunk(&self, chunk: ChunkPosition, seed: u64) -> ChunkBiomes {
        let mut voronoi = VoronoiGrid::new(384, seed);

        let mut biomes = ChunkBiomes::from_array([Biome::Plains; 16 * 16]); // Will be overridden

        // Noise is used to distort each coordinate.
        /*let x_noise =
            NoiseBuilder::gradient_2d_offset(chunk.x as f32 * 16.0, 16, chunk.z as f32 * 16.0, 16)
                .with_seed(seed as i32 + 1)
                .generate_scaled(-4.0, 4.0);
        let z_noise =
            NoiseBuilder::gradient_2d_offset(chunk.x as f32 * 16.0, 16, chunk.z as f32 * 16.0, 16)
                .with_seed(seed as i32 + 2)
                .generate_scaled(-4.0, 4.0);*/

        for x in 0..16 {
            for z in 0..16 {
                // Apply distortion to coordinate before passing to voronoi
                // generator.
                //let distort_x = x_noise[(z << 4) | x] as i32 * 8;
                //let distort_z = z_noise[(z << 4) | x] as i32 * 8;
                let distort_x = 0;
                let distort_z = 0;

                let (closest_x, closest_y) = voronoi.get(
                    (chunk.x * 16) + x as i32 + distort_x,
                    (chunk.z * 16) + z as i32 + distort_z,
                );

                // Shift around the closest_x and closest_y values
                // and deterministically select a biome based on the
                // computed value. Continue shifting the value until
                // a valid biome is computed.
                let combined = (i64::from(closest_x) << 32) | i64::from(closest_y);
                let mut rng = XorShiftRng::seed_from_u64(combined as u64);

                loop {
                    let shifted: u64 = rng.gen();

                    let biome = Biome::from_u64(shifted % Biome::count() as u64).unwrap();
                    if is_biome_allowed(biome) {
                        biomes.set_biome_at(x, z, biome);
                        break;
                    }
                }
            }
        }

        biomes
    }
}

/// Returns whether the given biome is allowed in the overworld.
fn is_biome_allowed(biome: Biome) -> bool {
    match biome {
        Biome::TheEnd
        | Biome::TheVoid
        | Biome::Nether
        | Biome::SmallEndIslands
        | Biome::EndBarrens
        | Biome::EndHighlands
        | Biome::EndMidlands => false,
        _ => true,
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_not_all_plains() {
        // Check that the `ChunkBiomes` was overridden correctly.
        let gen = DistortedVoronoiBiomeGenerator::default();

        let chunk = ChunkPosition::new(5433, 132);

        let biomes = gen.generate_for_chunk(chunk, 8344);

        println!("{:?}", biomes);

        let mut num_plains = 0;
        for x in 0..16 {
            for z in 0..16 {
                if biomes.biome_at(x, z) == Biome::Plains {
                    num_plains += 1;
                }
            }
        }

        assert_ne!(num_plains, 16 * 16);
    }

    #[test]
    fn test_deterministic() {
        // Check that the result is always deterministic.
        let gen = DistortedVoronoiBiomeGenerator::default();

        let chunk = ChunkPosition::new(0, 0);

        let seed = 52;
        let first = gen.generate_for_chunk(chunk, seed);

        for _ in 0..5 {
            let next = gen.generate_for_chunk(chunk, seed);

            for x in 0..16 {
                for z in 0..16 {
                    assert_eq!(first.biome_at(x, z), next.biome_at(x, z));
                }
            }
        }
    }
}
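// Determinism sketch: the biome choice above is reproducible because the RNG
// is reseeded from the packed Voronoi cell coordinates, so the same cell
// always yields the same stream. Standalone check with arbitrary values.
#[cfg(test)]
mod seeding_sketch {
    use rand::{Rng, SeedableRng};
    use rand_xorshift::XorShiftRng;

    #[test]
    fn same_seed_same_stream() {
        let combined = (i64::from(12) << 32) | i64::from(34);
        let a: u64 = XorShiftRng::seed_from_u64(combined as u64).gen();
        let b: u64 = XorShiftRng::seed_from_u64(combined as u64).gen();
        assert_eq!(a, b);
    }
}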
31.338462
98
0.531419
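The generator above gets its determinism by packing the two Voronoi cell coordinates into a single 64-bit seed and drawing from a seeded RNG until an allowed biome comes up. A self-contained sketch of that trick; a hand-rolled xorshift64 step stands in for `XorShiftRng`, and the biome list is invented for illustration:

const BIOMES: &[&str] = &["plains", "forest", "desert", "nether"];

fn pick_biome(closest_x: i32, closest_y: i32) -> &'static str {
    // Pack both cell coordinates into one seed, like `(x << 32) | y` above
    // (the low half is masked here to avoid sign-extension clobbering).
    let combined = ((closest_x as i64) << 32) | (closest_y as i64 & 0xffff_ffff);
    let mut state = combined as u64;
    loop {
        // One xorshift64 step: a cheap deterministic reshuffle of the seed.
        state ^= state << 13;
        state ^= state >> 7;
        state ^= state << 17;
        let biome = BIOMES[(state % BIOMES.len() as u64) as usize];
        if biome != "nether" {
            // Skip disallowed biomes, mirroring is_biome_allowed.
            return biome;
        }
    }
}

fn main() {
    // The same cell always yields the same biome.
    assert_eq!(pick_biome(3, 7), pick_biome(3, 7));
    println!("{}", pick_biome(3, 7));
}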
561ef65f8e33f5773b7745388ec81ce6f6050444
2,322
use otspec::types::*;
use otspec::Deserializer;
use otspec_macros::tables;

tables!(
    AxisValueMap {
        F2DOT14 fromCoordinate
        F2DOT14 toCoordinate
    }
    SegmentMap {
        Counted(AxisValueMap) axisValueMaps
    }
    avar {
        uint16 majorVersion
        uint16 minorVersion
        uint16 reserved
        Counted(SegmentMap) axisSegmentMaps
    }
);

#[cfg(test)]
mod tests {
    use otspec::ser;

    /* All numbers here carefully chosen to avoid OT rounding errors... */
    #[test]
    fn avar_axis_value_map_serde() {
        let v = super::AxisValueMap {
            fromCoordinate: 0.2999878,
            toCoordinate: 0.5,
        };
        let binary_avarmap = ser::to_bytes(&v).unwrap();
        let deserialized: super::AxisValueMap = otspec::de::from_bytes(&binary_avarmap).unwrap();
        assert_eq!(deserialized, v);
    }

    // #[test]
    // fn avar_ser() {
    //     let favar = super::avar {
    //         majorVersion: 1,
    //         minorVersion: 0,
    //         reserved: 0,
    //         axisSegmentMaps: vec![
    //             super::SegmentMap::new(vec![
    //                 (-1.0, -1.0),
    //                 (0.0, 0.0),
    //                 (0.125, 0.11444092),
    //                 (0.25, 0.23492432),
    //                 (0.5, 0.3554077),
    //                 (0.625, 0.5),
    //                 (0.75, 0.6566162),
    //                 (0.875, 0.8192749),
    //                 (1.0, 1.0),
    //             ]),
    //             super::SegmentMap::new(vec![(-1.0, -1.0), (0.0, 0.0), (1.0, 1.0)]),
    //         ],
    //     };
    //     let binary_avar = vec![
    //         0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x09, 0xc0, 0x00, 0xc0, 0x00,
    //         0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x07, 0x53, 0x10, 0x00, 0x0f, 0x09, 0x20, 0x00,
    //         0x16, 0xbf, 0x28, 0x00, 0x20, 0x00, 0x30, 0x00, 0x2a, 0x06, 0x38, 0x00, 0x34, 0x6f,
    //         0x40, 0x00, 0x40, 0x00, 0x00, 0x03, 0xc0, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00,
    //         0x40, 0x00, 0x40, 0x00,
    //     ];
    //     assert_eq!(ser::to_bytes(&favar).unwrap(), binary_avar);
    //     let deserialized: super::avar = otspec::de::from_bytes(&binary_avar).unwrap();
    //     assert_eq!(deserialized, favar);
    // }
}
32.25
98
0.488803
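F2DOT14 stores a value in [-2, 2) as a signed 16-bit integer scaled by 2^14, which is why the test above picks numbers that survive rounding. A standalone sketch of the encode/decode step; the helper names are mine, not the otspec API:

// Encode an f32 into F2DOT14 (signed 16 bits, 14 fractional bits).
fn to_f2dot14(v: f32) -> i16 {
    (v * 16384.0).round() as i16
}

fn from_f2dot14(raw: i16) -> f32 {
    raw as f32 / 16384.0
}

fn main() {
    // 0.2999878 is exactly 4915 / 16384, so it round-trips losslessly.
    let raw = to_f2dot14(0.2999878);
    assert_eq!(raw, 4915);
    assert_eq!(from_f2dot14(raw), 0.2999878);
}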
d707764b3a097d12ef693e224b3878ea28cf58c2
1,079
#[doc = "Reader of register _0_CMPB"] pub type R = crate::R<u32, super::_0_CMPB>; #[doc = "Writer for register _0_CMPB"] pub type W = crate::W<u32, super::_0_CMPB>; #[doc = "Register _0_CMPB `reset()`'s with value 0"] impl crate::ResetValue for super::_0_CMPB { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Reader of field `CMPB`"] pub type CMPB_R = crate::R<u16, u16>; #[doc = "Write proxy for field `CMPB`"] pub struct CMPB_W<'a> { w: &'a mut W, } impl<'a> CMPB_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u16) -> &'a mut W { self.w.bits = (self.w.bits & !0xffff) | ((value as u32) & 0xffff); self.w } } impl R { #[doc = "Bits 0:15 - Comparator B Value"] #[inline(always)] pub fn cmpb(&self) -> CMPB_R { CMPB_R::new((self.bits & 0xffff) as u16) } } impl W { #[doc = "Bits 0:15 - Comparator B Value"] #[inline(always)] pub fn cmpb(&mut self) -> CMPB_W { CMPB_W { w: self } } }
26.317073
74
0.565338
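The generated writer above updates a 16-bit field inside a 32-bit register with a mask-and-or: clear the field's bits, then OR in the new value. A minimal sketch of that read-modify-write step outside the svd2rust machinery; the function name is illustrative:

/// Insert a 16-bit field value into bits 0..=15 of a register word,
/// leaving the upper bits untouched.
fn set_cmpb(reg: u32, value: u16) -> u32 {
    (reg & !0xffff) | (value as u32 & 0xffff)
}

fn main() {
    let reg = 0xdead_0000;
    let reg = set_cmpb(reg, 0xbeef);
    assert_eq!(reg, 0xdead_beef);
    // Reading the field back mirrors CMPB_R::new((bits & 0xffff) as u16).
    assert_eq!((reg & 0xffff) as u16, 0xbeef);
}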
f5d24d0602e7c56e1ba1d702664b0bbef8410963
338
// tuple1.rs

fn add_mul(x: f64, y: f64) -> (f64,f64) {
    (x+y, x*y)
}

fn main() {
    let t = add_mul(2.0,10.0);

    // can debug print
    println!("t {:?}",t);

    // can 'index' the values
    println!("add {} mul {}",t.0,t.1);

    // can _extract_ values
    let (add,mul) = t;
    println!("add {} mul {}",add,mul);
}
16.9
41
0.473373
fc25004eda68d19f678e6783a4f802ce5c91121c
16,165
// This file is part of Substrate. // Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! # Scored Pool Pallet //! //! The pallet maintains a scored membership pool. Each entity in the //! pool can be attributed a `Score`. From this pool a set `Members` //! is constructed. This set contains the `MemberCount` highest //! scoring entities. Unscored entities are never part of `Members`. //! //! If an entity wants to be part of the pool a deposit is required. //! The deposit is returned when the entity withdraws or when it //! is removed by an entity with the appropriate authority. //! //! Every `Period` blocks the set of `Members` is refreshed from the //! highest scoring members in the pool and, no matter if changes //! occurred, `T::MembershipChanged::set_members_sorted` is invoked. //! On first load `T::MembershipInitialized::initialize_members` is //! invoked with the initial `Members` set. //! //! It is possible to withdraw candidacy/resign your membership at any //! time. If an entity is currently a member, this results in removal //! from the `Pool` and `Members`; the entity is immediately replaced //! by the next highest scoring candidate in the pool, if available. //! //! - [`Config`] //! - [`Call`] //! - [`Pallet`] //! //! ## Interface //! //! ### Public Functions //! //! - `submit_candidacy` - Submit candidacy to become a member. Requires a deposit. //! - `withdraw_candidacy` - Withdraw candidacy. Deposit is returned. //! - `score` - Attribute a quantitative score to an entity. //! - `kick` - Remove an entity from the pool and members. Deposit is returned. //! - `change_member_count` - Changes the amount of candidates taken into `Members`. //! //! ## Usage //! //! ``` //! use frame_support::{decl_module, dispatch}; //! use frame_system::ensure_signed; //! use pallet_scored_pool::{self as scored_pool}; //! //! pub trait Config: scored_pool::Config {} //! //! decl_module! { //! pub struct Module<T: Config> for enum Call where origin: T::Origin { //! #[weight = 0] //! pub fn candidate(origin) -> dispatch::DispatchResult { //! let who = ensure_signed(origin)?; //! //! let _ = <scored_pool::Pallet<T>>::submit_candidacy( //! T::Origin::from(Some(who.clone()).into()) //! ); //! Ok(()) //! } //! } //! } //! //! # fn main() { } //! ``` //! //! ## Dependencies //! //! This pallet depends on the [System pallet](../frame_system/index.html). // Ensure we're `no_std` when compiling for Wasm. 
#![cfg_attr(not(feature = "std"), no_std)] #[cfg(test)] mod mock; #[cfg(test)] mod tests; use codec::FullCodec; use frame_support::{ ensure, traits::{ChangeMembers, Currency, Get, InitializeMembers, ReservableCurrency}, }; pub use pallet::*; use sp_runtime::traits::{AtLeast32Bit, StaticLookup, Zero}; use sp_std::{fmt::Debug, prelude::*}; type BalanceOf<T, I> = <<T as Config<I>>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance; type PoolT<T, I> = Vec<(<T as frame_system::Config>::AccountId, Option<<T as Config<I>>::Score>)>; /// The enum is supplied when refreshing the members set. /// Depending on the enum variant the corresponding associated /// type function will be invoked. enum ChangeReceiver { /// Should call `T::MembershipInitialized`. MembershipInitialized, /// Should call `T::MembershipChanged`. MembershipChanged, } #[frame_support::pallet] pub mod pallet { use super::*; use frame_support::{pallet_prelude::*, traits::EnsureOrigin, weights::Weight}; use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; use sp_runtime::traits::MaybeSerializeDeserialize; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] pub struct Pallet<T, I = ()>(_); #[pallet::config] pub trait Config<I: 'static = ()>: frame_system::Config { /// The currency used for deposits. type Currency: Currency<Self::AccountId> + ReservableCurrency<Self::AccountId>; /// The score attributed to a member or candidate. type Score: AtLeast32Bit + Clone + Copy + Default + FullCodec + MaybeSerializeDeserialize + Debug; /// The overarching event type. type Event: From<Event<Self, I>> + IsType<<Self as frame_system::Config>::Event>; // The deposit which is reserved from candidates if they want to // start a candidacy. The deposit gets returned when the candidacy is // withdrawn or when the candidate is kicked. #[pallet::constant] type CandidateDeposit: Get<BalanceOf<Self, I>>; /// Every `Period` blocks the `Members` are filled with the highest scoring /// members in the `Pool`. #[pallet::constant] type Period: Get<Self::BlockNumber>; /// The receiver of the signal for when the membership has been initialized. /// This happens pre-genesis and will usually be the same as `MembershipChanged`. /// If you need to do something different on initialization, then you can change /// this accordingly. type MembershipInitialized: InitializeMembers<Self::AccountId>; /// The receiver of the signal for when the members have changed. type MembershipChanged: ChangeMembers<Self::AccountId>; /// Allows a configurable origin type to set a score to a candidate in the pool. type ScoreOrigin: EnsureOrigin<Self::Origin>; /// Required origin for removing a member (though can always be Root). /// Configurable origin which enables removing an entity. If the entity /// is part of the `Members` it is immediately replaced by the next /// highest scoring candidate, if available. type KickOrigin: EnsureOrigin<Self::Origin>; } #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event<T: Config<I>, I: 'static = ()> { /// The given member was removed. See the transaction for who. MemberRemoved, /// An entity has issued a candidacy. See the transaction for who. CandidateAdded, /// An entity withdrew candidacy. See the transaction for who. CandidateWithdrew, /// The candidacy was forcefully removed for an entity. /// See the transaction for who. CandidateKicked, /// A score was attributed to the candidate. /// See the transaction for who. 
CandidateScored, } /// Error for the scored-pool pallet. #[pallet::error] pub enum Error<T, I = ()> { /// Already a member. AlreadyInPool, /// Index out of bounds. InvalidIndex, /// Index does not match requested account. WrongAccountIndex, } /// The current pool of candidates, stored as an ordered Vec /// (ordered descending by score, `None` last, highest first). #[pallet::storage] #[pallet::getter(fn pool)] pub(crate) type Pool<T: Config<I>, I: 'static = ()> = StorageValue<_, PoolT<T, I>, ValueQuery>; /// A Map of the candidates. The information in this Map is redundant /// to the information in the `Pool`. But the Map enables us to easily /// check if a candidate is already in the pool, without having to /// iterate over the entire pool (the `Pool` is not sorted by /// `T::AccountId`, but by `T::Score` instead). #[pallet::storage] #[pallet::getter(fn candidate_exists)] pub(crate) type CandidateExists<T: Config<I>, I: 'static = ()> = StorageMap<_, Twox64Concat, T::AccountId, bool, ValueQuery>; /// The current membership, stored as an ordered Vec. #[pallet::storage] #[pallet::getter(fn members)] pub(crate) type Members<T: Config<I>, I: 'static = ()> = StorageValue<_, Vec<T::AccountId>, ValueQuery>; /// Size of the `Members` set. #[pallet::storage] #[pallet::getter(fn member_count)] pub(crate) type MemberCount<T, I = ()> = StorageValue<_, u32, ValueQuery>; #[pallet::genesis_config] pub struct GenesisConfig<T: Config<I>, I: 'static = ()> { pub pool: PoolT<T, I>, pub member_count: u32, } #[cfg(feature = "std")] impl<T: Config<I>, I: 'static> Default for GenesisConfig<T, I> { fn default() -> Self { Self { pool: Default::default(), member_count: Default::default() } } } #[pallet::genesis_build] impl<T: Config<I>, I: 'static> GenesisBuild<T, I> for GenesisConfig<T, I> { fn build(&self) { let mut pool = self.pool.clone(); // reserve balance for each candidate in the pool. // panicking here is ok, since this just happens one time, pre-genesis. pool.iter().for_each(|(who, _)| { T::Currency::reserve(&who, T::CandidateDeposit::get()) .expect("balance too low to create candidacy"); <CandidateExists<T, I>>::insert(who, true); }); // Sorts the `Pool` by score in a descending order. Entities which // have a score of `None` are sorted to the beginning of the vec. pool.sort_by_key(|(_, maybe_score)| Reverse(maybe_score.unwrap_or_default())); <MemberCount<T, I>>::put(self.member_count); <Pool<T, I>>::put(&pool); <Pallet<T, I>>::refresh_members(pool, ChangeReceiver::MembershipInitialized); } } #[pallet::hooks] impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I> { /// Every `Period` blocks the `Members` set is refreshed from the /// highest scoring members in the pool. fn on_initialize(n: T::BlockNumber) -> Weight { if n % T::Period::get() == Zero::zero() { let pool = <Pool<T, I>>::get(); <Pallet<T, I>>::refresh_members(pool, ChangeReceiver::MembershipChanged); } 0 } } #[pallet::call] impl<T: Config<I>, I: 'static> Pallet<T, I> { /// Add `origin` to the pool of candidates. /// /// This results in `CandidateDeposit` being reserved from /// the `origin` account. The deposit is returned once /// candidacy is withdrawn by the candidate or the entity /// is kicked by `KickOrigin`. /// /// The dispatch origin of this function must be signed. /// /// The `index` parameter of this function must be set to /// the index of the transactor in the `Pool`. 
#[pallet::weight(0)] pub fn submit_candidacy(origin: OriginFor<T>) -> DispatchResult { let who = ensure_signed(origin)?; ensure!(!<CandidateExists<T, I>>::contains_key(&who), Error::<T, I>::AlreadyInPool); let deposit = T::CandidateDeposit::get(); T::Currency::reserve(&who, deposit)?; // can be inserted as last element in pool, since entities with // `None` are always sorted to the end. <Pool<T, I>>::append((who.clone(), Option::<<T as Config<I>>::Score>::None)); <CandidateExists<T, I>>::insert(&who, true); Self::deposit_event(Event::<T, I>::CandidateAdded); Ok(()) } /// An entity withdraws candidacy and gets its deposit back. /// /// If the entity is part of the `Members`, then the highest member /// of the `Pool` that is not currently in `Members` is immediately /// placed in the set instead. /// /// The dispatch origin of this function must be signed. /// /// The `index` parameter of this function must be set to /// the index of the transactor in the `Pool`. #[pallet::weight(0)] pub fn withdraw_candidacy(origin: OriginFor<T>, index: u32) -> DispatchResult { let who = ensure_signed(origin)?; let pool = <Pool<T, I>>::get(); Self::ensure_index(&pool, &who, index)?; Self::remove_member(pool, who, index)?; Self::deposit_event(Event::<T, I>::CandidateWithdrew); Ok(()) } /// Kick a member `who` from the set. /// /// May only be called from `T::KickOrigin`. /// /// The `index` parameter of this function must be set to /// the index of `dest` in the `Pool`. #[pallet::weight(0)] pub fn kick( origin: OriginFor<T>, dest: <T::Lookup as StaticLookup>::Source, index: u32, ) -> DispatchResult { T::KickOrigin::ensure_origin(origin)?; let who = T::Lookup::lookup(dest)?; let pool = <Pool<T, I>>::get(); Self::ensure_index(&pool, &who, index)?; Self::remove_member(pool, who, index)?; Self::deposit_event(Event::<T, I>::CandidateKicked); Ok(()) } /// Score a member `who` with `score`. /// /// May only be called from `T::ScoreOrigin`. /// /// The `index` parameter of this function must be set to /// the index of the `dest` in the `Pool`. #[pallet::weight(0)] pub fn score( origin: OriginFor<T>, dest: <T::Lookup as StaticLookup>::Source, index: u32, score: T::Score, ) -> DispatchResult { T::ScoreOrigin::ensure_origin(origin)?; let who = T::Lookup::lookup(dest)?; let mut pool = <Pool<T, I>>::get(); Self::ensure_index(&pool, &who, index)?; pool.remove(index as usize); // we binary search the pool (which is sorted descending by score). // if there is already an element with `score`, we insert // right before that. if not, the search returns a location // where we can insert while maintaining order. let item = (who, Some(score.clone())); let location = pool .binary_search_by_key(&Reverse(score), |(_, maybe_score)| { Reverse(maybe_score.unwrap_or_default()) }) .unwrap_or_else(|l| l); pool.insert(location, item); <Pool<T, I>>::put(&pool); Self::deposit_event(Event::<T, I>::CandidateScored); Ok(()) } /// Dispatchable call to change `MemberCount`. /// /// This will only have an effect the next time a refresh happens /// (this happens each `Period`). /// /// May only be called from root. #[pallet::weight(0)] pub fn change_member_count(origin: OriginFor<T>, count: u32) -> DispatchResult { ensure_root(origin)?; MemberCount::<T, I>::put(&count); Ok(()) } } } impl<T: Config<I>, I: 'static> Pallet<T, I> { /// Fetches the `MemberCount` highest scoring members from /// `Pool` and puts them into `Members`. /// /// The `notify` parameter is used to deduct which associated /// type function to invoke at the end of the method. 
fn refresh_members(pool: PoolT<T, I>, notify: ChangeReceiver) { let count = MemberCount::<T, I>::get(); let mut new_members: Vec<T::AccountId> = pool .into_iter() .filter(|(_, score)| score.is_some()) .take(count as usize) .map(|(account_id, _)| account_id) .collect(); new_members.sort(); let old_members = <Members<T, I>>::get(); <Members<T, I>>::put(&new_members); match notify { ChangeReceiver::MembershipInitialized => T::MembershipInitialized::initialize_members(&new_members), ChangeReceiver::MembershipChanged => T::MembershipChanged::set_members_sorted(&new_members[..], &old_members[..]), } } /// Removes an entity `remove` at `index` from the `Pool`. /// /// If the entity is a member it is also removed from `Members` and /// the deposit is returned. fn remove_member( mut pool: PoolT<T, I>, remove: T::AccountId, index: u32, ) -> Result<(), Error<T, I>> { // all callers of this function in this pallet also check // the index for validity before calling this function. // nevertheless we check again here, to assert that there was // no mistake when invoking this sensible function. Self::ensure_index(&pool, &remove, index)?; pool.remove(index as usize); <Pool<T, I>>::put(&pool); // remove from set, if it was in there let members = <Members<T, I>>::get(); if members.binary_search(&remove).is_ok() { Self::refresh_members(pool, ChangeReceiver::MembershipChanged); } <CandidateExists<T, I>>::remove(&remove); T::Currency::unreserve(&remove, T::CandidateDeposit::get()); Self::deposit_event(Event::<T, I>::MemberRemoved); Ok(()) } /// Checks if `index` is a valid number and if the element found /// at `index` in `Pool` is equal to `who`. fn ensure_index(pool: &PoolT<T, I>, who: &T::AccountId, index: u32) -> Result<(), Error<T, I>> { ensure!(index < pool.len() as u32, Error::<T, I>::InvalidIndex); let (index_who, _index_score) = &pool[index as usize]; ensure!(index_who == who, Error::<T, I>::WrongAccountIndex); Ok(()) } }
33.677083
98
0.672131
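The `score` dispatchable above keeps the pool sorted descending by wrapping keys in `Reverse` before `binary_search_by_key`, so unscored (`None`, treated as 0) entries stay at the end. A self-contained sketch of that insertion step; a plain `Vec<(String, Option<u32>)>` stands in for the pallet's `PoolT`:

use std::cmp::Reverse;

// Insert `(who, score)` into a pool sorted descending by score.
// `None` scores compare as 0 and therefore sort last.
fn insert_scored(pool: &mut Vec<(String, Option<u32>)>, who: String, score: u32) {
    let location = pool
        .binary_search_by_key(&Reverse(score), |(_, maybe_score)| {
            Reverse(maybe_score.unwrap_or_default())
        })
        .unwrap_or_else(|l| l);
    pool.insert(location, (who, Some(score)));
}

fn main() {
    let mut pool = vec![
        ("alice".into(), Some(90)),
        ("bob".into(), Some(40)),
        ("carol".into(), None),
    ];
    insert_scored(&mut pool, "dave".into(), 70);
    // Descending by score: alice, dave, bob, carol (unscored).
    assert_eq!(pool[1].0, "dave");
}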
ed3147d861a30ffaa1c2102c4c7cc14a89f843c4
119
/* Bool type - expression based */

fn main() {
    let is_greater = 10 > 5;
    println!("{}", is_greater);
}
13.222222
31
0.521008
e61ab4c4ea862541501609da10c7fcd71e482406
18,362
// SPDX-FileCopyrightText: 2020 Sean Cross <[email protected]> // SPDX-License-Identifier: Apache-2.0 use core::mem; static mut PROCESS: *mut ProcessImpl = 0xff80_1000 as *mut ProcessImpl; pub const MAX_THREAD: TID = 31; pub const INITIAL_TID: TID = 1; pub const IRQ_TID: TID = 0; use crate::arch::mem::PAGE_SIZE; use crate::services::ProcessInner; use xous_kernel::{ProcessInit, ThreadInit, PID, TID}; // use crate::args::KernelArguments; pub const DEFAULT_STACK_SIZE: usize = 131072; pub const MAX_PROCESS_COUNT: usize = 64; // pub use crate::arch::mem::DEFAULT_STACK_TOP; /// This is the address a program will jump to in order to return from an ISR. pub const RETURN_FROM_ISR: usize = 0xff80_2000; /// This is the address a thread will return to when it exits. pub const EXIT_THREAD: usize = 0xff80_3000; // Thread IDs have three possible meaning: // Logical Thread ID: What the user sees // Thread Context Index: An index into the thread slice // Hardware Thread ID: The index that the ISR uses // // The Hardware Thread ID is always equal to the Thread Context // Index, minus one. For example, the default thread ID is // Hardware Thread ID 1 is Thread Context Index 0. // The Logical Thread ID is equal to the Hardware Thread ID // plus one again. This is because the ISR context is Thread // Context Index 0. // Therefore, the first Logical Thread ID is 1, which maps // to Hardware Thread ID 2, which is Thread Context Index 1. // // +-----------------+-----------------+-----------------+ // | Thread ID | Context Index | Hardware Thread | // +=================+=================+=================+ // | ISR Context | 0 | 1 | // | 1 | 1 | 2 | // | 2 | 2 | 3 | // ProcessImpl occupies a multiple of pages mapped to virtual address `0xff80_1000`. // Each thread is 128 bytes (32 4-byte registers). The first "thread" does not exist, // and instead is any bookkeeping information related to the process. #[derive(Debug, Copy, Clone)] #[repr(C)] struct ProcessImpl { /// Used by the interrupt handler to calculate offsets scratch: usize, /// The currently-active thread for this process. This must /// be the 2nd item, because the ISR directly writes this value. hardware_thread: usize, /// Global parameters used by the operating system pub inner: ProcessInner, /// The last thread ID that was allocated last_tid_allocated: u8, /// Pad everything to 128 bytes, so the Thread slice starts at /// offset 128. _padding: [u32; 13], /// This enables the kernel to keep track of threads in the /// target process, and know which threads are ready to /// receive messages. threads: [Thread; MAX_THREAD], } /// Singleton process table. Each process in the system gets allocated from this table. struct ProcessTable { /// The process upon which the current syscall is operating current: PID, /// The actual table contents. `true` if a process is allocated, /// `false` if it is free. table: [bool; MAX_PROCESS_COUNT], } static mut PROCESS_TABLE: ProcessTable = ProcessTable { current: unsafe { PID::new_unchecked(1) }, table: [false; MAX_PROCESS_COUNT], }; #[repr(C)] #[cfg(baremetal)] /// The stage1 bootloader sets up some initial processes. These are reported /// to us as (satp, entrypoint, sp) tuples, which can be turned into a structure. /// The first element is always the kernel. pub struct InitialProcess { /// The RISC-V SATP value, which includes the offset of the root page /// table plus the process ID. 
pub satp: usize, /// Where execution begins pub entrypoint: usize, /// Address of the top of the stack pub sp: usize, } #[repr(C)] #[derive(Debug)] pub struct Process { pid: PID, } #[repr(C)] #[derive(Copy, Clone, Debug, Default)] /// Everything required to keep track of a single thread of execution. pub struct Thread { /// Storage for all RISC-V registers, minus $zero pub registers: [usize; 31], /// The return address. Note that if this thread was created because of an /// `ecall` instruction, you will need to add `4` to this before returning, /// to prevent that instruction from getting executed again. If this is 0, /// then this thread is not valid. pub sepc: usize, } impl Process { pub fn current() -> Process { let pid = unsafe { PROCESS_TABLE.current }; let hardware_pid = (riscv::register::satp::read().bits() >> 22) & ((1 << 9) - 1); assert!((pid.get() as usize) == hardware_pid); Process { pid } } /// Mark this process as running on the current core pub fn activate(&mut self) -> Result<(), xous_kernel::Error> { Ok(()) } /// Calls the provided function with the current inner process state. pub fn with_inner<F, R>(f: F) -> R where F: FnOnce(&ProcessInner) -> R, { let process = unsafe { &*PROCESS }; f(&process.inner) } /// Calls the provided function with the current inner process state. pub fn with_current<F, R>(f: F) -> R where F: FnOnce(&Process) -> R, { let process = Self::current(); f(&process) } /// Calls the provided function with the current inner process state. pub fn with_current_mut<F, R>(f: F) -> R where F: FnOnce(&mut Process) -> R, { let mut process = Self::current(); f(&mut process) } pub fn with_inner_mut<F, R>(f: F) -> R where F: FnOnce(&mut ProcessInner) -> R, { let process = unsafe { &mut *PROCESS }; f(&mut process.inner) } pub fn current_thread_mut(&mut self) -> &mut Thread { let process = unsafe { &mut *PROCESS }; assert!(process.hardware_thread != 0, "thread number was 0"); &mut process.threads[process.hardware_thread - 1] } pub fn current_thread(&self) -> &Thread { let process = unsafe { &mut *PROCESS }; &mut process.threads[process.hardware_thread - 1] // self.thread(process.hardware_thread - 1) } pub fn current_tid(&self) -> TID { let process = unsafe { &*PROCESS }; process.hardware_thread - 1 } pub fn thread_exists(&self, tid: TID) -> bool { self.thread(tid).sepc != 0 } /// Set the current thread number. 
pub fn set_thread(&mut self, thread: TID) -> Result<(), xous_kernel::Error> { let mut process = unsafe { &mut *PROCESS }; // println!("KERNEL({}:{}): Switching to thread {}", self.pid, process.hardware_thread - 1, thread); assert!( thread <= process.threads.len(), "attempt to switch to an invalid thread {}", thread ); process.hardware_thread = thread + 1; Ok(()) } pub fn thread_mut(&mut self, thread: TID) -> &mut Thread { let process = unsafe { &mut *PROCESS }; assert!( thread <= process.threads.len(), "attempt to retrieve an invalid thread {}", thread ); &mut process.threads[thread] } pub fn thread(&self, thread: TID) -> &Thread { let process = unsafe { &mut *PROCESS }; assert!( thread <= process.threads.len(), "attempt to retrieve an invalid thread {}", thread ); &process.threads[thread] } pub fn for_each_thread_mut<F>(&self, mut op: F) where F: FnMut(TID, &Thread), { let process = unsafe { &mut *PROCESS }; for (idx, thread) in process.threads.iter_mut().enumerate() { // Ignore threads that have no PC, and ignore the ISR thread if thread.sepc == 0 || idx == IRQ_TID { continue; } op(idx, thread); } } pub fn find_free_thread(&self) -> Option<TID> { let process = unsafe { &mut *PROCESS }; let start_tid = process.last_tid_allocated as usize; let a = &process.threads[start_tid..process.threads.len()]; let b = &process.threads[0..start_tid]; for (index, thread) in a.iter().chain(b.iter()).enumerate() { let mut tid = index + start_tid; if tid >= process.threads.len() { tid -= process.threads.len() } if tid != IRQ_TID && thread.sepc == 0 { process.last_tid_allocated = tid as _; return Some(tid as TID); } } None } pub fn set_thread_result(&mut self, thread_nr: TID, result: xous_kernel::Result) { let vals = unsafe { mem::transmute::<_, [usize; 8]>(result) }; let thread = self.thread_mut(thread_nr); for (idx, reg) in vals.iter().enumerate() { thread.registers[9 + idx] = *reg; } } pub fn retry_instruction(&mut self, tid: TID) -> Result<(), xous_kernel::Error> { let process = unsafe { &mut *PROCESS }; let mut thread = &mut process.threads[tid]; if thread.sepc >= 4 { thread.sepc -= 4; } Ok(()) } /// Initialize this process thread with the given entrypoint and stack /// addresses. pub fn setup_process(pid: PID, thread_init: ThreadInit) -> Result<(), xous_kernel::Error> { let mut process = unsafe { &mut *PROCESS }; let tid = INITIAL_TID; assert!(tid != IRQ_TID, "tried to init using the irq thread"); assert!( mem::size_of::<ProcessImpl>() == PAGE_SIZE, "Process size is {}, not PAGE_SIZE ({}) (Thread size: {}, array: {}, Inner: {})", mem::size_of::<ProcessImpl>(), PAGE_SIZE, mem::size_of::<Thread>(), mem::size_of::<[Thread; MAX_THREAD + 1]>(), mem::size_of::<ProcessInner>(), ); assert!( tid - 1 < process.threads.len(), "tried to init a thread that's out of range" ); assert!( tid == INITIAL_TID, "tried to init using a thread {} that wasn't {}. This probably isn't what you want.", tid, INITIAL_TID ); unsafe { let pid_idx = (pid.get() as usize) - 1; assert!( !PROCESS_TABLE.table[pid_idx], "process {} is already allocated", pid ); PROCESS_TABLE.table[pid_idx] = true; } // By convention, thread 0 is the trap thread. Therefore, thread 1 is // the first default thread. There is an offset of 1 due to how the // interrupt handler functions. 
process.hardware_thread = tid + 1; // Reset the thread state, since it's possibly uninitialized memory for thread in process.threads.iter_mut() { *thread = Default::default(); } let process = unsafe { &mut *PROCESS }; let mut thread = &mut process.threads[tid]; thread.sepc = unsafe { core::mem::transmute::<_, usize>(thread_init.call) }; thread.registers[1] = thread_init.stack.as_ptr() as usize + thread_init.stack.len(); thread.registers[9] = thread_init.arg1; thread.registers[10] = thread_init.arg2; thread.registers[11] = thread_init.arg3; thread.registers[12] = thread_init.arg4; #[cfg(any(feature = "debug-print", feature = "print-panics"))] { let pid = pid.get(); if pid != 1 { klog!( "initializing PID {} thread {} with entrypoint {:08x}, stack @ {:08x}, arg {:08x}", pid, tid, thread.sepc, thread.registers[1], thread.registers[9], ); } } process.inner = Default::default(); // Mark the stack as "unallocated-but-free" let init_sp = (thread_init.stack.as_ptr() as usize) & !0xfff; if init_sp != 0 { let stack_size = thread_init.stack.len(); crate::mem::MemoryManager::with_mut(|memory_manager| { memory_manager .reserve_range( init_sp as *mut u8, stack_size + 4096, xous_kernel::MemoryFlags::R | xous_kernel::MemoryFlags::W, ) .expect("couldn't reserve stack") }); } Ok(()) } pub fn setup_thread( &mut self, new_tid: TID, setup: ThreadInit, ) -> Result<(), xous_kernel::Error> { let entrypoint = unsafe { core::mem::transmute::<_, usize>(setup.call) }; // Create the new context and set it to run in the new address space. let pid = self.pid.get(); let thread = self.thread_mut(new_tid); // println!("Setting up thread {}, pid {}", new_tid, pid); let sp = setup.stack.as_ptr() as usize + setup.stack.len(); if sp <= 16 { Err(xous_kernel::Error::BadAddress)?; } crate::arch::syscall::invoke( thread, pid == 1, entrypoint, (sp - 16) & !0xf, EXIT_THREAD, &[setup.arg1, setup.arg2, setup.arg3, setup.arg4], ); Ok(()) } /// Destroy a given thread and return its return value. /// /// # Returns /// The return value of the function /// /// # Errors /// xous::ThreadNotAvailable - the thread did not exist pub fn destroy_thread(&mut self, tid: TID) -> Result<usize, xous_kernel::Error> { let thread = self.thread_mut(tid); // Ensure this thread is valid if thread.sepc == 0 || tid == IRQ_TID { Err(xous_kernel::Error::ThreadNotAvailable)?; } // thread.registers[0] == x1 // thread.registers[1] == x2 // ... // thread.registers[4] == x5 == t0 // ... 
// thread.registers[9] == x10 == a0 // thread.registers[10] == x11 == a1 let return_value = thread.registers[9]; for val in &mut thread.registers { *val = 0; } thread.sepc = 0; Ok(return_value) } pub fn print_all_threads(&self) { let process = unsafe { &mut *PROCESS }; &mut process.threads[process.hardware_thread - 1]; for (tid_idx, &thread) in process.threads.iter().enumerate() { let tid = tid_idx + 1; if thread.registers[1] != 0 { Self::print_thread(tid, &thread); } } } pub fn print_current_thread(&self) { let thread = self.current_thread(); let tid = self.current_tid(); Self::print_thread(tid, &thread); } pub fn print_thread(_tid: TID, _thread: &Thread) { println!("Thread {}:", _tid); println!( "PC:{:08x} SP:{:08x} RA:{:08x}", _thread.sepc, _thread.registers[1], _thread.registers[0] ); println!( "GP:{:08x} TP:{:08x}", _thread.registers[2], _thread.registers[3] ); println!( "T0:{:08x} T1:{:08x} T2:{:08x}", _thread.registers[4], _thread.registers[5], _thread.registers[6] ); println!( "T3:{:08x} T4:{:08x} T5:{:08x} T6:{:08x}", _thread.registers[27], _thread.registers[28], _thread.registers[29], _thread.registers[30] ); println!( "S0:{:08x} S1:{:08x} S2:{:08x} S3:{:08x}", _thread.registers[7], _thread.registers[8], _thread.registers[17], _thread.registers[18] ); println!( "S4:{:08x} S5:{:08x} S6:{:08x} S7:{:08x}", _thread.registers[19], _thread.registers[20], _thread.registers[21], _thread.registers[22] ); println!( "S8:{:08x} S9:{:08x} S10:{:08x} S11:{:08x}", _thread.registers[23], _thread.registers[24], _thread.registers[25], _thread.registers[26] ); println!( "A0:{:08x} A1:{:08x} A2:{:08x} A3:{:08x}", _thread.registers[9], _thread.registers[10], _thread.registers[11], _thread.registers[12] ); println!( "A4:{:08x} A5:{:08x} A6:{:08x} A7:{:08x}", _thread.registers[13], _thread.registers[14], _thread.registers[15], _thread.registers[16] ); } pub fn create(_pid: PID, _init_data: ProcessInit) -> PID { todo!(); } pub fn destroy(pid: PID) -> Result<(), xous_kernel::Error> { let mut process_table = unsafe { &mut PROCESS_TABLE }; let pid_idx = pid.get() as usize - 1; if pid_idx >= process_table.table.len() { panic!("attempted to destroy PID that exceeds table index: {}", pid); } process_table.table[pid_idx] = false; Ok(()) } pub fn find_thread<F>(&self, op: F) -> Option<(TID, &mut Thread)> where F: Fn(TID, &Thread) -> bool, { let process = unsafe { &mut *PROCESS }; for (idx, thread) in process.threads.iter_mut().enumerate() { if thread.sepc == 0 { continue; } if op(idx, thread) { return Some((idx, thread)); } } None } } impl Thread { /// The current stack pointer for this thread pub fn stack_pointer(&self) -> usize { self.registers[1] } pub fn a0(&self) -> usize { self.registers[9] } pub fn a1(&self) -> usize { self.registers[10] } } pub fn set_current_pid(pid: PID) { let pid_idx = (pid.get() - 1) as usize; unsafe { let mut pt = &mut PROCESS_TABLE; match pt.table.get(pid_idx) { None | Some(false) => panic!("PID {} does not exist", pid), _ => (), } pt.current = pid; } } pub fn current_pid() -> PID { unsafe { PROCESS_TABLE.current } } pub fn current_tid() -> TID { unsafe { ((*PROCESS).hardware_thread) - 1 } }
32.614565
108
0.54994
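The comment block above lays out three numberings for the same thread: the logical TID, the context index into `threads`, and the hardware thread number, which is shifted up by one because hardware thread 1 (context index 0) is reserved for the ISR. A tiny sketch of the conversions used by `set_thread` and `current_tid`; the helper names are mine:

const IRQ_TID: usize = 0;

// Logical TID -> hardware thread number: shift past the ISR slot.
fn hardware_from_tid(tid: usize) -> usize {
    tid + 1
}

// Hardware thread number -> logical TID (and context index).
fn tid_from_hardware(hardware: usize) -> usize {
    assert!(hardware >= 1, "hardware thread 0 does not exist");
    hardware - 1
}

fn main() {
    assert_eq!(hardware_from_tid(IRQ_TID), 1); // ISR context
    assert_eq!(hardware_from_tid(1), 2);       // first user thread
    assert_eq!(tid_from_hardware(2), 1);
}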
e9c98355b5e3103af90bef19e8e55878fb2d4a97
8,004
#![deny(unused)] #![feature(metadata_ext, file_type, dir_entry_ext)] #![cfg_attr(test, deny(warnings))] #[cfg(test)] extern crate hamcrest; #[macro_use] extern crate log; extern crate curl; extern crate docopt; extern crate flate2; extern crate git2; extern crate glob; extern crate libc; extern crate libgit2_sys; extern crate num_cpus; extern crate regex; extern crate registry; extern crate rustc_serialize; extern crate semver; extern crate tar; extern crate term; extern crate threadpool; extern crate time; extern crate toml; extern crate url; use std::env; use std::error::Error; use std::io::prelude::*; use std::io; use rustc_serialize::{Decodable, Encodable}; use rustc_serialize::json::{self, Json}; use docopt::Docopt; use core::{Shell, MultiShell, ShellConfig}; use term::color::{BLACK, RED}; pub use util::{CargoError, CliError, CliResult, human, Config, ChainError}; pub mod core; pub mod ops; pub mod sources; pub mod util; pub fn execute_main<T, U, V>( exec: fn(T, U, &Config) -> CliResult<Option<V>>, options_first: bool, usage: &str) where V: Encodable, T: Decodable, U: Decodable { process::<V, _>(|rest, shell| { call_main(exec, shell, usage, rest, options_first) }); } pub fn call_main<T, U, V>( exec: fn(T, U, &Config) -> CliResult<Option<V>>, shell: &Config, usage: &str, args: &[String], options_first: bool) -> CliResult<Option<V>> where V: Encodable, T: Decodable, U: Decodable { let flags = try!(flags_from_args::<T>(usage, args, options_first)); let json = try!(json_from_stdin::<U>()); exec(flags, json, shell) } pub fn execute_main_without_stdin<T, V>( exec: fn(T, &Config) -> CliResult<Option<V>>, options_first: bool, usage: &str) where V: Encodable, T: Decodable { process::<V, _>(|rest, shell| { call_main_without_stdin(exec, shell, usage, rest, options_first) }); } pub fn call_main_without_stdin<T, V>( exec: fn(T, &Config) -> CliResult<Option<V>>, shell: &Config, usage: &str, args: &[String], options_first: bool) -> CliResult<Option<V>> where V: Encodable, T: Decodable { let flags = try!(flags_from_args::<T>(usage, args, options_first)); exec(flags, shell) } fn process<V, F>(mut callback: F) where F: FnMut(&[String], &Config) -> CliResult<Option<V>>, V: Encodable { let mut config = None; let result = (|| { config = Some(try!(Config::new(shell(true)))); let args: Vec<_> = try!(env::args_os().map(|s| { s.into_string().map_err(|s| { human(format!("invalid unicode in argument: {:?}", s)) }) }).collect()); callback(&args, config.as_ref().unwrap()) })(); let mut verbose_shell = shell(true); let mut shell = config.as_ref().map(|s| s.shell()); let shell = shell.as_mut().map(|s| &mut **s).unwrap_or(&mut verbose_shell); process_executed(result, shell) } pub fn process_executed<T>(result: CliResult<Option<T>>, shell: &mut MultiShell) where T: Encodable { match result { Err(e) => handle_error(e, shell), Ok(Some(encodable)) => { let encoded = json::encode(&encodable).unwrap(); println!("{}", encoded); } Ok(None) => {} } } pub fn shell(verbose: bool) -> MultiShell { let tty = isatty(libc::STDERR_FILENO); let stderr = Box::new(io::stderr()); let config = ShellConfig { color: true, verbose: verbose, tty: tty }; let err = Shell::create(stderr, config); let tty = isatty(libc::STDOUT_FILENO); let stdout = Box::new(io::stdout()); let config = ShellConfig { color: true, verbose: verbose, tty: tty }; let out = Shell::create(stdout, config); return MultiShell::new(out, err, verbose); #[cfg(unix)] fn isatty(fd: libc::c_int) -> bool { unsafe { libc::isatty(fd) != 0 } } #[cfg(windows)] fn isatty(fd: 
libc::c_int) -> bool { extern crate kernel32; extern crate winapi; unsafe { let handle = kernel32::GetStdHandle(if fd == libc::STDOUT_FILENO { winapi::winbase::STD_OUTPUT_HANDLE } else { winapi::winbase::STD_ERROR_HANDLE }); let mut out = 0; kernel32::GetConsoleMode(handle, &mut out) != 0 } } } // `output` print variant error strings to either stderr or stdout. // For fatal errors, print to stderr; // and for others, e.g. docopt version info, print to stdout. fn output(err: String, shell: &mut MultiShell, fatal: bool) { let std_shell = if fatal {shell.err()} else {shell.out()}; let color = if fatal {RED} else {BLACK}; let _ = std_shell.say(err, color); } pub fn handle_error(err: CliError, shell: &mut MultiShell) { debug!("handle_error; err={:?}", err); let CliError { error, exit_code, unknown } = err; let fatal = exit_code != 0; // exit_code == 0 is non-fatal error let hide = unknown && !shell.get_verbose(); if hide { let _ = shell.err().say("An unknown error occurred", RED); } else { output(error.to_string(), shell, fatal); } if !handle_cause(&error, shell) || hide { let _ = shell.err().say("\nTo learn more, run the command again \ with --verbose.".to_string(), BLACK); } std::process::exit(exit_code); } fn handle_cause(mut cargo_err: &CargoError, shell: &mut MultiShell) -> bool { let verbose = shell.get_verbose(); let mut err; loop { cargo_err = match cargo_err.cargo_cause() { Some(cause) => cause, None => { err = cargo_err.cause(); break } }; if !verbose && !cargo_err.is_human() { return false } print(cargo_err.to_string(), shell); } loop { let cause = match err { Some(err) => err, None => return true }; if !verbose { return false } print(cause.to_string(), shell); err = cause.cause(); } fn print(error: String, shell: &mut MultiShell) { let _ = shell.err().say("\nCaused by:", BLACK); let _ = shell.err().say(format!(" {}", error), BLACK); } } pub fn version() -> String { format!("cargo {}", match option_env!("CFG_VERSION") { Some(s) => s.to_string(), None => format!("{}.{}.{}{}", env!("CARGO_PKG_VERSION_MAJOR"), env!("CARGO_PKG_VERSION_MINOR"), env!("CARGO_PKG_VERSION_PATCH"), option_env!("CARGO_PKG_VERSION_PRE").unwrap_or("")) }) } fn flags_from_args<'a, T>(usage: &str, args: &[String], options_first: bool) -> CliResult<T> where T: Decodable { let docopt = Docopt::new(usage).unwrap() .options_first(options_first) .argv(args.iter().map(|s| &s[..])) .help(true) .version(Some(version())); docopt.decode().map_err(|e| { let code = if e.fatal() {1} else {0}; CliError::from_error(human(e.to_string()), code) }) } fn json_from_stdin<T: Decodable>() -> CliResult<T> { let mut reader = io::stdin(); let mut input = String::new(); try!(reader.read_to_string(&mut input).map_err(|_| { CliError::new("Standard in did not exist or was not UTF-8", 1) })); let json = try!(Json::from_str(&input).map_err(|_| { CliError::new("Could not parse standard in as JSON", 1) })); let mut decoder = json::Decoder::new(json); Decodable::decode(&mut decoder).map_err(|_| { CliError::new("Could not process standard in as input", 1) }) }
31.265625
83
0.568466
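`handle_cause` above walks an error's cause chain layer by layer and prints each one under "Caused by:". A modern, self-contained sketch of the same traversal using `std::error::Error::source` instead of the old error-chain plumbing; the `Wrapped` type is invented for illustration:

use std::error::Error;
use std::fmt;

#[derive(Debug)]
struct Wrapped {
    msg: &'static str,
    source: Option<Box<dyn Error>>,
}

impl fmt::Display for Wrapped {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.msg)
    }
}

impl Error for Wrapped {
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        self.source.as_deref()
    }
}

fn main() {
    let err = Wrapped {
        msg: "failed to compile",
        source: Some(Box::new(Wrapped { msg: "linker exited with 1", source: None })),
    };
    println!("{}", err);
    // Walk the chain, exactly like handle_cause's loop over `cause()`.
    let mut cause = err.source();
    while let Some(e) = cause {
        println!("\nCaused by:\n  {}", e);
        cause = e.source();
    }
}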
ab911d1d92b3254965f1e6a4348910f7b0156935
195
extern crate uuid; extern crate chrono; extern crate rustc_serialize; #[macro_use] extern crate log; pub mod config; pub mod entry; pub mod journal; pub mod file_journal; pub mod simple_logger;
16.25
29
0.789744
1d829f22df4e7ccc30b624cda1d343a316832619
5,796
use std::io::{BufReader, Read, Write}; use std::net::{IpAddr, SocketAddr, TcpStream}; use std::thread; use std::time::Duration; use crossbeam_channel::unbounded; use ipnet::Ipv4AddrRange; use crate::cluster::node::{GETINFO_REQUEST, GETINFO_RESPONSE}; pub fn get_local_network_ip_addresses(ip_addresses: Vec<IpAddr>) -> Vec<IpAddr> { ip_addresses .into_iter() .filter(|ip_address| { ip_address.is_ipv4() && !ip_address.is_loopback() && !ip_address.is_unspecified() && !ip_address.is_multicast() && match ip_address { IpAddr::V4(ip_address) => ip_address.is_private(), IpAddr::V6(_) => false, } }) .collect::<Vec<IpAddr>>() } pub fn get_ip_addresses() -> Vec<IpAddr> { let mut ip_addresses = vec![]; for interfaces in get_if_addrs::get_if_addrs() { for interface in interfaces { ip_addresses.push(interface.ip()); } } ip_addresses } pub enum Range { Sixteen, TwentyFour, } /// from an `ip_address` return all the ip_addresses coming from the same range /// supported ranges: /// - 10.0.0.0/8 /// - 172.16.0.0/12 /// - 192.168.0.0/16 pub fn get_range_from_ip_address(ip_address: IpAddr, range: Range) -> Vec<IpAddr> { let ip_address = match ip_address { IpAddr::V4(ip_address) => ip_address, IpAddr::V6(_) => return vec![], // do not support ipv6 }; let ip_addresses = match ip_address.octets() { [10, b, c, _] => match range { Range::Sixteen => Ipv4AddrRange::new( format!("10.{}.0.0", b).parse().unwrap(), format!("10.{}.255.255", b).parse().unwrap(), ), Range::TwentyFour => Ipv4AddrRange::new( format!("10.{}.{}.0", b, c).parse().unwrap(), format!("10.{}.{}.255", b, c).parse().unwrap(), ), }, // 10.0.0.0/8 [172, b, c, _] if b >= 16 && b <= 31 => match range { Range::Sixteen => Ipv4AddrRange::new( format!("172.{}.0.0", b).parse().unwrap(), format!("172.{}.255.255", b).parse().unwrap(), ), Range::TwentyFour => Ipv4AddrRange::new( format!("172.{}.{}.0", b, c).parse().unwrap(), format!("172.{}.{}.255", b, c).parse().unwrap(), ), }, // 172.16.0.0/12 [192, 168, c, _] => match range { Range::Sixteen => Ipv4AddrRange::new( "192.168.0.0".parse().unwrap(), "192.168.255.255".parse().unwrap(), ), Range::TwentyFour => Ipv4AddrRange::new( format!("192.168.{}.0", c).parse().unwrap(), format!("192.168.{}.255", c).parse().unwrap(), ), }, // 192.168.0.0/16, _ => return vec![], }; ip_addresses .into_iter() .map(|ip_address| IpAddr::V4(ip_address)) .collect() } enum ParallelResponse<T> { Ok(T), Continue, End, } type PeerId = String; /// TCP scan a range of ip addresses with a list of ports /// return a list of ip addresses with the associated port that are open pub fn scan_ip_range( ip_addresses: Vec<IpAddr>, ports_to_scan: Vec<u16>, ) -> Vec<(PeerId, SocketAddr)> { let mut opened_sockets = vec![]; let thread_pool = match rayon::ThreadPoolBuilder::new() .thread_name(|_| "scan range".to_string()) .build() { Ok(pool) => pool, Err(err) => { panic!("{:?}", err); } }; let (tx, rx) = unbounded::<ParallelResponse<(PeerId, SocketAddr)>>(); thread::spawn(move || { for ip_address in ip_addresses { let _tx = tx.clone(); let ports = ports_to_scan.clone(); let _ = thread_pool.spawn(move || { let tx = _tx.clone(); let ports = ports; for port in ports.iter() { let socket_addr = SocketAddr::new(ip_address, *port); let res = match TcpStream::connect_timeout(&socket_addr, Duration::from_millis(10)) { Ok(mut tcp_stream) => { // check that the remote is valid let _ = tcp_stream.write(GETINFO_REQUEST); let mut response_buffer = [0; 256]; let _ = tcp_stream.read(&mut response_buffer); match response_buffer { res if res.starts_with(GETINFO_RESPONSE) => { 
let node_id = &res[GETINFO_RESPONSE.len()..]; // "getinfo:<node_id>" ParallelResponse::Ok(( String::from_utf8(node_id.to_vec()).unwrap(), socket_addr, )) } _ => ParallelResponse::Continue, } } // socket opened - ip + port does exist Err(_) => ParallelResponse::Continue, // can't open a socket - then continue }; let _ = tx.send(res); } }); } }); for res in rx { match res { ParallelResponse::Ok(res) => { opened_sockets.push(res); } ParallelResponse::Continue => continue, ParallelResponse::End => break, } } opened_sockets }
32.931818
108
0.475155
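`get_range_from_ip_address` above recognizes the three RFC 1918 private blocks by pattern-matching on the leading octets. A compact standalone sketch of that classification step using only `std`; the function name is mine:

use std::net::Ipv4Addr;

// RFC 1918 private ranges, matched on octets as in get_range_from_ip_address:
// 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16.
fn private_block(ip: Ipv4Addr) -> Option<&'static str> {
    match ip.octets() {
        [10, ..] => Some("10.0.0.0/8"),
        [172, b, ..] if (16..=31).contains(&b) => Some("172.16.0.0/12"),
        [192, 168, ..] => Some("192.168.0.0/16"),
        _ => None,
    }
}

fn main() {
    assert_eq!(private_block(Ipv4Addr::new(10, 1, 2, 3)), Some("10.0.0.0/8"));
    assert_eq!(private_block(Ipv4Addr::new(172, 20, 0, 1)), Some("172.16.0.0/12"));
    assert_eq!(private_block(Ipv4Addr::new(8, 8, 8, 8)), None);
}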
de320ef778033c127bb9445ede74b80dfa393ccc
3,460
// * This file is part of the uutils coreutils package.
// *
// * (c) Michael Debertol <michael.debertol..AT..gmail.com>
// *
// * For the full copyright and license information, please view the LICENSE
// * file that was distributed with this source code.

//! Check if a file is ordered

use crate::{
    chunks::{self, Chunk, RecycledChunk},
    compare_by, open, GlobalSettings,
};
use itertools::Itertools;
use std::{
    cmp::Ordering,
    io::Read,
    iter,
    sync::mpsc::{sync_channel, Receiver, SyncSender},
    thread,
};

/// Check if the file at `path` is ordered.
///
/// # Returns
///
/// The code we should exit with.
pub fn check(path: &str, settings: &GlobalSettings) -> i32 {
    let max_allowed_cmp = if settings.unique {
        // If `unique` is enabled, the previous line must compare _less_ to the next one.
        Ordering::Less
    } else {
        // Otherwise, the previous line must compare _less or equal_ to the next one.
        Ordering::Equal
    };
    let file = open(path);
    let (recycled_sender, recycled_receiver) = sync_channel(2);
    let (loaded_sender, loaded_receiver) = sync_channel(2);
    thread::spawn({
        let settings = settings.clone();
        move || reader(file, recycled_receiver, loaded_sender, &settings)
    });
    for _ in 0..2 {
        let _ = recycled_sender.send(RecycledChunk::new(100 * 1024));
    }
    let mut prev_chunk: Option<Chunk> = None;
    let mut line_idx = 0;
    for chunk in loaded_receiver.iter() {
        line_idx += 1;
        if let Some(prev_chunk) = prev_chunk.take() {
            // Check if the first element of the new chunk is greater than the last
            // element from the previous chunk
            let prev_last = prev_chunk.lines().last().unwrap();
            let new_first = chunk.lines().first().unwrap();

            if compare_by(
                prev_last,
                new_first,
                settings,
                prev_chunk.line_data(),
                chunk.line_data(),
            ) > max_allowed_cmp
            {
                if !settings.check_silent {
                    eprintln!("sort: {}:{}: disorder: {}", path, line_idx, new_first.line);
                }
                return 1;
            }
            let _ = recycled_sender.send(prev_chunk.recycle());
        }
        for (a, b) in chunk.lines().iter().tuple_windows() {
            line_idx += 1;
            if compare_by(a, b, settings, chunk.line_data(), chunk.line_data()) > max_allowed_cmp {
                if !settings.check_silent {
                    eprintln!("sort: {}:{}: disorder: {}", path, line_idx, b.line);
                }
                return 1;
            }
        }
        prev_chunk = Some(chunk);
    }
    0
}

/// The function running on the reader thread.
fn reader(
    mut file: Box<dyn Read + Send>,
    receiver: Receiver<RecycledChunk>,
    sender: SyncSender<Chunk>,
    settings: &GlobalSettings,
) {
    let mut carry_over = vec![];
    for recycled_chunk in receiver.iter() {
        let should_continue = chunks::read(
            &sender,
            recycled_chunk,
            None,
            &mut carry_over,
            &mut file,
            &mut iter::empty(),
            if settings.zero_terminated {
                b'\0'
            } else {
                b'\n'
            },
            settings,
        );
        if !should_continue {
            break;
        }
    }
}
29.827586
99
0.546821
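`check` above never materializes the whole file: it compares the last line of the previous chunk against the first line of the next, then each adjacent pair inside the chunk. A self-contained sketch of that boundary-aware scan; chunking is simulated with a slice of Vecs of string slices:

// Return the 1-based index of the first out-of-order line, if any,
// scanning chunk by chunk like `sort --check`.
fn first_disorder(chunks: &[Vec<&str>]) -> Option<usize> {
    let mut line_idx = 0;
    let mut prev_last: Option<&str> = None;
    for chunk in chunks {
        // Boundary comparison: last of previous chunk vs first of this one.
        if let (Some(prev), Some(first)) = (prev_last, chunk.first().copied()) {
            line_idx += 1;
            if prev > first {
                return Some(line_idx);
            }
        } else if chunk.first().is_some() {
            line_idx += 1; // first line of the very first chunk
        }
        // In-chunk comparisons over adjacent pairs.
        for pair in chunk.windows(2) {
            line_idx += 1;
            if pair[0] > pair[1] {
                return Some(line_idx);
            }
        }
        prev_last = chunk.last().copied();
    }
    None
}

fn main() {
    let ok = vec![vec!["a", "b"], vec!["c", "d"]];
    let bad = vec![vec!["a", "z"], vec!["c"]];
    assert_eq!(first_disorder(&ok), None);
    assert_eq!(first_disorder(&bad), Some(3)); // "c" arrives after "z"
}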
0a8bf1456e77695e9a6c6424e0fa32d5b01b9da7
13,502
use super::common::*; use crate::circuit::anchor::AnchorCircuit; use ark_crypto_primitives::Error; use ark_ec::PairingEngine; use ark_ff::{BigInteger, PrimeField}; use ark_std::{ rand::{CryptoRng, Rng, RngCore}, rc::Rc, vec::Vec, }; use arkworks_gadgets::{ arbitrary::anchor_data::Input as AnchorDataInput, leaf::anchor::{ constraints::AnchorLeafGadget, AnchorLeaf, Private as LeafPrivate, Public as LeafPublic, }, merkle_tree::Path, set::membership::{ constraints::SetMembershipGadget, Private as SetMembershipPrivate, SetMembership, }, }; use arkworks_utils::{ poseidon::PoseidonParameters, utils::common::{setup_params_x5_3, setup_params_x5_4, Curve}, }; pub type AnchorConstraintDataInput<F> = AnchorDataInput<F>; pub type Leaf_x5<F> = AnchorLeaf<F, PoseidonCRH_x5_4<F>>; pub type LeafGadget_x5<F> = AnchorLeafGadget<F, PoseidonCRH_x5_4<F>, PoseidonCRH_x5_4Gadget<F>>; pub type TestSetMembership<F, const M: usize> = SetMembership<F, M>; pub type TestSetMembershipGadget<F, const M: usize> = SetMembershipGadget<F, M>; pub type Circuit_x5<F, const N: usize, const M: usize> = AnchorCircuit< F, PoseidonCRH_x5_4<F>, PoseidonCRH_x5_4Gadget<F>, TreeConfig_x5<F>, LeafCRHGadget<F>, PoseidonCRH_x5_3Gadget<F>, N, M, >; pub type Leaf_x17<F> = AnchorLeaf<F, PoseidonCRH_x17_5<F>>; pub type LeafGadget_x17<F> = AnchorLeafGadget<F, PoseidonCRH_x17_5<F>, PoseidonCRH_x17_5Gadget<F>>; pub type Circuit_x17<F, const N: usize, const M: usize> = AnchorCircuit< F, PoseidonCRH_x17_5<F>, PoseidonCRH_x17_5Gadget<F>, TreeConfig_x17<F>, LeafCRHGadget<F>, PoseidonCRH_x17_3Gadget<F>, N, M, >; pub fn setup_leaf_x5_4<F: PrimeField, R: RngCore>( curve: Curve, chain_id_bytes: Vec<u8>, rng: &mut R, ) -> Result<(Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>), Error> { let params5 = setup_params_x5_4::<F>(curve); // Secret inputs for the leaf let leaf_private = LeafPrivate::generate(rng); let chain_id = F::from_le_bytes_mod_order(&chain_id_bytes); let leaf_public = LeafPublic::new(chain_id); let leaf_hash = Leaf_x5::create_leaf(&leaf_private, &leaf_public, &params5)?; let nullifier_hash = Leaf_x5::create_nullifier(&leaf_private, &params5)?; let secret_bytes = leaf_private.secret().into_repr().to_bytes_le(); let nullifier_bytes = leaf_private.nullifier().into_repr().to_bytes_le(); let leaf_bytes = leaf_hash.into_repr().to_bytes_le(); let nullifier_hash_bytes = nullifier_hash.into_repr().to_bytes_le(); Ok(( secret_bytes, nullifier_bytes, leaf_bytes, nullifier_hash_bytes, )) } pub fn setup_leaf_with_privates_raw_x5_4<F: PrimeField>( curve: Curve, secret_bytes: Vec<u8>, nullfier_bytes: Vec<u8>, chain_id_bytes: Vec<u8>, ) -> Result<(Vec<u8>, Vec<u8>), Error> { let params5 = setup_params_x5_4::<F>(curve); let secret = F::from_le_bytes_mod_order(&secret_bytes); let nullifier = F::from_le_bytes_mod_order(&nullfier_bytes); // Secret inputs for the leaf let leaf_private = LeafPrivate::new(secret, nullifier); let chain_id = F::from_le_bytes_mod_order(&chain_id_bytes); let leaf_public = LeafPublic::new(chain_id); let leaf_hash = Leaf_x5::create_leaf(&leaf_private, &leaf_public, &params5)?; let nullifier_hash = Leaf_x5::create_nullifier(&leaf_private, &params5)?; let leaf_bytes = leaf_hash.into_repr().to_bytes_le(); let nullifier_hash_bytes = nullifier_hash.into_repr().to_bytes_le(); Ok((leaf_bytes, nullifier_hash_bytes)) } pub const N: usize = 30; pub const M: usize = 2; type AnchorProverSetupBn254_30<F> = AnchorProverSetup<F, N, M>; pub fn setup_proof_x5_4<E: PairingEngine, R: RngCore + CryptoRng>( curve: Curve, chain_id: Vec<u8>, secret_raw: Vec<u8>, 
nullifier_raw: Vec<u8>, leaves_raw: Vec<Vec<u8>>, index: u64, roots: Vec<Vec<u8>>, recipient_raw: Vec<u8>, relayer_raw: Vec<u8>, commitment_raw: Vec<u8>, fee: u128, refund: u128, pk: Vec<u8>, rng: &mut R, ) -> Result<(Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>, Vec<Vec<u8>>), Error> { let params3 = setup_params_x5_3::<E::Fr>(curve); let params4 = setup_params_x5_4::<E::Fr>(curve); let prover = AnchorProverSetupBn254_30::new(params3, params4); let (circuit, leaf_raw, nullifier_hash_raw, root_raw, public_inputs_raw) = prover .setup_circuit_with_privates_raw( chain_id, secret_raw, nullifier_raw, leaves_raw, index, roots, recipient_raw, relayer_raw, commitment_raw, fee, refund, )?; let proof = prove_unchecked::<E, _, _>(circuit, &pk, rng)?; Ok(( proof, leaf_raw, nullifier_hash_raw, root_raw, public_inputs_raw, )) } pub fn setup_keys_x5_4<E: PairingEngine, R: RngCore + CryptoRng>( curve: Curve, rng: &mut R, ) -> Result<(Vec<u8>, Vec<u8>), Error> { let params3 = setup_params_x5_3::<E::Fr>(curve); let params5 = setup_params_x5_4::<E::Fr>(curve); let prover = AnchorProverSetupBn254_30::new(params3, params5); let (circuit, ..) = prover.setup_random_circuit(rng)?; let (pk, vk) = setup_keys_unchecked::<E, _, _>(circuit, rng)?; Ok((pk, vk)) } pub struct AnchorProverSetup<F: PrimeField, const M: usize, const N: usize> { params3: PoseidonParameters<F>, params4: PoseidonParameters<F>, } impl<F: PrimeField, const N: usize, const M: usize> AnchorProverSetup<F, M, N> { pub fn new(params3: PoseidonParameters<F>, params4: PoseidonParameters<F>) -> Self { Self { params3, params4 } } pub fn setup_set(root: &F, roots: &[F; M]) -> Result<SetMembershipPrivate<F, M>, Error> { TestSetMembership::generate_secrets(root, roots) } pub fn setup_arbitrary_data( recipient: F, relayer: F, fee: F, refund: F, commitment: F, ) -> AnchorConstraintDataInput<F> { AnchorConstraintDataInput::new(recipient, relayer, fee, refund, commitment) } #[allow(clippy::too_many_arguments)] pub fn construct_public_inputs( chain_id: F, nullifier_hash: F, roots: [F; M], root: F, recipient: F, relayer: F, fee: F, refund: F, commitment: F, ) -> Vec<F> { let mut pub_ins = vec![chain_id, nullifier_hash]; pub_ins.extend(roots.to_vec()); pub_ins.extend(vec![root, recipient, relayer, fee, refund, commitment]); pub_ins } #[allow(clippy::too_many_arguments)] pub fn deconstruct_public_inputs( public_inputs: &[F], ) -> ( F, // Chain id F, // Nullifier Hash Vec<F>, // Roots F, // Root F, // Recipient F, // Relayer F, // Fee F, // Refund F, // Commitment ) { let chain_id: F = public_inputs[0]; let nullifier_hash = public_inputs[1]; let offset = 2 + M; let roots = public_inputs[2..offset].to_vec(); let root = public_inputs[offset + 1]; let recipient = public_inputs[offset + 2]; let relayer = public_inputs[offset + 3]; let fee = public_inputs[offset + 4]; let refund = public_inputs[offset + 5]; let commitments = public_inputs[offset + 6]; ( chain_id, nullifier_hash, roots, root, recipient, relayer, fee, refund, commitments, ) } pub fn setup_leaf<R: Rng>( &self, chain_id: F, rng: &mut R, ) -> Result<(LeafPrivate<F>, LeafPublic<F>, F, F), Error> { // Secret inputs for the leaf let leaf_private = LeafPrivate::generate(rng); // Public inputs for the leaf let leaf_public = LeafPublic::new(chain_id); // Creating the leaf let leaf_hash = AnchorLeaf::<F, PoseidonCRH_x5_4<F>>::create_leaf( &leaf_private, &leaf_public, &self.params4, )?; let nullifier_hash = AnchorLeaf::<F, PoseidonCRH_x5_4<F>>::create_nullifier(&leaf_private, &self.params4)?; Ok((leaf_private, leaf_public, 
leaf_hash, nullifier_hash)) } pub fn setup_leaf_with_privates( &self, chain_id: F, secret: F, nullfier: F, ) -> Result<(LeafPrivate<F>, LeafPublic<F>, F, F), Error> { // Secret inputs for the leaf let leaf_private = LeafPrivate::new(secret, nullfier); let leaf_public = LeafPublic::new(chain_id); // Creating the leaf let leaf_hash = Leaf_x5::create_leaf(&leaf_private, &leaf_public, &self.params4)?; let nullifier_hash = Leaf_x5::create_nullifier(&leaf_private, &self.params4)?; Ok((leaf_private, leaf_public, leaf_hash, nullifier_hash)) } #[allow(clippy::too_many_arguments)] pub fn setup_circuit<R: Rng>( self, chain_id: F, leaves: &[F], index: u64, roots: &[F], // only first M - 1 member will be used recipient: F, relayer: F, fee: F, refund: F, commitment: F, rng: &mut R, ) -> Result<(Circuit_x5<F, N, M>, F, F, F, Vec<F>), Error> { let arbitrary_input = Self::setup_arbitrary_data(recipient, relayer, fee, refund, commitment); let (leaf_private, leaf_public, leaf, nullifier_hash) = self.setup_leaf(chain_id, rng)?; let mut leaves_new = leaves.to_vec(); leaves_new.push(leaf); let (tree, path) = self.setup_tree_and_path(&leaves_new, index)?; let root = tree.root().inner(); let mut roots_new: [F; M] = [F::default(); M]; roots_new[0] = root; let size_to_copy = if roots.len() > (M - 1) { M - 1 } else { roots.len() }; for i in 0..size_to_copy { roots_new[i + 1] = roots[i]; } let set_private_inputs = Self::setup_set(&root, &roots_new)?; let mc = Circuit_x5::new( arbitrary_input.clone(), leaf_private, leaf_public, set_private_inputs, roots_new, self.params4, path, root.clone(), nullifier_hash, ); let public_inputs = Self::construct_public_inputs( chain_id, nullifier_hash, roots_new, root, recipient, relayer, fee, refund, commitment, ); Ok((mc, leaf, nullifier_hash, root, public_inputs)) } #[allow(clippy::too_many_arguments)] pub fn setup_circuit_with_privates( self, chain_id: F, secret: F, nullifier: F, leaves: &[F], index: u64, roots: &[F], // only first M - 1 member will be used recipient: F, relayer: F, fee: F, refund: F, commitment: F, ) -> Result<(Circuit_x5<F, N, M>, F, F, F, Vec<F>), Error> { let arbitrary_input = Self::setup_arbitrary_data(recipient, relayer, fee, refund, commitment); let (leaf_private, leaf_public, leaf, nullifier_hash) = self.setup_leaf_with_privates(chain_id, secret, nullifier)?; let mut leaves_new = leaves.to_vec(); leaves_new.push(leaf); let (tree, path) = self.setup_tree_and_path(&leaves_new, index)?; let root = tree.root().inner(); let mut roots_new: [F; M] = [F::default(); M]; roots_new[0] = root; let size_to_copy = if roots.len() > (M - 1) { M - 1 } else { roots.len() }; for i in 0..size_to_copy { roots_new[i + 1] = roots[i]; } let set_private_inputs = Self::setup_set(&root, &roots_new)?; let mc = Circuit_x5::new( arbitrary_input.clone(), leaf_private, leaf_public, set_private_inputs, roots_new, self.params4, path, root.clone(), nullifier_hash, ); let public_inputs = Self::construct_public_inputs( chain_id, nullifier_hash, roots_new, root, recipient, relayer, fee, refund, commitment, ); Ok((mc, leaf, nullifier_hash, root, public_inputs)) } pub fn setup_random_circuit<R: Rng>( self, rng: &mut R, ) -> Result<(Circuit_x5<F, N, M>, F, F, F, Vec<F>), Error> { let chain_id = F::rand(rng); let leaves = Vec::new(); let index = 0; let roots = Vec::new(); let recipient = F::rand(rng); let relayer = F::rand(rng); let fee = F::rand(rng); let refund = F::rand(rng); let commitment = F::rand(rng); self.setup_circuit( chain_id, &leaves, index, &roots, recipient, relayer, fee, refund, 
commitment, rng, ) } pub fn setup_circuit_with_privates_raw( self, chain_id: Vec<u8>, secret: Vec<u8>, nullifier: Vec<u8>, leaves: Vec<Vec<u8>>, index: u64, roots: Vec<Vec<u8>>, recipient: Vec<u8>, relayer: Vec<u8>, commitment: Vec<u8>, fee: u128, refund: u128, ) -> Result<(Circuit_x5<F, N, M>, Vec<u8>, Vec<u8>, Vec<u8>, Vec<Vec<u8>>), Error> { let chain_id_f = F::from_le_bytes_mod_order(&chain_id); let secret_f = F::from_le_bytes_mod_order(&secret); let nullifier_f = F::from_le_bytes_mod_order(&nullifier); let leaves_f: Vec<F> = leaves .iter() .map(|x| F::from_le_bytes_mod_order(x)) .collect(); let roots_f: Vec<F> = roots .iter() .map(|x| F::from_le_bytes_mod_order(&x)) .collect(); let recipient_f = F::from_le_bytes_mod_order(&recipient); let relayer_f = F::from_le_bytes_mod_order(&relayer); let commitment_f = F::from_le_bytes_mod_order(&commitment); let fee_f = F::from(fee); let refund_f = F::from(refund); let (mc, leaf, nullifier_hash, root, public_inputs) = self.setup_circuit_with_privates( chain_id_f, secret_f, nullifier_f, &leaves_f, index, &roots_f, recipient_f, relayer_f, fee_f, refund_f, commitment_f, )?; let leaf_raw = leaf.into_repr().to_bytes_le(); let nullifier_hash_raw = nullifier_hash.into_repr().to_bytes_le(); let root_raw = root.into_repr().to_bytes_le(); let public_inputs_raw: Vec<Vec<u8>> = public_inputs .iter() .map(|x| x.into_repr().to_bytes_le()) .collect(); Ok(( mc, leaf_raw, nullifier_hash_raw, root_raw, public_inputs_raw, )) } pub fn setup_tree(&self, leaves: &[F]) -> Result<Tree_x5<F>, Error> { let inner_params = Rc::new(self.params3.clone()); let mt = Tree_x5::new_sequential(inner_params, Rc::new(()), leaves)?; Ok(mt) } pub fn setup_tree_and_path( &self, leaves: &[F], index: u64, ) -> Result<(Tree_x5<F>, Path<TreeConfig_x5<F>, N>), Error> { // Making the merkle tree let mt = self.setup_tree(leaves)?; // Getting the proof path let path = mt.generate_membership_proof(index); Ok((mt, path)) } }
26.015414
99
0.681751
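The index arithmetic in `deconstruct_public_inputs` has to mirror `construct_public_inputs` exactly. A std-only sketch of that layout, with `u64` stand-ins for field elements and an illustrative `M = 2` (the `layout` helper is not part of the crate's API):

```
// Std-only sketch; M, the u64 stand-ins, and `layout` are illustrative only.
const M: usize = 2;

fn layout(chain_id: u64, nullifier_hash: u64, roots: [u64; M], root: u64,
          recipient: u64, relayer: u64, fee: u64, refund: u64, commitment: u64) -> Vec<u64> {
    // Same ordering as construct_public_inputs above.
    let mut pub_ins = vec![chain_id, nullifier_hash];
    pub_ins.extend(roots.to_vec());
    pub_ins.extend(vec![root, recipient, relayer, fee, refund, commitment]);
    pub_ins
}

fn main() {
    let ins = layout(1, 2, [10, 11], 20, 30, 40, 5, 6, 7);
    let offset = 2 + M;                          // the M roots occupy indices 2..2 + M
    assert_eq!(&ins[2..offset], &[10, 11][..]);
    assert_eq!(ins[offset], 20);                 // root sits at `offset`, not `offset + 1`
    assert_eq!(ins[offset + 5], 7);              // commitment is the final element
}
```

Running this confirms that `root` lives at index `2 + M` and `commitment` at `2 + M + 5`, which is what the deconstruction above reads back.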
de494a565bb03ba7a0594aab5352ae8a0e13e057
287
use actix_web::web; mod items; use super::path::Path; pub fn app_factory(app: &mut web::ServiceConfig) { let base_path: Path = Path { prefix: String::from("/"), }; app.route( &base_path.define(String::from("")), web::get().to(items::items), ); }
20.5
50
0.571429
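The `Path` helper comes from the sibling `path` module, which is not part of this record; a minimal reconstruction consistent with how `app_factory` calls it might look as follows (the body of `define` is an assumption):

```
// Hypothetical reconstruction of the `Path` helper used by `app_factory` above;
// only its usage is known from this record, so the join logic is an assumption.
pub struct Path {
    pub prefix: String,
}

impl Path {
    // Joins a route suffix onto the configured prefix, e.g.
    // Path { prefix: "/".into() }.define("items".into()) == "/items".
    pub fn define(&self, suffix: String) -> String {
        format!("{}{}", self.prefix, suffix)
    }
}

fn main() {
    let base = Path { prefix: String::from("/") };
    assert_eq!(base.define(String::from("")), "/");
    assert_eq!(base.define(String::from("items")), "/items");
}
```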
d9b4552501304bd2460fd70bd193ee9ab72ecf46
17,017
//! Transforms which fuse loops to reduce memory movement and prevent unnecessary //! traversals of data. use ast::*; use ast::ExprKind::*; use ast::Type::*; use ast::BuilderKind::*; use ast::LiteralKind::*; use error::*; use annotations::*; use exprs; use super::inliner::inline_apply; use util::SymbolGenerator; /// Fuses loops where one for loop takes another as its input, which prevents intermediate results /// from being materialized. pub struct VerticalLoopFusion; /// Fuses for loops over the same vector in a zip into a single for loop which produces a vector of /// structs directly. /// /// Some examples: /// /// for(zip( /// result(for(a, appender, ...)) /// result(for(a, appender, ...)) /// ), ...) /// /// will become for(result(for(a, ...))) where the nested for will produce a vector of structs with /// two elements. /// /// Caveats: /// - Like all Zip-based transforms, this function currently assumes that the output of each /// expression in the Zip is the same length. /// pub fn fuse_loops_horizontal(expr: &mut Expr<Type>) { expr.transform(&mut |ref mut expr| { let mut sym_gen = SymbolGenerator::from_expression(expr); if let For{iters: ref all_iters, builder: ref outer_bldr, func: ref outer_func} = expr.kind { if all_iters.len() > 1 { // Vector of tuples containing the params and expressions of functions in nested lambdas. let mut lambdas = vec![]; let mut common_data = None; // Used to check if the same rows of each output are touched by the outer for. let first_iter = (&all_iters[0].start, &all_iters[0].end, &all_iters[0].stride); // First, check if all the lambdas are over the same vector and have a pattern we can merge. // Below, for each iterator in the for loop, we check if each nested for loop is // over the same vector and has the same Iter parameters (i.e., same start, end, stride). if all_iters.iter().all(|ref iter| { if (&iter.start, &iter.end, &iter.stride) == first_iter { // Make sure each nested for loop follows the `result(for(a, appender, ...))` pattern. if let Res{builder: ref res_bldr} = iter.data.kind { if let For{iters: ref iters2, builder: ref bldr2, func: ref lambda} = res_bldr.kind { if common_data.is_none() { common_data = Some(iters2.clone()); } if iters2 == common_data.as_ref().unwrap() { if let NewBuilder(_) = bldr2.kind { if let Builder(ref kind, _) = bldr2.ty { if let Appender(_) = *kind { if let Lambda{params: ref args, ref body} = lambda.kind { if let Merge{ref builder, ref value} = body.kind { if let Ident(ref n) = builder.kind { if *n == args[0].name { // Save the arguments and expressions for the function so // they can be used for fusion later. lambdas.push((args.clone(), value.clone())); return true } } } } } } } } } } } // The pattern doesn't match for some Iter -- abort the transform. return false }) { // All Iters are over the same range and same vector, with a pattern we can // transform. Produce the new expression by zipping the functions of each // nested for into a single merge into a struct. // Zip the expressions to create an appender whose merge (value) type is a struct. let merge_type = Struct(lambdas.iter().map(|ref e| e.1.ty.clone()).collect::<Vec<_>>()); // TODO(Deepak): Fix this to something meaningful. let builder_type = Builder(Appender(Box::new(merge_type.clone())), Annotations::new()); // The element type remains unchanged. let func_elem_type = lambdas[0].0[2].ty.clone(); // Parameters for the new fused function. Symbol names are generated using symbol // names for the builder and element from an existing function.
let new_params = vec![ Parameter{ty: builder_type.clone(), name: sym_gen.new_symbol(&lambdas[0].0[0].name.name)}, Parameter{ty: Scalar(ScalarKind::I64), name: sym_gen.new_symbol(&lambdas[0].0[1].name.name)}, Parameter{ty: func_elem_type.clone(), name: sym_gen.new_symbol(&lambdas[0].0[2].name.name)}, ]; // Generate Ident expressions for the new symbols and substitute them in the // functions' merge expressions. let new_bldr_expr = Expr { ty: builder_type.clone(), kind: Ident(new_params[0].name.clone()), annotations: Annotations::new(), }; let new_index_expr = Expr { ty: Scalar(ScalarKind::I64), kind: Ident(new_params[1].name.clone()), annotations: Annotations::new(), }; let new_elem_expr = Expr { ty: func_elem_type.clone(), kind: Ident(new_params[2].name.clone()), annotations: Annotations::new(), }; for &mut (ref mut args, ref mut expr) in lambdas.iter_mut() { expr.substitute(&args[0].name, &new_bldr_expr); expr.substitute(&args[1].name, &new_index_expr); expr.substitute(&args[2].name, &new_elem_expr); } // Build up the new expression. The new expression merges structs into an // appender, where each struct field is an expression which was merged into an // appender in one of the original functions. For example, if there were two // zipped fors in the original expression with lambdas |b1,e1| merge(b1, // e1+1) and |b2,e2| merge(b2, e2+2), the new expression would be merge(b, // {e+1,e+2}) into a new builder b of type appender[{i32,i32}]. e1, e2, and e // refer to the same element in the expressions above since we check to ensure // each zipped for is over the same input data. let new_merge_expr = Expr{ ty: builder_type.clone(), kind: Merge{ builder: Box::new(new_bldr_expr), value: Box::new(Expr{ ty: merge_type.clone(), kind: MakeStruct{elems: lambdas.iter().map(|ref lambda| *lambda.1.clone()).collect::<Vec<_>>()}, annotations: Annotations::new(), }) }, annotations: Annotations::new(), }; let new_func = Expr{ ty: Function(new_params.iter().map(|ref p| p.ty.clone()).collect::<Vec<_>>(), Box::new(builder_type.clone())), kind: Lambda{params: new_params, body: Box::new(new_merge_expr)}, annotations: Annotations::new(), }; let new_iter_expr = Expr{ ty: Vector(Box::new(merge_type.clone())), kind: Res{builder: Box::new(Expr{ ty: builder_type.clone(), kind: For{iters: common_data.unwrap(), builder: Box::new(Expr{ty: builder_type.clone(), kind: NewBuilder(None), annotations: Annotations::new()}), func: Box::new(new_func)}, annotations: Annotations::new(), })}, annotations: Annotations::new(), }; // TODO(shoumik): Any way to avoid the clones here? return Some(Expr{ ty: expr.ty.clone(), kind: For{iters: vec![Iter{ data: Box::new(new_iter_expr), start: all_iters[0].start.clone(), end: all_iters[0].end.clone(), stride: all_iters[0].stride.clone(), kind: all_iters[0].kind.clone(), }], builder: outer_bldr.clone(), func: outer_func.clone()}, annotations: Annotations::new(), }); } } } None }); } /// Fuses loops where one for loop takes another as it's input, which prevents intermediate results /// from being materialized. 
pub fn fuse_loops_vertical(expr: &mut Expr<Type>) { expr.transform_and_continue_res(&mut |ref mut expr| { let mut sym_gen = SymbolGenerator::from_expression(expr); if let For { iters: ref all_iters, builder: ref bldr1, func: ref nested } = expr.kind { if all_iters.len() == 1 { let ref iter1 = all_iters[0]; if let Res { builder: ref res_bldr } = iter1.data.kind { if let For { iters: ref iters2, builder: ref bldr2, func: ref lambda, } = res_bldr.kind { if iters2.iter().all(|ref i| consumes_all(&i)) { if let NewBuilder(_) = bldr2.kind { if let Builder(ref kind, _) = bldr2.ty { if let Appender(_) = *kind { let mut e = exprs::for_expr(iters2.clone(), *bldr1.clone(), replace_builder(lambda, nested, &mut sym_gen)?, false)?; e.annotations = expr.annotations.clone(); return Ok((Some(e), true)); } } } } } } } } Ok((None, true)) }); } /// Given an iterator, returns whether the iterator consumes every element of its data vector. fn consumes_all(iter: &Iter<Type>) -> bool { if let &Iter { start: None, end: None, stride: None, .. } = iter { return true; } else if let &Iter { ref data, start: Some(ref start), end: Some(ref end), stride: Some(ref stride), .. } = iter { // Checks if the stride is 1 and an entire vector represented by a symbol is consumed. if let (&Literal(I64Literal(1)), &Literal(I64Literal(0)), &Ident(ref name), &Length { data: ref v }) = (&stride.kind, &start.kind, &data.kind, &end.kind) { if let Ident(ref vsym) = v.kind { return vsym == name; } } // Checks if an entire vector literal is consumed. if let (&Literal(I64Literal(1)), &Literal(I64Literal(0)), &MakeVector { ref elems }) = (&stride.kind, &start.kind, &data.kind) { let num_elems = elems.len() as i64; if let Literal(I64Literal(x)) = end.kind { return num_elems == x; } } } false } /// Given a lambda which takes a builder and an argument, returns a new function which takes a new /// builder type and calls nested on the values it would've merged into its old builder. This /// allows us to "compose" merge functions and avoid creating intermediate results. fn replace_builder(lambda: &Expr<Type>, nested: &Expr<Type>, sym_gen: &mut SymbolGenerator) -> WeldResult<Expr<Type>> { // Tests whether an identifier and symbol refer to the same value by // comparing the symbols. fn same_iden(a: &ExprKind<Type>, b: &Symbol) -> bool { if let Ident(ref symbol) = *a { symbol == b } else { false } } if let Lambda { params: ref args, ref body } = lambda.kind { if let Lambda { params: ref nested_args, .. } = nested.kind { let mut new_body = *body.clone(); let ref old_bldr = args[0]; let ref old_index = args[1]; let ref old_arg = args[2]; let new_bldr_sym = sym_gen.new_symbol(&old_bldr.name.name); let new_index_sym = sym_gen.new_symbol(&old_index.name.name); let new_bldr = exprs::ident_expr(new_bldr_sym.clone(), nested_args[0].ty.clone())?; let new_index = exprs::ident_expr(new_index_sym.clone(), nested_args[1].ty.clone())?; // Fix expressions to use the new builder. 
new_body.transform_and_continue_res(&mut |ref mut e| match e.kind { Merge { ref builder, ref value } if same_iden(&(*builder).kind, &old_bldr.name) => { let params: Vec<Expr<Type>> = vec![new_bldr.clone(), new_index.clone(), *value.clone()]; let mut expr = exprs::apply_expr(nested.clone(), params)?; inline_apply(&mut expr); Ok((Some(expr), true)) } For { iters: ref data, builder: ref bldr, ref func } if same_iden(&(*bldr).kind, &old_bldr.name) => { let expr = exprs::for_expr(data.clone(), new_bldr.clone(), replace_builder(func, nested, sym_gen)?, false)?; Ok((Some(expr), false)) } Ident(ref mut symbol) if *symbol == old_bldr.name => { Ok((Some(new_bldr.clone()), false)) } Ident(ref mut symbol) if *symbol == old_index.name => { Ok((Some(new_index.clone()), false)) } _ => Ok((None, true)), }); // Fix types to make sure the return type propagates through all subexpressions. match_types(&new_bldr.ty, &mut new_body); let new_params = vec![Parameter { ty: new_bldr.ty.clone(), name: new_bldr_sym.clone(), }, Parameter { ty: Scalar(ScalarKind::I64), name: new_index_sym.clone(), }, Parameter { ty: old_arg.ty.clone(), name: old_arg.name.clone(), }]; return exprs::lambda_expr(new_params, new_body); } } return weld_err!("Inconsistency in replace_builder"); } /// Given a root type, forces each expression to return that type. TODO For now, only supporting /// expressions which can be builders. We might want to factor this out to be somewhere else. fn match_types(root_ty: &Type, expr: &mut Expr<Type>) { expr.ty = root_ty.clone(); match expr.kind { If { ref mut on_true, ref mut on_false, ..} => { match_types(root_ty, on_true); match_types(root_ty, on_false); } Select { ref mut on_true, ref mut on_false, ..} => { match_types(root_ty, on_true); match_types(root_ty, on_false); } Let { ref mut body, ..} => { match_types(root_ty, body); } _ => {} }; }
50.495549
201
0.475289
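The rewrite that `fuse_loops_horizontal` performs has a direct analogue in ordinary iterator code: rather than materializing one vector per lambda and zipping them afterwards, a single traversal can merge a struct of both results. A std-only illustration of the idea (plain Rust, not Weld IR):

```
fn main() {
    let a = vec![1, 2, 3];

    // Unfused: two intermediate vectors, then a zip over both.
    let left: Vec<i32> = a.iter().map(|e| e + 1).collect();
    let right: Vec<i32> = a.iter().map(|e| e + 2).collect();
    let zipped: Vec<(i32, i32)> = left.into_iter().zip(right).collect();

    // Fused: one traversal producing a vector of structs (tuples here),
    // which is what the rewritten `for` with a struct merge achieves.
    let fused: Vec<(i32, i32)> = a.iter().map(|e| (e + 1, e + 2)).collect();

    assert_eq!(zipped, fused);
}
```

The transform is valid precisely because both nested loops read the same rows of the same vector, which is what the `first_iter`/`common_data` checks above enforce.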
87f7f2cbaec067457b0e3de2108af4fbe542ad8a
623
use serde::{Deserialize, Serialize}; #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Deserialize, Serialize)] pub struct KeyboardButton { pub text: String, #[serde(skip_serializing_if = "Option::is_none")] pub request_contact: Option<bool>, #[serde(skip_serializing_if = "Option::is_none")] pub request_location: Option<bool>, // TODO: request_poll } impl KeyboardButton { pub fn new<T>(text: T) -> Self where T: Into<String>, { Self { text: text.into(), request_contact: None, request_location: None, } } }
24.92
85
0.613162
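Thanks to the `skip_serializing_if` attributes, unset options disappear from the wire format entirely. A usage sketch for the `KeyboardButton` type above (assumes `serde` and `serde_json` as dependencies):

```
fn main() {
    let mut btn = KeyboardButton::new("Share phone number");
    btn.request_contact = Some(true);

    // `request_location` is None, so it is omitted from the JSON entirely.
    let json = serde_json::to_string(&btn).unwrap();
    assert_eq!(json, r#"{"text":"Share phone number","request_contact":true}"#);
}
```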
ac575813e36d4e9499181ada88c293f70aa3bc9c
7,195
//! Methods for creating iscsi targets. //! //! We create a wildcard portal and initiator groups when mayastor starts up. //! These groups allow unauthenticated access for any initiator. Then when //! exporting a replica we use these default groups and create one target per //! replica with one lun - LUN0. use std::{ cell::RefCell, ffi::CString, os::raw::{c_char, c_int}, ptr, }; use futures::channel::oneshot; use nix::errno::Errno; use snafu::{ResultExt, Snafu}; use spdk_sys::{ spdk_bdev_get_name, spdk_iscsi_find_tgt_node, spdk_iscsi_init_grp_create_from_initiator_list, spdk_iscsi_init_grp_destroy, spdk_iscsi_init_grp_unregister, spdk_iscsi_portal_create, spdk_iscsi_portal_grp_add_portal, spdk_iscsi_portal_grp_create, spdk_iscsi_portal_grp_open, spdk_iscsi_portal_grp_register, spdk_iscsi_portal_grp_release, spdk_iscsi_portal_grp_unregister, spdk_iscsi_shutdown_tgt_node_by_name, spdk_iscsi_tgt_node_construct, }; use crate::{ core::Bdev, ffihelper::{cb_arg, done_errno_cb, ErrnoResult}, jsonrpc::{Code, RpcErrorCode}, }; /// iSCSI target related errors #[derive(Debug, Snafu)] pub enum Error { #[snafu(display("Failed to create default portal group"))] CreatePortalGroup {}, #[snafu(display("Failed to create default iscsi portal"))] CreatePortal {}, #[snafu(display("Failed to add default portal to portal group"))] AddPortal {}, #[snafu(display("Failed to register default portal group"))] RegisterPortalGroup {}, #[snafu(display("Failed to create default initiator group"))] CreateInitiatorGroup {}, #[snafu(display("Failed to create iscsi target"))] CreateTarget {}, #[snafu(display("Failed to destroy iscsi target"))] DestroyTarget { source: Errno }, } impl RpcErrorCode for Error { fn rpc_error_code(&self) -> Code { Code::InternalError } } type Result<T, E = Error> = std::result::Result<T, E>; /// iscsi target port number const ISCSI_PORT: u16 = 3260; thread_local! { /// iscsi global state. /// /// It is thread-local because TLS is safe to access in rust without any /// synchronization overhead. It should be accessed only from /// reactor_0 thread. /// /// A counter used for assigning idx to newly created iscsi targets. static ISCSI_IDX: RefCell<i32> = RefCell::new(0); /// IP address of iscsi portal used for all created iscsi targets. static ADDRESS: RefCell<Option<String>> = RefCell::new(None); } /// Generate iqn based on provided uuid fn target_name(uuid: &str) -> String { format!("iqn.2019-05.io.openebs:{}", uuid) } /// Create iscsi portal and initiator group which will be used later when /// creating iscsi targets. 
pub fn init(address: &str) -> Result<()> { let portal_host = CString::new(address.to_owned()).unwrap(); let portal_port = CString::new(ISCSI_PORT.to_string()).unwrap(); let initiator_host = CString::new("ANY").unwrap(); let initiator_netmask = CString::new("ANY").unwrap(); let pg = unsafe { spdk_iscsi_portal_grp_create(0) }; if pg.is_null() { return Err(Error::CreatePortalGroup {}); } unsafe { let p = spdk_iscsi_portal_create( portal_host.as_ptr(), portal_port.as_ptr(), ); if p.is_null() { spdk_iscsi_portal_grp_release(pg); return Err(Error::CreatePortal {}); } spdk_iscsi_portal_grp_add_portal(pg, p); if spdk_iscsi_portal_grp_open(pg) != 0 { spdk_iscsi_portal_grp_release(pg); return Err(Error::AddPortal {}); } if spdk_iscsi_portal_grp_register(pg) != 0 { spdk_iscsi_portal_grp_release(pg); return Err(Error::RegisterPortalGroup {}); } } debug!("Created default iscsi portal group"); unsafe { if spdk_iscsi_init_grp_create_from_initiator_list( 0, 1, &mut (initiator_host.as_ptr() as *mut c_char) as *mut _, 1, &mut (initiator_netmask.as_ptr() as *mut c_char) as *mut _, ) != 0 { spdk_iscsi_portal_grp_release(pg); return Err(Error::CreateInitiatorGroup {}); } } ADDRESS.with(move |addr| { *addr.borrow_mut() = Some(address.to_owned()); }); debug!("Created default iscsi initiator group"); Ok(()) } /// Destroy iscsi default portal and initiator group. pub fn fini() { unsafe { let ig = spdk_iscsi_init_grp_unregister(0); if !ig.is_null() { spdk_iscsi_init_grp_destroy(ig); } let pg = spdk_iscsi_portal_grp_unregister(0); if !pg.is_null() { spdk_iscsi_portal_grp_release(pg); } } } /// Export given bdev over iscsi. That involves creating iscsi target and /// adding the bdev as LUN to it. pub fn share(uuid: &str, bdev: &Bdev) -> Result<()> { let iqn = target_name(uuid); let c_iqn = CString::new(iqn.clone()).unwrap(); let mut group_idx: c_int = 0; let mut lun_id: c_int = 0; let idx = ISCSI_IDX.with(move |iscsi_idx| { let idx = *iscsi_idx.borrow(); *iscsi_idx.borrow_mut() = idx + 1; idx }); let tgt = unsafe { spdk_iscsi_tgt_node_construct( idx, c_iqn.as_ptr(), ptr::null(), &mut group_idx as *mut _, &mut group_idx as *mut _, 1, // portal and initiator group list length &mut spdk_bdev_get_name(bdev.as_ptr()), &mut lun_id as *mut _, 1, // length of lun id list 128, // max queue depth false, // disable chap false, // require chap false, // mutual chap 0, // chap group false, // header digest false, // data digest ) }; if tgt.is_null() { Err(Error::CreateTarget {}) } else { info!("Created iscsi target {}", iqn); Ok(()) } } /// Undo export of a bdev over iscsi done above. pub async fn unshare(uuid: &str) -> Result<()> { let (sender, receiver) = oneshot::channel::<ErrnoResult<()>>(); let iqn = target_name(uuid); let c_iqn = CString::new(iqn.clone()).unwrap(); debug!("Destroying iscsi target {}", iqn); unsafe { spdk_iscsi_shutdown_tgt_node_by_name( c_iqn.as_ptr(), Some(done_errno_cb), cb_arg(sender), ); } receiver .await .expect("Cancellation is not supported") .context(DestroyTarget {})?; info!("Destroyed iscsi target {}", uuid); Ok(()) } /// Return iscsi target URI understood by nexus pub fn get_uri(uuid: &str) -> Option<String> { let iqn = target_name(uuid); let c_iqn = CString::new(iqn.clone()).unwrap(); let tgt = unsafe { spdk_iscsi_find_tgt_node(c_iqn.as_ptr()) }; if tgt.is_null() { return None; } ADDRESS.with(move |a| { let a_borrow = a.borrow(); let address = a_borrow.as_ref().unwrap(); Some(format!("iscsi://{}:{}/{}", address, ISCSI_PORT, iqn)) }) }
30.35865
77
0.61501
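Both the IQN and the share URI are derived purely from strings, so they can be computed without touching SPDK. A std-only sketch mirroring `target_name` and the formatting in `get_uri` (the host address is a placeholder):

```
const ISCSI_PORT: u16 = 3260;

// Same naming scheme as `target_name` in the module above.
fn target_name(uuid: &str) -> String {
    format!("iqn.2019-05.io.openebs:{}", uuid)
}

fn main() {
    let uuid = "11111111-2222-3333-4444-555555555555";
    let iqn = target_name(uuid);
    // `get_uri` assembles the URI from the portal address, port, and IQN.
    let uri = format!("iscsi://{}:{}/{}", "192.168.0.1", ISCSI_PORT, iqn);
    assert_eq!(
        uri,
        "iscsi://192.168.0.1:3260/iqn.2019-05.io.openebs:11111111-2222-3333-4444-555555555555"
    );
}
```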
de25ce8daf31b1530f5a09d3c090b77b81141ea2
1,399
use std::sync::atomic::Ordering; use ::key::{hashkey_to_string}; use ::table::*; use ::state::SlotState; pub struct CounterIter<'a> { pub slots: &'a VectorTable, pub index: usize, } impl<'a> Iterator for CounterIter<'a> { type Item = (String, usize); fn next(&mut self) -> Option<Self::Item> { let ret; loop { let slot_opt = self.slots.get_index(self.index); match slot_opt { Some(slot) => match slot.state.get() { SlotState::Alive | SlotState::Copying | SlotState::Copied => { let key_ptr = slot.key.load(Ordering::Acquire); let val = slot.value.load(Ordering::Relaxed); if key_ptr.is_null() { panic!("Iterator found an active slot with a null key"); } let key = unsafe { hashkey_to_string(&(*key_ptr)) }; self.index += 1; ret = Some((key, val)); break; }, _ => { self.index += 1; } }, None => { self.slots.remove_thread(); ret = None; break; }, } } ret } }
30.413043
84
0.411008
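The shape of `next` above -- loop, classify the slot, then yield, skip, or stop -- is the usual pattern for iterating an open-addressed table. A simplified std-only analogue with `Option` slots standing in for the atomic `SlotState` machinery:

```
struct MiniIter<'a> {
    slots: &'a [Option<(String, usize)>],
    index: usize,
}

impl<'a> Iterator for MiniIter<'a> {
    type Item = (String, usize);

    fn next(&mut self) -> Option<Self::Item> {
        // Skip dead (None) slots until a live one or the end is reached.
        while self.index < self.slots.len() {
            let slot = &self.slots[self.index];
            self.index += 1;
            if let Some((key, val)) = slot {
                return Some((key.clone(), *val));
            }
        }
        None
    }
}

fn main() {
    let slots = vec![None, Some(("a".to_string(), 1)), None, Some(("b".to_string(), 2))];
    let items: Vec<_> = MiniIter { slots: &slots, index: 0 }.collect();
    assert_eq!(items, vec![("a".to_string(), 1), ("b".to_string(), 2)]);
}
```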
0900941161f30988b77e3fbf49b67d92d30339cb
1,368
//! A global executor built on top of async-executor and async_io //! //! The global executor is lazily spawned on first use. It spawns as many threads //! as the number of cpus by default. You can override this using the //! `ASYNC_GLOBAL_EXECUTOR_THREADS` environment variable. //! //! # Examples //! //! ``` //! # use futures_lite::future; //! //! // spawn a task on the multi-threaded executor //! let task1 = async_global_executor::spawn(async { //! 1 + 2 //! }); //! // spawn a task on the local executor (same thread) //! let task2 = async_global_executor::spawn_local(async { //! 3 + 4 //! }); //! let task = future::zip(task1, task2); //! //! // run the executor //! async_global_executor::block_on(async { //! assert_eq!(task.await, (3, 7)); //! }); //! ``` #![forbid(unsafe_code)] #![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)] #[cfg(doctest)] doc_comment::doctest!("../README.md"); pub use async_executor::Task; pub use config::GlobalExecutorConfig; pub use executor::{block_on, spawn, spawn_blocking, spawn_local}; pub use init::{init, init_with_config}; pub use threading::{spawn_more_threads, stop_current_thread, stop_thread}; mod config; mod executor; mod init; mod reactor; mod threading; #[cfg(feature = "tokio")] mod tokio; #[cfg(feature = "tokio02")] mod tokio02; #[cfg(feature = "tokio03")] mod tokio03;
26.307692
81
0.687135
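Alongside `spawn` and `spawn_local`, the re-exported `spawn_blocking` is the intended home for blocking or CPU-heavy work. A short usage sketch (assumes `async-global-executor` as a dependency; API per the re-exports listed above):

```
fn main() {
    async_global_executor::block_on(async {
        // Offload a blocking computation so executor threads stay responsive.
        let sum = async_global_executor::spawn_blocking(|| (1..=100u32).sum::<u32>()).await;
        assert_eq!(sum, 5050);
    });
}
```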
7a8160029fb5588b54f991d04ec651ed949a72a7
26,343
//! Implementation of various bits and pieces of the `panic!` macro and //! associated runtime pieces. //! //! Specifically, this module contains the implementation of: //! //! * Panic hooks //! * Executing a panic up to doing the actual implementation //! * Shims around "try" #![deny(unsafe_op_in_unsafe_fn)] use core::panic::{BoxMeUp, Location, PanicInfo}; use crate::any::Any; use crate::fmt; use crate::intrinsics; use crate::mem::{self, ManuallyDrop}; use crate::process; use crate::sync::atomic::{AtomicBool, Ordering}; use crate::sys::stdio::panic_output; use crate::sys_common::backtrace::{self, RustBacktrace}; use crate::sys_common::rwlock::StaticRWLock; use crate::sys_common::thread_info; use crate::thread; #[cfg(not(test))] use crate::io::set_output_capture; // make sure to use the stderr output configured // by libtest in the real copy of std #[cfg(test)] use realstd::io::set_output_capture; // Binary interface to the panic runtime that the standard library depends on. // // The standard library is tagged with `#![needs_panic_runtime]` (introduced in // RFC 1513) to indicate that it requires some other crate tagged with // `#![panic_runtime]` to exist somewhere. Each panic runtime is intended to // implement these symbols (with the same signatures) so we can get matched up // to them. // // One day this may look a little less ad-hoc with the compiler helping out to // hook up these functions, but it is not this day! #[allow(improper_ctypes)] extern "C" { fn __rust_panic_cleanup(payload: *mut u8) -> *mut (dyn Any + Send + 'static); } #[allow(improper_ctypes)] extern "C-unwind" { /// `payload` is passed through another layer of raw pointers as `&mut dyn Trait` is not /// FFI-safe. `BoxMeUp` lazily performs allocation only when needed (this avoids allocations /// when using the "abort" panic runtime). fn __rust_start_panic(payload: *mut &mut dyn BoxMeUp) -> u32; } /// This function is called by the panic runtime if FFI code catches a Rust /// panic but doesn't rethrow it. We don't support this case since it messes /// with our panic count. #[cfg(not(test))] #[rustc_std_internal_symbol] extern "C" fn __rust_drop_panic() -> ! { rtabort!("Rust panics must be rethrown"); } /// This function is called by the panic runtime if it catches an exception /// object which does not correspond to a Rust panic. #[cfg(not(test))] #[rustc_std_internal_symbol] extern "C" fn __rust_foreign_exception() -> ! { rtabort!("Rust cannot catch foreign exceptions"); } #[derive(Copy, Clone)] enum Hook { Default, Custom(*mut (dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send)), } static HOOK_LOCK: StaticRWLock = StaticRWLock::new(); static mut HOOK: Hook = Hook::Default; /// Registers a custom panic hook, replacing any that was previously registered. /// /// The panic hook is invoked when a thread panics, but before the panic runtime /// is invoked. As such, the hook will run with both the aborting and unwinding /// runtimes. The default hook prints a message to standard error and generates /// a backtrace if requested, but this behavior can be customized with the /// `set_hook` and [`take_hook`] functions. /// /// [`take_hook`]: ./fn.take_hook.html /// /// The hook is provided with a `PanicInfo` struct which contains information /// about the origin of the panic, including the payload passed to `panic!` and /// the source code location from which the panic originated. /// /// The panic hook is a global resource. /// /// # Panics /// /// Panics if called from a panicking thread. 
/// /// # Examples /// /// The following will print "Custom panic hook": /// /// ```should_panic /// use std::panic; /// /// panic::set_hook(Box::new(|_| { /// println!("Custom panic hook"); /// })); /// /// panic!("Normal panic"); /// ``` #[stable(feature = "panic_hooks", since = "1.10.0")] pub fn set_hook(hook: Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send>) { if thread::panicking() { panic!("cannot modify the panic hook from a panicking thread"); } unsafe { let guard = HOOK_LOCK.write(); let old_hook = HOOK; HOOK = Hook::Custom(Box::into_raw(hook)); drop(guard); if let Hook::Custom(ptr) = old_hook { #[allow(unused_must_use)] { Box::from_raw(ptr); } } } } /// Unregisters the current panic hook, returning it. /// /// *See also the function [`set_hook`].* /// /// [`set_hook`]: ./fn.set_hook.html /// /// If no custom hook is registered, the default hook will be returned. /// /// # Panics /// /// Panics if called from a panicking thread. /// /// # Examples /// /// The following will print "Normal panic": /// /// ```should_panic /// use std::panic; /// /// panic::set_hook(Box::new(|_| { /// println!("Custom panic hook"); /// })); /// /// let _ = panic::take_hook(); /// /// panic!("Normal panic"); /// ``` #[stable(feature = "panic_hooks", since = "1.10.0")] pub fn take_hook() -> Box<dyn Fn(&PanicInfo<'_>) + 'static + Sync + Send> { if thread::panicking() { panic!("cannot modify the panic hook from a panicking thread"); } unsafe { let guard = HOOK_LOCK.write(); let hook = HOOK; HOOK = Hook::Default; drop(guard); match hook { Hook::Default => Box::new(default_hook), Hook::Custom(ptr) => Box::from_raw(ptr), } } } #[cfg(target = "aarch64-skyline-switch")] fn show_error(code: u32, message: &core::primitive::str, details: &core::primitive::str) { use nnsdk::{err, settings}; let mut message_bytes = String::from(message).into_bytes(); let mut details_bytes = String::from(details).into_bytes(); if message_bytes.len() >= 2048 { message_bytes.truncate(2044); message_bytes.append(&mut String::from("...\0").into_bytes()); } if details_bytes.len() >= 2048 { details_bytes.truncate(2044); details_bytes.append(&mut String::from("...\0").into_bytes()); } let message = match core::str::from_utf8(&message_bytes) { Ok(s) => s, Err(e) => "Unable to parse error message.\0" }; let details = match core::str::from_utf8(&details_bytes) { Ok(s) => s, Err(e) => "Unable to parse error details.\0" }; unsafe { let error = err::ApplicationErrorArg::new_with_messages( code, message.as_bytes().as_ptr(), details.as_bytes().as_ptr(), &settings::LanguageCode_Make(settings::Language_Language_English), ); err::ShowApplicationError(&error); }; } fn default_hook(info: &PanicInfo<'_>) { // If this is a double panic, make sure that we print a backtrace // for this panic. Otherwise only print it if logging is enabled. let backtrace_env = if panic_count::get_count() >= 2 { RustBacktrace::Print(crate::backtrace_rs::PrintFmt::Full) } else { backtrace::rust_backtrace_env() }; // The current implementation always returns `Some`. 
let location = info.location().unwrap(); let msg = match info.payload().downcast_ref::<&'static str>() { Some(s) => *s, None => match info.payload().downcast_ref::<String>() { Some(s) => &s[..], None => "Box<dyn Any>", }, }; let thread = thread_info::current_thread(); let name = thread.as_ref().and_then(|t| t.name()).unwrap_or("<unnamed>"); let write = |err: &mut dyn crate::io::Write| { let err_msg = format!("Thread '{}' panicked at '{}', {}", name, msg, location); let _ = writeln!(err, "{}", err_msg.as_str()); #[cfg(target = "aarch64-skyline-switch")] show_error( 69, "Skyline plugin has panicked! Please open the details then send a screenshot of the message to the developer, then close the game.\0", err_msg.as_str(), ); static FIRST_PANIC: AtomicBool = AtomicBool::new(true); match backtrace_env { RustBacktrace::Print(format) => drop(backtrace::print(err, format)), RustBacktrace::Disabled => {} RustBacktrace::RuntimeDisabled => { if FIRST_PANIC.swap(false, Ordering::SeqCst) { let _ = writeln!( err, "note: run with `RUST_BACKTRACE=1` \ environment variable to display a backtrace" ); } } } }; if let Some(local) = set_output_capture(None) { write(&mut *local.lock().unwrap_or_else(|e| e.into_inner())); set_output_capture(Some(local)); } else if let Some(mut out) = panic_output() { write(&mut out); } } #[cfg(not(test))] #[doc(hidden)] #[unstable(feature = "update_panic_count", issue = "none")] pub mod panic_count { use crate::cell::Cell; use crate::sync::atomic::{AtomicUsize, Ordering}; pub const ALWAYS_ABORT_FLAG: usize = 1 << (usize::BITS - 1); // Panic count for the current thread. thread_local! { static LOCAL_PANIC_COUNT: Cell<usize> = Cell::new(0) } // Sum of panic counts from all threads. The purpose of this is to have // a fast path in `is_zero` (which is used by `panicking`). In any particular // thread, if that thread currently views `GLOBAL_PANIC_COUNT` as being zero, // then `LOCAL_PANIC_COUNT` in that thread is zero. This invariant holds before // and after increase and decrease, but not necessarily during their execution. // // Additionally, the top bit of GLOBAL_PANIC_COUNT (GLOBAL_ALWAYS_ABORT_FLAG) // records whether panic::always_abort() has been called. This can only be // set, never cleared. // // This could be viewed as a struct containing a single bit and an n-1-bit // value, but if we wrote it like that it would be more than a single word, // and even a newtype around usize would be clumsy because we need atomics. // But we use such a tuple for the return type of increase(). // // Stealing a bit is fine because it just amounts to assuming that each // panicking thread consumes at least 2 bytes of address space. 
static GLOBAL_PANIC_COUNT: AtomicUsize = AtomicUsize::new(0); pub fn increase() -> (bool, usize) { ( GLOBAL_PANIC_COUNT.fetch_add(1, Ordering::Relaxed) & ALWAYS_ABORT_FLAG != 0, LOCAL_PANIC_COUNT.with(|c| { let next = c.get() + 1; c.set(next); next }), ) } pub fn decrease() { GLOBAL_PANIC_COUNT.fetch_sub(1, Ordering::Relaxed); LOCAL_PANIC_COUNT.with(|c| { let next = c.get() - 1; c.set(next); next }); } pub fn set_always_abort() { GLOBAL_PANIC_COUNT.fetch_or(ALWAYS_ABORT_FLAG, Ordering::Relaxed); } // Disregards ALWAYS_ABORT_FLAG pub fn get_count() -> usize { LOCAL_PANIC_COUNT.with(|c| c.get()) } // Disregards ALWAYS_ABORT_FLAG #[inline] pub fn count_is_zero() -> bool { if GLOBAL_PANIC_COUNT.load(Ordering::Relaxed) & !ALWAYS_ABORT_FLAG == 0 { // Fast path: if `GLOBAL_PANIC_COUNT` is zero, all threads // (including the current one) will have `LOCAL_PANIC_COUNT` // equal to zero, so TLS access can be avoided. // // In terms of performance, a relaxed atomic load is similar to a normal // aligned memory read (e.g., a mov instruction in x86), but with some // compiler optimization restrictions. On the other hand, a TLS access // might require calling a non-inlinable function (such as `__tls_get_addr` // when using the GD TLS model). true } else { is_zero_slow_path() } } // Slow path is in a separate function to reduce the amount of code // inlined from `is_zero`. #[inline(never)] #[cold] fn is_zero_slow_path() -> bool { LOCAL_PANIC_COUNT.with(|c| c.get() == 0) } } #[cfg(test)] pub use realstd::rt::panic_count; /// Invoke a closure, capturing the cause of an unwinding panic if one occurs. pub unsafe fn r#try<R, F: FnOnce() -> R>(f: F) -> Result<R, Box<dyn Any + Send>> { union Data<F, R> { f: ManuallyDrop<F>, r: ManuallyDrop<R>, p: ManuallyDrop<Box<dyn Any + Send>>, } // We do some sketchy operations with ownership here for the sake of // performance. We can only pass pointers down to `do_call` (can't pass // objects by value), so we do all the ownership tracking here manually // using a union. // // We go through a transition where: // // * First, we set the data field `f` to be the argumentless closure that we're going to call. // * When we make the function call, the `do_call` function below, we take // ownership of the function pointer. At this point the `data` union is // entirely uninitialized. // * If the closure successfully returns, we write the return value into the // data's return slot (field `r`). // * If the closure panics (`do_catch` below), we write the panic payload into field `p`. // * Finally, when we come back out of the `try` intrinsic we're // in one of two states: // // 1. The closure didn't panic, in which case the return value was // filled in. We move it out of `data.r` and return it. // 2. The closure panicked, in which case the panic payload was // filled in. We move it out of `data.p` and return it. // // Once we stack all that together we should have the "most efficient' // method of calling a catch panic whilst juggling ownership. let mut data = Data { f: ManuallyDrop::new(f) }; let data_ptr = &mut data as *mut _ as *mut u8; // SAFETY: // // Access to the union's fields: this is `std` and we know that the `r#try` // intrinsic fills in the `r` or `p` union field based on its return value. // // The call to `intrinsics::r#try` is made safe by: // - `do_call`, the first argument, can be called with the initial `data_ptr`. // - `do_catch`, the second argument, can be called with the `data_ptr` as well. 
// See their safety preconditions for more information. unsafe { return if intrinsics::r#try(do_call::<F, R>, data_ptr, do_catch::<F, R>) == 0 { Ok(ManuallyDrop::into_inner(data.r)) } else { Err(ManuallyDrop::into_inner(data.p)) }; } // We consider unwinding to be rare, so mark this function as cold. However, // do not mark it no-inline -- that decision is best to leave to the // optimizer (in most cases this function is not inlined even as a normal, // non-cold function, though, as of the writing of this comment). #[cold] unsafe fn cleanup(payload: *mut u8) -> Box<dyn Any + Send + 'static> { // SAFETY: The whole unsafe block hinges on a correct implementation of // the panic handler `__rust_panic_cleanup`. As such we can only // assume it returns the correct thing for `Box::from_raw` to work // without undefined behavior. let obj = unsafe { Box::from_raw(__rust_panic_cleanup(payload)) }; panic_count::decrease(); obj } // SAFETY: // data must be non-null, correctly aligned, and a pointer to a `Data<F, R>`. // It must contain a valid `f` (type: F) value that can be used to fill // `data.r`. // // This function cannot be marked as `unsafe` because `intrinsics::r#try` // expects normal function pointers. #[inline] fn do_call<F: FnOnce() -> R, R>(data: *mut u8) { // SAFETY: this is the responsibility of the caller, see above. unsafe { let data = data as *mut Data<F, R>; let data = &mut (*data); let f = ManuallyDrop::take(&mut data.f); data.r = ManuallyDrop::new(f()); } } // We *do* want this part of the catch to be inlined: this allows the // compiler to properly track accesses to the Data union and optimize it // away most of the time. // // SAFETY: // data must be non-null, correctly aligned, and a pointer to a `Data<F, R>`. // Since this uses `cleanup` it also hinges on a correct implementation of // `__rust_panic_cleanup`. // // This function cannot be marked as `unsafe` because `intrinsics::r#try` // expects normal function pointers. #[inline] fn do_catch<F: FnOnce() -> R, R>(data: *mut u8, payload: *mut u8) { // SAFETY: this is the responsibility of the caller, see above. // // When `__rust_panic_cleanup` is correctly implemented we can rely // on `obj` being the correct thing to pass to `data.p` (after wrapping // in `ManuallyDrop`). unsafe { let data = data as *mut Data<F, R>; let data = &mut (*data); let obj = cleanup(payload); data.p = ManuallyDrop::new(obj); } } } /// Determines whether the current thread is unwinding because of a panic. #[inline] pub fn panicking() -> bool { !panic_count::count_is_zero() } /// The entry point for panicking with a formatted message. /// /// This is designed to reduce the amount of code required at the call /// site as much as possible (so that `panic!()` has as low an impact /// on (e.g.) the inlining of other functions as possible), by moving /// the actual formatting into this shared place. #[unstable(feature = "libstd_sys_internals", reason = "used by the panic! macro", issue = "none")] #[cold] // If panic_immediate_abort, inline the abort call, // otherwise avoid inlining because it is a cold path. #[cfg_attr(not(feature = "panic_immediate_abort"), track_caller)] #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))] #[cfg_attr(feature = "panic_immediate_abort", inline)] #[cfg_attr(all(not(bootstrap), not(test)), lang = "begin_panic_fmt")] pub fn begin_panic_fmt(msg: &fmt::Arguments<'_>) -> !
{ if cfg!(feature = "panic_immediate_abort") { intrinsics::abort() } let info = PanicInfo::internal_constructor(Some(msg), Location::caller()); begin_panic_handler(&info) } /// Entry point of panics from the libcore crate (`panic_impl` lang item). #[cfg_attr(not(test), panic_handler)] pub fn begin_panic_handler(info: &PanicInfo<'_>) -> ! { struct PanicPayload<'a> { inner: &'a fmt::Arguments<'a>, string: Option<String>, } impl<'a> PanicPayload<'a> { fn new(inner: &'a fmt::Arguments<'a>) -> PanicPayload<'a> { PanicPayload { inner, string: None } } fn fill(&mut self) -> &mut String { use crate::fmt::Write; let inner = self.inner; // Lazily, the first time this gets called, run the actual string formatting. self.string.get_or_insert_with(|| { let mut s = String::new(); drop(s.write_fmt(*inner)); s }) } } unsafe impl<'a> BoxMeUp for PanicPayload<'a> { fn take_box(&mut self) -> *mut (dyn Any + Send) { // We do two allocations here, unfortunately. But (a) they're required with the current // scheme, and (b) we don't handle panic + OOM properly anyway (see comment in // begin_panic below). let contents = mem::take(self.fill()); Box::into_raw(Box::new(contents)) } fn get(&mut self) -> &(dyn Any + Send) { self.fill() } } struct StrPanicPayload(&'static str); unsafe impl BoxMeUp for StrPanicPayload { fn take_box(&mut self) -> *mut (dyn Any + Send) { Box::into_raw(Box::new(self.0)) } fn get(&mut self) -> &(dyn Any + Send) { &self.0 } } let loc = info.location().unwrap(); // The current implementation always returns Some let msg = info.message().unwrap(); // The current implementation always returns Some crate::sys_common::backtrace::__rust_end_short_backtrace(move || { if let Some(msg) = msg.as_str() { rust_panic_with_hook(&mut StrPanicPayload(msg), info.message(), loc); } else { rust_panic_with_hook(&mut PanicPayload::new(msg), info.message(), loc); } }) } /// This is the entry point of panicking for the non-format-string variants of /// panic!() and assert!(). In particular, this is the only entry point that supports /// arbitrary payloads, not just format strings. #[unstable(feature = "libstd_sys_internals", reason = "used by the panic! macro", issue = "none")] #[cfg_attr(not(test), lang = "begin_panic")] // lang item for CTFE panic support // never inline unless panic_immediate_abort to avoid code // bloat at the call sites as much as possible #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))] #[cold] #[track_caller] pub fn begin_panic<M: Any + Send>(msg: M) -> ! { if cfg!(feature = "panic_immediate_abort") { intrinsics::abort() } let loc = Location::caller(); return crate::sys_common::backtrace::__rust_end_short_backtrace(move || { rust_panic_with_hook(&mut PanicPayload::new(msg), None, loc) }); struct PanicPayload<A> { inner: Option<A>, } impl<A: Send + 'static> PanicPayload<A> { fn new(inner: A) -> PanicPayload<A> { PanicPayload { inner: Some(inner) } } } unsafe impl<A: Send + 'static> BoxMeUp for PanicPayload<A> { fn take_box(&mut self) -> *mut (dyn Any + Send) { // Note that this should be the only allocation performed in this code path. Currently // this means that panic!() on OOM will invoke this code path, but then again we're not // really ready for panic on OOM anyway. If we do start doing this, then we should // propagate this allocation to be performed in the parent of this thread instead of the // thread that's panicking. 
let data = match self.inner.take() { Some(a) => Box::new(a) as Box<dyn Any + Send>, None => process::abort(), }; Box::into_raw(data) } fn get(&mut self) -> &(dyn Any + Send) { match self.inner { Some(ref a) => a, None => process::abort(), } } } } /// Central point for dispatching panics. /// /// Executes the primary logic for a panic, including checking for recursive /// panics, panic hooks, and finally dispatching to the panic runtime to either /// abort or unwind. fn rust_panic_with_hook( payload: &mut dyn BoxMeUp, message: Option<&fmt::Arguments<'_>>, location: &Location<'_>, ) -> ! { let (must_abort, panics) = panic_count::increase(); // If this is the third nested call (e.g., panics == 2, this is 0-indexed), // the panic hook probably triggered the last panic, otherwise the // double-panic check would have aborted the process. In this case abort the // process real quickly as we don't want to try calling it again as it'll // probably just panic again. if must_abort || panics > 2 { if panics > 2 { // Don't try to print the message in this case // - perhaps that is causing the recursive panics. rtprintpanic!("thread panicked while processing panic. aborting.\n"); } else { // Unfortunately, this does not print a backtrace, because creating // a `Backtrace` will allocate, which we must to avoid here. let panicinfo = PanicInfo::internal_constructor(message, location); rtprintpanic!("{}\npanicked after panic::always_abort(), aborting.\n", panicinfo); } intrinsics::abort() } unsafe { let mut info = PanicInfo::internal_constructor(message, location); let _guard = HOOK_LOCK.read(); match HOOK { // Some platforms (like wasm) know that printing to stderr won't ever actually // print anything, and if that's the case we can skip the default // hook. Since string formatting happens lazily when calling `payload` // methods, this means we avoid formatting the string at all! // (The panic runtime might still call `payload.take_box()` though and trigger // formatting.) Hook::Default if panic_output().is_none() => {} Hook::Default => { info.set_payload(payload.get()); default_hook(&info); } Hook::Custom(ptr) => { info.set_payload(payload.get()); (*ptr)(&info); } }; } if panics > 1 { // If a thread panics while it's already unwinding then we // have limited options. Currently our preference is to // just abort. In the future we may consider resuming // unwinding or otherwise exiting the thread cleanly. rtprintpanic!("thread panicked while panicking. aborting.\n"); intrinsics::abort() } rust_panic(payload) } /// This is the entry point for `resume_unwind`. /// It just forwards the payload to the panic runtime. pub fn rust_panic_without_hook(payload: Box<dyn Any + Send>) -> ! { panic_count::increase(); struct RewrapBox(Box<dyn Any + Send>); unsafe impl BoxMeUp for RewrapBox { fn take_box(&mut self) -> *mut (dyn Any + Send) { Box::into_raw(mem::replace(&mut self.0, Box::new(()))) } fn get(&mut self) -> &(dyn Any + Send) { &*self.0 } } rust_panic(&mut RewrapBox(payload)) } /// An unmangled function (through `rustc_std_internal_symbol`) on which to slap /// yer breakpoints. #[inline(never)] #[cfg_attr(not(test), rustc_std_internal_symbol)] fn rust_panic(mut msg: &mut dyn BoxMeUp) -> ! { let code = unsafe { let obj = &mut msg as *mut &mut dyn BoxMeUp; __rust_start_panic(obj) }; rtabort!("failed to initiate panic, error {}", code) }
36.48615
146
0.615609
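The `set_hook`/`take_hook` pair described above composes: a program can capture whatever hook is currently installed and wrap it. A small self-contained example using the stable `std::panic` API:

```
use std::panic;

fn main() {
    // Take the currently installed hook (the default one here) and wrap it.
    let previous = panic::take_hook();
    panic::set_hook(Box::new(move |info| {
        eprintln!("custom prologue before the usual panic output");
        previous(info);
    }));

    // Any panic from this point on runs the wrapped hook first.
    let _ = panic::catch_unwind(|| panic!("demo"));
}
```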
89939d235934457a22a2487622341239f4561da6
20,943
use ic_interfaces::registry::{ RegistryClient, RegistryClientResult, RegistryClientVersionedResult, RegistryVersionedRecord, }; use ic_protobuf::registry::{ node::v1::NodeRecord, replica_version::v1::ReplicaVersionRecord, subnet::v1::{ CatchUpPackageContents, EcdsaConfig, GossipConfig, SubnetListRecord, SubnetRecord, }, }; use ic_protobuf::types::v1::SubnetId as SubnetIdProto; use ic_registry_common::values::deserialize_registry_value; use ic_registry_keys::{ make_catch_up_package_contents_key, make_node_record_key, make_replica_version_key, make_subnet_list_record_key, make_subnet_record_key, ROOT_SUBNET_ID_KEY, }; use ic_registry_subnet_features::SubnetFeatures; use ic_types::{Height, NodeId, PrincipalId, RegistryVersion, ReplicaVersion, SubnetId}; use std::convert::TryFrom; use std::time::Duration; #[derive(Clone, Debug, PartialEq)] pub struct NotarizationDelaySettings { pub unit_delay: Duration, pub initial_notary_delay: Duration, } pub struct IngressMessageSettings { /// Maximum number of bytes per message. This is a hard cap, which means /// ingress messages greater than the limit will be dropped. pub max_ingress_bytes_per_message: usize, /// Maximum number of messages per block. This is a hard cap, which means /// blocks will never have more than this number of messages. pub max_ingress_messages_per_block: usize, } /// A helper trait that wraps a RegistryClient and provides utility methods for /// querying subnet information. pub trait SubnetRegistry { fn get_subnet_record( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<SubnetRecord>; fn get_root_subnet_id(&self, version: RegistryVersion) -> RegistryClientResult<SubnetId>; fn get_node_ids_on_subnet( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<Vec<NodeId>>; fn get_subnet_size( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<usize>; /// Returns ingress message settings. fn get_ingress_message_settings( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<IngressMessageSettings>; /// Returns gossip config fn get_gossip_config( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<Option<GossipConfig>>; /// Returns SubnetFeatures fn get_features( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<SubnetFeatures>; /// Returns ecdsa config fn get_ecdsa_config( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<Option<EcdsaConfig>>; /// Returns notarization delay settings: /// - the unit delay for blockmaker; /// - the initial delay for notary, to give time to rank-0 block /// propagation. fn get_notarization_delay_settings( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<NotarizationDelaySettings>; /// Returns the upper bound for the number of dealings we allow in a block. fn get_dkg_dealings_per_block( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<usize>; /// Returns the length of all DKG intervals for the given subnet. The /// interval length is the number of rounds, following the summary /// block, where dealers exchange their dealings. 
fn get_dkg_interval_length( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<Height>; /// Returns whether the subnet record instructs the subnet to halt fn get_is_halted( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<bool>; /// Return the ReplicaVersion as recorded in the subnet record /// at the given height. fn get_replica_version( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<ReplicaVersion>; /// Return the ReplicaVersionRecord as recorded in the subnet record /// at the given height. fn get_replica_version_record( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<ReplicaVersionRecord>; fn get_replica_version_record_from_version_id( &self, replica_version_id: &ReplicaVersion, version: RegistryVersion, ) -> RegistryClientResult<ReplicaVersionRecord>; /// Return the RegistryVersion at which the SubnetRecord for the provided /// SubnetId was last updated. fn get_subnet_record_registry_version( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<RegistryVersion>; fn get_listed_subnet_for_node_id( &self, node_id: NodeId, version: RegistryVersion, ) -> RegistryClientResult<(SubnetId, SubnetRecord)>; fn get_all_listed_subnet_records( &self, version: RegistryVersion, ) -> RegistryClientResult<Vec<(SubnetId, SubnetRecord)>>; /// Get the necessary material to construct a genesis/recovery CUP for the /// given subnet fn get_cup_contents( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientVersionedResult<CatchUpPackageContents>; } impl<T: RegistryClient + ?Sized> SubnetRegistry for T { fn get_subnet_record( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<SubnetRecord> { let bytes = self.get_value(&make_subnet_record_key(subnet_id), version); deserialize_registry_value::<SubnetRecord>(bytes) } /// Return the root subnet id if it is available and can be parsed fn get_root_subnet_id(&self, version: RegistryVersion) -> RegistryClientResult<SubnetId> { let bytes = self.get_value(ROOT_SUBNET_ID_KEY, version); Ok(deserialize_registry_value::<SubnetIdProto>(bytes)? .and_then(|subnet_id_proto| subnet_id_proto.principal_id) .map(|pr_id| PrincipalId::try_from(pr_id.raw).expect("Could not parse principal id!")) .map(SubnetId::from)) } fn get_node_ids_on_subnet( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<Vec<NodeId>> { let bytes = self.get_value(&make_subnet_record_key(subnet_id), version); Ok(deserialize_registry_value::<SubnetRecord>(bytes)? .map(|subnet| get_node_ids_from_subnet_record(&subnet))) } fn get_subnet_size( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<usize> { let bytes = self.get_value(&make_subnet_record_key(subnet_id), version); Ok( deserialize_registry_value::<SubnetRecord>(bytes)? 
.map(|subnet| subnet.membership.len()), ) } fn get_ingress_message_settings( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<IngressMessageSettings> { let bytes = self.get_value(&make_subnet_record_key(subnet_id), version); Ok( deserialize_registry_value::<SubnetRecord>(bytes)?.map(|subnet| { IngressMessageSettings { max_ingress_bytes_per_message: subnet.max_ingress_bytes_per_message as usize, max_ingress_messages_per_block: subnet.max_ingress_messages_per_block as usize, } }), ) } fn get_gossip_config( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<Option<GossipConfig>> { let bytes = self.get_value(&make_subnet_record_key(subnet_id), version); let subnet = deserialize_registry_value::<SubnetRecord>(bytes)?; Ok(subnet.map(|subnet| subnet.gossip_config)) } fn get_features( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<SubnetFeatures> { let bytes = self.get_value(&make_subnet_record_key(subnet_id), version); let subnet = deserialize_registry_value::<SubnetRecord>(bytes)?; Ok(subnet .map(|subnet| subnet.features) .flatten() .map(SubnetFeatures::from)) } fn get_ecdsa_config( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<Option<EcdsaConfig>> { let bytes = self.get_value(&make_subnet_record_key(subnet_id), version); let subnet = deserialize_registry_value::<SubnetRecord>(bytes)?; Ok(subnet.map(|subnet| subnet.ecdsa_config)) } fn get_notarization_delay_settings( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<NotarizationDelaySettings> { let bytes = self.get_value(&make_subnet_record_key(subnet_id), version); Ok( deserialize_registry_value::<SubnetRecord>(bytes)?.map(|subnet| { NotarizationDelaySettings { unit_delay: Duration::from_millis(subnet.unit_delay_millis), initial_notary_delay: Duration::from_millis(subnet.initial_notary_delay_millis), } }), ) } fn get_dkg_dealings_per_block( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<usize> { let bytes = self.get_value(&make_subnet_record_key(subnet_id), version); Ok(deserialize_registry_value::<SubnetRecord>(bytes)? .map(|subnet| subnet.dkg_dealings_per_block as usize)) } fn get_dkg_interval_length( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<Height> { let bytes = self.get_value(&make_subnet_record_key(subnet_id), version); Ok(deserialize_registry_value::<SubnetRecord>(bytes)? .map(|subnet| Height::from(subnet.dkg_interval_length))) } fn get_is_halted( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<bool> { let bytes = self.get_value(&make_subnet_record_key(subnet_id), version); Ok(deserialize_registry_value::<SubnetRecord>(bytes)?.map(|subnet| subnet.is_halted)) } fn get_replica_version( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<ReplicaVersion> { let bytes = self.get_value(&make_subnet_record_key(subnet_id), version); Ok(deserialize_registry_value::<SubnetRecord>(bytes)? .and_then(|record| ReplicaVersion::try_from(record.replica_version_id.as_ref()).ok())) } fn get_replica_version_record( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<ReplicaVersionRecord> { let bytes = self.get_value(&make_subnet_record_key(subnet_id), version); Ok(match deserialize_registry_value::<SubnetRecord>(bytes)? 
{ Some(record) => { let bytes = self.get_value( &make_replica_version_key(record.replica_version_id), version, ); deserialize_registry_value::<ReplicaVersionRecord>(bytes)? } None => None, }) } fn get_replica_version_record_from_version_id( &self, replica_version_id: &ReplicaVersion, version: RegistryVersion, ) -> RegistryClientResult<ReplicaVersionRecord> { let bytes = self.get_value(&make_replica_version_key(replica_version_id), version); deserialize_registry_value::<ReplicaVersionRecord>(bytes) } fn get_subnet_record_registry_version( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientResult<RegistryVersion> { let record = self.get_versioned_value(&make_subnet_record_key(subnet_id), version)?; let result = if record.value.is_some() { Some(record.version) } else { None }; Ok(result) } /// Given a Node ID, this method returns a pair (subnet_id, subnet_record) /// iff there is a subnet that contains the node_id and subnet_id is /// contained in the subnet list. fn get_listed_subnet_for_node_id( &self, node_id: NodeId, version: RegistryVersion, ) -> RegistryClientResult<(SubnetId, SubnetRecord)> { Ok(self .get_all_listed_subnet_records(version)? .and_then(|records| { records.into_iter().find(|(_subnet_id, record)| { get_node_ids_from_subnet_record(record).contains(&node_id) }) })) } /// Returns a list of pairs (subnet_id, subnet_record). The subnet_id and /// the corresponding record are contained in the list iff the subnet_id is /// contained in the subnet list and the corresponding subnet record exists. fn get_all_listed_subnet_records( &self, version: RegistryVersion, ) -> RegistryClientResult<Vec<(SubnetId, SubnetRecord)>> { let mut records = vec![]; if let Some(ids) = self.get_subnet_ids(version)? { for id in ids { if let Some(r) = self.get_subnet_record(id, version)? { records.push((id, r)); } } return Ok(Some(records)); } Ok(None) } fn get_cup_contents( &self, subnet_id: SubnetId, version: RegistryVersion, ) -> RegistryClientVersionedResult<CatchUpPackageContents> { let record = self.get_versioned_value(&make_catch_up_package_contents_key(subnet_id), version)?; let bytes = Ok(record.value); let value = deserialize_registry_value::<CatchUpPackageContents>(bytes)?; Ok(RegistryVersionedRecord { key: record.key, version: record.version, value, }) } } pub fn get_node_ids_from_subnet_record(subnet: &SubnetRecord) -> Vec<NodeId> { subnet .membership .iter() .map(|n| NodeId::from(PrincipalId::try_from(&n[..]).unwrap())) .collect() } /// A helper trait to access the subnet list; the list of subnets that are part /// of the current topology of the IC. pub trait SubnetListRegistry { fn get_subnet_ids(&self, version: RegistryVersion) -> RegistryClientResult<Vec<SubnetId>>; } impl<T: RegistryClient + ?Sized> SubnetListRegistry for T { fn get_subnet_ids(&self, version: RegistryVersion) -> RegistryClientResult<Vec<SubnetId>> { let bytes = self.get_value(make_subnet_list_record_key().as_str(), version); Ok( deserialize_registry_value::<SubnetListRecord>(bytes)?.map(|subnet| { subnet .subnets .iter() .map(|s| SubnetId::from(PrincipalId::try_from(s.clone().as_slice()).unwrap())) .collect() }), ) } } /// Helper methods primarily used in `transport`/`p2p` where both, where /// transport information for an entire subnetwork are often needed. pub trait SubnetTransportRegistry { /// Return a list of pairs containing the node id and corresponding node /// record for each node on subnetwork with `subnet_id`. 
    ///
    /// As the transport information is stored individually for each node, this
    /// method performs `n+1` requests, where `n` is the number of nodes on the
    /// subnetwork. Potential inconsistencies are resolved as follows:
    ///
    /// * `Ok(None)` is returned if the request for the subnet member list
    ///   returns `Ok(None)`, or if any of the requests for transport
    ///   information return `Ok(None)`.
    /// * `Err(_)` is returned if the request for subnet membership fails.
    /// * The method panics in all other cases.
    ///
    /// # Panics
    ///
    /// If the membership list for a subnet can be retrieved, but one of the
    /// requests for a node contained in the membership list fails, the method
    /// panics.
    fn get_subnet_transport_infos(
        &self,
        subnet_id: SubnetId,
        version: RegistryVersion,
    ) -> RegistryClientResult<Vec<(NodeId, NodeRecord)>>;
}

impl<T: RegistryClient + ?Sized> SubnetTransportRegistry for T {
    fn get_subnet_transport_infos(
        &self,
        subnet_id: SubnetId,
        version: RegistryVersion,
    ) -> RegistryClientResult<Vec<(NodeId, NodeRecord)>> {
        let membership_bytes = self.get_value(&make_subnet_record_key(subnet_id), version);
        let node_ids: Vec<_> = match deserialize_registry_value::<SubnetRecord>(membership_bytes)?
            .map(|subnet| {
                subnet
                    .membership
                    .iter()
                    .map(|n| NodeId::from(PrincipalId::try_from(&n[..]).unwrap()))
                    .collect()
            }) {
            Some(val) => val,
            None => return Ok(None),
        };

        let mut res = Vec::new();
        for node_id in node_ids {
            let node_bytes = self.get_value(&make_node_record_key(node_id), version);
            let node_record = deserialize_registry_value::<NodeRecord>(node_bytes);
            match node_record {
                Ok(Some(node_record)) => res.push((node_id, node_record)),
                Ok(None) => return Ok(None),
                _ => panic!("failed to retrieve the node record for node {}", node_id),
            }
        }
        Ok(Some(res))
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::client::RegistryClientImpl;
    use ic_registry_common::proto_registry_data_provider::ProtoRegistryDataProvider;
    use ic_types::PrincipalId;
    use std::sync::Arc;

    fn node_id(id: u64) -> NodeId {
        NodeId::from(PrincipalId::new_node_test_id(id))
    }

    fn subnet_id(id: u64) -> SubnetId {
        SubnetId::from(PrincipalId::new_subnet_test_id(id))
    }

    #[tokio::test]
    async fn can_get_node_ids_from_subnet() {
        let subnet_id = subnet_id(4);
        let version = RegistryVersion::from(2);
        let data_provider = Arc::new(ProtoRegistryDataProvider::new());
        let subnet_record = SubnetRecord {
            membership: vec![
                node_id(32u64).get().into_vec(),
                node_id(33u64).get().into_vec(),
            ],
            ..Default::default()
        };
        data_provider
            .add(
                &make_subnet_record_key(subnet_id),
                version,
                Some(subnet_record),
            )
            .unwrap();
        let registry = Arc::new(RegistryClientImpl::new(data_provider, None));
        // The trait can also "wrap" an arc of registry client.
registry.fetch_and_start_polling().unwrap(); let registry: Arc<dyn RegistryClient> = registry; let node_ids = registry.get_node_ids_on_subnet(subnet_id, version).unwrap(); assert_eq!(node_ids, Some(vec![node_id(32), node_id(33)])); } #[tokio::test] async fn can_get_replica_version_from_subnet() { let subnet_id = subnet_id(4); let version = RegistryVersion::from(2); let data_provider = Arc::new(ProtoRegistryDataProvider::new()); let mut subnet_record = SubnetRecord::default(); let replica_version = ReplicaVersion::try_from("some_version").unwrap(); let replica_version_record = ReplicaVersionRecord::default(); subnet_record.replica_version_id = String::from(&replica_version); data_provider .add( &make_subnet_record_key(subnet_id), version, Some(subnet_record), ) .unwrap(); data_provider .add( &make_replica_version_key(String::from(&replica_version)), version, Some(replica_version_record.clone()), ) .unwrap(); let registry = Arc::new(RegistryClientImpl::new(data_provider, None)); // The trait can also "wrap" an arc of registry client. registry.fetch_and_start_polling().unwrap(); let registry: Arc<dyn RegistryClient> = registry; let result = registry.get_replica_version(subnet_id, version).unwrap(); assert_eq!(result, Some(replica_version)); let result = registry .get_replica_version_record(subnet_id, version) .unwrap(); assert_eq!(result, Some(replica_version_record)) } }
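
// A minimal, hypothetical usage sketch of the traits above (not part of the
// original API): `registry` can be any value implementing `RegistryClient`.
// It illustrates the `n+1` request pattern documented on
// `get_subnet_transport_infos`.
fn print_subnet_transport_infos<R: RegistryClient + ?Sized>(
    registry: &R,
    subnet_id: SubnetId,
    version: RegistryVersion,
) {
    match registry.get_subnet_transport_infos(subnet_id, version) {
        Ok(Some(infos)) => {
            for (node_id, _node_record) in infos {
                println!("member node: {}", node_id);
            }
        }
        Ok(None) => println!("subnet or node record absent at version {}", version),
        Err(e) => println!("registry error: {:?}", e),
    }
}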
35.738908
100
0.63797
f9abe3702f847783d39d40f9adc2d9688c51c4a6
3,362
use std::{fs, path::Path, time::Duration};

use bevy::{ecs::prelude::*, prelude::DespawnRecursiveExt};

use crate::{
    ai::BEST_RESULTS_SAVED_COUNT,
    shared::{
        config::{Config, ModeEnum},
        gamedata::GameData,
    },
};

use super::{save_summary, Fish, FishAlive};

pub fn send_fish_death_by_energy(config: Res<Config>, mut query: Query<(Entity, &mut Fish)>) {
    for (_, mut fish) in query.iter_mut() {
        if fish.alive && fish.energy <= 0.0 && config.fish.energy != -1f32 {
            fish.die();
        }
    }
}

pub fn poll_dead_fish(mut gd: ResMut<GameData>, config: Res<Config>) {
    // Once every fish of the generation has died, rank them by fitness
    // (descending) and persist the best results.
    if gd.died_fishes.len() as i32 == config.fish.count {
        gd.died_fishes
            .sort_by(|f1, f2| f1.fitness().partial_cmp(&f2.fitness()).unwrap());
        gd.died_fishes.reverse();

        let mut gd_best_time: Option<Duration> = gd.best_time;
        let gd_current_generation = gd.current_generation;

        for (i, f) in gd.died_fishes.iter_mut().enumerate() {
            if i <= BEST_RESULTS_SAVED_COUNT as usize {
                let result_dir = format!("{}{}", config.ai.state_path, f.brain.get_name());
                // Create the result dir if it does not exist
                fs::create_dir_all(&result_dir).expect("failed to create result dir");
                let best_str = format!("{}/{}_pos.yaml", result_dir, i);
                let best_path = Path::new(&best_str);
                if config.ai.mode == ModeEnum::LEARN {
                    f.brain.save(best_path);
                }
                if i == 0 {
                    gd_best_time = Some(f.create_at.elapsed());
                    let date = chrono::offset::Local::now();
                    println!(
                        "[{}] - Gen {}\tBest time: {:?}\tidx: {:>6}\tDistance: {}\tEnergy: {}\tFitness: {}\t",
                        date.format("%H:%M:%S"),
                        gd_current_generation,
                        gd_best_time.unwrap().as_secs_f32(),
                        f.index,
                        f.distance,
                        f.energy,
                        f.fitness()
                    );
                    save_summary::save_summary(gd_current_generation, &config, f);
                }
            }
        }

        gd.best_time = gd_best_time;
        gd.create_generation = true;
        gd.died_fishes.clear();
    }
}

pub fn remove_dead_fish(
    mut commands: Commands,
    q_fish: Query<Without<Fish, (Entity, &FishAlive)>>,
) {
    for (e, _) in q_fish.iter() {
        commands.despawn_recursive(e);
    }
}

pub fn recover_dead_fish(mut_world: &mut World, resources: &mut Resources) {
    // Move each dead fish out of the ECS into `GameData::died_fishes`.
    // Removing the `Fish` component invalidates the query borrow, so the
    // query is re-created on every pass.
    loop {
        let entity = mut_world
            .query::<(Entity, &Fish, &FishAlive)>()
            .filter(|(_, f, _)| !f.alive)
            .map(|(e, _, _)| e)
            .next();
        match entity {
            Some(e) => {
                if let Ok(fish) = mut_world.remove_one::<Fish>(e) {
                    if let Some(mut gd) = resources.get_mut::<GameData>() {
                        gd.died_fishes.push(fish);
                    }
                }
            }
            None => break,
        }
    }
}
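
// A sketch of how these systems could be registered in a Bevy app of the
// same era; the builder method names here are assumptions for illustration,
// not taken from this crate:
//
//     App::build()
//         .add_system(send_fish_death_by_energy.system())
//         .add_system(poll_dead_fish.system())
//         .add_system(remove_dead_fish.system())
//         .add_system(recover_dead_fish.thread_local_system())
//         .run();
//
// `recover_dead_fish` has to run as a thread-local system because it takes
// `&mut World` and `&mut Resources` directly instead of queries/resources.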
30.288288
110
0.509816
fbfb22258b10a91c3e7774cbdd341d77cf9b04ad
315
#[macro_use] extern crate pyo3; use pyo3::prelude::*; #[pyfunction] fn hello(_py: Python) -> PyResult<()> { println!("Bonjour, monde!"); Ok(()) } #[pymodule] /// Module documentation string fn french(_py: Python, m: &PyModule) -> PyResult<()> { m.add_wrapped(wrap_pyfunction!(hello))?; Ok(()) }
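
// A short consumption sketch, assuming this crate is built as a `cdylib`
// named `french` (e.g. with maturin or setuptools-rust). From Python:
//
//     >>> import french
//     >>> french.hello()
//     Bonjour, monde!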
16.578947
54
0.615873
143ebc91f3bf6ca3407b3f28bdbddbb54e386d8c
29,687
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::EVTOMCUFLAGSCLR { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = r" Value of the field"] pub struct RESERVED16R { bits: u16, } impl RESERVED16R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u16 { self.bits } } #[doc = r" Value of the field"] pub struct AUX_TIMER2_PULSER { bits: bool, } impl AUX_TIMER2_PULSER { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct AUX_TIMER2_EV3R { bits: bool, } impl AUX_TIMER2_EV3R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct AUX_TIMER2_EV2R { bits: bool, } impl AUX_TIMER2_EV2R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct AUX_TIMER2_EV1R { bits: bool, } impl AUX_TIMER2_EV1R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct AUX_TIMER2_EV0R { bits: bool, } impl AUX_TIMER2_EV0R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct AUX_ADC_IRQR { bits: bool, } impl AUX_ADC_IRQR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the 
field"] pub struct MCU_OBSMUX0R { bits: bool, } impl MCU_OBSMUX0R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct AUX_ADC_FIFO_ALMOST_FULLR { bits: bool, } impl AUX_ADC_FIFO_ALMOST_FULLR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct AUX_ADC_DONER { bits: bool, } impl AUX_ADC_DONER { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct AUX_SMPH_AUTOTAKE_DONER { bits: bool, } impl AUX_SMPH_AUTOTAKE_DONER { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct AUX_TIMER1_EVR { bits: bool, } impl AUX_TIMER1_EVR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct AUX_TIMER0_EVR { bits: bool, } impl AUX_TIMER0_EVR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct AUX_TDC_DONER { bits: bool, } impl AUX_TDC_DONER { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct AUX_COMPBR { bits: bool, } impl AUX_COMPBR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct AUX_COMPAR { bits: bool, } impl AUX_COMPAR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn 
bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct AUX_WU_EVR { bits: bool, } impl AUX_WU_EVR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Proxy"] pub struct _RESERVED16W<'a> { w: &'a mut W, } impl<'a> _RESERVED16W<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u16) -> &'a mut W { const MASK: u16 = 65535; const OFFSET: u8 = 16; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _AUX_TIMER2_PULSEW<'a> { w: &'a mut W, } impl<'a> _AUX_TIMER2_PULSEW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 15; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _AUX_TIMER2_EV3W<'a> { w: &'a mut W, } impl<'a> _AUX_TIMER2_EV3W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 14; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _AUX_TIMER2_EV2W<'a> { w: &'a mut W, } impl<'a> _AUX_TIMER2_EV2W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 13; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _AUX_TIMER2_EV1W<'a> { w: &'a mut W, } impl<'a> _AUX_TIMER2_EV1W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 12; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _AUX_TIMER2_EV0W<'a> { w: &'a mut W, } impl<'a> _AUX_TIMER2_EV0W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 11; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" 
Proxy"] pub struct _AUX_ADC_IRQW<'a> { w: &'a mut W, } impl<'a> _AUX_ADC_IRQW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 10; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _MCU_OBSMUX0W<'a> { w: &'a mut W, } impl<'a> _MCU_OBSMUX0W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 9; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _AUX_ADC_FIFO_ALMOST_FULLW<'a> { w: &'a mut W, } impl<'a> _AUX_ADC_FIFO_ALMOST_FULLW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 8; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _AUX_ADC_DONEW<'a> { w: &'a mut W, } impl<'a> _AUX_ADC_DONEW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 7; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _AUX_SMPH_AUTOTAKE_DONEW<'a> { w: &'a mut W, } impl<'a> _AUX_SMPH_AUTOTAKE_DONEW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 6; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _AUX_TIMER1_EVW<'a> { w: &'a mut W, } impl<'a> _AUX_TIMER1_EVW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 5; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _AUX_TIMER0_EVW<'a> { w: &'a mut W, } impl<'a> _AUX_TIMER0_EVW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const 
OFFSET: u8 = 4; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _AUX_TDC_DONEW<'a> { w: &'a mut W, } impl<'a> _AUX_TDC_DONEW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 3; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _AUX_COMPBW<'a> { w: &'a mut W, } impl<'a> _AUX_COMPBW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 2; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _AUX_COMPAW<'a> { w: &'a mut W, } impl<'a> _AUX_COMPAW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 1; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _AUX_WU_EVW<'a> { w: &'a mut W, } impl<'a> _AUX_WU_EVW<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bits 16:31 - 31:16\\] Software should not rely on the value of a reserved. Writing any other value than the reset value may result in undefined behavior."] #[inline] pub fn reserved16(&self) -> RESERVED16R { let bits = { const MASK: u16 = 65535; const OFFSET: u8 = 16; ((self.bits >> OFFSET) & MASK as u32) as u16 }; RESERVED16R { bits } } #[doc = "Bit 15 - 15:15\\] Write 1 to clear EVTOMCUFLAGS.AUX_TIMER2_PULSE. Read value is 0."] #[inline] pub fn aux_timer2_pulse(&self) -> AUX_TIMER2_PULSER { let bits = { const MASK: bool = true; const OFFSET: u8 = 15; ((self.bits >> OFFSET) & MASK as u32) != 0 }; AUX_TIMER2_PULSER { bits } } #[doc = "Bit 14 - 14:14\\] Write 1 to clear EVTOMCUFLAGS.AUX_TIMER2_EV3. Read value is 0."] #[inline] pub fn aux_timer2_ev3(&self) -> AUX_TIMER2_EV3R { let bits = { const MASK: bool = true; const OFFSET: u8 = 14; ((self.bits >> OFFSET) & MASK as u32) != 0 }; AUX_TIMER2_EV3R { bits } } #[doc = "Bit 13 - 13:13\\] Write 1 to clear EVTOMCUFLAGS.AUX_TIMER2_EV2. 
Read value is 0."] #[inline] pub fn aux_timer2_ev2(&self) -> AUX_TIMER2_EV2R { let bits = { const MASK: bool = true; const OFFSET: u8 = 13; ((self.bits >> OFFSET) & MASK as u32) != 0 }; AUX_TIMER2_EV2R { bits } } #[doc = "Bit 12 - 12:12\\] Write 1 to clear EVTOMCUFLAGS.AUX_TIMER2_EV1. Read value is 0."] #[inline] pub fn aux_timer2_ev1(&self) -> AUX_TIMER2_EV1R { let bits = { const MASK: bool = true; const OFFSET: u8 = 12; ((self.bits >> OFFSET) & MASK as u32) != 0 }; AUX_TIMER2_EV1R { bits } } #[doc = "Bit 11 - 11:11\\] Write 1 to clear EVTOMCUFLAGS.AUX_TIMER2_EV0. Read value is 0."] #[inline] pub fn aux_timer2_ev0(&self) -> AUX_TIMER2_EV0R { let bits = { const MASK: bool = true; const OFFSET: u8 = 11; ((self.bits >> OFFSET) & MASK as u32) != 0 }; AUX_TIMER2_EV0R { bits } } #[doc = "Bit 10 - 10:10\\] Write 1 to clear EVTOMCUFLAGS.AUX_ADC_IRQ. Read value is 0."] #[inline] pub fn aux_adc_irq(&self) -> AUX_ADC_IRQR { let bits = { const MASK: bool = true; const OFFSET: u8 = 10; ((self.bits >> OFFSET) & MASK as u32) != 0 }; AUX_ADC_IRQR { bits } } #[doc = "Bit 9 - 9:9\\] Write 1 to clear EVTOMCUFLAGS.MCU_OBSMUX0. Read value is 0."] #[inline] pub fn mcu_obsmux0(&self) -> MCU_OBSMUX0R { let bits = { const MASK: bool = true; const OFFSET: u8 = 9; ((self.bits >> OFFSET) & MASK as u32) != 0 }; MCU_OBSMUX0R { bits } } #[doc = "Bit 8 - 8:8\\] Write 1 to clear EVTOMCUFLAGS.AUX_ADC_FIFO_ALMOST_FULL. Read value is 0."] #[inline] pub fn aux_adc_fifo_almost_full(&self) -> AUX_ADC_FIFO_ALMOST_FULLR { let bits = { const MASK: bool = true; const OFFSET: u8 = 8; ((self.bits >> OFFSET) & MASK as u32) != 0 }; AUX_ADC_FIFO_ALMOST_FULLR { bits } } #[doc = "Bit 7 - 7:7\\] Write 1 to clear EVTOMCUFLAGS.AUX_ADC_DONE. Read value is 0."] #[inline] pub fn aux_adc_done(&self) -> AUX_ADC_DONER { let bits = { const MASK: bool = true; const OFFSET: u8 = 7; ((self.bits >> OFFSET) & MASK as u32) != 0 }; AUX_ADC_DONER { bits } } #[doc = "Bit 6 - 6:6\\] Write 1 to clear EVTOMCUFLAGS.AUX_SMPH_AUTOTAKE_DONE. Read value is 0."] #[inline] pub fn aux_smph_autotake_done(&self) -> AUX_SMPH_AUTOTAKE_DONER { let bits = { const MASK: bool = true; const OFFSET: u8 = 6; ((self.bits >> OFFSET) & MASK as u32) != 0 }; AUX_SMPH_AUTOTAKE_DONER { bits } } #[doc = "Bit 5 - 5:5\\] Write 1 to clear EVTOMCUFLAGS.AUX_TIMER1_EV. Read value is 0."] #[inline] pub fn aux_timer1_ev(&self) -> AUX_TIMER1_EVR { let bits = { const MASK: bool = true; const OFFSET: u8 = 5; ((self.bits >> OFFSET) & MASK as u32) != 0 }; AUX_TIMER1_EVR { bits } } #[doc = "Bit 4 - 4:4\\] Write 1 to clear EVTOMCUFLAGS.AUX_TIMER0_EV. Read value is 0."] #[inline] pub fn aux_timer0_ev(&self) -> AUX_TIMER0_EVR { let bits = { const MASK: bool = true; const OFFSET: u8 = 4; ((self.bits >> OFFSET) & MASK as u32) != 0 }; AUX_TIMER0_EVR { bits } } #[doc = "Bit 3 - 3:3\\] Write 1 to clear EVTOMCUFLAGS.AUX_TDC_DONE. Read value is 0."] #[inline] pub fn aux_tdc_done(&self) -> AUX_TDC_DONER { let bits = { const MASK: bool = true; const OFFSET: u8 = 3; ((self.bits >> OFFSET) & MASK as u32) != 0 }; AUX_TDC_DONER { bits } } #[doc = "Bit 2 - 2:2\\] Write 1 to clear EVTOMCUFLAGS.AUX_COMPB. Read value is 0."] #[inline] pub fn aux_compb(&self) -> AUX_COMPBR { let bits = { const MASK: bool = true; const OFFSET: u8 = 2; ((self.bits >> OFFSET) & MASK as u32) != 0 }; AUX_COMPBR { bits } } #[doc = "Bit 1 - 1:1\\] Write 1 to clear EVTOMCUFLAGS.AUX_COMPA. 
Read value is 0."] #[inline] pub fn aux_compa(&self) -> AUX_COMPAR { let bits = { const MASK: bool = true; const OFFSET: u8 = 1; ((self.bits >> OFFSET) & MASK as u32) != 0 }; AUX_COMPAR { bits } } #[doc = "Bit 0 - 0:0\\] Write 1 to clear EVTOMCUFLAGS.AUX_WU_EV. Read value is 0."] #[inline] pub fn aux_wu_ev(&self) -> AUX_WU_EVR { let bits = { const MASK: bool = true; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) != 0 }; AUX_WU_EVR { bits } } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bits 16:31 - 31:16\\] Software should not rely on the value of a reserved. Writing any other value than the reset value may result in undefined behavior."] #[inline] pub fn reserved16(&mut self) -> _RESERVED16W { _RESERVED16W { w: self } } #[doc = "Bit 15 - 15:15\\] Write 1 to clear EVTOMCUFLAGS.AUX_TIMER2_PULSE. Read value is 0."] #[inline] pub fn aux_timer2_pulse(&mut self) -> _AUX_TIMER2_PULSEW { _AUX_TIMER2_PULSEW { w: self } } #[doc = "Bit 14 - 14:14\\] Write 1 to clear EVTOMCUFLAGS.AUX_TIMER2_EV3. Read value is 0."] #[inline] pub fn aux_timer2_ev3(&mut self) -> _AUX_TIMER2_EV3W { _AUX_TIMER2_EV3W { w: self } } #[doc = "Bit 13 - 13:13\\] Write 1 to clear EVTOMCUFLAGS.AUX_TIMER2_EV2. Read value is 0."] #[inline] pub fn aux_timer2_ev2(&mut self) -> _AUX_TIMER2_EV2W { _AUX_TIMER2_EV2W { w: self } } #[doc = "Bit 12 - 12:12\\] Write 1 to clear EVTOMCUFLAGS.AUX_TIMER2_EV1. Read value is 0."] #[inline] pub fn aux_timer2_ev1(&mut self) -> _AUX_TIMER2_EV1W { _AUX_TIMER2_EV1W { w: self } } #[doc = "Bit 11 - 11:11\\] Write 1 to clear EVTOMCUFLAGS.AUX_TIMER2_EV0. Read value is 0."] #[inline] pub fn aux_timer2_ev0(&mut self) -> _AUX_TIMER2_EV0W { _AUX_TIMER2_EV0W { w: self } } #[doc = "Bit 10 - 10:10\\] Write 1 to clear EVTOMCUFLAGS.AUX_ADC_IRQ. Read value is 0."] #[inline] pub fn aux_adc_irq(&mut self) -> _AUX_ADC_IRQW { _AUX_ADC_IRQW { w: self } } #[doc = "Bit 9 - 9:9\\] Write 1 to clear EVTOMCUFLAGS.MCU_OBSMUX0. Read value is 0."] #[inline] pub fn mcu_obsmux0(&mut self) -> _MCU_OBSMUX0W { _MCU_OBSMUX0W { w: self } } #[doc = "Bit 8 - 8:8\\] Write 1 to clear EVTOMCUFLAGS.AUX_ADC_FIFO_ALMOST_FULL. Read value is 0."] #[inline] pub fn aux_adc_fifo_almost_full(&mut self) -> _AUX_ADC_FIFO_ALMOST_FULLW { _AUX_ADC_FIFO_ALMOST_FULLW { w: self } } #[doc = "Bit 7 - 7:7\\] Write 1 to clear EVTOMCUFLAGS.AUX_ADC_DONE. Read value is 0."] #[inline] pub fn aux_adc_done(&mut self) -> _AUX_ADC_DONEW { _AUX_ADC_DONEW { w: self } } #[doc = "Bit 6 - 6:6\\] Write 1 to clear EVTOMCUFLAGS.AUX_SMPH_AUTOTAKE_DONE. Read value is 0."] #[inline] pub fn aux_smph_autotake_done(&mut self) -> _AUX_SMPH_AUTOTAKE_DONEW { _AUX_SMPH_AUTOTAKE_DONEW { w: self } } #[doc = "Bit 5 - 5:5\\] Write 1 to clear EVTOMCUFLAGS.AUX_TIMER1_EV. Read value is 0."] #[inline] pub fn aux_timer1_ev(&mut self) -> _AUX_TIMER1_EVW { _AUX_TIMER1_EVW { w: self } } #[doc = "Bit 4 - 4:4\\] Write 1 to clear EVTOMCUFLAGS.AUX_TIMER0_EV. Read value is 0."] #[inline] pub fn aux_timer0_ev(&mut self) -> _AUX_TIMER0_EVW { _AUX_TIMER0_EVW { w: self } } #[doc = "Bit 3 - 3:3\\] Write 1 to clear EVTOMCUFLAGS.AUX_TDC_DONE. Read value is 0."] #[inline] pub fn aux_tdc_done(&mut self) -> _AUX_TDC_DONEW { _AUX_TDC_DONEW { w: self } } #[doc = "Bit 2 - 2:2\\] Write 1 to clear EVTOMCUFLAGS.AUX_COMPB. 
Read value is 0."] #[inline] pub fn aux_compb(&mut self) -> _AUX_COMPBW { _AUX_COMPBW { w: self } } #[doc = "Bit 1 - 1:1\\] Write 1 to clear EVTOMCUFLAGS.AUX_COMPA. Read value is 0."] #[inline] pub fn aux_compa(&mut self) -> _AUX_COMPAW { _AUX_COMPAW { w: self } } #[doc = "Bit 0 - 0:0\\] Write 1 to clear EVTOMCUFLAGS.AUX_WU_EV. Read value is 0."] #[inline] pub fn aux_wu_ev(&mut self) -> _AUX_WU_EVW { _AUX_WU_EVW { w: self } } }
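
// A minimal sketch of driving this write-1-to-clear register through the
// generated API. The `RegisterBlock` path and the lower-case field name are
// assumptions based on the usual svd2rust layout, not taken from this file.
fn clear_adc_done(regs: &super::RegisterBlock) {
    // Writing 1 to a bit clears the matching EVTOMCUFLAGS flag; reads are 0,
    // so a plain `write` (which starts from the reset value 0) is safe here.
    regs.evtomcuflagsclr.write(|w| w.aux_adc_done().set_bit());
}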
28.273333
168
0.530872
39b03875b8470e4b3e082b90518ba13eaf7bd241
1,230
// Copyright 2012-2015 The Rust Project Developers. // Copyright 2017 The UNIC Project Developers. // // See the COPYRIGHT file at the top-level directory of this distribution. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![no_std] #![warn( bad_style, missing_debug_implementations, missing_docs, unconditional_recursion )] #![forbid(unsafe_code)] //! # UNIC - UCD - Unihan //! //! A component of [`unic`: Unicode and Internationalization Crates for Rust](/unic/). //! //! Accessor for Unicode Han Database (Unihan) mod readings; pub use crate::readings::{definition_of, mandarin_of}; mod variants; pub use crate::variants::{simplified_variant_of, traditional_variant_of}; use unic_ucd_version::UnicodeVersion; mod pkg_info; pub use crate::pkg_info::{PKG_DESCRIPTION, PKG_NAME, PKG_VERSION}; /// The [Unicode version](https://www.unicode.org/versions/) of data pub const UNICODE_VERSION: UnicodeVersion = include!("../tables/unicode_version.rsv");
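
// A hypothetical usage sketch (the exact accessor signatures are assumptions:
// a `char` in, an `Option` out):
//
//     use unic_ucd_unihan::{definition_of, mandarin_of};
//
//     if let Some(def) = definition_of('中') {
//         println!("definition: {}", def);
//     }
//     if let Some(pinyin) = mandarin_of('中') {
//         println!("mandarin: {}", pinyin);
//     }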
30.75
86
0.741463
61a5a8ae2e190f007f0df42fcc339abda54bffea
347
fn main() {
    let reference_to_nothing = dangle();
}

// ANCHOR: here
fn dangle() -> &String { // dangle returns a reference to a String

    let s = String::from("hello"); // s is a new String

    &s // we return a reference to the String, s
} // Here, s goes out of scope and is dropped. Its memory goes away.
  // Danger!
// ANCHOR_END: here
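
// One standard fix for the dangling reference above: return the String
// itself, moving ownership out to the caller, instead of a reference.
// fn no_dangle() -> String {
//     let s = String::from("hello");
//     s
// }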
24.785714
66
0.665706
72802c3c0fb69eec800fafe2a4f862902451806c
1,160
use reqwest::r#async::{Client, Response};
use reqwest::Error;
use tokio::prelude::Future;

/// Store transactions into the local storage.
/// The trytes to be used for this call are
/// returned by attachToTangle.
pub fn store_transactions(
    client: &Client,
    uri: &str,
    trytes: &[String],
) -> impl Future<Item = Response, Error = Error> {
    let body = json!({
        "command": "storeTransactions",
        "trytes": trytes,
    });

    client
        .post(uri)
        .header("Content-Type", "application/json")
        .header("X-IOTA-API-Version", "1")
        .body(body.to_string())
        .send()
}

/// This is a typed representation of the JSON response
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct StoreTransactionsResponse {
    /// Any errors that occurred
    error: Option<String>,
    /// Any exceptions that occurred
    exception: Option<String>,
}

impl StoreTransactionsResponse {
    /// Returns the error attribute
    fn error(&self) -> &Option<String> {
        &self.error
    }
    /// Returns the exception attribute
    fn exception(&self) -> &Option<String> {
        &self.exception
    }
}
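
// A minimal driver sketch for the call above. Assumptions: an IRI node
// listening locally on port 14265, and the reqwest-0.9/tokio-0.1
// (futures 0.1) stack that the signatures above imply.
fn example() {
    let client = Client::new();
    // Placeholder input; real trytes come from a prior attachToTangle call.
    let trytes = vec![String::from("TRYTES9PLACEHOLDER")];
    let work = store_transactions(&client, "http://localhost:14265", &trytes)
        .and_then(|mut response| response.json::<StoreTransactionsResponse>())
        .map(|parsed| println!("stored: {:?}", parsed))
        .map_err(|err| eprintln!("storeTransactions failed: {}", err));
    tokio::run(work);
}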
25.777778
56
0.632759
017d519b356bad8342c88ed5b6ea34d743de280a
10,018
#![cfg_attr(feature = "const_fn", feature(const_mut_refs, const_fn_fn_ptr_basics))]
#![no_std]

#[cfg(test)]
#[macro_use]
extern crate std;

#[cfg(feature = "use_spin")]
extern crate spin;

extern crate alloc;

use core::alloc::{GlobalAlloc, Layout};
use core::cmp::{max, min};
use core::fmt;
use core::mem::size_of;
#[cfg(feature = "use_spin")]
use core::ops::Deref;
use core::ptr::NonNull;
#[cfg(feature = "use_spin")]
use spin::Mutex;

mod frame;
pub mod linked_list;
#[cfg(test)]
mod test;

pub use frame::*;

/// A heap that uses buddy system with configurable order.
///
/// # Usage
///
/// Create a heap and add a memory region to it:
/// ```
/// use buddy_system_allocator::*;
/// # use core::mem::size_of;
/// let mut heap = Heap::<32>::empty();
/// # let space: [usize; 100] = [0; 100];
/// # let begin: usize = space.as_ptr() as usize;
/// # let end: usize = begin + 100 * size_of::<usize>();
/// # let size: usize = 100 * size_of::<usize>();
/// unsafe {
///     heap.init(begin, size);
///     // or
///     heap.add_to_heap(begin, end);
/// }
/// ```
pub struct Heap<const ORDER: usize> {
    // buddy system with max order of `ORDER`
    free_list: [linked_list::LinkedList; ORDER],

    // statistics
    user: usize,
    allocated: usize,
    total: usize,
}

impl<const ORDER: usize> Heap<ORDER> {
    /// Create an empty heap
    pub const fn new() -> Self {
        Heap {
            free_list: [linked_list::LinkedList::new(); ORDER],
            user: 0,
            allocated: 0,
            total: 0,
        }
    }

    /// Create an empty heap
    pub const fn empty() -> Self {
        Self::new()
    }

    /// Add a range of memory [start, end) to the heap
    pub unsafe fn add_to_heap(&mut self, mut start: usize, mut end: usize) {
        // avoid unaligned access on some platforms
        start = (start + size_of::<usize>() - 1) & (!size_of::<usize>() + 1);
        end = end & (!size_of::<usize>() + 1);
        assert!(start <= end);

        let mut total = 0;
        let mut current_start = start;

        while current_start + size_of::<usize>() <= end {
            let lowbit = current_start & (!current_start + 1);
            let size = min(lowbit, prev_power_of_two(end - current_start));
            total += size;

            self.free_list[size.trailing_zeros() as usize].push(current_start as *mut usize);
            current_start += size;
        }

        self.total += total;
    }

    /// Add a range of memory [start, end) to the heap
    pub unsafe fn init(&mut self, start: usize, size: usize) {
        self.add_to_heap(start, start + size);
    }

    /// Alloc a range of memory from the heap satisfying `layout` requirements
    pub fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, ()> {
        let size = max(
            layout.size().next_power_of_two(),
            max(layout.align(), size_of::<usize>()),
        );
        let class = size.trailing_zeros() as usize;
        for i in class..self.free_list.len() {
            // Find the first non-empty size class
            if !self.free_list[i].is_empty() {
                // Split buffers
                for j in (class + 1..i + 1).rev() {
                    if let Some(block) = self.free_list[j].pop() {
                        unsafe {
                            self.free_list[j - 1]
                                .push((block as usize + (1 << (j - 1))) as *mut usize);
                            self.free_list[j - 1].push(block);
                        }
                    } else {
                        return Err(());
                    }
                }

                let result = NonNull::new(
                    self.free_list[class]
                        .pop()
                        .expect("current block should have free space now")
                        as *mut u8,
                );
                if let Some(result) = result {
                    self.user += layout.size();
                    self.allocated += size;
                    return Ok(result);
                } else {
                    return Err(());
                }
            }
        }
        Err(())
    }

    /// Dealloc a range of memory from the heap
    pub fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
        let size = max(
            layout.size().next_power_of_two(),
            max(layout.align(), size_of::<usize>()),
        );
        let class = size.trailing_zeros() as usize;

        unsafe {
            // Put back into free list
            self.free_list[class].push(ptr.as_ptr() as *mut usize);
            // Merge free buddy lists
            let mut current_ptr = ptr.as_ptr() as usize;
            let mut current_class = class;
            while current_class < self.free_list.len() {
                let buddy = current_ptr ^ (1 << current_class);
                let mut flag = false;
                for block in self.free_list[current_class].iter_mut() {
                    if block.value() as usize == buddy {
                        block.pop();
                        flag = true;
                        break;
                    }
                }

                // Free buddy found
                if flag {
                    self.free_list[current_class].pop();
                    current_ptr = min(current_ptr, buddy);
                    current_class += 1;
                    self.free_list[current_class].push(current_ptr as *mut usize);
                } else {
                    break;
                }
            }
        }

        self.user -= layout.size();
        self.allocated -= size;
    }

    /// Return the number of bytes that the user requests
    pub fn stats_alloc_user(&self) -> usize {
        self.user
    }

    /// Return the number of bytes that are actually allocated
    pub fn stats_alloc_actual(&self) -> usize {
        self.allocated
    }

    /// Return the total number of bytes in the heap
    pub fn stats_total_bytes(&self) -> usize {
        self.total
    }
}

impl<const ORDER: usize> fmt::Debug for Heap<ORDER> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_struct("Heap")
            .field("user", &self.user)
            .field("allocated", &self.allocated)
            .field("total", &self.total)
            .finish()
    }
}

/// A locked version of `Heap`
///
/// # Usage
///
/// Create a locked heap and add a memory region to it:
/// ```
/// use buddy_system_allocator::*;
/// # use core::mem::size_of;
/// let heap = LockedHeap::<32>::new();
/// # let space: [usize; 100] = [0; 100];
/// # let begin: usize = space.as_ptr() as usize;
/// # let end: usize = begin + 100 * size_of::<usize>();
/// # let size: usize = 100 * size_of::<usize>();
/// unsafe {
///     heap.lock().init(begin, size);
///     // or
///     heap.lock().add_to_heap(begin, end);
/// }
/// ```
#[cfg(feature = "use_spin")]
pub struct LockedHeap<const ORDER: usize>(Mutex<Heap<ORDER>>);

#[cfg(feature = "use_spin")]
impl<const ORDER: usize> LockedHeap<ORDER> {
    /// Creates an empty heap
    pub const fn new() -> Self {
        LockedHeap(Mutex::new(Heap::<ORDER>::new()))
    }

    /// Creates an empty heap
    pub const fn empty() -> Self {
        LockedHeap(Mutex::new(Heap::<ORDER>::new()))
    }
}

#[cfg(feature = "use_spin")]
impl<const ORDER: usize> Deref for LockedHeap<ORDER> {
    type Target = Mutex<Heap<ORDER>>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

#[cfg(feature = "use_spin")]
unsafe impl<const ORDER: usize> GlobalAlloc for LockedHeap<ORDER> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        self.0
            .lock()
            .alloc(layout)
            .ok()
            .map_or(0 as *mut u8, |allocation| allocation.as_ptr())
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        self.0.lock().dealloc(NonNull::new_unchecked(ptr), layout)
    }
}

/// A locked version of `Heap` with rescue before oom
///
/// # Usage
///
/// Create a locked heap:
/// ```
/// use buddy_system_allocator::*;
/// let heap = LockedHeapWithRescue::new(|heap: &mut Heap<32>| {});
/// ```
///
/// Before OOM, the allocator will call the rescue function and then retry the
/// allocation once.
#[cfg(feature = "use_spin")] pub struct LockedHeapWithRescue<const ORDER: usize> { inner: Mutex<Heap<ORDER>>, rescue: fn(&mut Heap<ORDER>), } #[cfg(feature = "use_spin")] impl<const ORDER: usize> LockedHeapWithRescue<ORDER> { /// Creates an empty heap #[cfg(feature = "const_fn")] pub const fn new(rescue: fn(&mut Heap)) -> Self { LockedHeapWithRescue { inner: Mutex::new(Heap::<ORDER>::new()), rescue, } } /// Creates an empty heap #[cfg(not(feature = "const_fn"))] pub fn new(rescue: fn(&mut Heap<ORDER>)) -> Self { LockedHeapWithRescue { inner: Mutex::new(Heap::<ORDER>::new()), rescue, } } } #[cfg(feature = "use_spin")] impl<const ORDER: usize> Deref for LockedHeapWithRescue<ORDER> { type Target = Mutex<Heap<ORDER>>; fn deref(&self) -> &Self::Target { &self.inner } } #[cfg(feature = "use_spin")] unsafe impl<const ORDER: usize> GlobalAlloc for LockedHeapWithRescue<ORDER> { unsafe fn alloc(&self, layout: Layout) -> *mut u8 { let mut inner = self.inner.lock(); match inner.alloc(layout) { Ok(allocation) => allocation.as_ptr(), Err(_) => { (self.rescue)(&mut inner); inner .alloc(layout) .ok() .map_or(0 as *mut u8, |allocation| allocation.as_ptr()) } } } unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { self.inner .lock() .dealloc(NonNull::new_unchecked(ptr), layout) } } pub(crate) fn prev_power_of_two(num: usize) -> usize { 1 << (8 * (size_of::<usize>()) - num.leading_zeros() as usize - 1) }
29.206997
93
0.524955
1e35a9fb79242bdd512b451271ab29acf295f06b
4,507
use std::cmp::Ordering; use std::fmt; use chrono::{NaiveDateTime, Utc}; use diesel::{ connection::SimpleConnection, delete, insert_into, prelude::*, sql_query, sql_types::Text, update, }; use super::super::{errors::Result, rfc::RFC822}; use super::{schema::schema_migrations, schema_migrations_exists, Connection, ID}; #[derive(Queryable)] pub struct Item { pub id: ID, pub version: String, pub name: String, pub up: String, pub down: String, pub run_at: Option<NaiveDateTime>, } impl fmt::Display for Item { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "{:<14} {:<32} {}", self.version, match self.run_at { Some(v) => v.to_rfc822(), None => "N/A".to_string(), }, self.name, ) } } #[derive(Insertable, Eq, Clone)] #[table_name = "schema_migrations"] pub struct New<'a> { pub version: &'a str, pub name: &'a str, pub up: &'a str, pub down: &'a str, } impl<'a> fmt::Display for New<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}-{}", self.version, self.name) } } impl<'a> Ord for New<'a> { fn cmp(&self, other: &New) -> Ordering { self.version.cmp(&other.version) } } impl<'a> PartialOrd for New<'a> { fn partial_cmp(&self, other: &New) -> Option<Ordering> { Some(self.cmp(other)) } } impl<'a> PartialEq for New<'a> { fn eq(&self, other: &New) -> bool { self.version == other.version } } pub trait Dao { fn load(&self, items: &[New]) -> Result<()>; fn migrate(&self) -> Result<()>; fn rollback(&self) -> Result<()>; fn versions(&self) -> Result<Vec<Item>>; fn check(&self) -> Result<()>; } #[derive(QueryableByName)] pub struct Table { #[sql_type = "Text"] pub name: String, } impl Dao for Connection { fn check(&self) -> Result<()> { let rst = sql_query(schema_migrations_exists("schema_migrations")).load::<Table>(self)?; if rst.is_empty() { info!("database is empty"); self.batch_execute(super::UP)?; } Ok(()) } fn load(&self, items: &[New]) -> Result<()> { self.check()?; for it in items { info!("find migration: {}", it); let c: i64 = schema_migrations::dsl::schema_migrations .filter(schema_migrations::dsl::version.eq(it.version)) .filter(schema_migrations::dsl::name.eq(it.name)) .count() .get_result(self)?; if c == 0 { info!("migration {} not exist, insert it", it); insert_into(schema_migrations::dsl::schema_migrations) .values(it) .execute(self)?; } } Ok(()) } fn migrate(&self) -> Result<()> { let now = Utc::now().naive_utc(); for it in schema_migrations::dsl::schema_migrations .filter(schema_migrations::dsl::run_at.is_null()) .order(schema_migrations::dsl::version.asc()) .load::<Item>(self)? { info!("run migrate {}", it.up); self.batch_execute(&it.up)?; let it = schema_migrations::dsl::schema_migrations .filter(schema_migrations::dsl::id.eq(&it.id)); update(it) .set(schema_migrations::dsl::run_at.eq(&now)) .execute(self)?; } Ok(()) } fn rollback(&self) -> Result<()> { self.check()?; match schema_migrations::dsl::schema_migrations .filter(schema_migrations::dsl::run_at.is_not_null()) .order(schema_migrations::dsl::version.desc()) .first::<Item>(self) { Ok(it) => { info!("rollback {}", it.down); self.batch_execute(&it.down)?; delete( schema_migrations::dsl::schema_migrations .filter(schema_migrations::dsl::id.eq(it.id)), ) .execute(self)?; } Err(_) => warn!("database is empty"), }; Ok(()) } fn versions(&self) -> Result<Vec<Item>> { self.check()?; let items = schema_migrations::dsl::schema_migrations .order(schema_migrations::dsl::version.asc()) .load(self)?; Ok(items) } }
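
// A minimal sketch of the intended call sequence. The connection value and
// the SQL strings are illustrative assumptions:
fn upgrade(db: &Connection) -> Result<()> {
    // Register any migrations that are not yet recorded, then apply the
    // pending ones in version order.
    db.load(&[New {
        version: "20190101010101",
        name: "create-users",
        up: "CREATE TABLE users(id BIGINT PRIMARY KEY);",
        down: "DROP TABLE users;",
    }])?;
    db.migrate()?;
    for it in db.versions()? {
        info!("{}", it);
    }
    Ok(())
}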
27.820988
96
0.516308
8f84fc63eb2b42815eb06efb3a94c8504afa3320
20,572
//! Type(Sort)-checking use super::*; lazy_static! { /// Cache of all types pub(super) static ref TERM_TYPES: RwLock<TypeTable> = RwLock::new(TypeTable { map: FxHashMap::default(), last_len: 0, }); } #[track_caller] /// Type-check this term, at a surface level. /// That is, determine its type without a full validity check. pub fn check(t: &Term) -> Sort { check_raw(t).unwrap() } #[track_caller] /// Fully type-check this term. /// That is, determine its type *with* a full validity check. pub fn check_rec(t: &Term) -> Sort { rec_check_raw(t).unwrap() } /// Return a list of child terms that must be typed first to type this term. fn check_dependencies(t: &Term) -> Vec<Term> { match &t.op { Op::Ite => vec![t.cs[1].clone()], Op::Eq => Vec::new(), Op::Var(_, _) => Vec::new(), Op::Const(_) => Vec::new(), Op::BvBinOp(_) => vec![t.cs[0].clone()], Op::BvBinPred(_) => Vec::new(), Op::BvNaryOp(_) => vec![t.cs[0].clone()], Op::BvUnOp(_) => vec![t.cs[0].clone()], Op::BoolToBv => Vec::new(), Op::BvExtract(_, _) => Vec::new(), Op::BvConcat => t.cs.clone(), Op::BvUext(_) => vec![t.cs[0].clone()], Op::BvSext(_) => vec![t.cs[0].clone()], Op::PfToBv(_) => Vec::new(), Op::Implies => Vec::new(), Op::BoolNaryOp(_) => Vec::new(), Op::Not => Vec::new(), Op::BvBit(_) => Vec::new(), Op::BoolMaj => Vec::new(), Op::FpBinOp(_) => vec![t.cs[0].clone()], Op::FpBinPred(_) => Vec::new(), Op::FpUnPred(_) => Vec::new(), Op::FpUnOp(_) => vec![t.cs[0].clone()], Op::BvToFp => vec![t.cs[0].clone()], Op::UbvToFp(_) => Vec::new(), Op::SbvToFp(_) => Vec::new(), Op::FpToFp(_) => Vec::new(), Op::PfUnOp(_) => vec![t.cs[0].clone()], Op::PfNaryOp(_) => vec![t.cs[0].clone()], Op::UbvToPf(_) => Vec::new(), Op::Select => vec![t.cs[0].clone()], Op::Store => vec![t.cs[0].clone()], Op::Tuple => t.cs.clone(), Op::Field(_) => vec![t.cs[0].clone()], Op::Update(_i) => vec![t.cs[0].clone()], Op::Map(_) => t.cs.clone(), } } fn check_raw_step(t: &Term, tys: &TypeTable) -> Result<Sort, TypeErrorReason> { let get_ty = |term: &Term| -> &Sort { tys.get(&term.to_weak()).unwrap_or_else(|| panic!("When checking the type of {} we needed the type of {}, but it was missing. 
This is a bug in check_dependencies", t, term))
    };
    match &t.op {
        Op::Ite => Ok(get_ty(&t.cs[1]).clone()),
        Op::Eq => Ok(Sort::Bool),
        Op::Var(_, s) => Ok(s.clone()),
        Op::Const(c) => Ok(c.sort()),
        Op::BvBinOp(_) => Ok(get_ty(&t.cs[0]).clone()),
        Op::BvBinPred(_) => Ok(Sort::Bool),
        Op::BvNaryOp(_) => Ok(get_ty(&t.cs[0]).clone()),
        Op::BvUnOp(_) => Ok(get_ty(&t.cs[0]).clone()),
        Op::BoolToBv => Ok(Sort::BitVector(1)),
        Op::BvExtract(a, b) => Ok(Sort::BitVector(a - b + 1)),
        Op::BvConcat => {
            t.cs.iter()
                .map(get_ty)
                .try_fold(0, |l: usize, r: &Sort| -> Result<usize, TypeErrorReason> {
                    bv_or(r, "concat").map(|rr| l + rr.as_bv())
                })
                .map(Sort::BitVector)
        }
        Op::BvUext(a) => {
            bv_or(get_ty(&t.cs[0]), "bv-uext").map(|bv| Sort::BitVector(bv.as_bv() + a))
        }
        Op::BvSext(a) => {
            bv_or(get_ty(&t.cs[0]), "bv-sext").map(|bv| Sort::BitVector(bv.as_bv() + a))
        }
        Op::PfToBv(a) => Ok(Sort::BitVector(*a)),
        Op::Implies => Ok(Sort::Bool),
        Op::BoolNaryOp(_) => Ok(Sort::Bool),
        Op::Not => Ok(Sort::Bool),
        Op::BvBit(_) => Ok(Sort::Bool),
        Op::BoolMaj => Ok(Sort::Bool),
        Op::FpBinOp(_) => Ok(get_ty(&t.cs[0]).clone()),
        Op::FpBinPred(_) => Ok(Sort::Bool),
        Op::FpUnPred(_) => Ok(Sort::Bool),
        Op::FpUnOp(_) => Ok(get_ty(&t.cs[0]).clone()),
        Op::BvToFp => match bv_or(get_ty(&t.cs[0]), "bv-to-fp") {
            Ok(Sort::BitVector(32)) => Ok(Sort::F32),
            Ok(Sort::BitVector(64)) => Ok(Sort::F64),
            Ok(s) => Err(TypeErrorReason::Custom(format!(
                "Cannot convert {} to floating-point",
                s
            ))),
            Err(e) => Err(e),
        },
        Op::UbvToFp(64) => Ok(Sort::F64),
        Op::UbvToFp(32) => Ok(Sort::F32),
        Op::SbvToFp(64) => Ok(Sort::F64),
        Op::SbvToFp(32) => Ok(Sort::F32),
        Op::FpToFp(64) => Ok(Sort::F64),
        Op::FpToFp(32) => Ok(Sort::F32),
        Op::PfUnOp(_) => Ok(get_ty(&t.cs[0]).clone()),
        Op::PfNaryOp(_) => Ok(get_ty(&t.cs[0]).clone()),
        Op::UbvToPf(m) => Ok(Sort::Field(m.clone())),
        Op::Select => array_or(get_ty(&t.cs[0]), "select").map(|(_, v)| v.clone()),
        Op::Store => Ok(get_ty(&t.cs[0]).clone()),
        Op::Tuple => Ok(Sort::Tuple(t.cs.iter().map(get_ty).cloned().collect())),
        Op::Field(i) => {
            let sort = get_ty(&t.cs[0]);
            let sorts = sort.as_tuple();
            if i < &sorts.len() {
                Ok(sorts[*i].clone())
            } else {
                Err(TypeErrorReason::OutOfBounds(format!(
                    "index {} in tuple of sort {}",
                    i, sort
                )))
            }
        }
        Op::Update(_i) => Ok(get_ty(&t.cs[0]).clone()),
        Op::Map(op) => {
            let arg_cnt = t.cs.len();
            let mut dterm_cs = Vec::new();
            let mut key_sort = Sort::Bool;
            let mut size = 0;
            let mut error = None;
            match arrmap_or(get_ty(&t.cs[0]), "map") {
                Ok((k, _, s)) => {
                    key_sort = k.clone();
                    size = *s;
                }
                Err(e) => {
                    error = Some(e);
                }
            }
            for i in 0..arg_cnt {
                match array_or(get_ty(&t.cs[i]), "map inputs") {
                    Ok((_, v)) => {
                        dterm_cs.push(v.default_term());
                    }
                    Err(e) => {
                        error = Some(e);
                    }
                }
            }
            match error {
                Some(e) => Err(e),
                None => {
                    let term_ = term((**op).clone(), dterm_cs);
                    Ok(Sort::Array(
                        Box::new(key_sort),
                        Box::new(get_ty(&term_).clone()),
                        size,
                    ))
                }
            }
        }
        o => Err(TypeErrorReason::Custom(format!("other operator: {}", o))),
    }
}

/// Type-check this term, *non-recursively*.
/// All results are stored in the global type table.
pub fn check_raw(t: &Term) -> Result<Sort, TypeError> {
    if let Some(s) = TERM_TYPES.read().unwrap().get(&t.to_weak()) {
        return Ok(s.clone());
    }
    {
        let mut term_tys = TERM_TYPES.write().unwrap();
        // to_check is a stack of (node, cs checked) pairs.
        let mut to_check = vec![(t.clone(), false)];
        while !to_check.is_empty() {
            let back = to_check.last_mut().unwrap();
            let weak = back.0.to_weak();
            // The idea here is to check that the cached entry is still backed
            // by a live (hash-consed) term; a stale entry for a dead term is
            // evicted and recomputed.
            if let Some((p, _)) = term_tys.get_key_value(&weak) {
                if p.to_hconsed().is_some() {
                    to_check.pop();
                    continue;
                } else {
                    term_tys.remove(&weak);
                }
            }
            if !back.1 {
                back.1 = true;
                for c in check_dependencies(&back.0) {
                    to_check.push((c, false));
                }
            } else {
                let ty = check_raw_step(&back.0, &*term_tys).map_err(|reason| TypeError {
                    op: back.0.op.clone(),
                    args: vec![], // not quite right
                    reason,
                })?;
                term_tys.insert(back.0.to_weak(), ty);
            }
        }
    }
    Ok(TERM_TYPES
        .read()
        .unwrap()
        .get(&t.to_weak())
        .unwrap()
        .clone())
}

/// Helper function for rec_check_raw
/// Type-check given term which is expressed as
/// an operation and the sorts of its children
pub fn rec_check_raw_helper(oper: &Op, a: &[&Sort]) -> Result<Sort, TypeErrorReason> {
    match (oper, a) {
        (Op::Eq, &[a, b]) => eq_or(a, b, "=").map(|_| Sort::Bool),
        (Op::Ite, &[&Sort::Bool, b, c]) => eq_or(b, c, "ITE").map(|_| b.clone()),
        (Op::Var(_, s), &[]) => Ok(s.clone()),
        (Op::Const(c), &[]) => Ok(c.sort()),
        (Op::BvBinOp(_), &[a, b]) => {
            let ctx = "bv binary op";
            bv_or(a, ctx)
                .and_then(|_| eq_or(a, b, ctx))
                .map(|_| a.clone())
        }
        (Op::BvBinPred(_), &[a, b]) => {
            let ctx = "bv binary predicate";
            bv_or(a, ctx)
                .and_then(|_| eq_or(a, b, ctx))
                .map(|_| Sort::Bool)
        }
        (Op::BvNaryOp(_), a) => {
            let ctx = "bv nary op";
            all_eq_or(a.iter().cloned(), ctx)
                .and_then(|t| bv_or(t, ctx))
                .map(|a| a.clone())
        }
        (Op::BvUnOp(_), &[a]) => bv_or(a, "bv unary op").map(|a| a.clone()),
        (Op::BoolToBv, &[Sort::Bool]) => Ok(Sort::BitVector(1)),
        (Op::BvExtract(high, low), &[Sort::BitVector(w)]) => {
            if low <= high && high < w {
                Ok(Sort::BitVector(high - low + 1))
            } else {
                Err(TypeErrorReason::OutOfBounds(format!(
                    "Cannot slice from {} to {} in a bit-vector of width {}",
                    high, low, w
                )))
            }
        }
        (Op::BvConcat, a) => a
            .iter()
            .try_fold(0, |w, x| match x {
                Sort::BitVector(ww) => Ok(w + ww),
                s => Err(TypeErrorReason::ExpectedBv((*s).clone(), "concat")),
            })
            .map(Sort::BitVector),
        (Op::BvSext(a), &[Sort::BitVector(b)]) => Ok(Sort::BitVector(a + b)),
        (Op::PfToBv(a), &[Sort::Field(_)]) => Ok(Sort::BitVector(*a)),
        (Op::BvUext(a), &[Sort::BitVector(b)]) => Ok(Sort::BitVector(a + b)),
        (Op::Implies, &[a, b]) => {
            let ctx = "bool binary op";
            bool_or(a, ctx)
                .and_then(|_| eq_or(a, b, ctx))
                .map(|_| a.clone())
        }
        (Op::BoolNaryOp(_), a) => {
            let ctx = "bool nary op";
            all_eq_or(a.iter().cloned(), ctx)
                .and_then(|t| bool_or(t, ctx))
                .map(|a| a.clone())
        }
        (Op::Not, &[a]) => bool_or(a, "bool unary op").map(|a| a.clone()),
        (Op::BvBit(i), &[Sort::BitVector(w)]) => {
            if i < w {
                Ok(Sort::Bool)
            } else {
                Err(TypeErrorReason::OutOfBounds(format!(
                    "Cannot get bit {} of a {}-bit bit-vector",
                    i, w
                )))
            }
        }
        (Op::BoolMaj, &[a, b, c]) => {
            let ctx = "bool majority";
            bool_or(a, ctx)
                .and_then(|_| bool_or(b, ctx).and_then(|_| bool_or(c, ctx)))
                .map(|c| c.clone())
        }
        (Op::FpBinOp(_), &[a, b]) => {
            let ctx = "fp binary op";
            fp_or(a, ctx)
                .and_then(|_| eq_or(a, b, ctx))
                .map(|_| a.clone())
        }
        (Op::FpBinPred(_), &[a, b]) => {
            let ctx = "fp binary predicate";
            fp_or(a, ctx)
                .and_then(|_| eq_or(a, b, ctx))
                .map(|_| Sort::Bool)
        }
        (Op::FpUnOp(_), &[a]) => fp_or(a, "fp unary op").map(|a| a.clone()),
        (Op::FpUnPred(_), &[a]) => fp_or(a, "fp unary predicate").map(|_| Sort::Bool),
        (Op::BvToFp, &[Sort::BitVector(64)]) => Ok(Sort::F64),
        (Op::BvToFp, &[Sort::BitVector(32)]) => Ok(Sort::F32),
        (Op::UbvToFp(64), &[a]) => bv_or(a, "ubv-to-fp").map(|_| Sort::F64),
(Op::UbvToFp(32), &[a]) => bv_or(a, "ubv-to-fp").map(|_| Sort::F32), (Op::SbvToFp(64), &[a]) => bv_or(a, "sbv-to-fp").map(|_| Sort::F64), (Op::SbvToFp(32), &[a]) => bv_or(a, "sbv-to-fp").map(|_| Sort::F32), (Op::FpToFp(64), &[a]) => fp_or(a, "fp-to-fp").map(|_| Sort::F64), (Op::FpToFp(32), &[a]) => fp_or(a, "fp-to-fp").map(|_| Sort::F32), (Op::PfNaryOp(_), a) => { let ctx = "pf nary op"; all_eq_or(a.iter().cloned(), ctx) .and_then(|t| pf_or(t, ctx)) .map(|a| a.clone()) } (Op::UbvToPf(m), &[a]) => bv_or(a, "ubv-to-pf").map(|_| Sort::Field(m.clone())), (Op::PfUnOp(_), &[a]) => pf_or(a, "pf unary op").map(|a| a.clone()), (Op::Select, &[Sort::Array(k, v, _), a]) => eq_or(k, a, "select").map(|_| (**v).clone()), (Op::Store, &[Sort::Array(k, v, n), a, b]) => eq_or(k, a, "store") .and_then(|_| eq_or(v, b, "store")) .map(|_| Sort::Array(k.clone(), v.clone(), *n)), (Op::Tuple, a) => Ok(Sort::Tuple(a.iter().map(|a| (*a).clone()).collect())), (Op::Field(i), &[a]) => tuple_or(a, "tuple field access").and_then(|t| { if i < &t.len() { Ok(t[*i].clone()) } else { Err(TypeErrorReason::OutOfBounds(format!( "index {} in tuple of sort {}", i, a ))) } }), (Op::Update(i), &[a, b]) => tuple_or(a, "tuple field update").and_then(|t| { if i < &t.len() { eq_or(&t[*i], b, "tuple update")?; Ok(a.clone()) } else { Err(TypeErrorReason::OutOfBounds(format!( "index {} in tuple of sort {}", i, a ))) } }), (Op::Map(op), a) => { // Check that key sorts are the same across all arrays // Get the value sorts of the argument arrays // recursively call helper to get value type of mapped array // then return Ok(...) let (key_sort, size) = match a[0].clone() { Sort::Array(k, _, s) => (*k, s), s => return Err(TypeErrorReason::ExpectedArray(s, "map")), }; let mut val_sorts = Vec::new(); for a_i in a { match (*a_i).clone() { Sort::Array(k, v, s) => { if *k != key_sort { return Err(TypeErrorReason::NotEqual(*k, key_sort, "map: key sorts")); } if s != size { return Err(TypeErrorReason::Custom( "map: array lengths unequal".to_string(), )); } val_sorts.push((*v).clone()); } s => return Err(TypeErrorReason::ExpectedArray(s, "map")), }; } let mut new_a = Vec::new(); for ptr in &val_sorts { new_a.push(ptr); } rec_check_raw_helper(&(*op.clone()), &new_a[..]) .map(|val_sort| Sort::Array(Box::new(key_sort), Box::new(val_sort), size)) } (_, _) => Err(TypeErrorReason::Custom("other".to_string())), } } /// Type-check this term, recursively as needed. /// All results are stored in the global type table. pub fn rec_check_raw(t: &Term) -> Result<Sort, TypeError> { if let Some(s) = TERM_TYPES.read().unwrap().get(&t.to_weak()) { return Ok(s.clone()); } { let mut term_tys = TERM_TYPES.write().unwrap(); // to_check is a stack of (node, cs checked) pairs. 
let mut to_check = vec![(t.clone(), false)];
        while !to_check.is_empty() {
            let back = to_check.last_mut().unwrap();
            let weak = back.0.to_weak();
            // The idea here is to check that the cached entry's weak key still
            // refers to a live (hash-consed) term: if it does, the cached sort
            // can be reused; if not, the stale entry is evicted.
            if let Some((p, _)) = term_tys.get_key_value(&weak) {
                if p.to_hconsed().is_some() {
                    to_check.pop();
                    continue;
                } else {
                    term_tys.remove(&weak);
                }
            }
            if !back.1 {
                back.1 = true;
                for c in back.0.cs.clone() {
                    to_check.push((c, false));
                }
            } else {
                let tys = back
                    .0
                    .cs
                    .iter()
                    .map(|c| term_tys.get(&c.to_weak()).unwrap())
                    .collect::<Vec<_>>();
                let ty = rec_check_raw_helper(&back.0.op, &tys[..]).map_err(|reason| TypeError {
                    op: back.0.op.clone(),
                    args: tys.into_iter().cloned().collect(),
                    reason,
                })?;
                term_tys.insert(back.0.to_weak(), ty);
            }
        }
    }
    Ok(TERM_TYPES
        .read()
        .unwrap()
        .get(&t.to_weak())
        .unwrap()
        .clone())
}

#[derive(Debug, PartialEq, Eq)]
/// A type error with some operator.
pub struct TypeError {
    op: Op,
    args: Vec<Sort>,
    reason: TypeErrorReason,
}

#[derive(Debug, PartialEq, Eq)]
/// Underlying reason for the error
pub enum TypeErrorReason {
    /// Two sorts should be equal
    NotEqual(Sort, Sort, &'static str),
    /// A sort should be a boolean
    ExpectedBool(Sort, &'static str),
    /// A sort should be a floating-point
    ExpectedFp(Sort, &'static str),
    /// A sort should be a bit-vector
    ExpectedBv(Sort, &'static str),
    /// A sort should be a prime field
    ExpectedPf(Sort, &'static str),
    /// A sort should be an array
    ExpectedArray(Sort, &'static str),
    /// A sort should be a tuple
    ExpectedTuple(&'static str),
    /// An empty n-ary operator.
    EmptyNary(String),
    /// Something else
    Custom(String),
    /// Bad bounds
    OutOfBounds(String),
}

fn bv_or<'a>(a: &'a Sort, ctx: &'static str) -> Result<&'a Sort, TypeErrorReason> {
    if let Sort::BitVector(_) = a {
        Ok(a)
    } else {
        Err(TypeErrorReason::ExpectedBv(a.clone(), ctx))
    }
}

fn array_or<'a>(a: &'a Sort, ctx: &'static str) -> Result<(&'a Sort, &'a Sort), TypeErrorReason> {
    if let Sort::Array(k, v, _) = a {
        Ok((&*k, &*v))
    } else {
        Err(TypeErrorReason::ExpectedArray(a.clone(), ctx))
    }
}

fn arrmap_or<'a>(
    a: &'a Sort,
    ctx: &'static str,
) -> Result<(&'a Sort, &'a Sort, &'a usize), TypeErrorReason> {
    if let Sort::Array(k, v, s) = a {
        Ok((&*k, &*v, &*s))
    } else {
        Err(TypeErrorReason::ExpectedArray(a.clone(), ctx))
    }
}

fn bool_or<'a>(a: &'a Sort, ctx: &'static str) -> Result<&'a Sort, TypeErrorReason> {
    if let Sort::Bool = a {
        Ok(a)
    } else {
        Err(TypeErrorReason::ExpectedBool(a.clone(), ctx))
    }
}

fn fp_or<'a>(a: &'a Sort, ctx: &'static str) -> Result<&'a Sort, TypeErrorReason> {
    match a {
        Sort::F32 | Sort::F64 => Ok(a),
        _ => Err(TypeErrorReason::ExpectedFp(a.clone(), ctx)),
    }
}

fn pf_or<'a>(a: &'a Sort, ctx: &'static str) -> Result<&'a Sort, TypeErrorReason> {
    match a {
        Sort::Field(_) => Ok(a),
        _ => Err(TypeErrorReason::ExpectedPf(a.clone(), ctx)),
    }
}

fn tuple_or<'a>(a: &'a Sort, ctx: &'static str) -> Result<&'a [Sort], TypeErrorReason> {
    match a {
        Sort::Tuple(a) => Ok(a),
        _ => Err(TypeErrorReason::ExpectedTuple(ctx)),
    }
}

fn eq_or(a: &Sort, b: &Sort, ctx: &'static str) -> Result<(), TypeErrorReason> {
    if a == b {
        Ok(())
    } else {
        Err(TypeErrorReason::NotEqual(a.clone(), b.clone(), ctx))
    }
}

fn all_eq_or<'a, I: Iterator<Item = &'a Sort>>(
    mut a: I,
    ctx: &'static str,
) -> Result<&'a Sort, TypeErrorReason> {
    let first = a
        .next()
        .ok_or_else(|| TypeErrorReason::EmptyNary(ctx.to_owned()))?;
    for x in a {
        if first != x {
            return Err(TypeErrorReason::NotEqual(
                (*first).clone(),
                (*x).clone(),
                ctx,
            ));
        }
    }
    Ok(first)
}
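Editor's note: `rec_check_raw` above avoids native recursion by keeping an explicit stack of `(node, children_pushed)` pairs; each node is visited twice, once to push its children and once, after they are all typed, to compute its own sort. A minimal, self-contained model of that post-order worklist over a toy expression tree (the `Expr` type and `eval` function are illustrative stand-ins, not part of the codebase above):

```rust
use std::collections::HashMap;

// Toy expression: a leaf value or a sum of children.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
enum Expr {
    Lit(i64),
    Add(Vec<Expr>),
}

// Post-order evaluation with an explicit (node, children_pushed) stack,
// mirroring the traversal in `rec_check_raw`.
fn eval(root: &Expr) -> i64 {
    let mut memo: HashMap<Expr, i64> = HashMap::new();
    let mut to_check = vec![(root.clone(), false)];
    while let Some(back) = to_check.last_mut() {
        // Already computed (e.g. a shared subterm): nothing to do.
        if memo.contains_key(&back.0) {
            to_check.pop();
            continue;
        }
        if !back.1 {
            // First visit: push the children and revisit this node later.
            back.1 = true;
            if let Expr::Add(cs) = &back.0 {
                for c in cs.clone() {
                    to_check.push((c, false));
                }
            }
        } else {
            // Second visit: every child is memoized, so compute this node.
            let v = match &back.0 {
                Expr::Lit(n) => *n,
                Expr::Add(cs) => cs.iter().map(|c| memo[c]).sum(),
            };
            let node = to_check.pop().unwrap().0;
            memo.insert(node, v);
        }
    }
    memo[root]
}

fn main() {
    let e = Expr::Add(vec![Expr::Lit(1), Expr::Add(vec![Expr::Lit(2), Expr::Lit(3)])]);
    assert_eq!(eval(&e), 6);
}
```

The same two-phase visit is what lets the real checker report a `TypeError` at the exact node whose children are typed but whose own operator check fails.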
35.965035
181
0.446626
28036167537de6cdd65ac44cdfd08f244791ba2a
27,320
// Copyright 2019 Zhizhesihai (Beijing) Technology Limited.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod segment_infos;

pub use self::segment_infos::*;

mod segment_infos_format;

pub use self::segment_infos_format::*;

use serde::ser::SerializeStruct;
use serde::{Serialize, Serializer};

use std::collections::{HashMap, HashSet};
use std::fmt;
use std::hash::{Hash, Hasher};
use std::result;
use std::sync::atomic::{AtomicBool, AtomicI32, AtomicI64, Ordering as AtomicOrdering};
use std::sync::Arc;

use regex::Regex;

use core::codec::field_infos::FieldInfos;
use core::codec::{Codec, LiveDocsFormat};
use core::index::writer::BufferedUpdates;
use core::search::sort_field::Sort;
use core::store::directory::Directory;
use core::store::IOContext;
use core::util::to_base36;
use core::util::FixedBitSet;
use core::util::Version;
use core::util::ID_LENGTH;
use error::{
    ErrorKind::{IllegalArgument, IllegalState},
    Result,
};

// index file names
pub const INDEX_FILE_SEGMENTS: &str = "segments";
pub const INDEX_FILE_PENDING_SEGMENTS: &str = "pending_segments";
pub const INDEX_FILE_OLD_SEGMENT_GEN: &str = "segments.gen";

pub const CODEC_FILE_PATTERN: &str = r"_[a-z0-9]+(_.*)?\..*";
pub const CODEC_UPDATE_FNM_PATTERN: &str = r"(_[a-z0-9]+){2}\.fnm";
pub const CODEC_UPDATE_DV_PATTERN: &str = r"(_[a-z0-9]+){2}(_.*)*\.dv[md]";

#[allow(dead_code)]
fn matches_extension(filename: &str, ext: &str) -> bool {
    filename.ends_with(ext)
}

// locates the boundary of the segment name, or None
fn index_of_segment_name(filename: &str) -> Option<usize> {
    // If it is a .del file, there's an '_' after the first character
    let filename = &filename[1..];
    if let Some(i) = filename.find('_') {
        return Some(i + 1);
    }
    filename.find('.').map(|i| i + 1)
}

pub fn strip_segment_name(name: &str) -> &str {
    if let Some(idx) = index_of_segment_name(name) {
        &name[idx..]
    } else {
        name
    }
}

pub fn segment_file_name(name: &str, suffix: &str, ext: &str) -> String {
    if !ext.is_empty() || !suffix.is_empty() {
        assert!(!ext.starts_with('.'));
        let mut filename = String::with_capacity(name.len() + 2 + suffix.len() + ext.len());
        filename.push_str(name);
        if !suffix.is_empty() {
            filename.push('_');
            filename.push_str(suffix);
        }
        if !ext.is_empty() {
            filename.push('.');
            filename.push_str(ext);
        }
        filename
    } else {
        String::from(name)
    }
}

pub fn file_name_from_generation(base: &str, ext: &str, gen: u64) -> String {
    if gen == 0 {
        segment_file_name(base, "", ext)
    } else {
        let mut res = String::new();
        res.push_str(base);
        res.push('_');
        res += &to_base36(gen);
        if !ext.is_empty() {
            res.push('.');
            res.push_str(ext);
        }
        res
    }
}

/// Returns the generation from this file name,
/// or 0 if there is no generation
pub fn parse_generation(filename: &str) -> Result<i64> {
    debug_assert!(filename.starts_with('_'));
    let parts: Vec<&str> = strip_extension(filename)[1..].split('_').collect();
    // 4 cases:
    // segment.ext
    // segment_gen.ext
    // segment_codec_suffix.ext
    // segment_gen_codec_suffix.ext
    if parts.len() == 2 || parts.len() == 4 {
        Ok(parts[1].parse()?)
    } else {
        Ok(0)
    }
}

/// Parses the segment name out of the given file name.
/// @return the segment name only, or filename if it /// does not contain a '.' and '_'. pub fn parse_segment_name(filename: &str) -> &str { if let Some(idx) = index_of_segment_name(filename) { &filename[..idx] } else { filename } } /// Removes the extension (anything after the first '.'), /// otherwise returns the original filename. fn strip_extension(filename: &str) -> &str { if let Some(idx) = filename.find('.') { &filename[..idx] } else { filename } } pub const SEGMENT_USE_COMPOUND_YES: u8 = 0x01; pub const SEGMENT_USE_COMPOUND_NO: u8 = 0xff; pub struct SegmentInfo<D: Directory, C: Codec> { pub name: String, pub max_doc: i32, pub directory: Arc<D>, pub is_compound_file: AtomicBool, pub id: [u8; ID_LENGTH], pub codec: Option<Arc<C>>, pub diagnostics: HashMap<String, String>, pub attributes: HashMap<String, String>, pub index_sort: Option<Sort>, pub version: Version, pub set_files: HashSet<String>, } impl<D: Directory, C: Codec> SegmentInfo<D, C> { #[allow(clippy::too_many_arguments)] pub fn new( version: Version, name: &str, max_doc: i32, directory: Arc<D>, is_compound_file: bool, codec: Option<Arc<C>>, diagnostics: HashMap<String, String>, id: [u8; ID_LENGTH], attributes: HashMap<String, String>, index_sort: Option<Sort>, ) -> Result<SegmentInfo<D, C>> { Ok(SegmentInfo { name: String::from(name), max_doc, directory, is_compound_file: AtomicBool::new(is_compound_file), id, version, codec, diagnostics, attributes, set_files: HashSet::new(), index_sort, }) } pub fn set_codec(&mut self, codec: Arc<C>) { self.codec = Some(codec); } pub fn codec(&self) -> &Arc<C> { assert!(self.codec.is_some()); &self.codec.as_ref().unwrap() } pub fn max_doc(&self) -> i32 { debug_assert!(self.max_doc >= 0); self.max_doc } pub fn is_compound_file(&self) -> bool { self.is_compound_file.load(AtomicOrdering::Acquire) } pub fn set_use_compound_file(&self) { self.is_compound_file.store(true, AtomicOrdering::Release) } pub fn get_id(&self) -> &[u8] { &self.id } /// Return all files referenced by this SegmentInfo. 
pub fn files(&self) -> &HashSet<String> { // debug_assert!(!self.set_files.is_empty()); &self.set_files } pub fn set_files(&mut self, files: &HashSet<String>) -> Result<()> { self.set_files = HashSet::with_capacity(files.len()); self.add_files(files) } pub fn add_file(&mut self, file: &str) -> Result<()> { self.check_file_name(file)?; let file = self.named_for_this_segment(file); self.set_files.insert(file); Ok(()) } pub fn add_files(&mut self, files: &HashSet<String>) -> Result<()> { for f in files { self.check_file_name(f)?; } for f in files { let file = self.named_for_this_segment(&f); self.set_files.insert(file); } Ok(()) } fn check_file_name(&self, file: &str) -> Result<()> { let pattern = Regex::new(CODEC_FILE_PATTERN).unwrap(); if !pattern.is_match(file) { bail!(IllegalArgument("invalid code file_name.".into())); } if file.to_lowercase().ends_with(".tmp") { bail!(IllegalArgument( "invalid code file_name, can't end with .tmp extension".into() )); } Ok(()) } fn named_for_this_segment(&self, file: &str) -> String { let mut name = self.name.clone(); name.push_str(strip_segment_name(file)); name } pub fn index_sort(&self) -> Option<&Sort> { self.index_sort.as_ref() } pub fn set_diagnostics(&mut self, diags: HashMap<String, String>) { self.diagnostics = diags; } pub fn set_max_doc(&mut self, max_doc: i32) -> Result<()> { if self.max_doc != -1 { bail!(IllegalState("max_doc was already set".into())); } self.max_doc = max_doc; Ok(()) } } impl<D: Directory, C: Codec> Clone for SegmentInfo<D, C> { fn clone(&self) -> Self { SegmentInfo { name: self.name.clone(), max_doc: self.max_doc, is_compound_file: AtomicBool::new(self.is_compound_file()), directory: Arc::clone(&self.directory), id: self.id, codec: self.codec.as_ref().map(|c| Arc::clone(c)), diagnostics: self.diagnostics.clone(), attributes: self.attributes.clone(), version: self.version, set_files: self.set_files.clone(), index_sort: self.index_sort.clone(), } } } impl<D: Directory, C: Codec> Hash for SegmentInfo<D, C> { fn hash<H: Hasher>(&self, state: &mut H) { state.write(self.name.as_bytes()); } } impl<D: Directory, C: Codec> Serialize for SegmentInfo<D, C> { fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error> where S: Serializer, { let mut s = serializer.serialize_struct("SegmentInfo", 8)?; s.serialize_field("name", &self.name)?; s.serialize_field("max_doc", &self.max_doc)?; s.serialize_field("is_compound_file", &self.is_compound_file())?; s.serialize_field("id", &self.id)?; // TODO: directory? if self.codec.is_some() { let codec = self.codec.as_ref().unwrap(); s.serialize_field("codec", codec.name())?; } else { s.serialize_field("codec", "uninitialized")?; }; s.serialize_field("diagnostics", &self.diagnostics)?; s.serialize_field("attributes", &self.attributes)?; s.serialize_field("version", &self.version)?; s.end() } } impl<D: Directory, C: Codec> fmt::Debug for SegmentInfo<D, C> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if let Ok(s) = ::serde_json::to_string_pretty(self) { write!(f, "{}", s)?; } Ok(()) } } /// Embeds a [read-only] SegmentInfo and adds per-commit /// fields. /// @lucene.experimental */ pub struct SegmentCommitInfo<D: Directory, C: Codec> { /// The {@link SegmentInfo} that we wrap. 
pub info: SegmentInfo<D, C>, /// How many deleted docs in the segment: pub del_count: AtomicI32, /// Generation number of the live docs file (-1 if there /// are no deletes yet): pub del_gen: AtomicI64, /// Normally 1+delGen, unless an exception was hit on last /// attempt to write: pub next_write_del_gen: AtomicI64, /// Generation number of the FieldInfos (-1 if there are no updates) field_infos_gen: AtomicI64, /// Normally 1+fieldInfosGen, unless an exception was hit on last attempt to /// write pub next_write_field_infos_gen: AtomicI64, /// Generation number of the DocValues (-1 if there are no updates) pub doc_values_gen: AtomicI64, /// Normally 1+dvGen, unless an exception was hit on last attempt to /// write pub next_write_doc_values_gen: AtomicI64, /// Track the per-field DocValues update files pub dv_updates_files: HashMap<i32, HashSet<String>>, /// TODO should we add .files() to FieldInfosFormat, like we have on /// LiveDocsFormat? /// track the fieldInfos update files pub field_infos_files: HashSet<String>, pub size_in_bytes: AtomicI64, // NOTE: only used in-RAM by IW to track buffered deletes; // this is never written to/read from the Directory pub buffered_deletes_gen: AtomicI64, } impl<D: Directory, C: Codec> Hash for SegmentCommitInfo<D, C> { fn hash<H: Hasher>(&self, state: &mut H) { self.info.hash(state); } } impl<D: Directory, C: Codec> SegmentCommitInfo<D, C> { pub fn new( info: SegmentInfo<D, C>, del_count: i32, del_gen: i64, field_infos_gen: i64, doc_values_gen: i64, dv_updates_files: HashMap<i32, HashSet<String>>, field_infos_files: HashSet<String>, ) -> SegmentCommitInfo<D, C> { let field_info_gen = if field_infos_gen == -1 { 1 } else { field_infos_gen + 1 }; SegmentCommitInfo { info, del_count: AtomicI32::new(del_count), del_gen: AtomicI64::new(del_gen), next_write_del_gen: AtomicI64::new(if del_gen == -1 { 1i64 } else { del_gen + 1 }), field_infos_gen: AtomicI64::new(field_infos_gen), next_write_field_infos_gen: AtomicI64::new(field_info_gen), doc_values_gen: AtomicI64::new(doc_values_gen), next_write_doc_values_gen: AtomicI64::new(if doc_values_gen == -1 { 1 } else { doc_values_gen + 1 }), dv_updates_files, field_infos_files, size_in_bytes: AtomicI64::new(-1), buffered_deletes_gen: AtomicI64::new(0), } } pub fn files(&self) -> HashSet<String> { let mut files = HashSet::new(); // Start from the wrapped info's files: for f in self.info.files() { files.insert(f.clone()); } // TODO we could rely on TrackingDir.getCreatedFiles() (like we do for // updates) and then maybe even be able to remove LiveDocsFormat.files(). 
// Must separately add any live docs files: self.info.codec().live_docs_format().files(self, &mut files); // must separately add any field updates files for fs in self.dv_updates_files.values() { for f in fs { files.insert(f.clone()); } } // must separately add field_infos files for f in &self.field_infos_files { files.insert(f.clone()); } files } pub fn has_deletions(&self) -> bool { self.del_gen() != -1 } pub fn del_count(&self) -> i32 { self.del_count.load(AtomicOrdering::Acquire) } pub fn set_del_count(&self, del_count: i32) -> Result<()> { if del_count < 0 || del_count > self.info.max_doc() { bail!(IllegalArgument("invalid del_count".into())); } self.del_count.store(del_count, AtomicOrdering::Release); Ok(()) } pub fn has_field_updates(&self) -> bool { self.field_infos_gen() != -1 } pub fn set_field_infos_files(&mut self, field_infos_files: HashSet<String>) { self.field_infos_files = field_infos_files; } pub fn get_doc_values_updates_files(&self) -> &HashMap<i32, HashSet<String>> { &self.dv_updates_files } pub fn set_doc_values_updates_files( &mut self, dv_updates_files: HashMap<i32, HashSet<String>>, ) { self.dv_updates_files = dv_updates_files; } pub fn field_infos_gen(&self) -> i64 { self.field_infos_gen.load(AtomicOrdering::Acquire) } pub fn next_write_field_infos_gen(&self) -> i64 { self.next_write_field_infos_gen .load(AtomicOrdering::Acquire) } pub fn set_next_write_field_infos_gen(&self, gen: i64) { self.next_write_field_infos_gen .store(gen, AtomicOrdering::Release) } pub fn next_write_doc_values_gen(&self) -> i64 { self.next_write_doc_values_gen.load(AtomicOrdering::Acquire) } pub fn doc_values_gen(&self) -> i64 { self.doc_values_gen.load(AtomicOrdering::Acquire) } pub fn set_next_write_doc_values_gen(&self, gen: i64) { self.next_write_doc_values_gen .store(gen, AtomicOrdering::Release); } pub fn advance_field_infos_gen(&self) { self.field_infos_gen .store(self.next_field_infos_gen(), AtomicOrdering::Release); self.next_write_field_infos_gen .store(self.field_infos_gen() + 1, AtomicOrdering::Release); self.size_in_bytes.store(-1, AtomicOrdering::Release); } pub fn advance_doc_values_gen(&self) { self.doc_values_gen .store(self.next_write_doc_values_gen(), AtomicOrdering::Release); self.next_write_doc_values_gen .store(self.doc_values_gen() + 1, AtomicOrdering::Release); self.size_in_bytes.store(-1, AtomicOrdering::Release); } pub fn next_write_del_gen(&self) -> i64 { self.next_write_del_gen.load(AtomicOrdering::Acquire) } pub fn set_next_write_del_gen(&self, gen: i64) { self.next_write_del_gen.store(gen, AtomicOrdering::Release) } pub fn next_field_infos_gen(&self) -> i64 { self.next_write_field_infos_gen .load(AtomicOrdering::Acquire) } pub fn advance_next_write_del_gen(&self) { self.next_write_del_gen .fetch_add(1, AtomicOrdering::Acquire); } pub fn del_gen(&self) -> i64 { self.del_gen.load(AtomicOrdering::Acquire) } pub fn advance_del_gen(&self) { self.del_gen.store( self.next_write_del_gen.load(AtomicOrdering::Acquire), AtomicOrdering::Release, ); self.next_write_del_gen.fetch_add(1, AtomicOrdering::AcqRel); self.size_in_bytes.store(-1, AtomicOrdering::Release); } pub fn size_in_bytes(&self) -> i64 { let mut size = self.size_in_bytes.load(AtomicOrdering::Acquire); if size == -1 { let mut sum = 0; for name in self.files() { match self.info.directory.file_length(&name) { Ok(l) => { sum += l; } Err(e) => { warn!("get file '{}' length failed by '{:?}'", name, e); } } } size = sum; self.size_in_bytes.store(size, AtomicOrdering::Release); } size } pub fn 
buffered_deletes_gen(&self) -> i64 { self.buffered_deletes_gen.load(AtomicOrdering::Acquire) } pub fn set_buffered_deletes_gen(&self, v: i64) { self.buffered_deletes_gen.store(v, AtomicOrdering::Release); self.size_in_bytes.store(-1, AtomicOrdering::Release); } } impl<D: Directory, C: Codec> Clone for SegmentCommitInfo<D, C> { fn clone(&self) -> Self { let infos = SegmentCommitInfo::new( self.info.clone(), self.del_count(), self.del_gen(), self.field_infos_gen(), self.doc_values_gen(), self.dv_updates_files.clone(), self.field_infos_files.clone(), ); // Not clear that we need to carry over nextWriteDelGen // (i.e. do we ever clone after a failed write and // before the next successful write?), but just do it to // be safe: infos .next_write_del_gen .store(self.next_write_del_gen(), AtomicOrdering::Release); infos .next_write_field_infos_gen .store(self.next_write_field_infos_gen(), AtomicOrdering::Release); infos.set_next_write_doc_values_gen(self.next_write_doc_values_gen()); infos } } impl<D: Directory, C: Codec> Eq for SegmentCommitInfo<D, C> {} // WARN: only compare the segment name, maybe we should compare the raw pointer or the full struct? impl<D: Directory, C: Codec> PartialEq for SegmentCommitInfo<D, C> { fn eq(&self, other: &SegmentCommitInfo<D, C>) -> bool { self.info.name.eq(&other.info.name) } } impl<D: Directory, C: Codec> Serialize for SegmentCommitInfo<D, C> { fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error> where S: Serializer, { let mut s = serializer.serialize_struct("SegmentCommitInfo", 11)?; s.serialize_field("info", &self.info)?; s.serialize_field("del_count", &self.del_count())?; s.serialize_field("del_gen", &self.del_gen())?; s.serialize_field("next_write_del_gen", &self.next_write_del_gen())?; s.serialize_field("field_infos_gen", &self.field_infos_gen())?; s.serialize_field( "next_write_field_infos_gen", &self .next_write_field_infos_gen .load(AtomicOrdering::Acquire), )?; s.serialize_field("doc_values_gen", &self.doc_values_gen())?; s.serialize_field( "next_write_doc_values_gen", &self.next_write_doc_values_gen(), )?; s.serialize_field("dv_updates_files", &self.dv_updates_files)?; s.serialize_field("field_infos_files", &self.field_infos_files)?; s.serialize_field("size_in_bytes", &self.size_in_bytes())?; s.end() } } impl<D: Directory, C: Codec> fmt::Display for SegmentCommitInfo<D, C> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if let Ok(s) = ::serde_json::to_string_pretty(self) { write!(f, "{}", s)?; } Ok(()) } } impl<D: Directory, C: Codec> fmt::Debug for SegmentCommitInfo<D, C> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if let Ok(s) = ::serde_json::to_string_pretty(self) { write!(f, "{}", s)?; } Ok(()) } } /// Holder class for common parameters used during write. /// @lucene.experimental pub struct SegmentWriteState<D: Directory, DW: Directory, C: Codec> { /// {@link InfoStream} used for debugging messages. */ // info_stream: InfoStream, /// {@link Directory} where this segment will be written /// to. pub directory: Arc<DW>, /// {@link SegmentInfo} describing this segment. */ pub segment_info: SegmentInfo<D, C>, /// {@link FieldInfos} describing all fields in this /// segment. */ pub field_infos: FieldInfos, /// Number of deleted documents set while flushing the /// segment. */ pub del_count_on_flush: u32, /// Deletes and updates to apply while we are flushing the segment. 
A Term is
    /// enrolled in here if it was deleted/updated at one point, and it's mapped to
    /// the docIDUpto, meaning any docID < docIDUpto containing this term should
    /// be deleted/updated.
    pub seg_updates: Option<*const BufferedUpdates<C>>,

    /// {@link MutableBits} recording live documents; this is
    /// only set if there is one or more deleted documents. */
    pub live_docs: FixedBitSet,

    /// Unique suffix for any postings files written for this
    /// segment. {@link PerFieldPostingsFormat} sets this for
    /// each of the postings formats it wraps. If you create
    /// a new {@link PostingsFormat} then any files you
    /// write/read must be derived using this suffix (use
    /// {@link IndexFileNames#segmentFileName(String,String,String)}).
    ///
    /// Note: the suffix must be either empty, or be a textual suffix containing exactly
    /// two parts (separated by underscore), or be a base36 generation. */
    pub segment_suffix: String,

    /// {@link IOContext} for all writes; you should pass this
    /// to {@link Directory#createOutput(String,IOContext)}. */
    pub context: IOContext,
}

impl<D: Directory, DW: Directory, C: Codec> SegmentWriteState<D, DW, C> {
    pub fn new(
        directory: Arc<DW>,
        segment_info: SegmentInfo<D, C>,
        field_infos: FieldInfos,
        seg_updates: Option<*const BufferedUpdates<C>>,
        context: IOContext,
        segment_suffix: String,
    ) -> Self {
        debug_assert!(Self::assert_segment_suffix(&segment_suffix));
        SegmentWriteState {
            directory,
            segment_info,
            field_infos,
            del_count_on_flush: 0,
            seg_updates,
            live_docs: FixedBitSet::default(),
            segment_suffix,
            context,
        }
    }

    pub fn seg_updates(&self) -> &BufferedUpdates<C> {
        debug_assert!(self.seg_updates.is_some());
        unsafe { &*self.seg_updates.unwrap() }
    }

    // currently only used by assert? clean up and make real check?
    // either it's a segment suffix (_X_Y) or it's a parseable generation
    // TODO: this is very confusing how ReadersAndUpdates passes generations via
    // this mechanism, maybe add 'generation' explicitly to ctor create the 'actual suffix' here?
    fn assert_segment_suffix(segment_suffix: &str) -> bool {
        if !segment_suffix.is_empty() {
            let parts: Vec<&str> = segment_suffix.split('_').collect();
            if parts.len() == 2 {
                true
            } else if parts.len() == 1 {
                i64::from_str_radix(segment_suffix, 36).is_ok()
            } else {
                false // invalid
            }
        } else {
            true
        }
    }
}

impl<D: Directory, DW: Directory, C: Codec> Clone for SegmentWriteState<D, DW, C> {
    fn clone(&self) -> Self {
        SegmentWriteState {
            directory: Arc::clone(&self.directory),
            segment_info: self.segment_info.clone(),
            field_infos: self.field_infos.clone(),
            del_count_on_flush: self.del_count_on_flush,
            seg_updates: None,                 // not used
            live_docs: FixedBitSet::default(), // TODO, fake clone
            segment_suffix: self.segment_suffix.clone(),
            context: self.context,
        }
    }
}

/// Holder class for common parameters used during read.
/// @lucene.experimental
pub struct SegmentReadState<'a, D: Directory, DW: Directory, C: Codec> {
    /// {@link Directory} where this segment is read from.
    pub directory: Arc<DW>,

    /// {@link SegmentInfo} describing this segment.
    pub segment_info: &'a SegmentInfo<D, C>,

    /// {@link FieldInfos} describing all fields in this
    /// segment. */
    pub field_infos: Arc<FieldInfos>,

    /// {@link IOContext} to pass to {@link
    /// Directory#openInput(String,IOContext)}.
    pub context: &'a IOContext,

    /// Unique suffix for any postings files read for this
    /// segment. {@link PerFieldPostingsFormat} sets this for
    /// each of the postings formats it wraps.
If you create /// a new {@link PostingsFormat} then any files you /// write/read must be derived using this suffix (use /// {@link IndexFileNames#segmentFileName(String,String,String)}). pub segment_suffix: String, } impl<'a, D: Directory, DW: Directory, C: Codec> SegmentReadState<'a, D, DW, C> { pub fn new( directory: Arc<DW>, segment_info: &'a SegmentInfo<D, C>, field_infos: Arc<FieldInfos>, context: &'a IOContext, segment_suffix: String, ) -> SegmentReadState<'a, D, DW, C> { SegmentReadState { directory, segment_info, field_infos, context, segment_suffix, } } pub fn with_suffix( state: &'a SegmentReadState<D, DW, C>, suffix: &str, ) -> SegmentReadState<'a, D, DW, C> { Self::new( state.directory.clone(), state.segment_info, state.field_infos.clone(), state.context, String::from(suffix), ) } }
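Editor's note: the naming scheme above composes in a fixed order (segment name, optional `_suffix`, optional `.ext`), with generations rendered in base 36. A self-contained sketch of the file names this produces; the local `to_base36` helper is an assumed stand-in for `core::util::to_base36`, and the simplified `file_name_from_generation` below assumes a non-empty extension:

```rust
// Assumed lowercase-alphanumeric base-36 rendering, standing in for
// `core::util::to_base36`.
fn to_base36(mut n: u64) -> String {
    const DIGITS: &[u8] = b"0123456789abcdefghijklmnopqrstuvwxyz";
    if n == 0 {
        return "0".to_string();
    }
    let mut out = Vec::new();
    while n > 0 {
        out.push(DIGITS[(n % 36) as usize]);
        n /= 36;
    }
    out.reverse();
    String::from_utf8(out).unwrap()
}

// Mirrors `file_name_from_generation` above: generation 0 omits the `_gen`
// part entirely (simplified here to always append an extension).
fn file_name_from_generation(base: &str, ext: &str, gen: u64) -> String {
    if gen == 0 {
        format!("{}.{}", base, ext)
    } else {
        format!("{}_{}.{}", base, to_base36(gen), ext)
    }
}

fn main() {
    assert_eq!(file_name_from_generation("_0", "fnm", 0), "_0.fnm");
    // 37 in base 36 is "11", so generation 37 of segment `_0` becomes:
    assert_eq!(file_name_from_generation("_0", "fnm", 37), "_0_11.fnm");
}
```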
32.408066
99
0.602562
87e281c7eacda280ebd8131344322bb0cd40d1ce
12,180
pub trait Output { fn cur(&self) -> &vt100::Parser; fn cur_mut(&mut self) -> &mut vt100::Parser; fn next(&self) -> &vt100::Parser; fn next_mut(&mut self) -> &mut vt100::Parser; fn write_u16(&mut self, i: u16) { let mut itoa_buf = itoa::Buffer::new(); self.next_mut().process(itoa_buf.format(i).as_bytes()); } fn write_u8(&mut self, i: u8) { let mut itoa_buf = itoa::Buffer::new(); self.next_mut().process(itoa_buf.format(i).as_bytes()); } } pub trait Input { fn buf(&self) -> &[u8]; fn buf_mut(&mut self) -> &mut [u8]; fn buf_mut_vec(&mut self) -> &mut Vec<u8>; fn consume(&mut self, n: usize); fn unconsume(&mut self, n: usize); fn buf_is_empty(&self) -> bool; fn buf_at_beginning(&self) -> bool; fn should_parse_utf8(&self) -> bool; fn should_parse_ctrl(&self) -> bool; fn should_parse_meta(&self) -> bool; fn should_parse_special_keys(&self) -> bool; fn should_parse_single(&self) -> bool; fn try_read_string(&mut self) -> Option<crate::Key> { if !self.should_parse_utf8() { return None; } let prefix: Vec<_> = self .buf() .iter() .copied() .take_while(|&c| matches!(c, 32..=126 | 128..=247)) .collect(); if !prefix.is_empty() { match std::string::String::from_utf8_lossy(&prefix) { std::borrow::Cow::Borrowed(s) => { self.consume(s.len()); return Some(crate::Key::String(s.to_string())); } std::borrow::Cow::Owned(mut s) => { for (i, window) in s.as_bytes().windows(3).enumerate() { if window == [0xef, 0xbf, 0xbd] { if i > 0 { self.consume(i); s.truncate(i); return Some(crate::Key::String(s)); } // not quite correct, but figuring out how to // take only the invalid utf8 seems hard (and // this should come up very rarely) self.consume(prefix.len()); return Some(crate::Key::Bytes(prefix)); } } self.consume(s.len()); return Some(crate::Key::String(s)); } } } None } fn try_read_bytes(&mut self) -> Option<crate::Key> { #[allow(clippy::match_same_arms)] let prefix: Vec<_> = self .buf() .iter() .copied() .take_while(|&c| match c { 0 => true, 1..=26 => !self.should_parse_ctrl(), 27 => { !self.should_parse_meta() && !self.should_parse_special_keys() } 28..=31 => true, 32..=126 => !self.should_parse_utf8(), 127 => !self.should_parse_special_keys(), 128..=247 => !self.should_parse_utf8(), 248..=255 => true, }) .collect(); if !prefix.is_empty() { self.consume(prefix.len()); return Some(crate::Key::Bytes(prefix)); } None } fn normalize_to_bytes(&self, key: crate::Key) -> crate::Key { if let crate::Key::Byte(c) = key { crate::Key::Bytes(vec![c]) } else { key } } fn read_single_key(&mut self) -> Option<crate::Key> { match self.getc() { Some(0) => Some(crate::Key::Byte(0)), Some(c @ 1..=26) => { if self.should_parse_ctrl() { Some(crate::Key::Ctrl(b'a' + c - 1)) } else { Some(crate::Key::Byte(c)) } } Some(27) => { if self.should_parse_meta() || self.should_parse_special_keys() { self.read_escape_sequence() } else { Some(crate::Key::Byte(27)) } } Some(c @ 28..=31) => Some(crate::Key::Byte(c)), Some(c @ 32..=126) => { if self.should_parse_utf8() { Some(crate::Key::Char(char::from(c))) } else { Some(crate::Key::Byte(c)) } } Some(127) => { if self.should_parse_special_keys() { Some(crate::Key::Backspace) } else { Some(crate::Key::Byte(127)) } } Some(c @ 128..=255) => { if self.should_parse_utf8() { self.read_utf8_char(c) } else { Some(crate::Key::Byte(c)) } } None => None, } } fn read_escape_sequence(&mut self) -> Option<crate::Key> { enum EscapeState { Escape, Csi(Vec<u8>), Ckm, } let mut seen = vec![b'\x1b']; macro_rules! 
fail { () => {{ for &c in seen.iter().skip(1).rev() { self.ungetc(c); } if self.should_parse_special_keys() { return Some(crate::Key::Escape); } return Some(crate::Key::Byte(27)); }}; } macro_rules! next_byte { () => { match self.getc() { Some(c) => c, None => { fail!() } } }; } let mut state = EscapeState::Escape; loop { let c = next_byte!(); seen.push(c); match state { EscapeState::Escape => match c { b'[' => { if self.should_parse_special_keys() { state = EscapeState::Csi(vec![]); } else { fail!() } } b'O' => { if self.should_parse_special_keys() { state = EscapeState::Ckm; } else { fail!() } } b' '..=b'N' | b'P'..=b'Z' | b'\\'..=b'~' => { if self.should_parse_meta() { return Some(crate::Key::Meta(c)); } fail!() } _ => fail!(), }, EscapeState::Csi(ref mut param) => match c { b'A' => return Some(crate::Key::Up), b'B' => return Some(crate::Key::Down), b'C' => return Some(crate::Key::Right), b'D' => return Some(crate::Key::Left), b'H' => return Some(crate::Key::Home), b'F' => return Some(crate::Key::End), b'0'..=b'9' => param.push(c), b'~' => match param.as_slice() { [b'2'] => return Some(crate::Key::Insert), [b'3'] => return Some(crate::Key::Delete), [b'5'] => return Some(crate::Key::PageUp), [b'6'] => return Some(crate::Key::PageDown), [b'1', b'5'] => return Some(crate::Key::F(5)), [b'1', b'7'] => return Some(crate::Key::F(6)), [b'1', b'8'] => return Some(crate::Key::F(7)), [b'1', b'9'] => return Some(crate::Key::F(8)), [b'2', b'0'] => return Some(crate::Key::F(9)), [b'2', b'1'] => return Some(crate::Key::F(10)), [b'2', b'3'] => return Some(crate::Key::F(11)), [b'2', b'4'] => return Some(crate::Key::F(12)), [b'2', b'5'] => return Some(crate::Key::F(13)), [b'2', b'6'] => return Some(crate::Key::F(14)), [b'2', b'8'] => return Some(crate::Key::F(15)), [b'2', b'9'] => return Some(crate::Key::F(16)), [b'3', b'1'] => return Some(crate::Key::F(17)), [b'3', b'2'] => return Some(crate::Key::F(18)), [b'3', b'3'] => return Some(crate::Key::F(19)), [b'3', b'4'] => return Some(crate::Key::F(20)), _ => fail!(), }, _ => fail!(), }, EscapeState::Ckm => match c { b'A' => return Some(crate::Key::KeypadUp), b'B' => return Some(crate::Key::KeypadDown), b'C' => return Some(crate::Key::KeypadRight), b'D' => return Some(crate::Key::KeypadLeft), b'P' => return Some(crate::Key::F(1)), b'Q' => return Some(crate::Key::F(2)), b'R' => return Some(crate::Key::F(3)), b'S' => return Some(crate::Key::F(4)), _ => fail!(), }, } } } fn read_utf8_char(&mut self, initial: u8) -> Option<crate::Key> { let mut buf = vec![initial]; macro_rules! fail { () => {{ for &c in buf.iter().skip(1).rev() { self.ungetc(c); } return Some(crate::Key::Byte(initial)); }}; } macro_rules! 
next_byte { () => { match self.getc() { Some(c) => { if (0b1000_0000..=0b1011_1111).contains(&c) { c } else { self.ungetc(c); fail!() } } None => return None, } }; } match initial { 0b0000_0000..=0b0111_1111 => {} 0b1100_0000..=0b1101_1111 => { buf.push(next_byte!()); } 0b1110_0000..=0b1110_1111 => { buf.push(next_byte!()); buf.push(next_byte!()); } 0b1111_0000..=0b1111_0111 => { buf.push(next_byte!()); buf.push(next_byte!()); buf.push(next_byte!()); } _ => fail!(), } match std::string::String::from_utf8(buf) { Ok(s) => Some(crate::Key::Char( // buf always contains at least the initial character, and we // have already done the parsing to ensure that it contains a // valid utf8 character before getting here s.chars().next().unwrap(), )), Err(e) => { buf = e.into_bytes(); fail!() } } } fn getc(&mut self) -> Option<u8> { self.buf().get(0).copied().map(|c| { self.consume(1); c }) } fn ungetc(&mut self, c: u8) { if self.buf_at_beginning() { self.buf_mut_vec().insert(0, c); } else { self.unconsume(1); self.buf_mut()[0] = c; } } #[allow(clippy::match_same_arms)] fn expected_leading_utf8_bytes(&self, c: u8) -> usize { match c { 0b0000_0000..=0b0111_1111 => 1, 0b1100_0000..=0b1101_1111 => 2, 0b1110_0000..=0b1110_1111 => 3, 0b1111_0000..=0b1111_0111 => 4, _ => 1, } } }
35.100865
77
0.380706
ff85f22f67986fefb8768bb0b4c5d5f18609dce5
13,006
// Bitcoin Dev Kit
// Written in 2020 by Alekos Filini <[email protected]>
//
// Copyright (c) 2020-2021 Bitcoin Dev Kit Developers
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! Esplora
//!
//! This module defines a [`Blockchain`] struct that can query an Esplora backend
//! and populate the wallet's [database](crate::database::Database).
//!
//! ## Example
//!
//! ```no_run
//! # use bdk::blockchain::esplora::EsploraBlockchain;
//! let blockchain = EsploraBlockchain::new("https://blockstream.info/testnet/api", None);
//! # Ok::<(), bdk::Error>(())
//! ```

use std::collections::{HashMap, HashSet};
use std::fmt;

use futures::stream::{self, FuturesOrdered, StreamExt, TryStreamExt};

#[allow(unused_imports)]
use log::{debug, error, info, trace};

use serde::Deserialize;

use reqwest::{Client, StatusCode};

use bitcoin::consensus::{self, deserialize, serialize};
use bitcoin::hashes::hex::{FromHex, ToHex};
use bitcoin::hashes::{sha256, Hash};
use bitcoin::{BlockHash, BlockHeader, Script, Transaction, Txid};

use self::utils::{ElectrumLikeSync, ElsGetHistoryRes};
use super::*;
use crate::database::BatchDatabase;
use crate::error::Error;
use crate::wallet::utils::ChunksIterator;
use crate::FeeRate;

const DEFAULT_CONCURRENT_REQUESTS: u8 = 4;

#[derive(Debug)]
struct UrlClient {
    url: String,
    // We use the async client instead of the blocking one because it automatically uses `fetch`
    // when the target platform is wasm32.
    client: Client,
    concurrency: u8,
}

/// Structure that implements the logic to sync with Esplora
///
/// ## Example
/// See the [`blockchain::esplora`](crate::blockchain::esplora) module for a usage example.
#[derive(Debug)]
pub struct EsploraBlockchain(UrlClient);

impl std::convert::From<UrlClient> for EsploraBlockchain {
    fn from(url_client: UrlClient) -> Self {
        EsploraBlockchain(url_client)
    }
}

impl EsploraBlockchain {
    /// Create a new instance of the client from a base URL
    pub fn new(base_url: &str, concurrency: Option<u8>) -> Self {
        EsploraBlockchain(UrlClient {
            url: base_url.to_string(),
            client: Client::new(),
            concurrency: concurrency.unwrap_or(DEFAULT_CONCURRENT_REQUESTS),
        })
    }
}

#[maybe_async]
impl Blockchain for EsploraBlockchain {
    fn get_capabilities(&self) -> HashSet<Capability> {
        vec![
            Capability::FullHistory,
            Capability::GetAnyTx,
            Capability::AccurateFees,
        ]
        .into_iter()
        .collect()
    }

    fn setup<D: BatchDatabase, P: Progress>(
        &self,
        stop_gap: Option<usize>,
        database: &mut D,
        progress_update: P,
    ) -> Result<(), Error> {
        maybe_await!(self
            .0
            .electrum_like_setup(stop_gap, database, progress_update))
    }

    fn get_tx(&self, txid: &Txid) -> Result<Option<Transaction>, Error> {
        Ok(await_or_block!(self.0._get_tx(txid))?)
    }

    fn broadcast(&self, tx: &Transaction) -> Result<(), Error> {
        Ok(await_or_block!(self.0._broadcast(tx))?)
    }

    fn get_height(&self) -> Result<u32, Error> {
        Ok(await_or_block!(self.0._get_height())?)
    }

    fn estimate_fee(&self, target: usize) -> Result<FeeRate, Error> {
        let estimates = await_or_block!(self.0._get_fee_estimates())?;

        // Parse and sort the estimates by confirmation target: `HashMap`
        // iteration order is arbitrary, and the `take_while` below relies on
        // the targets being in ascending order.
        let mut sorted_estimates = estimates
            .into_iter()
            .map(|(k, v)| Ok::<_, std::num::ParseIntError>((k.parse::<usize>()?, v)))
            .collect::<Result<Vec<_>, _>>()
            .map_err(|e| Error::Generic(e.to_string()))?;
        sorted_estimates.sort_unstable_by_key(|(k, _)| *k);

        let fee_val = sorted_estimates
.into_iter() .take_while(|(k, _)| k <= &target) .map(|(_, v)| v) .last() .unwrap_or(1.0); Ok(FeeRate::from_sat_per_vb(fee_val as f32)) } } impl UrlClient { fn script_to_scripthash(script: &Script) -> String { sha256::Hash::hash(script.as_bytes()).into_inner().to_hex() } async fn _get_tx(&self, txid: &Txid) -> Result<Option<Transaction>, EsploraError> { let resp = self .client .get(&format!("{}/tx/{}/raw", self.url, txid)) .send() .await?; if let StatusCode::NOT_FOUND = resp.status() { return Ok(None); } Ok(Some(deserialize(&resp.error_for_status()?.bytes().await?)?)) } async fn _get_tx_no_opt(&self, txid: &Txid) -> Result<Transaction, EsploraError> { match self._get_tx(txid).await { Ok(Some(tx)) => Ok(tx), Ok(None) => Err(EsploraError::TransactionNotFound(*txid)), Err(e) => Err(e), } } async fn _get_header(&self, block_height: u32) -> Result<BlockHeader, EsploraError> { let resp = self .client .get(&format!("{}/block-height/{}", self.url, block_height)) .send() .await?; if let StatusCode::NOT_FOUND = resp.status() { return Err(EsploraError::HeaderHeightNotFound(block_height)); } let bytes = resp.bytes().await?; let hash = std::str::from_utf8(&bytes) .map_err(|_| EsploraError::HeaderHeightNotFound(block_height))?; let resp = self .client .get(&format!("{}/block/{}/header", self.url, hash)) .send() .await?; let header = deserialize(&Vec::from_hex(&resp.text().await?)?)?; Ok(header) } async fn _broadcast(&self, transaction: &Transaction) -> Result<(), EsploraError> { self.client .post(&format!("{}/tx", self.url)) .body(serialize(transaction).to_hex()) .send() .await? .error_for_status()?; Ok(()) } async fn _get_height(&self) -> Result<u32, EsploraError> { let req = self .client .get(&format!("{}/blocks/tip/height", self.url)) .send() .await?; Ok(req.error_for_status()?.text().await?.parse()?) } async fn _script_get_history( &self, script: &Script, ) -> Result<Vec<ElsGetHistoryRes>, EsploraError> { let mut result = Vec::new(); let scripthash = Self::script_to_scripthash(script); // Add the unconfirmed transactions first result.extend( self.client .get(&format!( "{}/scripthash/{}/txs/mempool", self.url, scripthash )) .send() .await? .error_for_status()? .json::<Vec<EsploraGetHistory>>() .await? .into_iter() .map(|x| ElsGetHistoryRes { tx_hash: x.txid, height: x.status.block_height.unwrap_or(0) as i32, }), ); debug!( "Found {} mempool txs for {} - {:?}", result.len(), scripthash, script ); // Then go through all the pages of confirmed transactions let mut last_txid = String::new(); loop { let response = self .client .get(&format!( "{}/scripthash/{}/txs/chain/{}", self.url, scripthash, last_txid )) .send() .await? .error_for_status()? .json::<Vec<EsploraGetHistory>>() .await?; let len = response.len(); if let Some(elem) = response.last() { last_txid = elem.txid.to_hex(); } debug!("... adding {} confirmed transactions", len); result.extend(response.into_iter().map(|x| ElsGetHistoryRes { tx_hash: x.txid, height: x.status.block_height.unwrap_or(0) as i32, })); if len < 25 { break; } } Ok(result) } async fn _get_fee_estimates(&self) -> Result<HashMap<String, f64>, EsploraError> { Ok(self .client .get(&format!("{}/fee-estimates", self.url,)) .send() .await? .error_for_status()? .json::<HashMap<String, f64>>() .await?) 
} } #[maybe_async] impl ElectrumLikeSync for UrlClient { fn els_batch_script_get_history<'s, I: IntoIterator<Item = &'s Script>>( &self, scripts: I, ) -> Result<Vec<Vec<ElsGetHistoryRes>>, Error> { let future = async { let mut results = vec![]; for chunk in ChunksIterator::new(scripts.into_iter(), self.concurrency as usize) { let mut futs = FuturesOrdered::new(); for script in chunk { futs.push(self._script_get_history(&script)); } let partial_results: Vec<Vec<ElsGetHistoryRes>> = futs.try_collect().await?; results.extend(partial_results); } Ok(stream::iter(results).collect().await) }; await_or_block!(future) } fn els_batch_transaction_get<'s, I: IntoIterator<Item = &'s Txid>>( &self, txids: I, ) -> Result<Vec<Transaction>, Error> { let future = async { let mut results = vec![]; for chunk in ChunksIterator::new(txids.into_iter(), self.concurrency as usize) { let mut futs = FuturesOrdered::new(); for txid in chunk { futs.push(self._get_tx_no_opt(&txid)); } let partial_results: Vec<Transaction> = futs.try_collect().await?; results.extend(partial_results); } Ok(stream::iter(results).collect().await) }; await_or_block!(future) } fn els_batch_block_header<I: IntoIterator<Item = u32>>( &self, heights: I, ) -> Result<Vec<BlockHeader>, Error> { let future = async { let mut results = vec![]; for chunk in ChunksIterator::new(heights.into_iter(), self.concurrency as usize) { let mut futs = FuturesOrdered::new(); for height in chunk { futs.push(self._get_header(height)); } let partial_results: Vec<BlockHeader> = futs.try_collect().await?; results.extend(partial_results); } Ok(stream::iter(results).collect().await) }; await_or_block!(future) } } #[derive(Deserialize)] struct EsploraGetHistoryStatus { block_height: Option<usize>, } #[derive(Deserialize)] struct EsploraGetHistory { txid: Txid, status: EsploraGetHistoryStatus, } /// Configuration for an [`EsploraBlockchain`] #[derive(Debug, serde::Deserialize, serde::Serialize, Clone, PartialEq)] pub struct EsploraBlockchainConfig { /// Base URL of the esplora service /// /// eg. `https://blockstream.info/api/` pub base_url: String, /// Number of parallel requests sent to the esplora service (default: 4) pub concurrency: Option<u8>, } impl ConfigurableBlockchain for EsploraBlockchain { type Config = EsploraBlockchainConfig; fn from_config(config: &Self::Config) -> Result<Self, Error> { Ok(EsploraBlockchain::new( config.base_url.as_str(), config.concurrency, )) } } /// Errors that can happen during a sync with [`EsploraBlockchain`] #[derive(Debug)] pub enum EsploraError { /// Error with the HTTP call Reqwest(reqwest::Error), /// Invalid number returned Parsing(std::num::ParseIntError), /// Invalid Bitcoin data returned BitcoinEncoding(bitcoin::consensus::encode::Error), /// Invalid Hex data returned Hex(bitcoin::hashes::hex::Error), /// Transaction not found TransactionNotFound(Txid), /// Header height not found HeaderHeightNotFound(u32), /// Header hash not found HeaderHashNotFound(BlockHash), } impl fmt::Display for EsploraError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:?}", self) } } impl std::error::Error for EsploraError {} impl_error!(reqwest::Error, Reqwest, EsploraError); impl_error!(std::num::ParseIntError, Parsing, EsploraError); impl_error!(consensus::encode::Error, BitcoinEncoding, EsploraError); impl_error!(bitcoin::hashes::hex::Error, Hex, EsploraError); #[cfg(feature = "test-blockchains")] crate::bdk_blockchain_tests! 
{ fn test_instance() -> EsploraBlockchain { EsploraBlockchain::new(std::env::var("BDK_ESPLORA_URL").unwrap_or("127.0.0.1:3002".into()).as_str(), None) } }
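Editor's note: the `estimate_fee` logic above reduces to picking, among the backend's `(confirmation_target, sat/vB)` pairs, the rate for the largest target not exceeding the caller's target, falling back to 1.0. A self-contained sketch of that selection over already-parsed pairs (the sort mirrors the fix above, since map iteration order is arbitrary):

```rust
// Pick the fee rate for the largest confirmation target <= `target`,
// falling back to 1.0 sat/vB when no estimate qualifies.
fn select_fee(mut estimates: Vec<(usize, f64)>, target: usize) -> f64 {
    estimates.sort_unstable_by_key(|(k, _)| *k);
    estimates
        .into_iter()
        .take_while(|(k, _)| *k <= target)
        .map(|(_, v)| v)
        .last()
        .unwrap_or(1.0)
}

fn main() {
    // Typical esplora-style response: faster confirmation costs more.
    let estimates = vec![(25, 1.5), (1, 87.9), (6, 20.3), (3, 40.1)];
    // Target of 6 blocks selects the 6-block estimate.
    assert_eq!(select_fee(estimates.clone(), 6), 20.3);
    // Target of 4 blocks selects the next-slower (3-block) estimate.
    assert_eq!(select_fee(estimates.clone(), 4), 40.1);
    // No qualifying target: fall back to the 1.0 default.
    assert_eq!(select_fee(estimates, 0), 1.0);
}
```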
30.674528
114
0.570967
f57986a985cdea63face6a3d361ba95a0abd0663
20,875
//! Orphan checker: every impl either implements a trait defined in this //! crate or pertains to a type defined in this crate. use rustc_data_structures::fx::FxHashSet; use rustc_errors::struct_span_err; use rustc_errors::ErrorGuaranteed; use rustc_hir as hir; use rustc_index::bit_set::GrowableBitSet; use rustc_infer::infer::TyCtxtInferExt; use rustc_middle::ty::subst::GenericArgKind; use rustc_middle::ty::subst::{GenericArg, InternalSubsts}; use rustc_middle::ty::{self, ImplPolarity, Ty, TyCtxt, TypeFoldable, TypeVisitor}; use rustc_session::lint; use rustc_span::def_id::{DefId, LocalDefId}; use rustc_span::Span; use rustc_trait_selection::traits; use std::ops::ControlFlow; pub(super) fn orphan_check_crate(tcx: TyCtxt<'_>, (): ()) -> &[LocalDefId] { let mut errors = Vec::new(); for (&trait_def_id, impls_of_trait) in tcx.all_local_trait_impls(()) { for &impl_of_trait in impls_of_trait { match orphan_check_impl(tcx, impl_of_trait) { Ok(()) => {} Err(_) => errors.push(impl_of_trait), } } if tcx.trait_is_auto(trait_def_id) { lint_auto_trait_impls(tcx, trait_def_id, impls_of_trait); } } tcx.arena.alloc_slice(&errors) } #[instrument(skip(tcx), level = "debug")] fn orphan_check_impl(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Result<(), ErrorGuaranteed> { let trait_ref = tcx.impl_trait_ref(def_id).unwrap(); let trait_def_id = trait_ref.def_id; let item = tcx.hir().item(hir::ItemId { def_id }); let hir::ItemKind::Impl(ref impl_) = item.kind else { bug!("{:?} is not an impl: {:?}", def_id, item); }; let sp = tcx.sess.source_map().guess_head_span(item.span); let tr = impl_.of_trait.as_ref().unwrap(); // Ensure no opaque types are present in this impl header. See issues #76202 and #86411 for examples, // and #84660 where it would otherwise allow unsoundness. if trait_ref.has_opaque_types() { trace!("{:#?}", item); // First we find the opaque type in question. 
for ty in trait_ref.substs { for ty in ty.walk() { let ty::subst::GenericArgKind::Type(ty) = ty.unpack() else { continue }; let ty::Opaque(def_id, _) = *ty.kind() else { continue }; trace!(?def_id); // Then we search for mentions of the opaque type's type alias in the HIR struct SpanFinder<'tcx> { sp: Span, def_id: DefId, tcx: TyCtxt<'tcx>, } impl<'v, 'tcx> hir::intravisit::Visitor<'v> for SpanFinder<'tcx> { #[instrument(level = "trace", skip(self, _id))] fn visit_path(&mut self, path: &'v hir::Path<'v>, _id: hir::HirId) { // You can't mention an opaque type directly, so we look for type aliases if let hir::def::Res::Def(hir::def::DefKind::TyAlias, def_id) = path.res { // And check if that type alias's type contains the opaque type we're looking for for arg in self.tcx.type_of(def_id).walk() { if let GenericArgKind::Type(ty) = arg.unpack() { if let ty::Opaque(def_id, _) = *ty.kind() { if def_id == self.def_id { // Finally we update the span to the mention of the type alias self.sp = path.span; return; } } } } } hir::intravisit::walk_path(self, path) } } let mut visitor = SpanFinder { sp, def_id, tcx }; hir::intravisit::walk_item(&mut visitor, item); let reported = tcx .sess .struct_span_err(visitor.sp, "cannot implement trait on type alias impl trait") .span_note(tcx.def_span(def_id), "type alias impl trait defined here") .emit(); return Err(reported); } } span_bug!(sp, "opaque type not found, but `has_opaque_types` is set") } match traits::orphan_check(tcx, item.def_id.to_def_id()) { Ok(()) => {} Err(err) => emit_orphan_check_error( tcx, sp, tr.path.span, trait_ref.self_ty(), impl_.self_ty.span, &impl_.generics, err, )?, } // In addition to the above rules, we restrict impls of auto traits // so that they can only be implemented on nominal types, such as structs, // enums or foreign types. To see why this restriction exists, consider the // following example (#22978). Imagine that crate A defines an auto trait // `Foo` and a fn that operates on pairs of types: // // ``` // // Crate A // auto trait Foo { } // fn two_foos<A:Foo,B:Foo>(..) { // one_foo::<(A,B)>(..) // } // fn one_foo<T:Foo>(..) { .. } // ``` // // This type-checks fine; in particular the fn // `two_foos` is able to conclude that `(A,B):Foo` // because `A:Foo` and `B:Foo`. // // Now imagine that crate B comes along and does the following: // // ``` // struct A { } // struct B { } // impl Foo for A { } // impl Foo for B { } // impl !Send for (A, B) { } // ``` // // This final impl is legal according to the orphan // rules, but it invalidates the reasoning from // `two_foos` above. debug!( "trait_ref={:?} trait_def_id={:?} trait_is_auto={}", trait_ref, trait_def_id, tcx.trait_is_auto(trait_def_id) ); if tcx.trait_is_auto(trait_def_id) && !trait_def_id.is_local() { let self_ty = trait_ref.self_ty(); let opt_self_def_id = match *self_ty.kind() { ty::Adt(self_def, _) => Some(self_def.did()), ty::Foreign(did) => Some(did), _ => None, }; let msg = match opt_self_def_id { // We only want to permit nominal types, but not *all* nominal types. // They must be local to the current crate, so that people // can't do `unsafe impl Send for Rc<SomethingLocal>` or // `impl !Send for Box<SomethingLocalAndSend>`. 
Some(self_def_id) => { if self_def_id.is_local() { None } else { Some(( format!( "cross-crate traits with a default impl, like `{}`, \ can only be implemented for a struct/enum type \ defined in the current crate", tcx.def_path_str(trait_def_id) ), "can't implement cross-crate trait for type in another crate", )) } } _ => Some(( format!( "cross-crate traits with a default impl, like `{}`, can \ only be implemented for a struct/enum type, not `{}`", tcx.def_path_str(trait_def_id), self_ty ), "can't implement cross-crate trait with a default impl for \ non-struct/enum type", )), }; if let Some((msg, label)) = msg { let reported = struct_span_err!(tcx.sess, sp, E0321, "{}", msg).span_label(sp, label).emit(); return Err(reported); } } Ok(()) } fn emit_orphan_check_error<'tcx>( tcx: TyCtxt<'tcx>, sp: Span, trait_span: Span, self_ty: Ty<'tcx>, self_ty_span: Span, generics: &hir::Generics<'tcx>, err: traits::OrphanCheckErr<'tcx>, ) -> Result<!, ErrorGuaranteed> { Err(match err { traits::OrphanCheckErr::NonLocalInputType(tys) => { let msg = match self_ty.kind() { ty::Adt(..) => "can be implemented for types defined outside of the crate", _ if self_ty.is_primitive() => "can be implemented for primitive types", _ => "can be implemented for arbitrary types", }; let mut err = struct_span_err!( tcx.sess, sp, E0117, "only traits defined in the current crate {msg}" ); err.span_label(sp, "impl doesn't use only types from inside the current crate"); for (ty, is_target_ty) in &tys { let mut ty = *ty; tcx.infer_ctxt().enter(|infcx| { // Remove the lifetimes unnecessary for this error. ty = infcx.freshen(ty); }); ty = match ty.kind() { // Remove the type arguments from the output, as they are not relevant. // You can think of this as the reverse of `resolve_vars_if_possible`. // That way if we had `Vec<MyType>`, we will properly attribute the // problem to `Vec<T>` and avoid confusing the user if they were to see // `MyType` in the error. ty::Adt(def, _) => tcx.mk_adt(*def, ty::List::empty()), _ => ty, }; let this = "this".to_string(); let (ty, postfix) = match &ty.kind() { ty::Slice(_) => (this, " because slices are always foreign"), ty::Array(..) => (this, " because arrays are always foreign"), ty::Tuple(..) 
=> (this, " because tuples are always foreign"), _ => (format!("`{}`", ty), ""), }; let msg = format!("{} is not defined in the current crate{}", ty, postfix); if *is_target_ty { // Point at `D<A>` in `impl<A, B> for C<B> in D<A>` err.span_label(self_ty_span, &msg); } else { // Point at `C<B>` in `impl<A, B> for C<B> in D<A>` err.span_label(trait_span, &msg); } } err.note("define and implement a trait or new type instead"); err.emit() } traits::OrphanCheckErr::UncoveredTy(param_ty, local_type) => { let mut sp = sp; for param in generics.params { if param.name.ident().to_string() == param_ty.to_string() { sp = param.span; } } match local_type { Some(local_type) => struct_span_err!( tcx.sess, sp, E0210, "type parameter `{}` must be covered by another type \ when it appears before the first local type (`{}`)", param_ty, local_type ) .span_label( sp, format!( "type parameter `{}` must be covered by another type \ when it appears before the first local type (`{}`)", param_ty, local_type ), ) .note( "implementing a foreign trait is only possible if at \ least one of the types for which it is implemented is local, \ and no uncovered type parameters appear before that first \ local type", ) .note( "in this case, 'before' refers to the following order: \ `impl<..> ForeignTrait<T1, ..., Tn> for T0`, \ where `T0` is the first and `Tn` is the last", ) .emit(), None => struct_span_err!( tcx.sess, sp, E0210, "type parameter `{}` must be used as the type parameter for some \ local type (e.g., `MyStruct<{}>`)", param_ty, param_ty ) .span_label( sp, format!( "type parameter `{}` must be used as the type parameter for some \ local type", param_ty, ), ) .note( "implementing a foreign trait is only possible if at \ least one of the types for which it is implemented is local", ) .note( "only traits defined in the current crate can be \ implemented for a type parameter", ) .emit(), } } }) } #[derive(Default)] struct AreUniqueParamsVisitor { seen: GrowableBitSet<u32>, } #[derive(Copy, Clone)] enum NotUniqueParam<'tcx> { DuplicateParam(GenericArg<'tcx>), NotParam(GenericArg<'tcx>), } impl<'tcx> TypeVisitor<'tcx> for AreUniqueParamsVisitor { type BreakTy = NotUniqueParam<'tcx>; fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> { match t.kind() { ty::Param(p) => { if self.seen.insert(p.index) { ControlFlow::CONTINUE } else { ControlFlow::Break(NotUniqueParam::DuplicateParam(t.into())) } } _ => ControlFlow::Break(NotUniqueParam::NotParam(t.into())), } } fn visit_region(&mut self, _: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> { // We don't drop candidates during candidate assembly because of region // constraints, so the behavior for impls only constrained by regions // will not change. ControlFlow::CONTINUE } fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> { match c.val() { ty::ConstKind::Param(p) => { if self.seen.insert(p.index) { ControlFlow::CONTINUE } else { ControlFlow::Break(NotUniqueParam::DuplicateParam(c.into())) } } _ => ControlFlow::Break(NotUniqueParam::NotParam(c.into())), } } } /// Lint impls of auto traits if they are likely to have /// unsound or surprising effects on auto impls. 
fn lint_auto_trait_impls(tcx: TyCtxt<'_>, trait_def_id: DefId, impls: &[LocalDefId]) { let mut non_covering_impls = Vec::new(); for &impl_def_id in impls { let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap(); if trait_ref.references_error() { return; } if tcx.impl_polarity(impl_def_id) != ImplPolarity::Positive { return; } assert_eq!(trait_ref.substs.len(), 1); let self_ty = trait_ref.self_ty(); let (self_type_did, substs) = match self_ty.kind() { ty::Adt(def, substs) => (def.did(), substs), _ => { // FIXME: should also lint for stuff like `&i32` but // considering that auto traits are unstable, that // isn't too important for now as this only affects // crates using `nightly`, and std. continue; } }; // Impls which completely cover a given root type are fine as they // disable auto impls entirely. So only lint if the substs // are not a permutation of the identity substs. match substs.visit_with(&mut AreUniqueParamsVisitor::default()) { ControlFlow::Continue(()) => {} // ok ControlFlow::Break(arg) => { // Ideally: // // - compute the requirements for the auto impl candidate // - check whether these are implied by the non covering impls // - if not, emit the lint // // What we do here is a bit simpler: // // - badly check if an auto impl candidate definitely does not apply // for the given simplified type // - if so, do not lint if fast_reject_auto_impl(tcx, trait_def_id, self_ty) { // ok } else { non_covering_impls.push((impl_def_id, self_type_did, arg)); } } } } for &(impl_def_id, self_type_did, arg) in &non_covering_impls { tcx.struct_span_lint_hir( lint::builtin::SUSPICIOUS_AUTO_TRAIT_IMPLS, tcx.hir().local_def_id_to_hir_id(impl_def_id), tcx.def_span(impl_def_id), |err| { let mut err = err.build(&format!( "cross-crate traits with a default impl, like `{}`, \ should not be specialized", tcx.def_path_str(trait_def_id), )); let item_span = tcx.def_span(self_type_did); let self_descr = tcx.def_kind(self_type_did).descr(self_type_did); err.span_note( item_span, &format!( "try using the same sequence of generic parameters as the {} definition", self_descr, ), ); match arg { NotUniqueParam::DuplicateParam(arg) => { err.note(&format!("`{}` is mentioned multiple times", arg)); } NotUniqueParam::NotParam(arg) => { err.note(&format!("`{}` is not a generic parameter", arg)); } } err.emit(); }, ); } } fn fast_reject_auto_impl<'tcx>(tcx: TyCtxt<'tcx>, trait_def_id: DefId, self_ty: Ty<'tcx>) -> bool { struct DisableAutoTraitVisitor<'tcx> { tcx: TyCtxt<'tcx>, trait_def_id: DefId, self_ty_root: Ty<'tcx>, seen: FxHashSet<DefId>, } impl<'tcx> TypeVisitor<'tcx> for DisableAutoTraitVisitor<'tcx> { type BreakTy = (); fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> { let tcx = self.tcx; if t != self.self_ty_root { for impl_def_id in tcx.non_blanket_impls_for_ty(self.trait_def_id, t) { match tcx.impl_polarity(impl_def_id) { ImplPolarity::Negative => return ControlFlow::BREAK, ImplPolarity::Reservation => {} // FIXME(@lcnr): That's probably not good enough, idk // // We might just want to take the rustdoc code and somehow avoid // explicit impls for `Self`. ImplPolarity::Positive => return ControlFlow::CONTINUE, } } } match t.kind() { ty::Adt(def, substs) if def.is_phantom_data() => substs.super_visit_with(self), ty::Adt(def, substs) => { // @lcnr: This is the only place where cycles can happen. We avoid this // by only visiting each `DefId` once. 
// // This will be incorrect in subtle cases, but I don't care :) if self.seen.insert(def.did()) { for ty in def.all_fields().map(|field| field.ty(tcx, substs)) { ty.visit_with(self)?; } } ControlFlow::CONTINUE } _ => t.super_visit_with(self), } } } let self_ty_root = match self_ty.kind() { ty::Adt(def, _) => tcx.mk_adt(*def, InternalSubsts::identity_for_item(tcx, def.did())), _ => unimplemented!("unexpected self ty {:?}", self_ty), }; self_ty_root .visit_with(&mut DisableAutoTraitVisitor { tcx, self_ty_root, trait_def_id, seen: FxHashSet::default(), }) .is_break() }
40.144231
109
0.489341
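The `AreUniqueParamsVisitor` above exists to answer one question: are the impl's substs a permutation of distinct generic parameters? Below is a dependency-free sketch of that check; rustc's `GenericArg`, `GrowableBitSet`, and `TypeVisitor` machinery are assumptions replaced here by a plain enum and a `std::collections::HashSet`, so this illustrates the logic rather than the compiler API.
```
use std::collections::HashSet;

#[derive(Debug, Clone, Copy, PartialEq)]
enum Arg {
    Param(u32), // a generic parameter, identified by its index
    Concrete,   // anything that is not a bare parameter, e.g. `u32` or `Vec<T>`
}

#[derive(Debug, PartialEq)]
enum NotUnique {
    Duplicate(Arg),
    NotParam(Arg),
}

// Fails as soon as the args are not a permutation of distinct parameters:
// either some parameter index repeats, or a non-parameter appears at all.
fn check_unique_params(args: &[Arg]) -> Result<(), NotUnique> {
    let mut seen = HashSet::new();
    for &arg in args {
        match arg {
            Arg::Param(idx) => {
                if !seen.insert(idx) {
                    return Err(NotUnique::Duplicate(arg));
                }
            }
            Arg::Concrete => return Err(NotUnique::NotParam(arg)),
        }
    }
    Ok(())
}

fn main() {
    assert_eq!(check_unique_params(&[Arg::Param(0), Arg::Param(1)]), Ok(()));
    assert_eq!(
        check_unique_params(&[Arg::Param(0), Arg::Param(0)]),
        Err(NotUnique::Duplicate(Arg::Param(0)))
    );
}
```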
9b2191b4d2e71d34989a4fb83051eb1a65ec2066
740
// Copyright 2022 RisingLight Project Authors. Licensed under Apache-2.0. use super::SecondaryIteratorImpl; use crate::storage::{StorageChunk, StorageResult}; pub struct TestIterator { chunks: Vec<StorageChunk>, cnt: usize, } impl TestIterator { pub fn new(chunks: Vec<StorageChunk>) -> Self { Self { chunks, cnt: 0 } } } impl TestIterator { pub async fn next_batch( &mut self, _expected_size: Option<usize>, ) -> StorageResult<Option<StorageChunk>> { if self.cnt >= self.chunks.len() { return Ok(None); } let chunk = self.chunks[self.cnt].clone(); self.cnt += 1; Ok(Some(chunk)) } } impl SecondaryIteratorImpl for TestIterator {}
23.125
73
0.625676
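`TestIterator` is a cursor over pre-built chunks: it clones the chunk at the current index, bumps the counter, and signals exhaustion with `Ok(None)`. A minimal sketch of the same pattern, with the crate-specific `StorageChunk`/`StorageResult` types swapped for plain `Vec<i32>` chunks (and the `async` dropped) so it runs on its own:
```
struct ChunkIter {
    chunks: Vec<Vec<i32>>,
    cnt: usize,
}

impl ChunkIter {
    fn new(chunks: Vec<Vec<i32>>) -> Self {
        Self { chunks, cnt: 0 }
    }

    /// Hands out the next chunk, or `None` once every chunk has been consumed.
    fn next_batch(&mut self) -> Option<Vec<i32>> {
        if self.cnt >= self.chunks.len() {
            return None;
        }
        let chunk = self.chunks[self.cnt].clone();
        self.cnt += 1;
        Some(chunk)
    }
}

fn main() {
    let mut it = ChunkIter::new(vec![vec![1, 2], vec![3]]);
    while let Some(chunk) = it.next_batch() {
        println!("{:?}", chunk);
    }
}
```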
7a45d807bf7e6fbc52d4ae0b83db123e0fa66bf3
983
// Copyright 2020 EinsteinDB Project Authors. Licensed under Apache-2.0. use std::local_path::local_path; pub trait CompactionJobInfo { type TableGreedoidsCollectionView; type CompactionReason; fn status(&self) -> Result<(), String>; fn namespaced_name(&self) -> &str; fn input_file_count(&self) -> usize; fn num_input_filefs_at_output_l_naught(&self) -> usize; fn input_file_at(&self, pos: usize) -> &local_path; fn output_file_count(&self) -> usize; fn output_file_at(&self, pos: usize) -> &local_path; fn table_greedoids(&self) -> &Self::TableGreedoidsCollectionView; fn base_input_l_naught(&self) -> i32; fn elapsed_micros(&self) -> u64; fn num_corrupt_keys(&self) -> u64; fn output_l_naught(&self) -> i32; fn input_records(&self) -> u64; fn output_records(&self) -> u64; fn total_input_bytes(&self) -> u64; fn total_output_bytes(&self) -> u64; fn jet_bundle_reason(&self) -> Self::CompactionReason; }
39.32
72
0.690743
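`CompactionJobInfo` keeps the backing engine abstract through its two associated types. Below is a hedged sketch of how a caller might stub such a trait for testing; since the real trait depends on EinsteinDB's renamed `local_path` type, this trims it to a representative subset over `std::path::Path` (the `MockJobInfo` type and its fields are illustrative assumptions, not part of the crate):
```
use std::path::{Path, PathBuf};

trait JobInfo {
    type CompactionReason;
    fn status(&self) -> Result<(), String>;
    fn input_file_at(&self, pos: usize) -> &Path;
    fn jet_bundle_reason(&self) -> Self::CompactionReason;
}

struct MockJobInfo {
    inputs: Vec<PathBuf>,
}

impl JobInfo for MockJobInfo {
    // A mock can pick a trivially constructible reason type.
    type CompactionReason = &'static str;

    fn status(&self) -> Result<(), String> {
        Ok(())
    }

    fn input_file_at(&self, pos: usize) -> &Path {
        &self.inputs[pos]
    }

    fn jet_bundle_reason(&self) -> Self::CompactionReason {
        "manual"
    }
}

fn main() {
    let job = MockJobInfo { inputs: vec![PathBuf::from("/tmp/sst/000001.sst")] };
    assert!(job.status().is_ok());
    assert_eq!(job.input_file_at(0), Path::new("/tmp/sst/000001.sst"));
}
```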
bf779b3b3d49d2996828b7f2cf7721d2d7eca2c7
3,512
/* * Asana * * This is the interface for interacting with the [Asana Platform](https://developers.asana.com). Our API reference is generated from our [OpenAPI spec] (https://raw.githubusercontent.com/Asana/developer-docs/master/defs/asana_oas.yaml). * * The version of the OpenAPI document: 1.0 * * Generated by: https://openapi-generator.tech */ #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ProjectResponseAllOf { /// Array of Custom Fields. #[serde(rename = "custom_fields", skip_serializing_if = "Option::is_none")] pub custom_fields: Option<Vec<crate::models::CustomFieldCompact>>, /// Array of users following this project. Followers are a subset of members who receive all notifications for a project, the default notification setting when adding members to a project in-product. #[serde(rename = "followers", skip_serializing_if = "Option::is_none")] pub followers: Option<Vec<crate::models::UserCompact>>, /// The current owner of the project, may be null. #[serde(rename = "owner", skip_serializing_if = "Option::is_none")] pub owner: Option<Box<crate::models::UserCompact>>, #[serde(rename = "team", skip_serializing_if = "Option::is_none")] pub team: Option<Box<crate::models::TeamCompact>>, /// The icon for a project. #[serde(rename = "icon", skip_serializing_if = "Option::is_none")] pub icon: Option<Icon>, /// A url that points directly to the object within Asana. #[serde(rename = "permalink_url", skip_serializing_if = "Option::is_none")] pub permalink_url: Option<String>, } impl ProjectResponseAllOf { pub fn new() -> ProjectResponseAllOf { ProjectResponseAllOf { custom_fields: None, followers: None, owner: None, team: None, icon: None, permalink_url: None, } } } /// The icon for a project. #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum Icon { #[serde(rename = "list")] List, #[serde(rename = "board")] Board, #[serde(rename = "timeline")] Timeline, #[serde(rename = "calendar")] Calendar, #[serde(rename = "rocket")] Rocket, #[serde(rename = "people")] People, #[serde(rename = "graph")] Graph, #[serde(rename = "star")] Star, #[serde(rename = "bug")] Bug, #[serde(rename = "light_bulb")] LightBulb, #[serde(rename = "globe")] Globe, #[serde(rename = "gear")] Gear, #[serde(rename = "notebook")] Notebook, #[serde(rename = "computer")] Computer, #[serde(rename = "check")] Check, #[serde(rename = "target")] Target, #[serde(rename = "html")] Html, #[serde(rename = "megaphone")] Megaphone, #[serde(rename = "chat_bubbles")] ChatBubbles, #[serde(rename = "briefcase")] Briefcase, #[serde(rename = "page_layout")] PageLayout, #[serde(rename = "mountain_flag")] MountainFlag, #[serde(rename = "puzzle")] Puzzle, #[serde(rename = "presentation")] Presentation, #[serde(rename = "line_and_symbols")] LineAndSymbols, #[serde(rename = "speed_dial")] SpeedDial, #[serde(rename = "ribbon")] Ribbon, #[serde(rename = "shoe")] Shoe, #[serde(rename = "shopping_basket")] ShoppingBasket, #[serde(rename = "map")] Map, #[serde(rename = "ticket")] Ticket, #[serde(rename = "coins")] Coins, }
31.079646
237
0.625285
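The generated `ProjectResponseAllOf` model leans on two serde conventions: optional fields elided via `skip_serializing_if = "Option::is_none"`, and a unit enum mapped to snake_case wire names with `#[serde(rename = ...)]`. A small self-contained round-trip demonstrating both, assuming `serde` (with the `derive` feature) and `serde_json` as dependencies:
```
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize, PartialEq)]
enum Icon {
    #[serde(rename = "list")]
    List,
    #[serde(rename = "board")]
    Board,
}

#[derive(Debug, Serialize, Deserialize)]
struct Project {
    #[serde(rename = "icon", skip_serializing_if = "Option::is_none")]
    icon: Option<Icon>,
    #[serde(rename = "permalink_url", skip_serializing_if = "Option::is_none")]
    permalink_url: Option<String>,
}

fn main() -> serde_json::Result<()> {
    // The wire name "board" deserializes into the renamed unit variant.
    let p: Project = serde_json::from_str(r#"{ "icon": "board" }"#)?;
    assert_eq!(p.icon, Some(Icon::Board));
    // `permalink_url` is `None`, so it is omitted on the way back out.
    assert_eq!(serde_json::to_string(&p)?, r#"{"icon":"board"}"#);
    Ok(())
}
```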
284515ae558aaa3695d07170efa5a9d21051fae8
12,573
use crate::prelude::*; use nu_engine::WholeStreamCommand; use nu_errors::ShellError; use nu_protocol::{Signature, SyntaxShape, UntaggedValue}; pub struct Tutor; impl WholeStreamCommand for Tutor { fn name(&self) -> &str { "tutor" } fn signature(&self) -> Signature { Signature::build("tutor") .optional( "search", SyntaxShape::String, "item to search for, or 'list' to list available tutorials", ) .named( "find", SyntaxShape::String, "Search tutorial for a phrase", Some('f'), ) } fn usage(&self) -> &str { "Run the tutorial. To begin, run: tutor" } fn run(&self, args: CommandArgs) -> Result<OutputStream, ShellError> { tutor(args) } fn examples(&self) -> Vec<Example> { vec![ Example { description: "Begin the tutorial", example: "tutor begin", result: None, }, Example { description: "Search a tutorial by phrase", example: "tutor -f \"$in\"", result: None, }, ] } } fn tutor(args: CommandArgs) -> Result<OutputStream, ShellError> { let tag = args.name_tag(); let scope = args.scope().clone(); let search: Option<String> = args.opt(0).unwrap_or(None); let find: Option<String> = args.get_flag("find")?; let search_space = [ (vec!["begin"], begin_tutor()), ( vec!["table", "tables", "row", "rows", "column", "columns"], table_tutor(), ), (vec!["cell", "cells"], cell_tutor()), ( vec![ "expr", "exprs", "expressions", "subexpression", "subexpressions", "sub-expression", "sub-expressions", ], expression_tutor(), ), (vec!["echo"], echo_tutor()), (vec!["each", "iteration", "iter"], each_tutor()), ( vec!["var", "vars", "variable", "variables"], variable_tutor(), ), (vec!["engine-q", "e-q"], engineq_tutor()), (vec!["block", "blocks"], block_tutor()), (vec!["shorthand", "shorthands"], shorthand_tutor()), ]; if let Some(find) = find { let mut results = vec![]; for search_group in search_space { if search_group.1.contains(&find) { results.push(search_group.0[0].to_string()) } } let message = format!("You can find '{}' in the following topics:\n{}\n\nYou can learn about a topic using `tutor` followed by the name of the topic.\nFor example: `tutor table` to open the table topic.\n\n", find, results.into_iter().map(|x| format!("- {}", x)).join("\n") ); return Ok(display(tag, &scope, &message)); } else if let Some(search) = search { for search_group in search_space { if search_group.0.contains(&search.as_str()) { return Ok(display(tag, &scope, search_group.1)); } } } Ok(display(tag, &scope, default_tutor())) } fn default_tutor() -> &'static str { r#" Welcome to the Nushell tutorial! With the `tutor` command, you'll be able to learn a lot about how Nushell works along with many fun tips and tricks to speed up everyday tasks. To get started, you can use `tutor begin`. "# } fn begin_tutor() -> &'static str { r#" Nushell is a structured shell and programming language. One way to begin using it is to try a few of the commands. The first command to try is `ls`. The `ls` command will show you a list of the files in the current directory. Notice that these files are shown as a table. Each column of this table not only tells us what is being shown, but also gives us a way to work with the data. You can combine the `ls` command with other commands using the pipeline symbol '|'. This allows data to flow from one command to the next. For example, if we only wanted the name column, we could do: ``` ls | select name ``` Notice that we still get a table, but this time it only has one column: the name column. 
You can continue to learn more about tables by running: ``` tutor tables ``` If at any point, you'd like to restart this tutorial, you can run: ``` tutor begin ``` "# } fn table_tutor() -> &'static str { r#" The most common form of data in Nushell is the table. Tables contain rows and columns of data. In each cell of the table, there is data that you can access using Nushell commands. To get the 3rd row in the table, you can use the `nth` command: ``` ls | nth 2 ``` This will get the 3rd (note that `nth` is zero-based) row in the table created by the `ls` command. You can use `nth` on any table created by other commands as well. You can also access the column of data in one of two ways. If you want to keep the column as part of a new table, you can use `select`. ``` ls | select name ``` This runs `ls` and returns only the "name" column of the table. If, instead, you'd like to get access to the values inside of the column, you can use the `get` command. ``` ls | get name ``` This allows us to get to the list of strings that are the filenames rather than having a full table. In some cases, this can make the names easier to work with. You can continue to learn more about working with cells of the table by running: ``` tutor cells ``` "# } fn cell_tutor() -> &'static str { r#" Working with cells of data in the table is a key part of working with data in Nushell. Because of this, there is a rich list of commands to work with cells as well as handy shorthands for accessing cells. Cells can hold simple values like strings and numbers, or more complex values like lists and tables. To reach a cell of data from a table, you can combine a row operation and a column operation. ``` ls | nth 4 | get name ``` You can combine these operations into one step using a shortcut. ``` (ls).4.name ``` Names/strings represent columns names and numbers represent row numbers. The `(ls)` is a form of expression. You can continue to learn more about expressions by running: ``` tutor expressions ``` You can also learn about these cell shorthands by running: ``` tutor shorthands ``` "# } fn expression_tutor() -> &'static str { r#" Expressions give you the power to mix calls to commands with math. The simplest expression is a single value like a string or number. ``` 3 ``` Expressions can also include math operations like addition or division. ``` 10 / 2 ``` Normally, an expression is one type of operation: math or commands. You can mix these types by using subexpressions. Subexpressions are just like expressions, but they're wrapped in parentheses `()`. ``` 10 * (3 + 4) ``` Here we use parentheses to create a higher math precedence in the math expression. ``` echo (2 + 3) ``` You can continue to learn more about the `echo` command by running: ``` tutor echo ``` "# } fn echo_tutor() -> &'static str { r#" The `echo` command in Nushell is a powerful tool for not only seeing values, but also for creating new ones. ``` echo "Hello" ``` You can echo output. This output, if it's not redirected using a "|" pipeline will be displayed to the screen. ``` echo 1..10 ``` You can also use echo to work with individual values of a range. In this example, `echo` will create the values from 1 to 10 as a list. ``` echo 1 2 3 4 5 ``` You can also create lists of values by passing `echo` multiple arguments. This can be helpful if you want to later processes these values. The `echo` command can pair well with the `each` command which can run code on each row, or item, of input. 
You can continue to learn more about the `each` command by running: ``` tutor each ``` "# } fn each_tutor() -> &'static str { r#" The `each` command gives us a way of working with each individual row or element of a list one at a time. It reads these in from the pipeline and runs a block on each element. A block is a group of pipelines. ``` echo 1 2 3 | each { $it + 10} ``` This example iterates over each element sent by `echo`, giving us three new values that are the original value + 10. Here, the `$it` is a variable that is the name given to the block's parameter by default. You can learn more about blocks by running: ``` tutor blocks ``` You can also learn more about variables by running: ``` tutor variables ``` "# } fn variable_tutor() -> &'static str { r#" Variables are an important way to store values to be used later. To create a variable, you can use the `let` keyword. The `let` command will create a variable and then assign it a value in one step. ``` let $x = 3 ``` Once created, we can refer to this variable by name. ``` $x ``` Nushell also comes with built-in variables. The `$nu` variable is a reserved variable that contains a lot of information about the currently running instance of Nushell. The `$it` variable is the name given to block parameters if you don't specify one. And `$in` is the variable that allows you to work with all of the data coming in from the pipeline in one place. "# } fn block_tutor() -> &'static str { r#" Blocks are a special form of expression that hold code to be run at a later time. Often, you'll see blocks as one of the arguments given to commands like `each` and `if`. ``` ls | each {|x| $x.name} ``` The above will create a list of the filenames in the directory. ``` if $true { echo "it's true" } { echo "it's not true" } ``` This `if` call will run the first block if the expression is true, or the second block if the expression is false. "# } fn shorthand_tutor() -> &'static str { r#" You can access cells in a table using a shorthand notation sometimes called a "column path" or "cell path". These paths allow you to go from a table to rows, columns, or cells inside of the table. Shorthand paths are made from rows numbers, column names, or both. You can use them on any variable or subexpression. ``` $nu.cwd ``` The above accesses the built-in `$nu` variable, gets its table, and then uses the shorthand path to retrieve only the cell data inside the "cwd" column. ``` (ls).name.4 ``` This will retrieve the cell data in the "name" column on the 5th row (note: row numbers are zero-based). Rows and columns don't need to come in any specific order. You can get the same value using: ``` (ls).4.name ``` "# } fn engineq_tutor() -> &'static str { r#" Engine-q is the upcoming engine for Nushell. Build for speed and correctness, it also comes with a set of changes from Nushell versions prior to 0.60. To get ready for engine-q look for some of these changes that might impact your current scripts: * Engine-q now uses a few new data structures, including a record syntax that allows you to model key-value pairs similar to JSON objects. * Environment variables can now contain more than just strings. Structured values are converted to strings for external commands using converters. * `if` will now use an `else` keyword before the else block. * We're moving from "config.toml" to "config.nu". This means startup will now be a script file. 
* `config` and its subcommands are being replaced by a record that you can update in the shell which contains all the settings under the variable `$config`. * bigint/bigdecimal values are now machine i64 and f64 values * And more, you can read more about upcoming changes in the up-to-date list at: https://github.com/nushell/engine-q/issues/522 "# } fn display(tag: Tag, scope: &Scope, help: &str) -> OutputStream { let help = help.split('`'); let mut build = String::new(); let mut code_mode = false; let palette = nu_engine::DefaultPalette {}; for item in help { if code_mode { code_mode = false; //TODO: support no-color mode let colored_example = nu_engine::Painter::paint_string(item, scope, &palette); build.push_str(&colored_example); } else { code_mode = true; build.push_str(item); } } OutputStream::one(UntaggedValue::string(build).into_value(tag)) } #[cfg(test)] mod tests { use super::ShellError; use super::Tutor; #[test] fn examples_work_as_expected() -> Result<(), ShellError> { use crate::examples::test as test_examples; test_examples(Tutor {}) } }
29.104167
216
0.652987
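The `tutor` command resolves input in two passes over `search_space`: `--find` scans topic bodies for a phrase and reports the canonical alias of each hit, while a positional search matches any alias and returns that topic's body, falling back to the default text. A condensed, dependency-free sketch of that lookup (crate types such as `OutputStream`, `Tag`, and `Scope` are dropped):
```
fn lookup<'a>(
    topics: &'a [(Vec<&'a str>, &'a str)],
    search: Option<&str>,
    find: Option<&str>,
) -> Vec<&'a str> {
    if let Some(phrase) = find {
        // Full-text search: report the canonical alias of every matching topic.
        return topics
            .iter()
            .filter(|(_, body)| body.contains(phrase))
            .map(|(aliases, _)| aliases[0])
            .collect();
    }
    if let Some(name) = search {
        // Alias match: return the body of the first topic that owns the name.
        if let Some((_, body)) = topics.iter().find(|(aliases, _)| aliases.contains(&name)) {
            return vec![*body];
        }
    }
    vec!["default tutorial"]
}

fn main() {
    let topics = vec![
        (vec!["table", "tables"], "Tables contain rows and columns."),
        (vec!["echo"], "The `echo` command creates values."),
    ];
    assert_eq!(
        lookup(&topics, Some("tables"), None),
        vec!["Tables contain rows and columns."]
    );
    assert_eq!(lookup(&topics, None, Some("rows")), vec!["table"]);
}
```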
9cdc51f021c2b775303cac3eb87c737316512165
8,828
use anchor_lang::{prelude::*, solana_program::keccak}; use gummyroll::{program::Gummyroll, state::node::Node}; declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); #[derive(Accounts)] #[instruction(max_depth: u32, max_buffer_size: u32)] pub struct CreateTree<'info> { pub authority: Signer<'info>, #[account( seeds = [ b"gummyroll-crud-authority-pda", merkle_roll.key().as_ref(), authority.key().as_ref(), ], bump, )] /// CHECK: This account is neither written to nor read from. pub authority_pda: UncheckedAccount<'info>, pub gummyroll_program: Program<'info, Gummyroll>, #[account(mut)] /// CHECK: unsafe pub merkle_roll: UncheckedAccount<'info>, } #[derive(Accounts)] pub struct Add<'info> { pub authority: Signer<'info>, #[account( seeds = [ b"gummyroll-crud-authority-pda", merkle_roll.key().as_ref(), authority.key().as_ref(), ], bump, )] /// CHECK: This account is neither written to nor read from. pub authority_pda: UncheckedAccount<'info>, pub gummyroll_program: Program<'info, Gummyroll>, #[account(mut)] /// CHECK: unsafe pub merkle_roll: UncheckedAccount<'info>, } #[derive(Accounts)] pub struct Remove<'info> { pub authority: Signer<'info>, #[account( seeds = [ b"gummyroll-crud-authority-pda", merkle_roll.key().as_ref(), authority.key().as_ref(), ], bump, )] /// CHECK: This account is neither written to nor read from. pub authority_pda: UncheckedAccount<'info>, pub gummyroll_program: Program<'info, Gummyroll>, #[account(mut)] /// CHECK: unsafe pub merkle_roll: UncheckedAccount<'info>, } #[derive(Accounts)] pub struct Transfer<'info> { /// CHECK: This account is neither written to nor read from. pub authority: UncheckedAccount<'info>, #[account( seeds = [ b"gummyroll-crud-authority-pda", merkle_roll.key().as_ref(), authority.key().as_ref(), ], bump, )] /// CHECK: This account is neither written to nor read from. pub authority_pda: UncheckedAccount<'info>, pub gummyroll_program: Program<'info, Gummyroll>, #[account(mut)] /// CHECK: unsafe pub merkle_roll: UncheckedAccount<'info>, pub owner: Signer<'info>, /// CHECK: This account is neither written to nor read from. 
pub new_owner: UncheckedAccount<'info>, } pub enum InstructionName { Unknown, CreateTree, Add, Transfer, Remove } pub fn get_instruction_type(full_bytes: &Vec<u8>) -> InstructionName { let disc: [u8; 8] = { let mut disc = [0; 8]; disc.copy_from_slice(&full_bytes[..8]); disc }; match disc { [165, 83, 136, 142, 89, 202, 47, 220] => InstructionName::CreateTree, [163, 52, 200, 231, 140, 3, 69, 186] => InstructionName::Transfer, [199, 186, 9, 79, 96, 129, 24, 106] => InstructionName::Remove, [41, 249, 249, 146, 197, 111, 56, 181] => InstructionName::Add, _ => InstructionName::Unknown } } #[program] pub mod gummyroll_crud { use super::*; pub fn create_tree( ctx: Context<CreateTree>, max_depth: u32, max_buffer_size: u32, ) -> Result<()> { let gummyroll_program = ctx.accounts.gummyroll_program.to_account_info(); let merkle_roll = ctx.accounts.merkle_roll.to_account_info(); let authority = ctx.accounts.authority.to_account_info(); let authority_pda = ctx.accounts.authority_pda.to_account_info(); let authority_pda_bump_seed = &[*ctx.bumps.get("authority_pda").unwrap()]; let seeds = &[ b"gummyroll-crud-authority-pda", merkle_roll.key.as_ref(), authority.key.as_ref(), authority_pda_bump_seed, ]; let authority_pda_signer = &[&seeds[..]]; let cpi_ctx = CpiContext::new_with_signer( gummyroll_program, gummyroll::cpi::accounts::Initialize { authority: authority_pda.clone(), append_authority: authority_pda.clone(), merkle_roll, }, authority_pda_signer, ); gummyroll::cpi::init_empty_gummyroll(cpi_ctx, max_depth, max_buffer_size) } pub fn add(ctx: Context<Add>, message: Vec<u8>) -> Result<()> { let authority = ctx.accounts.authority.to_account_info(); let authority_pda = ctx.accounts.authority_pda.to_account_info(); let gummyroll_program = ctx.accounts.gummyroll_program.to_account_info(); let merkle_roll = ctx.accounts.merkle_roll.to_account_info(); let authority_pda_bump_seed = &[*ctx.bumps.get("authority_pda").unwrap()]; let seeds = &[ b"gummyroll-crud-authority-pda", merkle_roll.key.as_ref(), authority.key.as_ref(), authority_pda_bump_seed, ]; let authority_pda_signer = &[&seeds[..]]; let cpi_ctx = CpiContext::new_with_signer( gummyroll_program, gummyroll::cpi::accounts::Append { authority: authority_pda.clone(), append_authority: authority_pda.clone(), merkle_roll, }, authority_pda_signer, ); let leaf = Node::new(get_message_hash(&authority, &message).to_bytes()); gummyroll::cpi::append(cpi_ctx, leaf) } pub fn transfer<'info>( ctx: Context<'_, '_, '_, 'info, Transfer<'info>>, root: [u8; 32], message: Vec<u8>, index: u32, ) -> Result<()> { let authority = ctx.accounts.authority.to_account_info(); let authority_pda = ctx.accounts.authority_pda.to_account_info(); let gummyroll_program = ctx.accounts.gummyroll_program.to_account_info(); let merkle_roll = ctx.accounts.merkle_roll.to_account_info(); let owner = ctx.accounts.owner.to_account_info(); let new_owner = ctx.accounts.new_owner.to_account_info(); let authority_pda_bump_seed = &[*ctx.bumps.get("authority_pda").unwrap()]; let seeds = &[ b"gummyroll-crud-authority-pda", merkle_roll.key.as_ref(), authority.key.as_ref(), authority_pda_bump_seed, ]; let authority_pda_signer = &[&seeds[..]]; let cpi_ctx = CpiContext::new_with_signer( gummyroll_program, gummyroll::cpi::accounts::Modify { authority: authority_pda.clone(), merkle_roll, }, authority_pda_signer, ) .with_remaining_accounts(ctx.remaining_accounts.to_vec()); // It's important to synthesize the previous leaf ourselves, rather than to // accept it as an arg, so that we can ensure the message hasn't 
been modified. let previous_leaf_node = Node::new(get_message_hash(&owner, &message).to_bytes()); let leaf_node = Node::new(get_message_hash(&new_owner, &message).to_bytes()); let root_node = Node::new(root); gummyroll::cpi::replace_leaf(cpi_ctx, root_node, previous_leaf_node, leaf_node, index) } pub fn remove<'info>( ctx: Context<'_, '_, '_, 'info, Remove<'info>>, root: [u8; 32], leaf_hash: [u8; 32], index: u32, ) -> Result<()> { let authority = ctx.accounts.authority.to_account_info(); let authority_pda = ctx.accounts.authority_pda.to_account_info(); let gummyroll_program = ctx.accounts.gummyroll_program.to_account_info(); let merkle_roll = ctx.accounts.merkle_roll.to_account_info(); let authority_pda_bump_seed = &[*ctx.bumps.get("authority_pda").unwrap()]; let seeds = &[ b"gummyroll-crud-authority-pda", merkle_roll.key.as_ref(), authority.key.as_ref(), authority_pda_bump_seed, ]; let authority_pda_signer = &[&seeds[..]]; let cpi_ctx = CpiContext::new_with_signer( gummyroll_program, gummyroll::cpi::accounts::Modify { authority: authority_pda.clone(), merkle_roll, }, authority_pda_signer, ) .with_remaining_accounts(ctx.remaining_accounts.to_vec()); let previous_leaf_node = Node::new(leaf_hash); let leaf_node = Node::default(); let root_node = Node::new(root); gummyroll::cpi::replace_leaf(cpi_ctx, root_node, previous_leaf_node, leaf_node, index) } } pub fn get_message_hash(owner: &AccountInfo, message: &Vec<u8>) -> keccak::Hash { keccak::hashv(&[&owner.key().to_bytes(), message.as_slice()]) }
36.032653
94
0.607839
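A key design point in the program above: `transfer` recomputes the previous leaf from `(owner, message)` rather than accepting it as an argument, so a caller cannot swap in a tampered message. The sketch below shows that commitment scheme in miniature; Solana's keccak hasher is replaced by `std`'s `DefaultHasher` purely to stay dependency-free (the real program must keep keccak, which is collision-resistant where `DefaultHasher` is not):
```
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// A leaf commits to both the owner and the message, mirroring
// `get_message_hash(owner, message)` in the program above.
fn leaf_hash(owner: &[u8; 32], message: &[u8]) -> u64 {
    let mut h = DefaultHasher::new();
    owner.hash(&mut h);
    message.hash(&mut h);
    h.finish()
}

fn main() {
    let owner = [1u8; 32];
    let new_owner = [2u8; 32];
    let msg = b"hello";
    // Transferring a message changes the owner component, so the leaf moves.
    assert_ne!(leaf_hash(&owner, msg), leaf_hash(&new_owner, msg));
}
```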
7a810679ab4f16db170cb27a71b6b35781374fdb
2,841
use crate::commands::WholeStreamCommand; use crate::prelude::*; use nu_errors::ShellError; use nu_protocol::{Signature, SyntaxShape, UntaggedValue, Value}; use nu_source::Tagged; use nu_value_ext::get_data_by_key; pub struct SortBy; #[derive(Deserialize)] pub struct SortByArgs { rest: Vec<Tagged<String>>, } impl WholeStreamCommand for SortBy { fn name(&self) -> &str { "sort-by" } fn signature(&self) -> Signature { Signature::build("sort-by").rest(SyntaxShape::String, "the column(s) to sort by") } fn usage(&self) -> &str { "Sort by the given columns, in increasing order." } fn run( &self, args: CommandArgs, registry: &CommandRegistry, ) -> Result<OutputStream, ShellError> { sort_by(args, registry) } fn examples(&self) -> Vec<Example> { vec![ Example { description: "Sort list by increasing value", example: "echo [4 2 3 1] | sort-by", result: Some(vec![ UntaggedValue::int(1).into(), UntaggedValue::int(2).into(), UntaggedValue::int(3).into(), UntaggedValue::int(4).into(), ]), }, Example { description: "Sort output by increasing file size", example: "ls | sort-by size", result: None, }, Example { description: "Sort output by type, and then by file size for each type", example: "ls | sort-by type size", result: None, }, ] } } fn sort_by(args: CommandArgs, registry: &CommandRegistry) -> Result<OutputStream, ShellError> { let registry = registry.clone(); let stream = async_stream! { let (SortByArgs { rest }, mut input) = args.process(&registry).await?; let mut vec = input.drain_vec().await; if vec.is_empty() { return; } match &vec[0] { Value { value: UntaggedValue::Primitive(_), .. } => { vec.sort(); }, _ => { let calc_key = |item: &Value| { rest.iter() .map(|f| get_data_by_key(item, f.borrow_spanned())) .collect::<Vec<Option<Value>>>() }; vec.sort_by_cached_key(calc_key); }, }; for item in vec { yield item.into(); } }; Ok(stream.to_output_stream()) } #[cfg(test)] mod tests { use super::SortBy; #[test] fn examples_work_as_expected() { use crate::examples::test as test_examples; test_examples(SortBy {}) } }
26.305556
95
0.501584
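`sort-by` builds a composite key per row, one `Option<Value>` per requested column, and hands it to `sort_by_cached_key`, so rows order lexicographically column by column and missing columns (`None`) sort first. The same strategy in a self-contained form, with `Value` and `get_data_by_key` replaced by a plain `BTreeMap` lookup:
```
use std::collections::BTreeMap;

fn main() {
    let rows: Vec<BTreeMap<&str, i64>> = vec![
        BTreeMap::from([("type", 1), ("size", 30)]),
        BTreeMap::from([("type", 0), ("size", 20)]),
        BTreeMap::from([("type", 0), ("size", 10)]),
    ];
    let columns = ["type", "size"];

    let mut sorted = rows.clone();
    // Each key is computed once per row and cached, exactly as in `sort-by`.
    sorted.sort_by_cached_key(|row| {
        columns
            .iter()
            .map(|c| row.get(c).copied()) // `None` sorts before any `Some`
            .collect::<Vec<Option<i64>>>()
    });

    // Sorted by "type" first, then "size" within each type.
    let sizes: Vec<i64> = sorted.iter().map(|r| r["size"]).collect();
    assert_eq!(sizes, vec![10, 20, 30]);
}
```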
bf58f73a6b3f2c157911b94c1a68c73ebb416956
27,429
#![feature(decl_macro)] extern crate sgx_types; extern crate sgx_urts; extern crate mio; #[macro_use] extern crate lazy_static; #[macro_use] extern crate rocket; #[macro_use] extern crate rocket_contrib; extern crate rocket_cors; extern crate serde; extern crate serde_json; #[macro_use] extern crate serde_derive; #[cfg(test)] mod tests; #[cfg(test)] extern crate ring_compat; #[cfg(test)] extern crate base64; #[cfg(test)] extern crate hex_literal; mod attestation; mod contract_input; mod contract_output; use sgx_types::*; use sgx_urts::SgxEnclave; use std::fs; use std::path; use std::net::SocketAddr; use std::str; use std::io::Read; use std::sync::{Arc, RwLock}; use std::env; use rocket::http::Method; use rocket_contrib::json::{Json, JsonValue}; use rocket_cors::{AllowedHeaders, AllowedOrigins, AllowedMethods, Cors, CorsOptions}; use contract_input::ContractInput; use contract_output::ContractOutput; use attestation::Attestation; static ENCLAVE_FILE: &'static str = "enclave.signed.so"; static ENCLAVE_STATE_FILE: &'static str = "enclave.token"; const ENCLAVE_OUTPUT_BUF_MAX_LEN: usize = 2*2048*1024 as usize; lazy_static! { static ref ENCLAVE: RwLock<Option<SgxEnclave>> = RwLock::new(None); static ref ENCLAVE_STATE_FILE_PATH: &'static str = { Box::leak( env::var("STATE_FILE_PATH").unwrap_or_else(|_| "./".to_string()).into_boxed_str() ) }; static ref ALLOW_CORS: bool = { env::var("ALLOW_CORS").unwrap_or_else(|_| "".to_string()) != "" }; } fn destroy_enclave() { let enclave = ENCLAVE.write().unwrap().take().unwrap(); enclave.destroy(); } fn get_eid() -> u64 { ENCLAVE.read().unwrap().as_ref().unwrap().geteid() } extern { fn ecall_handle( eid: sgx_enclave_id_t, retval: *mut sgx_status_t, action: u8, input_ptr: *const u8, input_len: usize, output_ptr : *mut u8, output_len_ptr: *mut usize, output_buf_len: usize ) -> sgx_status_t; fn ecall_init( eid: sgx_enclave_id_t, retval: *mut sgx_status_t ) -> sgx_status_t; } const IAS_SPID_STR: &str = env!("IAS_SPID"); const IAS_API_KEY_STR: &str = env!("IAS_API_KEY"); #[no_mangle] pub extern "C" fn ocall_load_ias_spid( key_ptr : *mut u8, key_len_ptr: *mut usize, key_buf_len: usize ) -> sgx_status_t { let key_len = IAS_SPID_STR.len(); unsafe { if key_len <= key_buf_len { std::ptr::copy_nonoverlapping(IAS_SPID_STR.as_ptr(), key_ptr, key_len); } else { panic!("IAS_SPID_STR too long. Buffer overflow."); } std::ptr::copy_nonoverlapping(&key_len as *const usize, key_len_ptr, std::mem::size_of_val(&key_len)); } sgx_status_t::SGX_SUCCESS } #[no_mangle] pub extern "C" fn ocall_load_ias_key( key_ptr : *mut u8, key_len_ptr: *mut usize, key_buf_len: usize ) -> sgx_status_t { let key_len = IAS_API_KEY_STR.len(); unsafe { if key_len <= key_buf_len { std::ptr::copy_nonoverlapping(IAS_API_KEY_STR.as_ptr(), key_ptr, key_len); } else { panic!("IAS_API_KEY_STR too long. 
Buffer overflow."); } std::ptr::copy_nonoverlapping(&key_len as *const usize, key_len_ptr, std::mem::size_of_val(&key_len)); } sgx_status_t::SGX_SUCCESS } #[no_mangle] pub extern "C" fn ocall_sgx_init_quote(ret_ti: *mut sgx_target_info_t, ret_gid : *mut sgx_epid_group_id_t) -> sgx_status_t { println!("Entering ocall_sgx_init_quote"); unsafe { sgx_init_quote(ret_ti, ret_gid) } } #[no_mangle] pub extern "C" fn ocall_get_quote (p_sigrl : *const u8, sigrl_len : u32, p_report : *const sgx_report_t, quote_type : sgx_quote_sign_type_t, p_spid : *const sgx_spid_t, p_nonce : *const sgx_quote_nonce_t, p_qe_report : *mut sgx_report_t, p_quote : *mut u8, _maxlen : u32, p_quote_len : *mut u32) -> sgx_status_t { println!("Entering ocall_get_quote"); let mut real_quote_len : u32 = 0; let ret = unsafe { sgx_calc_quote_size(p_sigrl, sigrl_len, &mut real_quote_len as *mut u32) }; if ret != sgx_status_t::SGX_SUCCESS { println!("sgx_calc_quote_size returned {}", ret); return ret; } println!("quote size = {}", real_quote_len); unsafe { *p_quote_len = real_quote_len; } let ret = unsafe { sgx_get_quote(p_report, quote_type, p_spid, p_nonce, p_sigrl, sigrl_len, p_qe_report, p_quote as *mut sgx_quote_t, real_quote_len) }; if ret != sgx_status_t::SGX_SUCCESS { println!("sgx_calc_quote_size returned {}", ret); return ret; } println!("sgx_calc_quote_size returned {}", ret); ret } #[no_mangle] pub extern "C" fn ocall_get_update_info( platform_blob: * const sgx_platform_info_t, enclave_trusted: i32, update_info: * mut sgx_update_info_bit_t ) -> sgx_status_t { unsafe{ sgx_report_attestation_status(platform_blob, enclave_trusted, update_info) } } #[no_mangle] pub extern "C" fn ocall_dump_state( _output_ptr : *mut u8, _output_len_ptr: *mut usize, _output_buf_len: usize ) -> sgx_status_t { // TODO: sgx_status_t::SGX_SUCCESS } #[no_mangle] pub extern "C" fn ocall_save_persistent_data( input_ptr: *const u8, input_len: usize ) -> sgx_status_t { let input_slice = unsafe { std::slice::from_raw_parts(input_ptr, input_len) }; println!("Sealed data {:}: {:?}", input_len, hex::encode(input_slice)); let executable = env::current_exe().unwrap(); let path = executable.parent().unwrap(); let state_path: path::PathBuf = path.join(*ENCLAVE_STATE_FILE_PATH).join(ENCLAVE_STATE_FILE); println!("Save seal data to {}", state_path.as_path().to_str().unwrap()); fs::write(state_path.as_path().to_str().unwrap(), input_slice) .expect("Failed to write persistent data"); sgx_status_t::SGX_SUCCESS } #[no_mangle] pub extern "C" fn ocall_load_persistent_data( output_ptr : *mut u8, output_len_ptr: *mut usize, output_buf_len: usize ) -> sgx_status_t { let executable = env::current_exe().unwrap(); let path = executable.parent().unwrap(); let state_path: path::PathBuf = path.join(*ENCLAVE_STATE_FILE_PATH).join(ENCLAVE_STATE_FILE); let state = match fs::read(state_path.as_path().to_str().unwrap()) { Ok(data) => data, _ => Vec::<u8>::new() }; let state_len = state.len(); if state_len == 0 { return sgx_status_t::SGX_SUCCESS } println!("Loaded sealed data {:}: {:?}", state_len, state); unsafe { if state_len <= output_buf_len { std::ptr::copy_nonoverlapping(state.as_ptr(), output_ptr, state_len); } else { panic!("State too long. 
Buffer overflow."); } std::ptr::copy_nonoverlapping(&state_len as *const usize, output_len_ptr, std::mem::size_of_val(&state_len)); } sgx_status_t::SGX_SUCCESS } fn init_enclave() -> SgxResult<SgxEnclave> { let mut launch_token: sgx_launch_token_t = [0; 1024]; let mut launch_token_updated: i32 = 0; // call sgx_create_enclave to initialize an enclave instance // Debug Support: set 2nd parameter to 1 let debug = option_env!("SGX_DEBUG").unwrap_or("1"); let mut misc_attr = sgx_misc_attribute_t {secs_attr: sgx_attributes_t {flags:0, xfrm:0}, misc_select:0}; SgxEnclave::create(ENCLAVE_FILE, if debug == "0" { 0 } else { 1 }, &mut launch_token, &mut launch_token_updated, &mut misc_attr) } #[post("/test", format = "json", data = "<contract_input>")] fn test(contract_input: Json<ContractInput>) -> JsonValue { println!("{}", ::serde_json::to_string_pretty(&*contract_input).unwrap()); let eid = get_eid(); let input_string = serde_json::to_string(&*contract_input).unwrap(); let mut return_output_buf = vec![0; ENCLAVE_OUTPUT_BUF_MAX_LEN].into_boxed_slice(); let mut output_len : usize = 0; let output_slice = &mut return_output_buf; let output_ptr = output_slice.as_mut_ptr(); let output_len_ptr = &mut output_len as *mut usize; let mut retval = sgx_status_t::SGX_SUCCESS; let result = unsafe { ecall_handle( eid, &mut retval, 0, input_string.as_ptr(), input_string.len(), output_ptr, output_len_ptr, ENCLAVE_OUTPUT_BUF_MAX_LEN ) }; match result { sgx_status_t::SGX_SUCCESS => { let output_slice = unsafe { std::slice::from_raw_parts(output_ptr, output_len) }; let output_value: serde_json::value::Value = serde_json::from_slice(output_slice).unwrap(); json!(output_value) }, _ => { println!("[-] ECALL Enclave Failed {}!", result.as_str()); json!({ "status": "error", "payload": format!("[-] ECALL Enclave Failed {}!", result.as_str()) }) } } } #[post("/init_runtime", format = "json", data = "<contract_input>")] fn init_runtime(contract_input: Json<ContractInput>) -> JsonValue { println!("{}", ::serde_json::to_string_pretty(&*contract_input).unwrap()); let eid = get_eid(); let input_string = serde_json::to_string(&*contract_input).unwrap(); let mut return_output_buf = vec![0; ENCLAVE_OUTPUT_BUF_MAX_LEN].into_boxed_slice(); let mut output_len : usize = 0; let output_slice = &mut return_output_buf; let output_ptr = output_slice.as_mut_ptr(); let output_len_ptr = &mut output_len as *mut usize; let mut retval = sgx_status_t::SGX_SUCCESS; let result = unsafe { ecall_handle( eid, &mut retval, 1, input_string.as_ptr(), input_string.len(), output_ptr, output_len_ptr, ENCLAVE_OUTPUT_BUF_MAX_LEN ) }; match result { sgx_status_t::SGX_SUCCESS => { let output_slice = unsafe { std::slice::from_raw_parts(output_ptr, output_len) }; let output_value: serde_json::value::Value = serde_json::from_slice(output_slice).unwrap(); json!(output_value) }, _ => { println!("[-] ECALL Enclave Failed {}!", result.as_str()); json!({ "status": "error", "payload": format!("[-] ECALL Enclave Failed {}!", result.as_str()) }) } } } #[post("/get_info", format = "json", data = "<contract_input>")] fn get_info(contract_input: Json<ContractInput>) -> JsonValue { println!("{}", ::serde_json::to_string_pretty(&*contract_input).unwrap()); let eid = get_eid(); let input_string = serde_json::to_string(&*contract_input).unwrap(); let mut return_output_buf = vec![0; ENCLAVE_OUTPUT_BUF_MAX_LEN].into_boxed_slice(); let mut output_len : usize = 0; let output_slice = &mut return_output_buf; let output_ptr = output_slice.as_mut_ptr(); let output_len_ptr = &mut 
output_len as *mut usize; let mut retval = sgx_status_t::SGX_SUCCESS; let result = unsafe { ecall_handle( eid, &mut retval, 2, input_string.as_ptr(), input_string.len(), output_ptr, output_len_ptr, ENCLAVE_OUTPUT_BUF_MAX_LEN ) }; match result { sgx_status_t::SGX_SUCCESS => { let output_slice = unsafe { std::slice::from_raw_parts(output_ptr, output_len) }; let output_value: serde_json::value::Value = serde_json::from_slice(output_slice).unwrap(); json!(output_value) }, _ => { println!("[-] ECALL Enclave Failed {}!", result.as_str()); json!({ "status": "error", "payload": format!("[-] ECALL Enclave Failed {}!", result.as_str()) }) } } } #[post("/get_runtime_info", format = "json", data = "<contract_input>")] fn get_runtime_info(contract_input: Json<ContractInput>) -> JsonValue { println!("{}", ::serde_json::to_string_pretty(&*contract_input).unwrap()); let eid = get_eid(); let input_string = serde_json::to_string(&*contract_input).unwrap(); let mut return_output_buf = vec![0; ENCLAVE_OUTPUT_BUF_MAX_LEN].into_boxed_slice(); let mut output_len : usize = 0; let output_slice = &mut return_output_buf; let output_ptr = output_slice.as_mut_ptr(); let output_len_ptr = &mut output_len as *mut usize; let mut retval = sgx_status_t::SGX_SUCCESS; let result = unsafe { ecall_handle( eid, &mut retval, 10, input_string.as_ptr(), input_string.len(), output_ptr, output_len_ptr, ENCLAVE_OUTPUT_BUF_MAX_LEN ) }; match result { sgx_status_t::SGX_SUCCESS => { let output_slice = unsafe { std::slice::from_raw_parts(output_ptr, output_len) }; let output_value: serde_json::value::Value = serde_json::from_slice(output_slice).unwrap(); json!(output_value) }, _ => { println!("[-] ECALL Enclave Failed {}!", result.as_str()); json!({ "status": "error", "payload": format!("[-] ECALL Enclave Failed {}!", result.as_str()) }) } } } #[post("/dump_states", format = "json", data = "<contract_input>")] fn dump_states(contract_input: Json<ContractInput>) -> JsonValue { println!("{}", ::serde_json::to_string_pretty(&*contract_input).unwrap()); let eid = get_eid(); let input_string = serde_json::to_string(&*contract_input).unwrap(); let mut return_output_buf = vec![0; ENCLAVE_OUTPUT_BUF_MAX_LEN].into_boxed_slice(); let mut output_len : usize = 0; let output_slice = &mut return_output_buf; let output_ptr = output_slice.as_mut_ptr(); let output_len_ptr = &mut output_len as *mut usize; let mut retval = sgx_status_t::SGX_SUCCESS; let result = unsafe { ecall_handle( eid, &mut retval, 3, input_string.as_ptr(), input_string.len(), output_ptr, output_len_ptr, ENCLAVE_OUTPUT_BUF_MAX_LEN ) }; match result { sgx_status_t::SGX_SUCCESS => { let output_slice = unsafe { std::slice::from_raw_parts(output_ptr, output_len) }; let output_value: serde_json::value::Value = serde_json::from_slice(output_slice).unwrap(); json!(output_value) }, _ => { println!("[-] ECALL Enclave Failed {}!", result.as_str()); json!({ "status": "error", "payload": format!("[-] ECALL Enclave Failed {}!", result.as_str()) }) } } } #[post("/load_states", format = "json", data = "<contract_input>")] fn load_states(contract_input: Json<ContractInput>) -> JsonValue { println!("{}", ::serde_json::to_string_pretty(&*contract_input).unwrap()); let eid = get_eid(); let input_string = serde_json::to_string(&*contract_input).unwrap(); let mut return_output_buf = vec![0; ENCLAVE_OUTPUT_BUF_MAX_LEN].into_boxed_slice(); let mut output_len : usize = 0; let output_slice = &mut return_output_buf; let output_ptr = output_slice.as_mut_ptr(); let output_len_ptr = &mut output_len as *mut usize; 
let mut retval = sgx_status_t::SGX_SUCCESS; let result = unsafe { ecall_handle( eid, &mut retval, 4, input_string.as_ptr(), input_string.len(), output_ptr, output_len_ptr, ENCLAVE_OUTPUT_BUF_MAX_LEN ) }; match result { sgx_status_t::SGX_SUCCESS => { let output_slice = unsafe { std::slice::from_raw_parts(output_ptr, output_len) }; let output_value: serde_json::value::Value = serde_json::from_slice(output_slice).unwrap(); json!(output_value) }, _ => { println!("[-] ECALL Enclave Failed {}!", result.as_str()); json!({ "status": "error", "payload": format!("[-] ECALL Enclave Failed {}!", result.as_str()) }) } } } #[post("/sync_header", format = "json", data = "<contract_input>")] fn sync_header(contract_input: Json<ContractInput>) -> JsonValue { println!("{}", ::serde_json::to_string_pretty(&*contract_input).unwrap()); let eid = get_eid(); let input_string = serde_json::to_string(&*contract_input).unwrap(); let mut return_output_buf = vec![0; ENCLAVE_OUTPUT_BUF_MAX_LEN].into_boxed_slice(); let mut output_len : usize = 0; let output_slice = &mut return_output_buf; let output_ptr = output_slice.as_mut_ptr(); let output_len_ptr = &mut output_len as *mut usize; let mut retval = sgx_status_t::SGX_SUCCESS; let result = unsafe { ecall_handle( eid, &mut retval, 5, input_string.as_ptr(), input_string.len(), output_ptr, output_len_ptr, ENCLAVE_OUTPUT_BUF_MAX_LEN ) }; match result { sgx_status_t::SGX_SUCCESS => { let output_slice = unsafe { std::slice::from_raw_parts(output_ptr, output_len) }; let output_value: serde_json::value::Value = serde_json::from_slice(output_slice).unwrap(); json!(output_value) }, _ => { println!("[-] ECALL Enclave Failed {}!", result.as_str()); json!({ "status": "error", "payload": format!("[-] ECALL Enclave Failed {}!", result.as_str()) }) } } } #[post("/query", format = "json", data = "<contract_input>")] fn query(contract_input: Json<ContractInput>) -> JsonValue { println!("{}", ::serde_json::to_string_pretty(&*contract_input).unwrap()); let eid = get_eid(); let input_string = serde_json::to_string(&*contract_input).unwrap(); let mut return_output_buf = vec![0; ENCLAVE_OUTPUT_BUF_MAX_LEN].into_boxed_slice(); let mut output_len : usize = 0; let output_slice = &mut return_output_buf; let output_ptr = output_slice.as_mut_ptr(); let output_len_ptr = &mut output_len as *mut usize; let mut retval = sgx_status_t::SGX_SUCCESS; let result = unsafe { ecall_handle( eid, &mut retval, 6, input_string.as_ptr(), input_string.len(), output_ptr, output_len_ptr, ENCLAVE_OUTPUT_BUF_MAX_LEN ) }; match result { sgx_status_t::SGX_SUCCESS => { let output_slice = unsafe { std::slice::from_raw_parts(output_ptr, output_len) }; let output_value: serde_json::value::Value = serde_json::from_slice(output_slice).unwrap(); json!(output_value) }, _ => { println!("[-] ECALL Enclave Failed {}!", result.as_str()); json!({ "status": "error", "payload": format!("[-] ECALL Enclave Failed {}!", result.as_str()) }) } } } #[post("/dispatch_block", format = "json", data = "<contract_input>")] fn dispatch_block(contract_input: Json<ContractInput>) -> JsonValue { println!("{}", ::serde_json::to_string_pretty(&*contract_input).unwrap()); let eid = get_eid(); let input_string = serde_json::to_string(&*contract_input).unwrap(); let mut return_output_buf = vec![0; ENCLAVE_OUTPUT_BUF_MAX_LEN].into_boxed_slice(); let mut output_len : usize = 0; let output_slice = &mut return_output_buf; let output_ptr = output_slice.as_mut_ptr(); let output_len_ptr = &mut output_len as *mut usize; let mut retval = sgx_status_t::SGX_SUCCESS; 
let result = unsafe { ecall_handle( eid, &mut retval, 7, input_string.as_ptr(), input_string.len(), output_ptr, output_len_ptr, ENCLAVE_OUTPUT_BUF_MAX_LEN ) }; match result { sgx_status_t::SGX_SUCCESS => { let output_slice = unsafe { std::slice::from_raw_parts(output_ptr, output_len) }; let output_value: serde_json::value::Value = serde_json::from_slice(output_slice).unwrap(); json!(output_value) }, _ => { println!("[-] ECALL Enclave Failed {}!", result.as_str()); json!({ "status": "error", "payload": format!("[-] ECALL Enclave Failed {}!", result.as_str()) }) } } } #[post("/set", format = "json", data = "<contract_input>")] fn set(contract_input: Json<ContractInput>) -> JsonValue { println!("{}", ::serde_json::to_string_pretty(&*contract_input).unwrap()); let eid = get_eid(); let input_string = serde_json::to_string(&*contract_input).unwrap(); let mut return_output_buf = vec![0; ENCLAVE_OUTPUT_BUF_MAX_LEN].into_boxed_slice(); let mut output_len : usize = 0; let output_slice = &mut return_output_buf; let output_ptr = output_slice.as_mut_ptr(); let output_len_ptr = &mut output_len as *mut usize; let mut retval = sgx_status_t::SGX_SUCCESS; let result = unsafe { ecall_handle( eid, &mut retval, 21, input_string.as_ptr(), input_string.len(), output_ptr, output_len_ptr, ENCLAVE_OUTPUT_BUF_MAX_LEN ) }; match result { sgx_status_t::SGX_SUCCESS => { let output_slice = unsafe { std::slice::from_raw_parts(output_ptr, output_len) }; let output_value: serde_json::value::Value = serde_json::from_slice(output_slice).unwrap(); json!(output_value) }, _ => { println!("[-] ECALL Enclave Failed {}!", result.as_str()); json!({ "status": "error", "payload": format!("[-] ECALL Enclave Failed {}!", result.as_str()) }) } } } #[post("/get", format = "json", data = "<contract_input>")] fn get(contract_input: Json<ContractInput>) -> JsonValue { println!("{}", ::serde_json::to_string_pretty(&*contract_input).unwrap()); let eid = get_eid(); let input_string = serde_json::to_string(&*contract_input).unwrap(); let mut return_output_buf = vec![0; ENCLAVE_OUTPUT_BUF_MAX_LEN].into_boxed_slice(); let mut output_len : usize = 0; let output_slice = &mut return_output_buf; let output_ptr = output_slice.as_mut_ptr(); let output_len_ptr = &mut output_len as *mut usize; let mut retval = sgx_status_t::SGX_SUCCESS; let result = unsafe { ecall_handle( eid, &mut retval, 22, input_string.as_ptr(), input_string.len(), output_ptr, output_len_ptr, ENCLAVE_OUTPUT_BUF_MAX_LEN ) }; match result { sgx_status_t::SGX_SUCCESS => { let output_slice = unsafe { std::slice::from_raw_parts(output_ptr, output_len) }; let output_value: serde_json::value::Value = serde_json::from_slice(output_slice).unwrap(); json!(output_value) }, _ => { println!("[-] ECALL Enclave Failed {}!", result.as_str()); json!({ "status": "error", "payload": format!("[-] ECALL Enclave Failed {}!", result.as_str()) }) } } } #[post("/test_ink", format = "json", data = "<contract_input>")] fn test_ink(contract_input: Json<ContractInput>) -> JsonValue { println!("{}", ::serde_json::to_string_pretty(&*contract_input).unwrap()); let eid = get_eid(); let input_string = serde_json::to_string(&*contract_input).unwrap(); let mut return_output_buf = vec![0; ENCLAVE_OUTPUT_BUF_MAX_LEN].into_boxed_slice(); let mut output_len : usize = 0; let output_slice = &mut return_output_buf; let output_ptr = output_slice.as_mut_ptr(); let output_len_ptr = &mut output_len as *mut usize; let mut retval = sgx_status_t::SGX_SUCCESS; let result = unsafe { ecall_handle( eid, &mut retval, 100, 
input_string.as_ptr(), input_string.len(), output_ptr, output_len_ptr, ENCLAVE_OUTPUT_BUF_MAX_LEN ) }; match result { sgx_status_t::SGX_SUCCESS => { let output_slice = unsafe { std::slice::from_raw_parts(output_ptr, output_len) }; let output_value: serde_json::value::Value = serde_json::from_slice(output_slice).unwrap(); json!(output_value) }, _ => { println!("[-] ECALL Enclave Failed {}!", result.as_str()); json!({ "status": "error", "payload": format!("[-] ECALL Enclave Failed {}!", result.as_str()) }) } } } fn cors_options() -> CorsOptions { let allowed_origins = AllowedOrigins::all(); let allowed_methods: AllowedMethods = vec![Method::Get, Method::Post].into_iter().map(From::from).collect(); // You can also deserialize this rocket_cors::CorsOptions { allowed_origins, allowed_methods, allowed_headers: AllowedHeaders::all(), allow_credentials: true, ..Default::default() } } fn rocket() -> rocket::Rocket { let mut server = rocket::ignite() .mount("/", routes![ test, init_runtime, get_info, dump_states, load_states, sync_header, dispatch_block, query, set, get, get_runtime_info, test_ink]); if *ALLOW_CORS { println!("Allow CORS"); server .mount("/", rocket_cors::catch_all_options_routes()) // mount the catch all routes .attach(cors_options().to_cors().expect("To not fail")) .manage(cors_options().to_cors().expect("To not fail")) } else { server } } fn main() { env::set_var("RUST_BACKTRACE", "1"); env::set_var("ROCKET_ENV", "dev"); let enclave = match init_enclave() { Ok(r) => { println!("[+] Init Enclave Successful {}!", r.geteid()); r }, Err(x) => { panic!("[-] Init Enclave Failed {}!", x.as_str()); }, }; ENCLAVE.write().unwrap().replace(enclave); let eid = get_eid(); let mut retval = sgx_status_t::SGX_SUCCESS; let result = unsafe { ecall_init(eid, &mut retval) }; if result != sgx_status_t::SGX_SUCCESS { panic!("Initialize Failed"); } rocket().launch(); println!("Quit signal received, destroying enclave..."); destroy_enclave(); std::process::exit(0); }
32.731504
112
0.594262
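Every route in the file above repeats the same ~30 lines of buffer setup and result marshalling, differing only in the action byte passed to `ecall_handle`. Below is a sketch of the obvious refactor into one shared wrapper; the FFI call is stubbed with a placeholder (`fake_ecall` and its behavior are assumptions, not the SGX API) so the shape compiles on its own:
```
const OUTPUT_BUF_MAX_LEN: usize = 2 * 2048 * 1024;

// Stand-in for the unsafe `ecall_handle` FFI call.
fn fake_ecall(action: u8, input: &[u8], output: &mut Vec<u8>) -> Result<(), String> {
    output.extend_from_slice(format!("action={} len={}", action, input.len()).as_bytes());
    Ok(())
}

/// One shared wrapper replaces the bodies of `test`, `init_runtime`,
/// `get_info`, `dump_states`, ...; each route then reduces to a single
/// call such as `handle(0, input)` or `handle(1, input)`.
fn handle(action: u8, input: &str) -> Result<String, String> {
    let mut out = Vec::with_capacity(OUTPUT_BUF_MAX_LEN.min(4096));
    fake_ecall(action, input.as_bytes(), &mut out)?;
    String::from_utf8(out).map_err(|e| e.to_string())
}

fn main() {
    assert_eq!(handle(0, "{}").unwrap(), "action=0 len=2");
}
```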
08f3728593cfaa1785bf740fbe3be146879794f9
13,907
use crate::helpers::connection::{ build_connection_create_cmd, build_connection_id, node_type_name_alternative_pairs, node_type_name_pairs, setup_repo_with_all_node_types, NON_VALID_SELECTOR, NON_VALID_TYPE, SELECTOR, SOURCE_TYPE, TRANSFORMATION_TYPE, }; use crate::helpers::source::SOURCE_NAME; use crate::helpers::transformation::TRANSFORMATION_NAME; use assert_cmd::Command; use predicates::prelude::predicate; #[test] fn help_available() { let mut cmd = Command::cargo_bin("holium").unwrap(); let assert = cmd.arg("connection").arg("create").arg("--help").assert(); // Check success assert.success(); } #[test] fn cannot_create_connection_without_any_positional_arg() { // initialize a repository let repo = setup_repo_with_all_node_types(); let repo_path = repo.path(); // try to create connection without positional argument let mut cmd = Command::cargo_bin("holium").unwrap(); let assert = cmd .current_dir(repo_path) .arg("connection") .arg("create") .assert(); // check output assert .failure() .stderr(predicate::str::contains( "required arguments were not provided", )) .stderr(predicate::str::contains("--tail-type")) .stderr(predicate::str::contains("--tail-name")) .stderr(predicate::str::contains("--tail-selector")) .stderr(predicate::str::contains("--head-type")) .stderr(predicate::str::contains("--head-name")) .stderr(predicate::str::contains("--head-selector")); } #[test] fn cannot_create_connection_without_tail_type() { // initialize a repository let repo = setup_repo_with_all_node_types(); let repo_path = repo.path(); // try to create connection without tail type let mut cmd = Command::cargo_bin("holium").unwrap(); let assert = cmd .current_dir(repo_path) .arg("connection") .arg("create") .arg("--tail-name") .arg(SOURCE_NAME) .arg("--tail-selector") .arg(SELECTOR) .arg("--head-type") .arg(TRANSFORMATION_TYPE) .arg("--head-name") .arg(TRANSFORMATION_NAME) .arg("--head-selector") .arg(SELECTOR) .assert(); // check output assert .failure() .stderr(predicate::str::contains( "required arguments were not provided", )) .stderr(predicate::str::contains("--tail-type")); } #[test] fn cannot_create_connection_without_tail_name() { // initialize a repository let repo = setup_repo_with_all_node_types(); let repo_path = repo.path(); // try to create connection without tail name let mut cmd = Command::cargo_bin("holium").unwrap(); let assert = cmd .current_dir(repo_path) .arg("connection") .arg("create") .arg("--tail-type") .arg(SOURCE_TYPE) .arg("--tail-selector") .arg(SELECTOR) .arg("--head-type") .arg(TRANSFORMATION_TYPE) .arg("--head-name") .arg(TRANSFORMATION_NAME) .arg("--head-selector") .arg(SELECTOR) .assert(); // check output assert .failure() .stderr(predicate::str::contains( "required arguments were not provided", )) .stderr(predicate::str::contains("--tail-name")); } #[test] fn cannot_create_connection_without_tail_selector() { // initialize a repository let repo = setup_repo_with_all_node_types(); let repo_path = repo.path(); // try to create connection without tail selector let mut cmd = Command::cargo_bin("holium").unwrap(); let assert = cmd .current_dir(repo_path) .arg("connection") .arg("create") .arg("--tail-type") .arg(SOURCE_TYPE) .arg("--tail-name") .arg(SOURCE_NAME) .arg("--head-type") .arg(TRANSFORMATION_TYPE) .arg("--head-name") .arg(TRANSFORMATION_NAME) .arg("--head-selector") .arg(SELECTOR) .assert(); // check output assert .failure() .stderr(predicate::str::contains( "required arguments were not provided", )) .stderr(predicate::str::contains("--tail-selector")); } #[test] fn 
cannot_create_connection_without_head_type() {
    // initialize a repository
    let repo = setup_repo_with_all_node_types();
    let repo_path = repo.path();
    // try to create connection without head type
    let mut cmd = Command::cargo_bin("holium").unwrap();
    let assert = cmd
        .current_dir(repo_path)
        .arg("connection")
        .arg("create")
        .arg("--tail-type")
        .arg(SOURCE_TYPE)
        .arg("--tail-name")
        .arg(SOURCE_NAME)
        .arg("--tail-selector")
        .arg(SELECTOR)
        .arg("--head-name")
        .arg(TRANSFORMATION_NAME)
        .arg("--head-selector")
        .arg(SELECTOR)
        .assert();
    // check output
    assert
        .failure()
        .stderr(predicate::str::contains(
            "required arguments were not provided",
        ))
        .stderr(predicate::str::contains("--head-type"));
}

#[test]
fn cannot_create_connection_without_head_name() {
    // initialize a repository
    let repo = setup_repo_with_all_node_types();
    let repo_path = repo.path();
    // try to create connection without head name
    let mut cmd = Command::cargo_bin("holium").unwrap();
    let assert = cmd
        .current_dir(repo_path)
        .arg("connection")
        .arg("create")
        .arg("--tail-type")
        .arg(SOURCE_TYPE)
        .arg("--tail-name")
        .arg(SOURCE_NAME)
        .arg("--tail-selector")
        .arg(SELECTOR)
        .arg("--head-type")
        .arg(TRANSFORMATION_TYPE)
        .arg("--head-selector")
        .arg(SELECTOR)
        .assert();
    // check output
    assert
        .failure()
        .stderr(predicate::str::contains(
            "required arguments were not provided",
        ))
        .stderr(predicate::str::contains("--head-name"));
}

#[test]
fn cannot_create_connection_without_head_selector() {
    // initialize a repository
    let repo = setup_repo_with_all_node_types();
    let repo_path = repo.path();
    // try to create connection without head selector
    let mut cmd = Command::cargo_bin("holium").unwrap();
    let assert = cmd
        .current_dir(repo_path)
        .arg("connection")
        .arg("create")
        .arg("--tail-type")
        .arg(SOURCE_TYPE)
        .arg("--tail-name")
        .arg(SOURCE_NAME)
        .arg("--tail-selector")
        .arg(SELECTOR)
        .arg("--head-type")
        .arg(TRANSFORMATION_TYPE)
        .arg("--head-name")
        .arg(TRANSFORMATION_NAME)
        .assert();
    // check output
    assert
        .failure()
        .stderr(predicate::str::contains(
            "required arguments were not provided",
        ))
        .stderr(predicate::str::contains("--head-selector"));
}

#[test]
fn cannot_create_connection_with_non_valid_tail_type() {
    // initialize a repository
    let repo = setup_repo_with_all_node_types();
    let repo_path = repo.path();
    // try to create connection with non valid tail type
    let assert = build_connection_create_cmd(
        repo_path,
        NON_VALID_TYPE,
        SOURCE_NAME,
        SELECTOR,
        TRANSFORMATION_TYPE,
        TRANSFORMATION_NAME,
        SELECTOR,
    );
    // check output
    assert.failure().stderr(predicate::str::contains(format!(
        "\'{}\' isn\'t a valid value for \'--tail-type <TYPE>\'",
        NON_VALID_TYPE
    )));
}

#[test]
fn cannot_create_connection_with_non_valid_head_type() {
    // initialize a repository
    let repo = setup_repo_with_all_node_types();
    let repo_path = repo.path();
    // try to create connection with non valid head type
    let assert = build_connection_create_cmd(
        repo_path,
        SOURCE_TYPE,
        SOURCE_NAME,
        SELECTOR,
        NON_VALID_TYPE,
        TRANSFORMATION_NAME,
        SELECTOR,
    );
    // check output
    assert.failure().stderr(predicate::str::contains(format!(
        "\'{}\' isn\'t a valid value for \'--head-type <TYPE>\'",
        NON_VALID_TYPE
    )));
}

#[test]
fn cannot_create_connection_with_non_existent_tail_node() {
    // initialize a repository
    let repo = setup_repo_with_all_node_types();
    let repo_path = repo.path();
    // create vec of all possible types and some non existent names
    let node_type_name_alternative_pairs = node_type_name_alternative_pairs();
    for (node_type, node_name) in node_type_name_alternative_pairs {
        // try to create connection with non existing tail node
        let assert = build_connection_create_cmd(
            repo_path,
            node_type,
            node_name,
            SELECTOR,
            TRANSFORMATION_TYPE,
            TRANSFORMATION_NAME,
            SELECTOR,
        );
        // check output
        assert
            .failure()
            .stderr(predicate::str::contains(format!(
                "no {} node found with name",
                node_type
            )))
            .stderr(predicate::str::contains(node_name));
    }
}

#[test]
fn cannot_create_connection_with_non_existent_head_node() {
    // initialize a repository
    let repo = setup_repo_with_all_node_types();
    let repo_path = repo.path();
    // create vec of all possible types and some non existent names
    let node_type_name_alternative_pairs = node_type_name_alternative_pairs();
    for (node_type, node_name) in node_type_name_alternative_pairs {
        // try to create connection with non existing head node
        let assert = build_connection_create_cmd(
            repo_path,
            SOURCE_TYPE,
            SOURCE_NAME,
            SELECTOR,
            node_type,
            node_name,
            SELECTOR,
        );
        // check output
        assert
            .failure()
            .stderr(predicate::str::contains(format!(
                "no {} node found with name",
                node_type
            )))
            .stderr(predicate::str::contains(node_name));
    }
}

#[test]
fn cannot_create_connection_with_non_valid_tail_selector() {
    // initialize a repository
    let repo = setup_repo_with_all_node_types();
    let repo_path = repo.path();
    // try to create connection with non valid tail selector
    let assert = build_connection_create_cmd(
        repo_path,
        SOURCE_TYPE,
        SOURCE_NAME,
        NON_VALID_SELECTOR,
        TRANSFORMATION_TYPE,
        TRANSFORMATION_NAME,
        SELECTOR,
    );
    // check output
    assert
        .failure()
        .stderr(predicate::str::contains("invalid holium selector"));
}

#[test]
fn cannot_create_connection_with_non_valid_head_selector() {
    // initialize a repository
    let repo = setup_repo_with_all_node_types();
    let repo_path = repo.path();
    // try to create connection with non valid head selector
    let assert = build_connection_create_cmd(
        repo_path,
        SOURCE_TYPE,
        SOURCE_NAME,
        SELECTOR,
        TRANSFORMATION_TYPE,
        TRANSFORMATION_NAME,
        NON_VALID_SELECTOR,
    );
    // check output
    assert
        .failure()
        .stderr(predicate::str::contains("invalid holium selector"));
}

#[test]
fn cannot_create_connection_with_non_parsable_tail_selector() {
    // initialize a repository
    let repo = setup_repo_with_all_node_types();
    let repo_path = repo.path();
    // try to create connection with non parsable tail selector
    let assert = build_connection_create_cmd(
        repo_path,
        SOURCE_TYPE,
        SOURCE_NAME,
        "",
        TRANSFORMATION_TYPE,
        TRANSFORMATION_NAME,
        SELECTOR,
    );
    // check output
    assert.failure().stderr(predicate::str::contains(
        "invalid string can not be parsed to json",
    ));
}

#[test]
fn cannot_create_connection_with_non_parsable_head_selector() {
    // initialize a repository
    let repo = setup_repo_with_all_node_types();
    let repo_path = repo.path();
    // try to create connection with non parsable head selector
    let assert = build_connection_create_cmd(
        repo_path,
        SOURCE_TYPE,
        SOURCE_NAME,
        SELECTOR,
        TRANSFORMATION_TYPE,
        TRANSFORMATION_NAME,
        "",
    );
    // check output
    assert.failure().stderr(predicate::str::contains(
        "invalid string can not be parsed to json",
    ));
}

#[test]
fn can_create_connection() {
    // initialize a repository
    let repo = setup_repo_with_all_node_types();
    let repo_path = repo.path();
    // create vec of all possible types and their existing names
    let node_type_name_pairs = node_type_name_pairs();
    for (tail_node_type, tail_node_name) in node_type_name_pairs.iter() {
        for (head_node_type, head_node_name) in node_type_name_pairs.iter() {
            if tail_node_name != head_node_name {
                // try to create connection
                let assert = build_connection_create_cmd(
                    repo_path,
                    tail_node_type,
                    tail_node_name,
                    SELECTOR,
                    head_node_type,
                    head_node_name,
                    SELECTOR,
                );
                // check output
                assert
                    .success()
                    .stdout(predicate::str::contains("new object created"))
                    .stdout(predicate::str::contains(
                        build_connection_id(
                            tail_node_type,
                            tail_node_name,
                            head_node_type,
                            head_node_name,
                        )
                        .as_str(),
                    ));
            }
        }
    }
}
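// For reference, the `build_connection_create_cmd` helper used throughout the
// tests above can be sketched as follows. This is a hypothetical reconstruction
// from its call sites (seven arguments, returning an `assert_cmd` `Assert`);
// the actual definition lives elsewhere in this test suite.
#[allow(dead_code)]
fn build_connection_create_cmd_sketch(
    repo_path: &std::path::Path,
    tail_type: &str,
    tail_name: &str,
    tail_selector: &str,
    head_type: &str,
    head_name: &str,
    head_selector: &str,
) -> assert_cmd::assert::Assert {
    let mut cmd = Command::cargo_bin("holium").unwrap();
    cmd.current_dir(repo_path)
        .arg("connection")
        .arg("create")
        .arg("--tail-type")
        .arg(tail_type)
        .arg("--tail-name")
        .arg(tail_name)
        .arg("--tail-selector")
        .arg(tail_selector)
        .arg("--head-type")
        .arg(head_type)
        .arg("--head-name")
        .arg(head_name)
        .arg("--head-selector")
        .arg(head_selector)
        .assert()
}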
use r2d2::{Pool, PooledConnection};
use r2d2_redis::RedisConnectionManager;
use redis::Connection;
use rocket::{
    http::Status,
    request::{self, FromRequest},
    Outcome, Request, State,
};
use std::{
    env,
    ops::{Deref, DerefMut},
};

pub type RedisPool = Pool<RedisConnectionManager>;

lazy_static! {
    static ref REDIS_URL: String = env::var("REDIS_URL").expect("missing REDIS_URL env var");
    static ref SIDEKIQ_URL: String =
        env::var("SIDEKIQ_URL").expect("missing SIDEKIQ_URL env var");
}

pub fn init_pool() -> RedisPool {
    pool(REDIS_URL.as_str())
}

pub fn init_sidekiq() -> sidekiq::Client {
    sidekiq::Client::new(pool(SIDEKIQ_URL.as_str()), Default::default())
}

fn pool(path: &str) -> RedisPool {
    let manager = RedisConnectionManager::new(path).expect("could not connect to redis");
    Pool::new(manager).expect("redis pool")
}

pub struct Redis(pub PooledConnection<RedisConnectionManager>);

impl<'a, 'r> FromRequest<'a, 'r> for Redis {
    type Error = ();

    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
        let pool: State<RedisPool> = request.guard()?;
        match pool.get() {
            Ok(conn) => Outcome::Success(Redis(conn)),
            Err(_) => Outcome::Failure((Status::ServiceUnavailable, ())),
        }
    }
}

impl Deref for Redis {
    type Target = Connection;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for Redis {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
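// A minimal usage sketch (hypothetical route, assuming Rocket 0.4 with the
// codegen macros in scope and the pool registered via
// `rocket::ignite().manage(init_pool())`): the `Redis` guard resolves from
// managed state, and `DerefMut` exposes the raw `redis::Connection`.
#[get("/redis-health")]
fn redis_health(mut redis: Redis) -> &'static str {
    // Deref coercion turns `&mut Redis` into `&mut redis::Connection`.
    let _conn: &mut Connection = &mut redis;
    "ok"
}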
use crate::{ ancestors::Ancestors, contains::Contains, inline_spl_token_v2_0::{self, SPL_TOKEN_ACCOUNT_MINT_OFFSET, SPL_TOKEN_ACCOUNT_OWNER_OFFSET}, secondary_index::*, }; use bv::BitVec; use log::*; use ouroboros::self_referencing; use solana_measure::measure::Measure; use solana_sdk::{ clock::{BankId, Slot}, pubkey::{Pubkey, PUBKEY_BYTES}, }; use std::{ collections::{ btree_map::{self, BTreeMap, Entry}, HashSet, }, ops::{ Bound, Bound::{Excluded, Included, Unbounded}, Range, RangeBounds, }, sync::{ atomic::{AtomicU64, Ordering}, Arc, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard, }, }; use thiserror::Error; pub const ITER_BATCH_SIZE: usize = 1000; pub type ScanResult<T> = Result<T, ScanError>; pub type SlotList<T> = Vec<(Slot, T)>; pub type SlotSlice<'s, T> = &'s [(Slot, T)]; pub type RefCount = u64; pub type AccountMap<K, V> = BTreeMap<K, V>; type AccountMapEntry<T> = Arc<AccountMapEntryInner<T>>; pub trait IsCached { fn is_cached(&self) -> bool; } impl IsCached for bool { fn is_cached(&self) -> bool { false } } impl IsCached for u64 { fn is_cached(&self) -> bool { false } } #[derive(Error, Debug, PartialEq)] pub enum ScanError { #[error("Node detected it replayed bad version of slot {slot:?} with id {bank_id:?}, thus the scan on said slot was aborted")] SlotRemoved { slot: Slot, bank_id: BankId }, } enum ScanTypes<R: RangeBounds<Pubkey>> { Unindexed(Option<R>), Indexed(IndexKey), } #[derive(Debug, Clone, Copy)] pub enum IndexKey { ProgramId(Pubkey), SplTokenMint(Pubkey), SplTokenOwner(Pubkey), } #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum AccountIndex { ProgramId, SplTokenMint, SplTokenOwner, } #[derive(Debug, PartialEq, Eq, Clone)] pub struct AccountSecondaryIndexesIncludeExclude { pub exclude: bool, pub keys: HashSet<Pubkey>, } #[derive(Debug, Default, Clone)] pub struct AccountSecondaryIndexes { pub keys: Option<AccountSecondaryIndexesIncludeExclude>, pub indexes: HashSet<AccountIndex>, } impl AccountSecondaryIndexes { pub fn is_empty(&self) -> bool { self.indexes.is_empty() } pub fn contains(&self, index: &AccountIndex) -> bool { self.indexes.contains(index) } pub fn include_key(&self, key: &Pubkey) -> bool { match &self.keys { Some(options) => options.exclude ^ options.keys.contains(key), None => true, // include all keys } } } #[derive(Debug)] pub struct AccountMapEntryInner<T> { ref_count: AtomicU64, pub slot_list: RwLock<SlotList<T>>, } impl<T> AccountMapEntryInner<T> { pub fn ref_count(&self) -> u64 { self.ref_count.load(Ordering::Relaxed) } } pub enum AccountIndexGetResult<'a, T: 'static> { Found(ReadAccountMapEntry<T>, usize), NotFoundOnFork, Missing(AccountMapsReadLock<'a, T>), } #[self_referencing] pub struct ReadAccountMapEntry<T: 'static> { owned_entry: AccountMapEntry<T>, #[borrows(owned_entry)] #[covariant] slot_list_guard: RwLockReadGuard<'this, SlotList<T>>, } impl<T: Clone> ReadAccountMapEntry<T> { pub fn from_account_map_entry(account_map_entry: AccountMapEntry<T>) -> Self { ReadAccountMapEntryBuilder { owned_entry: account_map_entry, slot_list_guard_builder: |lock| lock.slot_list.read().unwrap(), } .build() } pub fn slot_list(&self) -> &SlotList<T> { &*self.borrow_slot_list_guard() } pub fn ref_count(&self) -> &AtomicU64 { &self.borrow_owned_entry().ref_count } pub fn unref(&self) { self.ref_count().fetch_sub(1, Ordering::Relaxed); } pub fn addref(&self) { self.ref_count().fetch_add(1, Ordering::Relaxed); } } #[self_referencing] pub struct WriteAccountMapEntry<T: 'static> { owned_entry: AccountMapEntry<T>, #[borrows(owned_entry)] #[covariant] 
slot_list_guard: RwLockWriteGuard<'this, SlotList<T>>, } impl<T: 'static + Clone + IsCached> WriteAccountMapEntry<T> { pub fn from_account_map_entry(account_map_entry: AccountMapEntry<T>) -> Self { WriteAccountMapEntryBuilder { owned_entry: account_map_entry, slot_list_guard_builder: |lock| lock.slot_list.write().unwrap(), } .build() } pub fn slot_list(&mut self) -> &SlotList<T> { &*self.borrow_slot_list_guard() } pub fn slot_list_mut<RT>( &mut self, user: impl for<'this> FnOnce(&mut RwLockWriteGuard<'this, SlotList<T>>) -> RT, ) -> RT { self.with_slot_list_guard_mut(user) } pub fn ref_count(&self) -> &AtomicU64 { &self.borrow_owned_entry().ref_count } // create an entry that is equivalent to this process: // 1. new empty (refcount=0, slot_list={}) // 2. update(slot, account_info) // This code is called when the first entry [ie. (slot,account_info)] for a pubkey is inserted into the index. pub fn new_entry_after_update(slot: Slot, account_info: T) -> AccountMapEntry<T> { let ref_count = if account_info.is_cached() { 0 } else { 1 }; Arc::new(AccountMapEntryInner { ref_count: AtomicU64::new(ref_count), slot_list: RwLock::new(vec![(slot, account_info)]), }) } // Try to update an item in the slot list the given `slot` If an item for the slot // already exists in the list, remove the older item, add it to `reclaims`, and insert // the new item. pub fn update(&mut self, slot: Slot, account_info: T, reclaims: &mut SlotList<T>) { let mut addref = !account_info.is_cached(); self.slot_list_mut(|list| { // find other dirty entries from the same slot for list_index in 0..list.len() { let (s, previous_update_value) = &list[list_index]; if *s == slot { addref = addref && previous_update_value.is_cached(); let mut new_item = (slot, account_info); std::mem::swap(&mut new_item, &mut list[list_index]); reclaims.push(new_item); list[(list_index + 1)..] .iter() .for_each(|item| assert!(item.0 != slot)); return; // this returns from self.slot_list_mut above } } // if we make it here, we did not find the slot in the list list.push((slot, account_info)); }); if addref { // If it's the first non-cache insert, also bump the stored ref count self.ref_count().fetch_add(1, Ordering::Relaxed); } } } #[derive(Debug, Default, AbiExample, Clone)] pub struct RollingBitField { max_width: u64, min: u64, max: u64, // exclusive bits: BitVec, count: usize, // These are items that are true and lower than min. // They would cause us to exceed max_width if we stored them in our bit field. // We only expect these items in conditions where there is some other bug in the system // or in testing when large ranges are created. excess: HashSet<u64>, } impl PartialEq<RollingBitField> for RollingBitField { fn eq(&self, other: &Self) -> bool { // 2 instances could have different internal data for the same values, // so we have to compare data. self.len() == other.len() && { for item in self.get_all() { if !other.contains(&item) { return false; } } true } } } // functionally similar to a hashset // Relies on there being a sliding window of key values. The key values continue to increase. // Old key values are removed from the lesser values and do not accumulate. 
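// For example (hypothetical values): with max_width = 4, live keys 11..=13
// occupy bits 3, 0, 1 (addressed by key % max_width). Once 10 has been
// removed, inserting 14 still fits the window (15 - 11 <= 4) and lands on
// bit 2, the slot that 10 previously occupied.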
impl RollingBitField { pub fn new(max_width: u64) -> Self { assert!(max_width > 0); assert!(max_width.is_power_of_two()); // power of 2 to make dividing a shift let bits = BitVec::new_fill(false, max_width); Self { max_width, bits, count: 0, min: 0, max: 0, excess: HashSet::new(), } } // find the array index fn get_address(&self, key: &u64) -> u64 { key % self.max_width } pub fn range_width(&self) -> u64 { // note that max isn't updated on remove, so it can be above the current max self.max - self.min } pub fn min(&self) -> Option<u64> { if self.is_empty() { None } else if self.excess.is_empty() { Some(self.min) } else { let mut min = if self.all_items_in_excess() { u64::MAX } else { self.min }; for item in &self.excess { min = std::cmp::min(min, *item); } Some(min) } } pub fn insert(&mut self, key: u64) { let mut bits_empty = self.count == 0 || self.all_items_in_excess(); let update_bits = if bits_empty { true // nothing in bits, so in range } else if key < self.min { // bits not empty and this insert is before min, so add to excess if self.excess.insert(key) { self.count += 1; } false } else if key < self.max { true // fits current bit field range } else { // key is >= max let new_max = key + 1; loop { let new_width = new_max.saturating_sub(self.min); if new_width <= self.max_width { // this key will fit the max range break; } // move the min item from bits to excess and then purge from min to make room for this new max let inserted = self.excess.insert(self.min); assert!(inserted); let key = self.min; let address = self.get_address(&key); self.bits.set(address, false); self.purge(&key); if self.all_items_in_excess() { // if we moved the last existing item to excess, then we are ready to insert the new item in the bits bits_empty = true; break; } } true // moved things to excess if necessary, so update bits with the new entry }; if update_bits { let address = self.get_address(&key); let value = self.bits.get(address); if !value { self.bits.set(address, true); if bits_empty { self.min = key; self.max = key + 1; } else { self.min = std::cmp::min(self.min, key); self.max = std::cmp::max(self.max, key + 1); assert!( self.min + self.max_width >= self.max, "min: {}, max: {}, max_width: {}", self.min, self.max, self.max_width ); } self.count += 1; } } } pub fn remove(&mut self, key: &u64) -> bool { if key >= &self.min { // if asked to remove something bigger than max, then no-op if key < &self.max { let address = self.get_address(key); let get = self.bits.get(address); if get { self.count -= 1; self.bits.set(address, false); self.purge(key); } get } else { false } } else { // asked to remove something < min. would be in excess if it exists let remove = self.excess.remove(key); if remove { self.count -= 1; } remove } } fn all_items_in_excess(&self) -> bool { self.excess.len() == self.count } // after removing 'key' where 'key' = min, make min the correct new min value fn purge(&mut self, key: &u64) { if self.count > 0 && !self.all_items_in_excess() { if key == &self.min { let start = self.min + 1; // min just got removed for key in start..self.max { if self.contains_assume_in_range(&key) { self.min = key; break; } } } } else { // The idea is that there are no items in the bitfield anymore. // But, there MAY be items in excess. The model works such that items < min go into excess. // So, after purging all items from bitfield, we hold max to be what it previously was, but set min to max. // Thus, if we lookup >= max, answer is always false without having to look in excess. 
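// (Concrete example, hypothetical values: items = {5} with max_width = 16.
// Removing 5 leaves count == 0, so this branch sets min = max = 6; a later
// insert(20) then takes the empty-bits path and resets min = 20, max = 21.)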
// If we changed max here to 0, we would lose the ability to know the range of items in excess (if any). // So, now, with min updated = max: // If we lookup < max, then we first check min. // If >= min, then we look in bitfield. // Otherwise, we look in excess since the request is < min. // So, resetting min like this after a remove results in the correct behavior for the model. // Later, if we insert and there are 0 items total (excess + bitfield), then we reset min/max to reflect the new item only. self.min = self.max; } } fn contains_assume_in_range(&self, key: &u64) -> bool { // the result may be aliased. Caller is responsible for determining key is in range. let address = self.get_address(key); self.bits.get(address) } // This is the 99% use case. // This needs be fast for the most common case of asking for key >= min. pub fn contains(&self, key: &u64) -> bool { if key < &self.max { if key >= &self.min { // in the bitfield range self.contains_assume_in_range(key) } else { self.excess.contains(key) } } else { false } } pub fn len(&self) -> usize { self.count } pub fn is_empty(&self) -> bool { self.len() == 0 } pub fn clear(&mut self) { let mut n = Self::new(self.max_width); std::mem::swap(&mut n, self); } pub fn max(&self) -> u64 { self.max } pub fn get_all(&self) -> Vec<u64> { let mut all = Vec::with_capacity(self.count); self.excess.iter().for_each(|slot| all.push(*slot)); for key in self.min..self.max { if self.contains_assume_in_range(&key) { all.push(key); } } all } } #[derive(Debug)] pub struct RootsTracker { roots: RollingBitField, max_root: Slot, uncleaned_roots: HashSet<Slot>, previous_uncleaned_roots: HashSet<Slot>, } impl Default for RootsTracker { fn default() -> Self { // we expect to keep a rolling set of 400k slots around at a time // 4M gives us plenty of extra(?!) room to handle a width 10x what we should need. 
// cost is 4M bits of memory, which is .5MB RootsTracker::new(4194304) } } impl RootsTracker { pub fn new(max_width: u64) -> Self { Self { roots: RollingBitField::new(max_width), max_root: 0, uncleaned_roots: HashSet::new(), previous_uncleaned_roots: HashSet::new(), } } pub fn min_root(&self) -> Option<Slot> { self.roots.min() } } #[derive(Debug, Default)] pub struct AccountsIndexRootsStats { pub roots_len: usize, pub uncleaned_roots_len: usize, pub previous_uncleaned_roots_len: usize, pub roots_range: u64, pub rooted_cleaned_count: usize, pub unrooted_cleaned_count: usize, } pub struct AccountsIndexIterator<'a, T> { account_maps: &'a LockMapType<T>, start_bound: Bound<Pubkey>, end_bound: Bound<Pubkey>, is_finished: bool, } impl<'a, T> AccountsIndexIterator<'a, T> { fn clone_bound(bound: Bound<&Pubkey>) -> Bound<Pubkey> { match bound { Unbounded => Unbounded, Included(k) => Included(*k), Excluded(k) => Excluded(*k), } } pub fn new<R>(account_maps: &'a LockMapType<T>, range: Option<R>) -> Self where R: RangeBounds<Pubkey>, { Self { start_bound: range .as_ref() .map(|r| Self::clone_bound(r.start_bound())) .unwrap_or(Unbounded), end_bound: range .as_ref() .map(|r| Self::clone_bound(r.end_bound())) .unwrap_or(Unbounded), account_maps, is_finished: false, } } } impl<'a, T: 'static + Clone> Iterator for AccountsIndexIterator<'a, T> { type Item = Vec<(Pubkey, AccountMapEntry<T>)>; fn next(&mut self) -> Option<Self::Item> { if self.is_finished { return None; } let chunk: Vec<(Pubkey, AccountMapEntry<T>)> = self .account_maps .read() .unwrap() .range((self.start_bound, self.end_bound)) .map(|(pubkey, account_map_entry)| (*pubkey, account_map_entry.clone())) .take(ITER_BATCH_SIZE) .collect(); if chunk.is_empty() { self.is_finished = true; return None; } self.start_bound = Excluded(chunk.last().unwrap().0); Some(chunk) } } pub trait ZeroLamport { fn is_zero_lamport(&self) -> bool; } type MapType<T> = AccountMap<Pubkey, AccountMapEntry<T>>; type LockMapType<T> = RwLock<MapType<T>>; type AccountMapsWriteLock<'a, T> = RwLockWriteGuard<'a, MapType<T>>; type AccountMapsReadLock<'a, T> = RwLockReadGuard<'a, MapType<T>>; #[derive(Debug, Default)] pub struct ScanSlotTracker { is_removed: bool, ref_count: u64, } impl ScanSlotTracker { pub fn is_removed(&self) -> bool { self.is_removed } pub fn mark_removed(&mut self) { self.is_removed = true; } } #[derive(Debug)] pub struct AccountsIndex<T> { pub account_maps: LockMapType<T>, program_id_index: SecondaryIndex<DashMapSecondaryIndexEntry>, spl_token_mint_index: SecondaryIndex<DashMapSecondaryIndexEntry>, spl_token_owner_index: SecondaryIndex<RwLockSecondaryIndexEntry>, roots_tracker: RwLock<RootsTracker>, ongoing_scan_roots: RwLock<BTreeMap<Slot, u64>>, // Each scan has some latest slot `S` that is the tip of the fork the scan // is iterating over. The unique id of that slot `S` is recorded here (note we don't use // `S` as the id because there can be more than one version of a slot `S`). If a fork // is abandoned, all of the slots on that fork up to `S` will be removed via // `AccountsDb::remove_unrooted_slots()`. When the scan finishes, it'll realize that the // results of the scan may have been corrupted by `remove_unrooted_slots` and abort its results. // // `removed_bank_ids` tracks all the slot ids that were removed via `remove_unrooted_slots()` so any attempted scans // on any of these slots fails. This is safe to purge once the associated Bank is dropped and // scanning the fork with that Bank at the tip is no longer possible. 
pub removed_bank_ids: Mutex<HashSet<BankId>>, } impl<T> Default for AccountsIndex<T> { fn default() -> Self { Self { account_maps: LockMapType::<T>::default(), program_id_index: SecondaryIndex::<DashMapSecondaryIndexEntry>::new( "program_id_index_stats", ), spl_token_mint_index: SecondaryIndex::<DashMapSecondaryIndexEntry>::new( "spl_token_mint_index_stats", ), spl_token_owner_index: SecondaryIndex::<RwLockSecondaryIndexEntry>::new( "spl_token_owner_index_stats", ), roots_tracker: RwLock::<RootsTracker>::default(), ongoing_scan_roots: RwLock::<BTreeMap<Slot, u64>>::default(), removed_bank_ids: Mutex::<HashSet<BankId>>::default(), } } } impl<T: 'static + Clone + IsCached + ZeroLamport> AccountsIndex<T> { fn iter<R>(&self, range: Option<R>) -> AccountsIndexIterator<T> where R: RangeBounds<Pubkey>, { AccountsIndexIterator::new(&self.account_maps, range) } fn do_checked_scan_accounts<F, R>( &self, metric_name: &'static str, ancestors: &Ancestors, scan_bank_id: BankId, func: F, scan_type: ScanTypes<R>, ) -> Result<(), ScanError> where F: FnMut(&Pubkey, (&T, Slot)), R: RangeBounds<Pubkey>, { { let locked_removed_bank_ids = self.removed_bank_ids.lock().unwrap(); if locked_removed_bank_ids.contains(&scan_bank_id) { return Err(ScanError::SlotRemoved { slot: ancestors.max_slot(), bank_id: scan_bank_id, }); } } let max_root = { let mut w_ongoing_scan_roots = self // This lock is also grabbed by clean_accounts(), so clean // has at most cleaned up to the current `max_root` (since // clean only happens *after* BankForks::set_root() which sets // the `max_root`) .ongoing_scan_roots .write() .unwrap(); // `max_root()` grabs a lock while // the `ongoing_scan_roots` lock is held, // make sure inverse doesn't happen to avoid // deadlock let max_root = self.max_root(); *w_ongoing_scan_roots.entry(max_root).or_default() += 1; max_root }; // First we show that for any bank `B` that is a descendant of // the current `max_root`, it must be true that and `B.ancestors.contains(max_root)`, // regardless of the pattern of `squash()` behavior, where `ancestors` is the set // of ancestors that is tracked in each bank. // // Proof: At startup, if starting from a snapshot, generate_index() adds all banks // in the snapshot to the index via `add_root()` and so `max_root` will be the // greatest of these. Thus, so the claim holds at startup since there are no // descendants of `max_root`. // // Now we proceed by induction on each `BankForks::set_root()`. // Assume the claim holds when the `max_root` is `R`. Call the set of // descendants of `R` present in BankForks `R_descendants`. // // Then for any banks `B` in `R_descendants`, it must be that `B.ancestors.contains(S)`, // where `S` is any ancestor of `B` such that `S >= R`. // // For example: // `R` -> `A` -> `C` -> `B` // Then `B.ancestors == {R, A, C}` // // Next we call `BankForks::set_root()` at some descendant of `R`, `R_new`, // where `R_new > R`. // // When we squash `R_new`, `max_root` in the AccountsIndex here is now set to `R_new`, // and all nondescendants of `R_new` are pruned. // // Now consider any outstanding references to banks in the system that are descended from // `max_root == R_new`. Take any one of these references and call it `B`. Because `B` is // a descendant of `R_new`, this means `B` was also a descendant of `R`. Thus `B` // must be a member of `R_descendants` because `B` was constructed and added to // BankForks before the `set_root`. 
// // This means by the guarantees of `R_descendants` described above, because // `R_new` is an ancestor of `B`, and `R < R_new < B`, then `B.ancestors.contains(R_new)`. // // Now until the next `set_root`, any new banks constructed from `new_from_parent` will // also have `max_root == R_new` in their ancestor set, so the claim holds for those descendants // as well. Once the next `set_root` happens, we once again update `max_root` and the same // inductive argument can be applied again to show the claim holds. // Check that the `max_root` is present in `ancestors`. From the proof above, if // `max_root` is not present in `ancestors`, this means the bank `B` with the // given `ancestors` is not descended from `max_root, which means // either: // 1) `B` is on a different fork or // 2) `B` is an ancestor of `max_root`. // In both cases we can ignore the given ancestors and instead just rely on the roots // present as `max_root` indicates the roots present in the index are more up to date // than the ancestors given. let empty = Ancestors::default(); let ancestors = if ancestors.contains_key(&max_root) { ancestors } else { /* This takes of edge cases like: Diagram 1: slot 0 | slot 1 / \ slot 2 | | slot 3 (max root) slot 4 (scan) By the time the scan on slot 4 is called, slot 2 may already have been cleaned by a clean on slot 3, but slot 4 may not have been cleaned. The state in slot 2 would have been purged and is not saved in any roots. In this case, a scan on slot 4 wouldn't accurately reflect the state when bank 4 was frozen. In cases like this, we default to a scan on the latest roots by removing all `ancestors`. */ &empty }; /* Now there are two cases, either `ancestors` is empty or nonempty: 1) If ancestors is empty, then this is the same as a scan on a rooted bank, and `ongoing_scan_roots` provides protection against cleanup of roots necessary for the scan, and passing `Some(max_root)` to `do_scan_accounts()` ensures newer roots don't appear in the scan. 2) If ancestors is non-empty, then from the `ancestors_contains(&max_root)` above, we know that the fork structure must look something like: Diagram 2: Build fork structure: slot 0 | slot 1 (max_root) / \ slot 2 | | slot 3 (potential newer max root) slot 4 | slot 5 (scan) Consider both types of ancestors, ancestor <= `max_root` and ancestor > `max_root`, where `max_root == 1` as illustrated above. a) The set of `ancestors <= max_root` are all rooted, which means their state is protected by the same guarantees as 1). b) As for the `ancestors > max_root`, those banks have at least one reference discoverable through the chain of `Bank::BankRc::parent` starting from the calling bank. For instance bank 5's parent reference keeps bank 4 alive, which will prevent the `Bank::drop()` from running and cleaning up bank 4. Furthermore, no cleans can happen past the saved max_root == 1, so a potential newer max root at 3 will not clean up any of the ancestors > 1, so slot 4 will not be cleaned in the middle of the scan either. 
(NOTE similar reasoning is employed for assert!() justification in AccountsDb::retry_to_get_account_accessor) */ match scan_type { ScanTypes::Unindexed(range) => { // Pass "" not to log metrics, so RPC doesn't get spammy self.do_scan_accounts(metric_name, ancestors, func, range, Some(max_root)); } ScanTypes::Indexed(IndexKey::ProgramId(program_id)) => { self.do_scan_secondary_index( ancestors, func, &self.program_id_index, &program_id, Some(max_root), ); } ScanTypes::Indexed(IndexKey::SplTokenMint(mint_key)) => { self.do_scan_secondary_index( ancestors, func, &self.spl_token_mint_index, &mint_key, Some(max_root), ); } ScanTypes::Indexed(IndexKey::SplTokenOwner(owner_key)) => { self.do_scan_secondary_index( ancestors, func, &self.spl_token_owner_index, &owner_key, Some(max_root), ); } } { let mut ongoing_scan_roots = self.ongoing_scan_roots.write().unwrap(); let count = ongoing_scan_roots.get_mut(&max_root).unwrap(); *count -= 1; if *count == 0 { ongoing_scan_roots.remove(&max_root); } } // If the fork with tip at bank `scan_bank_id` was removed during our scan, then the scan // may have been corrupted, so abort the results. let was_scan_corrupted = self .removed_bank_ids .lock() .unwrap() .contains(&scan_bank_id); if was_scan_corrupted { Err(ScanError::SlotRemoved { slot: ancestors.max_slot(), bank_id: scan_bank_id, }) } else { Ok(()) } } fn do_unchecked_scan_accounts<F, R>( &self, metric_name: &'static str, ancestors: &Ancestors, func: F, range: Option<R>, ) where F: FnMut(&Pubkey, (&T, Slot)), R: RangeBounds<Pubkey>, { self.do_scan_accounts(metric_name, ancestors, func, range, None); } // Scan accounts and return latest version of each account that is either: // 1) rooted or // 2) present in ancestors fn do_scan_accounts<F, R>( &self, metric_name: &'static str, ancestors: &Ancestors, mut func: F, range: Option<R>, max_root: Option<Slot>, ) where F: FnMut(&Pubkey, (&T, Slot)), R: RangeBounds<Pubkey>, { // TODO: expand to use mint index to find the `pubkey_list` below more efficiently // instead of scanning the entire range let mut total_elapsed_timer = Measure::start("total"); let mut num_keys_iterated = 0; let mut latest_slot_elapsed = 0; let mut load_account_elapsed = 0; let mut read_lock_elapsed = 0; let mut iterator_elapsed = 0; let mut iterator_timer = Measure::start("iterator_elapsed"); for pubkey_list in self.iter(range) { iterator_timer.stop(); iterator_elapsed += iterator_timer.as_us(); for (pubkey, list) in pubkey_list { num_keys_iterated += 1; let mut read_lock_timer = Measure::start("read_lock"); let list_r = &list.slot_list.read().unwrap(); read_lock_timer.stop(); read_lock_elapsed += read_lock_timer.as_us(); let mut latest_slot_timer = Measure::start("latest_slot"); if let Some(index) = self.latest_slot(Some(ancestors), list_r, max_root) { latest_slot_timer.stop(); latest_slot_elapsed += latest_slot_timer.as_us(); let mut load_account_timer = Measure::start("load_account"); func(&pubkey, (&list_r[index].1, list_r[index].0)); load_account_timer.stop(); load_account_elapsed += load_account_timer.as_us(); } } iterator_timer = Measure::start("iterator_elapsed"); } total_elapsed_timer.stop(); if !metric_name.is_empty() { datapoint_info!( metric_name, ("total_elapsed", total_elapsed_timer.as_us(), i64), ("latest_slot_elapsed", latest_slot_elapsed, i64), ("read_lock_elapsed", read_lock_elapsed, i64), ("load_account_elapsed", load_account_elapsed, i64), ("iterator_elapsed", iterator_elapsed, i64), ("num_keys_iterated", num_keys_iterated, i64), ) } } fn do_scan_secondary_index< F, 
SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send, >( &self, ancestors: &Ancestors, mut func: F, index: &SecondaryIndex<SecondaryIndexEntryType>, index_key: &Pubkey, max_root: Option<Slot>, ) where F: FnMut(&Pubkey, (&T, Slot)), { for pubkey in index.get(index_key) { // Maybe these reads from the AccountsIndex can be batched every time it // grabs the read lock as well... if let AccountIndexGetResult::Found(list_r, index) = self.get(&pubkey, Some(ancestors), max_root) { func( &pubkey, (&list_r.slot_list()[index].1, list_r.slot_list()[index].0), ); } } } pub fn get_account_read_entry(&self, pubkey: &Pubkey) -> Option<ReadAccountMapEntry<T>> { let lock = self.get_account_maps_read_lock(); self.get_account_read_entry_with_lock(pubkey, &lock) } pub fn get_account_read_entry_with_lock( &self, pubkey: &Pubkey, lock: &AccountMapsReadLock<'_, T>, ) -> Option<ReadAccountMapEntry<T>> { lock.get(pubkey) .cloned() .map(ReadAccountMapEntry::from_account_map_entry) } fn get_account_write_entry(&self, pubkey: &Pubkey) -> Option<WriteAccountMapEntry<T>> { self.account_maps .read() .unwrap() .get(pubkey) .cloned() .map(WriteAccountMapEntry::from_account_map_entry) } fn insert_new_entry_if_missing( &self, pubkey: &Pubkey, slot: Slot, info: T, w_account_maps: Option<&mut AccountMapsWriteLock<T>>, ) -> Option<(WriteAccountMapEntry<T>, T)> { let new_entry = WriteAccountMapEntry::new_entry_after_update(slot, info); match w_account_maps { Some(w_account_maps) => { self.insert_new_entry_if_missing_with_lock(pubkey, w_account_maps, new_entry) } None => { let mut w_account_maps = self.get_account_maps_write_lock(); self.insert_new_entry_if_missing_with_lock(pubkey, &mut w_account_maps, new_entry) } } } // return None if item was created new // if entry for pubkey already existed, return Some(entry). Caller needs to call entry.update. 
fn insert_new_entry_if_missing_with_lock( &self, pubkey: &Pubkey, w_account_maps: &mut AccountMapsWriteLock<T>, new_entry: AccountMapEntry<T>, ) -> Option<(WriteAccountMapEntry<T>, T)> { let account_entry = w_account_maps.entry(*pubkey); match account_entry { Entry::Occupied(account_entry) => Some(( WriteAccountMapEntry::from_account_map_entry(account_entry.get().clone()), // extract the new account_info from the unused 'new_entry' new_entry.slot_list.write().unwrap().remove(0).1, )), Entry::Vacant(account_entry) => { account_entry.insert(new_entry); None } } } fn get_account_write_entry_else_create( &self, pubkey: &Pubkey, slot: Slot, info: T, ) -> Option<(WriteAccountMapEntry<T>, T)> { match self.get_account_write_entry(pubkey) { Some(w_account_entry) => Some((w_account_entry, info)), None => self.insert_new_entry_if_missing(pubkey, slot, info, None), } } pub fn handle_dead_keys( &self, dead_keys: &[&Pubkey], account_indexes: &AccountSecondaryIndexes, ) { if !dead_keys.is_empty() { for key in dead_keys.iter() { let mut w_index = self.get_account_maps_write_lock(); if let btree_map::Entry::Occupied(index_entry) = w_index.entry(**key) { if index_entry.get().slot_list.read().unwrap().is_empty() { index_entry.remove(); // Note it's only safe to remove all the entries for this key // because we have the lock for this key's entry in the AccountsIndex, // so no other thread is also updating the index self.purge_secondary_indexes_by_inner_key(key, account_indexes); } } } } } /// call func with every pubkey and index visible from a given set of ancestors pub(crate) fn scan_accounts<F>( &self, ancestors: &Ancestors, scan_bank_id: BankId, func: F, ) -> Result<(), ScanError> where F: FnMut(&Pubkey, (&T, Slot)), { // Pass "" not to log metrics, so RPC doesn't get spammy self.do_checked_scan_accounts( "", ancestors, scan_bank_id, func, ScanTypes::Unindexed(None::<Range<Pubkey>>), ) } pub(crate) fn unchecked_scan_accounts<F>( &self, metric_name: &'static str, ancestors: &Ancestors, func: F, ) where F: FnMut(&Pubkey, (&T, Slot)), { self.do_unchecked_scan_accounts(metric_name, ancestors, func, None::<Range<Pubkey>>); } /// call func with every pubkey and index visible from a given set of ancestors with range pub(crate) fn range_scan_accounts<F, R>( &self, metric_name: &'static str, ancestors: &Ancestors, range: R, func: F, ) where F: FnMut(&Pubkey, (&T, Slot)), R: RangeBounds<Pubkey>, { // Only the rent logic should be calling this, which doesn't need the safety checks self.do_unchecked_scan_accounts(metric_name, ancestors, func, Some(range)); } /// call func with every pubkey and index visible from a given set of ancestors pub(crate) fn index_scan_accounts<F>( &self, ancestors: &Ancestors, scan_bank_id: BankId, index_key: IndexKey, func: F, ) -> Result<(), ScanError> where F: FnMut(&Pubkey, (&T, Slot)), { // Pass "" not to log metrics, so RPC doesn't get spammy self.do_checked_scan_accounts( "", ancestors, scan_bank_id, func, ScanTypes::<Range<Pubkey>>::Indexed(index_key), ) } pub fn get_rooted_entries(&self, slice: SlotSlice<T>, max: Option<Slot>) -> SlotList<T> { let max = max.unwrap_or(Slot::MAX); let lock = &self.roots_tracker.read().unwrap().roots; slice .iter() .filter(|(slot, _)| *slot <= max && lock.contains(slot)) .cloned() .collect() } // returns the rooted entries and the storage ref count pub fn roots_and_ref_count( &self, locked_account_entry: &ReadAccountMapEntry<T>, max: Option<Slot>, ) -> (SlotList<T>, RefCount) { ( self.get_rooted_entries(locked_account_entry.slot_list(), max), 
locked_account_entry.ref_count().load(Ordering::Relaxed), ) } pub fn purge_exact<'a, C>( &'a self, pubkey: &Pubkey, slots_to_purge: &'a C, reclaims: &mut SlotList<T>, ) -> bool where C: Contains<'a, Slot>, { if let Some(mut write_account_map_entry) = self.get_account_write_entry(pubkey) { write_account_map_entry.slot_list_mut(|slot_list| { slot_list.retain(|(slot, item)| { let should_purge = slots_to_purge.contains(slot); if should_purge { reclaims.push((*slot, item.clone())); false } else { true } }); slot_list.is_empty() }) } else { true } } pub fn min_ongoing_scan_root(&self) -> Option<Slot> { self.ongoing_scan_roots .read() .unwrap() .keys() .next() .cloned() } // Given a SlotSlice `L`, a list of ancestors and a maximum slot, find the latest element // in `L`, where the slot `S` is an ancestor or root, and if `S` is a root, then `S <= max_root` fn latest_slot( &self, ancestors: Option<&Ancestors>, slice: SlotSlice<T>, max_root: Option<Slot>, ) -> Option<usize> { let mut current_max = 0; let mut rv = None; if let Some(ancestors) = ancestors { if !ancestors.is_empty() { for (i, (slot, _t)) in slice.iter().rev().enumerate() { if (rv.is_none() || *slot > current_max) && ancestors.contains_key(slot) { rv = Some(i); current_max = *slot; } } } } let max_root = max_root.unwrap_or(Slot::MAX); let mut tracker = None; for (i, (slot, _t)) in slice.iter().rev().enumerate() { if (rv.is_none() || *slot > current_max) && *slot <= max_root { let lock = match tracker { Some(inner) => inner, None => self.roots_tracker.read().unwrap(), }; if lock.roots.contains(slot) { rv = Some(i); current_max = *slot; } tracker = Some(lock); } } rv.map(|index| slice.len() - 1 - index) } /// Get an account /// The latest account that appears in `ancestors` or `roots` is returned. pub(crate) fn get( &self, pubkey: &Pubkey, ancestors: Option<&Ancestors>, max_root: Option<Slot>, ) -> AccountIndexGetResult<'_, T> { let read_lock = self.account_maps.read().unwrap(); let account = read_lock .get(pubkey) .cloned() .map(ReadAccountMapEntry::from_account_map_entry); match account { Some(locked_entry) => { drop(read_lock); let slot_list = locked_entry.slot_list(); let found_index = self.latest_slot(ancestors, slot_list, max_root); match found_index { Some(found_index) => AccountIndexGetResult::Found(locked_entry, found_index), None => AccountIndexGetResult::NotFoundOnFork, } } None => AccountIndexGetResult::Missing(read_lock), } } // Get the maximum root <= `max_allowed_root` from the given `slice` fn get_newest_root_in_slot_list( roots: &RollingBitField, slice: SlotSlice<T>, max_allowed_root: Option<Slot>, ) -> Slot { let mut max_root = 0; for (f, _) in slice.iter() { if let Some(max_allowed_root) = max_allowed_root { if *f > max_allowed_root { continue; } } if *f > max_root && roots.contains(f) { max_root = *f; } } max_root } pub(crate) fn update_secondary_indexes( &self, pubkey: &Pubkey, account_owner: &Pubkey, account_data: &[u8], account_indexes: &AccountSecondaryIndexes, ) { if account_indexes.is_empty() { return; } if account_indexes.contains(&AccountIndex::ProgramId) && account_indexes.include_key(account_owner) { self.program_id_index.insert(account_owner, pubkey); } // Note because of the below check below on the account data length, when an // account hits zero lamports and is reset to AccountSharedData::Default, then we skip // the below updates to the secondary indexes. // // Skipping means not updating secondary index to mark the account as missing. 
// This doesn't introduce false positives during a scan because the caller to scan // provides the ancestors to check. So even if a zero-lamport account is not yet // removed from the secondary index, the scan function will: // 1) consult the primary index via `get(&pubkey, Some(ancestors), max_root)` // and find the zero-lamport version // 2) When the fetch from storage occurs, it will return AccountSharedData::Default // (as persisted tombstone for snapshots). This will then ultimately be // filtered out by post-scan filters, like in `get_filtered_spl_token_accounts_by_owner()`. if *account_owner == inline_spl_token_v2_0::id() && account_data.len() == inline_spl_token_v2_0::state::Account::get_packed_len() { if account_indexes.contains(&AccountIndex::SplTokenOwner) { let owner_key = Pubkey::new( &account_data[SPL_TOKEN_ACCOUNT_OWNER_OFFSET ..SPL_TOKEN_ACCOUNT_OWNER_OFFSET + PUBKEY_BYTES], ); if account_indexes.include_key(&owner_key) { self.spl_token_owner_index.insert(&owner_key, pubkey); } } if account_indexes.contains(&AccountIndex::SplTokenMint) { let mint_key = Pubkey::new( &account_data[SPL_TOKEN_ACCOUNT_MINT_OFFSET ..SPL_TOKEN_ACCOUNT_MINT_OFFSET + PUBKEY_BYTES], ); if account_indexes.include_key(&mint_key) { self.spl_token_mint_index.insert(&mint_key, pubkey); } } } } fn get_account_maps_write_lock(&self) -> AccountMapsWriteLock<T> { self.account_maps.write().unwrap() } pub(crate) fn get_account_maps_read_lock(&self) -> AccountMapsReadLock<T> { self.account_maps.read().unwrap() } // Same functionally to upsert, but: // 1. operates on a batch of items // 2. holds the write lock for the duration of adding the items // Can save time when inserting lots of new keys. // But, does NOT update secondary index // This is designed to be called at startup time. #[allow(clippy::needless_collect)] pub(crate) fn insert_new_if_missing_into_primary_index<'a>( &'a self, slot: Slot, item_len: usize, items: impl Iterator<Item = (&'a Pubkey, T)>, ) -> (Vec<Pubkey>, u64) { // returns (duplicate pubkey mask, insertion time us) let potentially_new_items = items .map(|(pubkey, account_info)| { // this value is equivalent to what update() below would have created if we inserted a new item ( pubkey, WriteAccountMapEntry::new_entry_after_update(slot, account_info), ) }) .collect::<Vec<_>>(); // collect here so we have created all data prior to obtaining lock let mut _reclaims = SlotList::new(); let mut duplicate_keys = Vec::with_capacity(item_len / 100); // just an estimate let mut w_account_maps = self.get_account_maps_write_lock(); let mut insert_time = Measure::start("insert_into_primary_index"); potentially_new_items .into_iter() .for_each(|(pubkey, new_item)| { let already_exists = self.insert_new_entry_if_missing_with_lock( pubkey, &mut w_account_maps, new_item, ); if let Some((mut w_account_entry, account_info)) = already_exists { w_account_entry.update(slot, account_info, &mut _reclaims); duplicate_keys.push(*pubkey); } }); insert_time.stop(); (duplicate_keys, insert_time.as_us()) } // Updates the given pubkey at the given slot with the new account information. // Returns true if the pubkey was newly inserted into the index, otherwise, if the // pubkey updates an existing entry in the index, returns false. 
pub fn upsert( &self, slot: Slot, pubkey: &Pubkey, account_owner: &Pubkey, account_data: &[u8], account_indexes: &AccountSecondaryIndexes, account_info: T, reclaims: &mut SlotList<T>, ) -> bool { let is_newly_inserted = { // We don't atomically update both primary index and secondary index together. // This certainly creates small time window with inconsistent state across the two indexes. // However, this is acceptable because: // // - A strict consistent view at any given moment of time is not necessary, because the only // use case for the secondary index is `scan`, and `scans` are only supported/require consistency // on frozen banks, and this inconsistency is only possible on working banks. // // - The secondary index is never consulted as primary source of truth for gets/stores. // So, what the accounts_index sees alone is sufficient as a source of truth for other non-scan // account operations. if let Some((mut w_account_entry, account_info)) = self.get_account_write_entry_else_create(pubkey, slot, account_info) { w_account_entry.update(slot, account_info, reclaims); false } else { true } }; self.update_secondary_indexes(pubkey, account_owner, account_data, account_indexes); is_newly_inserted } pub fn unref_from_storage(&self, pubkey: &Pubkey) { if let Some(locked_entry) = self.get_account_read_entry(pubkey) { locked_entry.unref(); } } pub fn ref_count_from_storage(&self, pubkey: &Pubkey) -> RefCount { if let Some(locked_entry) = self.get_account_read_entry(pubkey) { locked_entry.ref_count().load(Ordering::Relaxed) } else { 0 } } fn purge_secondary_indexes_by_inner_key<'a>( &'a self, inner_key: &Pubkey, account_indexes: &AccountSecondaryIndexes, ) { if account_indexes.contains(&AccountIndex::ProgramId) { self.program_id_index.remove_by_inner_key(inner_key); } if account_indexes.contains(&AccountIndex::SplTokenOwner) { self.spl_token_owner_index.remove_by_inner_key(inner_key); } if account_indexes.contains(&AccountIndex::SplTokenMint) { self.spl_token_mint_index.remove_by_inner_key(inner_key); } } fn purge_older_root_entries( &self, slot_list: &mut SlotList<T>, reclaims: &mut SlotList<T>, max_clean_root: Option<Slot>, ) { let roots_tracker = &self.roots_tracker.read().unwrap(); let newest_root_in_slot_list = Self::get_newest_root_in_slot_list(&roots_tracker.roots, slot_list, max_clean_root); let max_clean_root = max_clean_root.unwrap_or(roots_tracker.max_root); slot_list.retain(|(slot, value)| { let should_purge = Self::can_purge_older_entries(max_clean_root, newest_root_in_slot_list, *slot) && !value.is_cached(); if should_purge { reclaims.push((*slot, value.clone())); } !should_purge }); } pub fn clean_rooted_entries( &self, pubkey: &Pubkey, reclaims: &mut SlotList<T>, max_clean_root: Option<Slot>, ) { let mut is_slot_list_empty = false; if let Some(mut locked_entry) = self.get_account_write_entry(pubkey) { locked_entry.slot_list_mut(|slot_list| { self.purge_older_root_entries(slot_list, reclaims, max_clean_root); is_slot_list_empty = slot_list.is_empty(); }); } // If the slot list is empty, remove the pubkey from `account_maps`. Make sure to grab the // lock and double check the slot list is still empty, because another writer could have // locked and inserted the pubkey inbetween when `is_slot_list_empty=true` and the call to // remove() below. if is_slot_list_empty { let mut w_maps = self.get_account_maps_write_lock(); if let Some(x) = w_maps.get(pubkey) { if x.slot_list.read().unwrap().is_empty() { w_maps.remove(pubkey); } } } } /// When can an entry be purged? 
/// /// If we get a slot update where slot != newest_root_in_slot_list for an account where slot < /// max_clean_root, then we know it's safe to delete because: /// /// a) If slot < newest_root_in_slot_list, then we know the update is outdated by a later rooted /// update, namely the one in newest_root_in_slot_list /// /// b) If slot > newest_root_in_slot_list, then because slot < max_clean_root and we know there are /// no roots in the slot list between newest_root_in_slot_list and max_clean_root, (otherwise there /// would be a bigger newest_root_in_slot_list, which is a contradiction), then we know slot must be /// an unrooted slot less than max_clean_root and thus safe to clean as well. fn can_purge_older_entries( max_clean_root: Slot, newest_root_in_slot_list: Slot, slot: Slot, ) -> bool { slot < max_clean_root && slot != newest_root_in_slot_list } /// Given a list of slots, return a new list of only the slots that are rooted pub fn get_rooted_from_list<'a>(&self, slots: impl Iterator<Item = &'a Slot>) -> Vec<Slot> { let roots_tracker = self.roots_tracker.read().unwrap(); slots .filter_map(|s| { if roots_tracker.roots.contains(s) { Some(*s) } else { None } }) .collect() } pub fn is_root(&self, slot: Slot) -> bool { self.roots_tracker.read().unwrap().roots.contains(&slot) } pub fn add_root(&self, slot: Slot, caching_enabled: bool) { let mut w_roots_tracker = self.roots_tracker.write().unwrap(); w_roots_tracker.roots.insert(slot); // we delay cleaning until flushing! if !caching_enabled { w_roots_tracker.uncleaned_roots.insert(slot); } // `AccountsDb::flush_accounts_cache()` relies on roots being added in order assert!(slot >= w_roots_tracker.max_root); w_roots_tracker.max_root = slot; } pub fn add_uncleaned_roots<I>(&self, roots: I) where I: IntoIterator<Item = Slot>, { let mut w_roots_tracker = self.roots_tracker.write().unwrap(); w_roots_tracker.uncleaned_roots.extend(roots); } pub fn max_root(&self) -> Slot { self.roots_tracker.read().unwrap().max_root } /// Remove the slot when the storage for the slot is freed /// Accounts no longer reference this slot. 
pub fn clean_dead_slot(&self, slot: Slot) -> Option<AccountsIndexRootsStats> { let (roots_len, uncleaned_roots_len, previous_uncleaned_roots_len, roots_range) = { let mut w_roots_tracker = self.roots_tracker.write().unwrap(); let removed_from_unclean_roots = w_roots_tracker.uncleaned_roots.remove(&slot); let removed_from_previous_uncleaned_roots = w_roots_tracker.previous_uncleaned_roots.remove(&slot); if !w_roots_tracker.roots.remove(&slot) { if removed_from_unclean_roots { error!("clean_dead_slot-removed_from_unclean_roots: {}", slot); inc_new_counter_error!("clean_dead_slot-removed_from_unclean_roots", 1, 1); } if removed_from_previous_uncleaned_roots { error!( "clean_dead_slot-removed_from_previous_uncleaned_roots: {}", slot ); inc_new_counter_error!( "clean_dead_slot-removed_from_previous_uncleaned_roots", 1, 1 ); } return None; } ( w_roots_tracker.roots.len(), w_roots_tracker.uncleaned_roots.len(), w_roots_tracker.previous_uncleaned_roots.len(), w_roots_tracker.roots.range_width(), ) }; Some(AccountsIndexRootsStats { roots_len, uncleaned_roots_len, previous_uncleaned_roots_len, roots_range, rooted_cleaned_count: 0, unrooted_cleaned_count: 0, }) } pub fn min_root(&self) -> Option<Slot> { self.roots_tracker.read().unwrap().min_root() } pub fn reset_uncleaned_roots(&self, max_clean_root: Option<Slot>) -> HashSet<Slot> { let mut cleaned_roots = HashSet::new(); let mut w_roots_tracker = self.roots_tracker.write().unwrap(); w_roots_tracker.uncleaned_roots.retain(|root| { let is_cleaned = max_clean_root .map(|max_clean_root| *root <= max_clean_root) .unwrap_or(true); if is_cleaned { cleaned_roots.insert(*root); } // Only keep the slots that have yet to be cleaned !is_cleaned }); std::mem::replace(&mut w_roots_tracker.previous_uncleaned_roots, cleaned_roots) } #[cfg(test)] pub fn clear_uncleaned_roots(&self, max_clean_root: Option<Slot>) -> HashSet<Slot> { let mut cleaned_roots = HashSet::new(); let mut w_roots_tracker = self.roots_tracker.write().unwrap(); w_roots_tracker.uncleaned_roots.retain(|root| { let is_cleaned = max_clean_root .map(|max_clean_root| *root <= max_clean_root) .unwrap_or(true); if is_cleaned { cleaned_roots.insert(*root); } // Only keep the slots that have yet to be cleaned !is_cleaned }); cleaned_roots } pub fn is_uncleaned_root(&self, slot: Slot) -> bool { self.roots_tracker .read() .unwrap() .uncleaned_roots .contains(&slot) } pub fn num_roots(&self) -> usize { self.roots_tracker.read().unwrap().roots.len() } pub fn all_roots(&self) -> Vec<Slot> { let tracker = self.roots_tracker.read().unwrap(); tracker.roots.get_all() } #[cfg(test)] pub fn clear_roots(&self) { self.roots_tracker.write().unwrap().roots.clear() } #[cfg(test)] pub fn uncleaned_roots_len(&self) -> usize { self.roots_tracker.read().unwrap().uncleaned_roots.len() } #[cfg(test)] // filter any rooted entries and return them along with a bool that indicates // if this account has no more entries. Note this does not update the secondary // indexes! 
pub fn purge_roots(&self, pubkey: &Pubkey) -> (SlotList<T>, bool) { let mut write_account_map_entry = self.get_account_write_entry(pubkey).unwrap(); write_account_map_entry.slot_list_mut(|slot_list| { let reclaims = self.get_rooted_entries(slot_list, None); slot_list.retain(|(slot, _)| !self.is_root(*slot)); (reclaims, slot_list.is_empty()) }) } } #[cfg(test)] pub mod tests { use super::*; use solana_sdk::signature::{Keypair, Signer}; pub enum SecondaryIndexTypes<'a> { RwLock(&'a SecondaryIndex<RwLockSecondaryIndexEntry>), DashMap(&'a SecondaryIndex<DashMapSecondaryIndexEntry>), } pub fn spl_token_mint_index_enabled() -> AccountSecondaryIndexes { let mut account_indexes = HashSet::new(); account_indexes.insert(AccountIndex::SplTokenMint); AccountSecondaryIndexes { indexes: account_indexes, keys: None, } } pub fn spl_token_owner_index_enabled() -> AccountSecondaryIndexes { let mut account_indexes = HashSet::new(); account_indexes.insert(AccountIndex::SplTokenOwner); AccountSecondaryIndexes { indexes: account_indexes, keys: None, } } impl<'a, T: 'static> AccountIndexGetResult<'a, T> { pub fn unwrap(self) -> (ReadAccountMapEntry<T>, usize) { match self { AccountIndexGetResult::Found(lock, size) => (lock, size), _ => { panic!("trying to unwrap AccountIndexGetResult with non-Success result"); } } } pub fn is_none(&self) -> bool { !self.is_some() } pub fn is_some(&self) -> bool { matches!(self, AccountIndexGetResult::Found(_lock, _size)) } pub fn map<V, F: FnOnce((ReadAccountMapEntry<T>, usize)) -> V>(self, f: F) -> Option<V> { match self { AccountIndexGetResult::Found(lock, size) => Some(f((lock, size))), _ => None, } } } fn create_dashmap_secondary_index_state() -> (usize, usize, AccountSecondaryIndexes) { { // Check that we're actually testing the correct variant let index = AccountsIndex::<bool>::default(); let _type_check = SecondaryIndexTypes::DashMap(&index.spl_token_mint_index); } (0, PUBKEY_BYTES, spl_token_mint_index_enabled()) } fn create_rwlock_secondary_index_state() -> (usize, usize, AccountSecondaryIndexes) { { // Check that we're actually testing the correct variant let index = AccountsIndex::<bool>::default(); let _type_check = SecondaryIndexTypes::RwLock(&index.spl_token_owner_index); } ( SPL_TOKEN_ACCOUNT_OWNER_OFFSET, SPL_TOKEN_ACCOUNT_OWNER_OFFSET + PUBKEY_BYTES, spl_token_owner_index_enabled(), ) } #[test] fn test_bitfield_delete_non_excess() { solana_logger::setup(); let len = 16; let mut bitfield = RollingBitField::new(len); assert_eq!(bitfield.min(), None); bitfield.insert(0); assert_eq!(bitfield.min(), Some(0)); let too_big = len + 1; bitfield.insert(too_big); assert!(bitfield.contains(&0)); assert!(bitfield.contains(&too_big)); assert_eq!(bitfield.len(), 2); assert_eq!(bitfield.excess.len(), 1); assert_eq!(bitfield.min, too_big); assert_eq!(bitfield.min(), Some(0)); assert_eq!(bitfield.max, too_big + 1); // delete the thing that is NOT in excess bitfield.remove(&too_big); assert_eq!(bitfield.min, too_big + 1); assert_eq!(bitfield.max, too_big + 1); let too_big_times_2 = too_big * 2; bitfield.insert(too_big_times_2); assert!(bitfield.contains(&0)); assert!(bitfield.contains(&too_big_times_2)); assert_eq!(bitfield.len(), 2); assert_eq!(bitfield.excess.len(), 1); assert_eq!(bitfield.min(), bitfield.excess.iter().min().copied()); assert_eq!(bitfield.min, too_big_times_2); assert_eq!(bitfield.max, too_big_times_2 + 1); bitfield.remove(&0); bitfield.remove(&too_big_times_2); assert!(bitfield.is_empty()); let other = 5; bitfield.insert(other); 
assert!(bitfield.contains(&other)); assert!(bitfield.excess.is_empty()); assert_eq!(bitfield.min, other); assert_eq!(bitfield.max, other + 1); } #[test] fn test_bitfield_insert_excess() { solana_logger::setup(); let len = 16; let mut bitfield = RollingBitField::new(len); bitfield.insert(0); let too_big = len + 1; bitfield.insert(too_big); assert!(bitfield.contains(&0)); assert!(bitfield.contains(&too_big)); assert_eq!(bitfield.len(), 2); assert_eq!(bitfield.excess.len(), 1); assert!(bitfield.excess.contains(&0)); assert_eq!(bitfield.min, too_big); assert_eq!(bitfield.max, too_big + 1); // delete the thing that IS in excess // this does NOT affect min/max bitfield.remove(&0); assert_eq!(bitfield.min, too_big); assert_eq!(bitfield.max, too_big + 1); // re-add to excess bitfield.insert(0); assert!(bitfield.contains(&0)); assert!(bitfield.contains(&too_big)); assert_eq!(bitfield.len(), 2); assert_eq!(bitfield.excess.len(), 1); assert_eq!(bitfield.min, too_big); assert_eq!(bitfield.max, too_big + 1); } #[test] fn test_bitfield_permutations() { solana_logger::setup(); let mut bitfield = RollingBitField::new(2097152); let mut hash = HashSet::new(); let min = 101_000; let width = 400_000; let dead = 19; let mut slot = min; while hash.len() < width { slot += 1; if slot % dead == 0 { continue; } hash.insert(slot); bitfield.insert(slot); } compare(&hash, &bitfield); let max = slot + 1; let mut time = Measure::start(""); let mut count = 0; for slot in (min - 10)..max + 100 { if hash.contains(&slot) { count += 1; } } time.stop(); let mut time2 = Measure::start(""); let mut count2 = 0; for slot in (min - 10)..max + 100 { if bitfield.contains(&slot) { count2 += 1; } } time2.stop(); info!( "{}ms, {}ms, {} ratio", time.as_ms(), time2.as_ms(), time.as_ns() / time2.as_ns() ); assert_eq!(count, count2); } #[test] #[should_panic(expected = "assertion failed: max_width.is_power_of_two()")] fn test_bitfield_power_2() { let _ = RollingBitField::new(3); } #[test] #[should_panic(expected = "assertion failed: max_width > 0")] fn test_bitfield_0() { let _ = RollingBitField::new(0); } fn setup_empty(width: u64) -> RollingBitFieldTester { let bitfield = RollingBitField::new(width); let hash_set = HashSet::new(); RollingBitFieldTester { bitfield, hash_set } } struct RollingBitFieldTester { pub bitfield: RollingBitField, pub hash_set: HashSet<u64>, } impl RollingBitFieldTester { fn insert(&mut self, slot: u64) { self.bitfield.insert(slot); self.hash_set.insert(slot); assert!(self.bitfield.contains(&slot)); compare(&self.hash_set, &self.bitfield); } fn remove(&mut self, slot: &u64) -> bool { let result = self.bitfield.remove(slot); assert_eq!(result, self.hash_set.remove(slot)); assert!(!self.bitfield.contains(slot)); self.compare(); result } fn compare(&self) { compare(&self.hash_set, &self.bitfield); } } fn setup_wide(width: u64, start: u64) -> RollingBitFieldTester { let mut tester = setup_empty(width); tester.compare(); tester.insert(start); tester.insert(start + 1); tester } #[test] fn test_bitfield_insert_wide() { solana_logger::setup(); let width = 16; let start = 0; let mut tester = setup_wide(width, start); let slot = start + width; let all = tester.bitfield.get_all(); // higher than max range by 1 tester.insert(slot); let bitfield = tester.bitfield; for slot in all { assert!(bitfield.contains(&slot)); } assert_eq!(bitfield.excess.len(), 1); assert_eq!(bitfield.count, 3); } #[test] fn test_bitfield_insert_wide_before() { solana_logger::setup(); let width = 16; let start = 100; let mut bitfield = setup_wide(width, 
start).bitfield; let slot = start + 1 - width; // assert here - would make min too low, causing too wide of a range bitfield.insert(slot); assert_eq!(1, bitfield.excess.len()); assert_eq!(3, bitfield.count); assert!(bitfield.contains(&slot)); } #[test] fn test_bitfield_insert_wide_before_ok() { solana_logger::setup(); let width = 16; let start = 100; let mut bitfield = setup_wide(width, start).bitfield; let slot = start + 2 - width; // this item would make our width exactly equal to what is allowed, but it is also inserting prior to min bitfield.insert(slot); assert_eq!(1, bitfield.excess.len()); assert!(bitfield.contains(&slot)); assert_eq!(3, bitfield.count); } #[test] fn test_bitfield_contains_wide_no_assert() { { let width = 16; let start = 0; let bitfield = setup_wide(width, start).bitfield; let mut slot = width; assert!(!bitfield.contains(&slot)); slot += 1; assert!(!bitfield.contains(&slot)); } { let width = 16; let start = 100; let bitfield = setup_wide(width, start).bitfield; // too large let mut slot = width; assert!(!bitfield.contains(&slot)); slot += 1; assert!(!bitfield.contains(&slot)); // too small, before min slot = 0; assert!(!bitfield.contains(&slot)); } } #[test] fn test_bitfield_remove_wide() { let width = 16; let start = 0; let mut tester = setup_wide(width, start); let slot = width; assert!(!tester.remove(&slot)); } #[test] fn test_bitfield_excess2() { solana_logger::setup(); let width = 16; let mut tester = setup_empty(width); let slot = 100; // insert 1st slot tester.insert(slot); assert!(tester.bitfield.excess.is_empty()); // insert a slot before the previous one. this is 'excess' since we don't use this pattern in normal operation let slot2 = slot - 1; tester.insert(slot2); assert_eq!(tester.bitfield.excess.len(), 1); // remove the 1st slot. we will be left with only excess tester.remove(&slot); assert!(tester.bitfield.contains(&slot2)); assert_eq!(tester.bitfield.excess.len(), 1); // re-insert at valid range, making sure we don't insert into excess tester.insert(slot); assert_eq!(tester.bitfield.excess.len(), 1); // remove the excess slot. tester.remove(&slot2); assert!(tester.bitfield.contains(&slot)); assert!(tester.bitfield.excess.is_empty()); // re-insert the excess slot tester.insert(slot2); assert_eq!(tester.bitfield.excess.len(), 1); } #[test] fn test_bitfield_excess() { solana_logger::setup(); // start at slot 0 or a separate, higher slot for width in [16, 4194304].iter() { let width = *width; let mut tester = setup_empty(width); for start in [0, width * 5].iter().cloned() { // recreate means create empty bitfield with each iteration, otherwise re-use for recreate in [false, true].iter().cloned() { let max = start + 3; // first root to add for slot in start..max { // subsequent roots to add for slot2 in (slot + 1)..max { // reverse_slots = 1 means add slots in reverse order (max to min). This causes us to add second and later slots to excess. 
for reverse_slots in [false, true].iter().cloned() { let maybe_reverse = |slot| { if reverse_slots { max - slot } else { slot } }; if recreate { let recreated = setup_empty(width); tester = recreated; } // insert for slot in slot..=slot2 { let slot_use = maybe_reverse(slot); tester.insert(slot_use); debug!( "slot: {}, bitfield: {:?}, reverse: {}, len: {}, excess: {:?}", slot_use, tester.bitfield, reverse_slots, tester.bitfield.len(), tester.bitfield.excess ); assert!( (reverse_slots && tester.bitfield.len() > 1) ^ tester.bitfield.excess.is_empty() ); } if start > width * 2 { assert!(!tester.bitfield.contains(&(start - width * 2))); } assert!(!tester.bitfield.contains(&(start + width * 2))); let len = (slot2 - slot + 1) as usize; assert_eq!(tester.bitfield.len(), len); assert_eq!(tester.bitfield.count, len); // remove for slot in slot..=slot2 { let slot_use = maybe_reverse(slot); assert!(tester.remove(&slot_use)); assert!( (reverse_slots && !tester.bitfield.is_empty()) ^ tester.bitfield.excess.is_empty() ); } assert!(tester.bitfield.is_empty()); assert_eq!(tester.bitfield.count, 0); if start > width * 2 { assert!(!tester.bitfield.contains(&(start - width * 2))); } assert!(!tester.bitfield.contains(&(start + width * 2))); } } } } } } } #[test] fn test_bitfield_remove_wide_before() { let width = 16; let start = 100; let mut tester = setup_wide(width, start); let slot = start + 1 - width; assert!(!tester.remove(&slot)); } fn compare_internal(hashset: &HashSet<u64>, bitfield: &RollingBitField) { assert_eq!(hashset.len(), bitfield.len()); assert_eq!(hashset.is_empty(), bitfield.is_empty()); if !bitfield.is_empty() { let mut min = Slot::MAX; let mut overall_min = Slot::MAX; let mut max = Slot::MIN; for item in bitfield.get_all() { assert!(hashset.contains(&item)); if !bitfield.excess.contains(&item) { min = std::cmp::min(min, item); max = std::cmp::max(max, item); } overall_min = std::cmp::min(overall_min, item); } assert_eq!(bitfield.min(), Some(overall_min)); assert_eq!(bitfield.get_all().len(), hashset.len()); // range isn't tracked for excess items if bitfield.excess.len() != bitfield.len() { let width = if bitfield.is_empty() { 0 } else { max + 1 - min }; assert!( bitfield.range_width() >= width, "hashset: {:?}, bitfield: {:?}, bitfield.range_width: {}, width: {}", hashset, bitfield.get_all(), bitfield.range_width(), width, ); } } else { assert_eq!(bitfield.min(), None); } } fn compare(hashset: &HashSet<u64>, bitfield: &RollingBitField) { compare_internal(hashset, bitfield); let clone = bitfield.clone(); compare_internal(hashset, &clone); assert!(clone.eq(bitfield)); assert_eq!(clone, *bitfield); } #[test] fn test_bitfield_functionality() { solana_logger::setup(); // bitfield sizes are powers of 2, cycle through values of 1, 2, 4, .. 
2^9 for power in 0..10 { let max_bitfield_width = 2u64.pow(power) as u64; let width_iteration_max = if max_bitfield_width > 1 { // add up to 2 items so we can test out multiple items 3 } else { // 0 or 1 items is all we can fit with a width of 1 item 2 }; for width in 0..width_iteration_max { let mut tester = setup_empty(max_bitfield_width); let min = 101_000; let dead = 19; let mut slot = min; while tester.hash_set.len() < width { slot += 1; if max_bitfield_width > 2 && slot % dead == 0 { // with max_bitfield_width of 1 and 2, there is no room for dead slots continue; } tester.insert(slot); } let max = slot + 1; for slot in (min - 10)..max + 100 { assert_eq!( tester.bitfield.contains(&slot), tester.hash_set.contains(&slot) ); } if width > 0 { assert!(tester.remove(&slot)); assert!(!tester.remove(&slot)); } let all = tester.bitfield.get_all(); // remove the rest, including a call that removes slot again for item in all.iter() { assert!(tester.remove(item)); assert!(!tester.remove(item)); } let min = max + ((width * 2) as u64) + 3; let slot = min; // several widths past previous min let max = slot + 1; tester.insert(slot); for slot in (min - 10)..max + 100 { assert_eq!( tester.bitfield.contains(&slot), tester.hash_set.contains(&slot) ); } } } } fn bitfield_insert_and_test(bitfield: &mut RollingBitField, slot: Slot) { let len = bitfield.len(); let old_all = bitfield.get_all(); let (new_min, new_max) = if bitfield.is_empty() { (slot, slot + 1) } else { ( std::cmp::min(bitfield.min, slot), std::cmp::max(bitfield.max, slot + 1), ) }; bitfield.insert(slot); assert_eq!(bitfield.min, new_min); assert_eq!(bitfield.max, new_max); assert_eq!(bitfield.len(), len + 1); assert!(!bitfield.is_empty()); assert!(bitfield.contains(&slot)); // verify aliasing is what we expect assert!(bitfield.contains_assume_in_range(&(slot + bitfield.max_width))); let get_all = bitfield.get_all(); old_all .into_iter() .for_each(|slot| assert!(get_all.contains(&slot))); assert!(get_all.contains(&slot)); assert!(get_all.len() == len + 1); } #[test] fn test_bitfield_clear() { let mut bitfield = RollingBitField::new(4); assert_eq!(bitfield.len(), 0); assert!(bitfield.is_empty()); bitfield_insert_and_test(&mut bitfield, 0); bitfield.clear(); assert_eq!(bitfield.len(), 0); assert!(bitfield.is_empty()); assert!(bitfield.get_all().is_empty()); bitfield_insert_and_test(&mut bitfield, 1); bitfield.clear(); assert_eq!(bitfield.len(), 0); assert!(bitfield.is_empty()); assert!(bitfield.get_all().is_empty()); bitfield_insert_and_test(&mut bitfield, 4); } #[test] fn test_bitfield_wrapping() { let mut bitfield = RollingBitField::new(4); assert_eq!(bitfield.len(), 0); assert!(bitfield.is_empty()); bitfield_insert_and_test(&mut bitfield, 0); assert_eq!(bitfield.get_all(), vec![0]); bitfield_insert_and_test(&mut bitfield, 2); assert_eq!(bitfield.get_all(), vec![0, 2]); bitfield_insert_and_test(&mut bitfield, 3); bitfield.insert(3); // redundant insert assert_eq!(bitfield.get_all(), vec![0, 2, 3]); assert!(bitfield.remove(&0)); assert!(!bitfield.remove(&0)); assert_eq!(bitfield.min, 2); assert_eq!(bitfield.max, 4); assert_eq!(bitfield.len(), 2); assert!(!bitfield.remove(&0)); // redundant remove assert_eq!(bitfield.len(), 2); assert_eq!(bitfield.get_all(), vec![2, 3]); bitfield.insert(4); // wrapped around value - same bit as '0' assert_eq!(bitfield.min, 2); assert_eq!(bitfield.max, 5); assert_eq!(bitfield.len(), 3); assert_eq!(bitfield.get_all(), vec![2, 3, 4]); assert!(bitfield.remove(&2)); assert_eq!(bitfield.min, 3); 
assert_eq!(bitfield.max, 5); assert_eq!(bitfield.len(), 2); assert_eq!(bitfield.get_all(), vec![3, 4]); assert!(bitfield.remove(&3)); assert_eq!(bitfield.min, 4); assert_eq!(bitfield.max, 5); assert_eq!(bitfield.len(), 1); assert_eq!(bitfield.get_all(), vec![4]); assert!(bitfield.remove(&4)); assert_eq!(bitfield.len(), 0); assert!(bitfield.is_empty()); assert!(bitfield.get_all().is_empty()); bitfield_insert_and_test(&mut bitfield, 8); assert!(bitfield.remove(&8)); assert_eq!(bitfield.len(), 0); assert!(bitfield.is_empty()); assert!(bitfield.get_all().is_empty()); bitfield_insert_and_test(&mut bitfield, 9); assert!(bitfield.remove(&9)); assert_eq!(bitfield.len(), 0); assert!(bitfield.is_empty()); assert!(bitfield.get_all().is_empty()); } #[test] fn test_bitfield_smaller() { // smaller bitfield, fewer entries, including 0 solana_logger::setup(); for width in 0..34 { let mut bitfield = RollingBitField::new(4096); let mut hash_set = HashSet::new(); let min = 1_010_000; let dead = 19; let mut slot = min; while hash_set.len() < width { slot += 1; if slot % dead == 0 { continue; } hash_set.insert(slot); bitfield.insert(slot); } let max = slot + 1; let mut time = Measure::start(""); let mut count = 0; for slot in (min - 10)..max + 100 { if hash_set.contains(&slot) { count += 1; } } time.stop(); let mut time2 = Measure::start(""); let mut count2 = 0; for slot in (min - 10)..max + 100 { if bitfield.contains(&slot) { count2 += 1; } } time2.stop(); info!( "{}, {}, {}", time.as_ms(), time2.as_ms(), time.as_ns() / time2.as_ns() ); assert_eq!(count, count2); } } #[test] fn test_get_empty() { let key = Keypair::new(); let index = AccountsIndex::<bool>::default(); let ancestors = Ancestors::default(); assert!(index.get(&key.pubkey(), Some(&ancestors), None).is_none()); assert!(index.get(&key.pubkey(), None, None).is_none()); let mut num = 0; index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 0); } #[test] fn test_secondary_index_include_exclude() { let pk1 = Pubkey::new_unique(); let pk2 = Pubkey::new_unique(); let mut index = AccountSecondaryIndexes::default(); assert!(!index.contains(&AccountIndex::ProgramId)); index.indexes.insert(AccountIndex::ProgramId); assert!(index.contains(&AccountIndex::ProgramId)); assert!(index.include_key(&pk1)); assert!(index.include_key(&pk2)); let exclude = false; index.keys = Some(AccountSecondaryIndexesIncludeExclude { keys: [pk1].iter().cloned().collect::<HashSet<_>>(), exclude, }); assert!(index.include_key(&pk1)); assert!(!index.include_key(&pk2)); let exclude = true; index.keys = Some(AccountSecondaryIndexesIncludeExclude { keys: [pk1].iter().cloned().collect::<HashSet<_>>(), exclude, }); assert!(!index.include_key(&pk1)); assert!(index.include_key(&pk2)); let exclude = true; index.keys = Some(AccountSecondaryIndexesIncludeExclude { keys: [pk1, pk2].iter().cloned().collect::<HashSet<_>>(), exclude, }); assert!(!index.include_key(&pk1)); assert!(!index.include_key(&pk2)); let exclude = false; index.keys = Some(AccountSecondaryIndexesIncludeExclude { keys: [pk1, pk2].iter().cloned().collect::<HashSet<_>>(), exclude, }); assert!(index.include_key(&pk1)); assert!(index.include_key(&pk2)); } #[test] fn test_insert_no_ancestors() { let key = Keypair::new(); let index = AccountsIndex::<bool>::default(); let mut gc = Vec::new(); index.upsert( 0, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut gc, ); assert!(gc.is_empty()); let ancestors = Ancestors::default(); assert!(index.get(&key.pubkey(), 
Some(&ancestors), None).is_none()); assert!(index.get(&key.pubkey(), None, None).is_none()); let mut num = 0; index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 0); } type AccountInfoTest = f64; impl IsCached for AccountInfoTest { fn is_cached(&self) -> bool { true } } impl ZeroLamport for AccountInfoTest { fn is_zero_lamport(&self) -> bool { true } } #[test] fn test_insert_new_with_lock_no_ancestors() { let key = Keypair::new(); let pubkey = &key.pubkey(); let slot = 0; let index = AccountsIndex::<bool>::default(); let account_info = true; let items = vec![(pubkey, account_info)]; index.insert_new_if_missing_into_primary_index(slot, items.len(), items.into_iter()); let mut ancestors = Ancestors::default(); assert!(index.get(pubkey, Some(&ancestors), None).is_none()); assert!(index.get(pubkey, None, None).is_none()); let mut num = 0; index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 0); ancestors.insert(slot, 0); assert!(index.get(pubkey, Some(&ancestors), None).is_some()); assert_eq!(index.ref_count_from_storage(pubkey), 1); index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 1); // not zero lamports let index = AccountsIndex::<AccountInfoTest>::default(); let account_info: AccountInfoTest = 0 as AccountInfoTest; let items = vec![(pubkey, account_info)]; index.insert_new_if_missing_into_primary_index(slot, items.len(), items.into_iter()); let mut ancestors = Ancestors::default(); assert!(index.get(pubkey, Some(&ancestors), None).is_none()); assert!(index.get(pubkey, None, None).is_none()); let mut num = 0; index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 0); ancestors.insert(slot, 0); assert!(index.get(pubkey, Some(&ancestors), None).is_some()); assert_eq!(index.ref_count_from_storage(pubkey), 0); // cached, so 0 index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 1); } #[test] fn test_new_entry() { let slot = 0; // account_info type that IS cached let account_info = AccountInfoTest::default(); let new_entry = WriteAccountMapEntry::new_entry_after_update(slot, account_info); assert_eq!(new_entry.ref_count.load(Ordering::Relaxed), 0); assert_eq!(new_entry.slot_list.read().unwrap().capacity(), 1); assert_eq!( new_entry.slot_list.read().unwrap().to_vec(), vec![(slot, account_info)] ); // account_info type that is NOT cached let account_info = true; let new_entry = WriteAccountMapEntry::new_entry_after_update(slot, account_info); assert_eq!(new_entry.ref_count.load(Ordering::Relaxed), 1); assert_eq!(new_entry.slot_list.read().unwrap().capacity(), 1); assert_eq!( new_entry.slot_list.read().unwrap().to_vec(), vec![(slot, account_info)] ); } #[test] fn test_batch_insert() { let slot0 = 0; let key0 = Keypair::new().pubkey(); let key1 = Keypair::new().pubkey(); let index = AccountsIndex::<bool>::default(); let account_infos = [true, false]; let items = vec![(&key0, account_infos[0]), (&key1, account_infos[1])]; index.insert_new_if_missing_into_primary_index(slot0, items.len(), items.into_iter()); for (i, key) in [key0, key1].iter().enumerate() { let entry = index.get_account_read_entry(key).unwrap(); assert_eq!(entry.ref_count().load(Ordering::Relaxed), 1); assert_eq!(entry.slot_list().to_vec(), vec![(slot0, account_infos[i]),]); } } fn test_new_entry_code_paths_helper< T: 'static + Clone + IsCached + ZeroLamport + std::cmp::PartialEq + std::fmt::Debug, >( account_infos: [T; 2], is_cached: bool, 
upsert: bool, ) { let slot0 = 0; let slot1 = 1; let key = Keypair::new().pubkey(); let index = AccountsIndex::<T>::default(); let mut gc = Vec::new(); if upsert { // insert first entry for pubkey. This will use new_entry_after_update and not call update. index.upsert( slot0, &key, &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), account_infos[0].clone(), &mut gc, ); } else { let items = vec![(&key, account_infos[0].clone())]; index.insert_new_if_missing_into_primary_index(slot0, items.len(), items.into_iter()); } assert!(gc.is_empty()); // verify the added entry matches expected { let entry = index.get_account_read_entry(&key).unwrap(); assert_eq!( entry.ref_count().load(Ordering::Relaxed), if is_cached { 0 } else { 1 } ); let expected = vec![(slot0, account_infos[0].clone())]; assert_eq!(entry.slot_list().to_vec(), expected); let new_entry = WriteAccountMapEntry::new_entry_after_update(slot0, account_infos[0].clone()); assert_eq!( entry.slot_list().to_vec(), new_entry.slot_list.read().unwrap().to_vec(), ); } // insert second entry for pubkey. This will use update and NOT use new_entry_after_update. if upsert { index.upsert( slot1, &key, &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), account_infos[1].clone(), &mut gc, ); } else { let items = vec![(&key, account_infos[1].clone())]; index.insert_new_if_missing_into_primary_index(slot1, items.len(), items.into_iter()); } assert!(gc.is_empty()); for lock in &[false, true] { let read_lock = if *lock { Some(index.get_account_maps_read_lock()) } else { None }; let entry = if *lock { index .get_account_read_entry_with_lock(&key, read_lock.as_ref().unwrap()) .unwrap() } else { index.get_account_read_entry(&key).unwrap() }; assert_eq!( entry.ref_count().load(Ordering::Relaxed), if is_cached { 0 } else { 2 } ); assert_eq!( entry.slot_list().to_vec(), vec![ (slot0, account_infos[0].clone()), (slot1, account_infos[1].clone()) ] ); let new_entry = WriteAccountMapEntry::new_entry_after_update(slot1, account_infos[1].clone()); assert_eq!(entry.slot_list()[1], new_entry.slot_list.read().unwrap()[0],); } } #[test] fn test_new_entry_and_update_code_paths() { for is_upsert in &[false, true] { // account_info type that IS cached test_new_entry_code_paths_helper([1.0, 2.0], true, *is_upsert); // account_info type that is NOT cached test_new_entry_code_paths_helper([true, false], false, *is_upsert); } } #[test] fn test_insert_with_lock_no_ancestors() { let key = Keypair::new(); let index = AccountsIndex::<bool>::default(); let slot = 0; let account_info = true; let new_entry = WriteAccountMapEntry::new_entry_after_update(slot, account_info); let mut w_account_maps = index.get_account_maps_write_lock(); let write = index.insert_new_entry_if_missing_with_lock( &key.pubkey(), &mut w_account_maps, new_entry, ); assert!(write.is_none()); drop(w_account_maps); let mut ancestors = Ancestors::default(); assert!(index.get(&key.pubkey(), Some(&ancestors), None).is_none()); assert!(index.get(&key.pubkey(), None, None).is_none()); let mut num = 0; index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 0); ancestors.insert(slot, 0); assert!(index.get(&key.pubkey(), Some(&ancestors), None).is_some()); index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 1); } #[test] fn test_insert_wrong_ancestors() { let key = Keypair::new(); let index = AccountsIndex::<bool>::default(); let mut gc = Vec::new(); index.upsert( 0, &key.pubkey(), &Pubkey::default(), &[], 
&AccountSecondaryIndexes::default(), true, &mut gc, ); assert!(gc.is_empty()); let ancestors = vec![(1, 1)].into_iter().collect(); assert!(index.get(&key.pubkey(), Some(&ancestors), None).is_none()); let mut num = 0; index.unchecked_scan_accounts("", &ancestors, |_pubkey, _index| num += 1); assert_eq!(num, 0); } #[test] fn test_insert_with_ancestors() { let key = Keypair::new(); let index = AccountsIndex::<bool>::default(); let mut gc = Vec::new(); index.upsert( 0, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut gc, ); assert!(gc.is_empty()); let ancestors = vec![(0, 0)].into_iter().collect(); let (list, idx) = index.get(&key.pubkey(), Some(&ancestors), None).unwrap(); assert_eq!(list.slot_list()[idx], (0, true)); let mut num = 0; let mut found_key = false; index.unchecked_scan_accounts("", &ancestors, |pubkey, _index| { if pubkey == &key.pubkey() { found_key = true }; num += 1 }); assert_eq!(num, 1); assert!(found_key); } fn setup_accounts_index_keys(num_pubkeys: usize) -> (AccountsIndex<bool>, Vec<Pubkey>) { let index = AccountsIndex::<bool>::default(); let root_slot = 0; let mut pubkeys: Vec<Pubkey> = std::iter::repeat_with(|| { let new_pubkey = solana_sdk::pubkey::new_rand(); index.upsert( root_slot, &new_pubkey, &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut vec![], ); new_pubkey }) .take(num_pubkeys.saturating_sub(1)) .collect(); if num_pubkeys != 0 { pubkeys.push(Pubkey::default()); index.upsert( root_slot, &Pubkey::default(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut vec![], ); } index.add_root(root_slot, false); (index, pubkeys) } fn run_test_range( index: &AccountsIndex<bool>, pubkeys: &[Pubkey], start_bound: Bound<usize>, end_bound: Bound<usize>, ) { // Exclusive `index_start` let (pubkey_start, index_start) = match start_bound { Unbounded => (Unbounded, 0), Included(i) => (Included(pubkeys[i]), i), Excluded(i) => (Excluded(pubkeys[i]), i + 1), }; // Exclusive `index_end` let (pubkey_end, index_end) = match end_bound { Unbounded => (Unbounded, pubkeys.len()), Included(i) => (Included(pubkeys[i]), i + 1), Excluded(i) => (Excluded(pubkeys[i]), i), }; let pubkey_range = (pubkey_start, pubkey_end); let ancestors = Ancestors::default(); let mut scanned_keys = HashSet::new(); index.range_scan_accounts("", &ancestors, pubkey_range, |pubkey, _index| { scanned_keys.insert(*pubkey); }); let mut expected_len = 0; for key in &pubkeys[index_start..index_end] { expected_len += 1; assert!(scanned_keys.contains(key)); } assert_eq!(scanned_keys.len(), expected_len); } fn run_test_range_indexes( index: &AccountsIndex<bool>, pubkeys: &[Pubkey], start: Option<usize>, end: Option<usize>, ) { let start_options = start .map(|i| vec![Included(i), Excluded(i)]) .unwrap_or_else(|| vec![Unbounded]); let end_options = end .map(|i| vec![Included(i), Excluded(i)]) .unwrap_or_else(|| vec![Unbounded]); for start in &start_options { for end in &end_options { run_test_range(index, pubkeys, *start, *end); } } } #[test] fn test_range_scan_accounts() { let (index, mut pubkeys) = setup_accounts_index_keys(3 * ITER_BATCH_SIZE); pubkeys.sort(); run_test_range_indexes(&index, &pubkeys, None, None); run_test_range_indexes(&index, &pubkeys, Some(ITER_BATCH_SIZE), None); run_test_range_indexes(&index, &pubkeys, None, Some(2 * ITER_BATCH_SIZE as usize)); run_test_range_indexes( &index, &pubkeys, Some(ITER_BATCH_SIZE as usize), Some(2 * ITER_BATCH_SIZE as usize), ); run_test_range_indexes( &index, &pubkeys, Some(ITER_BATCH_SIZE 
as usize), Some(2 * ITER_BATCH_SIZE as usize - 1), ); run_test_range_indexes( &index, &pubkeys, Some(ITER_BATCH_SIZE - 1_usize), Some(2 * ITER_BATCH_SIZE as usize + 1), ); } fn run_test_scan_accounts(num_pubkeys: usize) { let (index, _) = setup_accounts_index_keys(num_pubkeys); let ancestors = Ancestors::default(); let mut scanned_keys = HashSet::new(); index.unchecked_scan_accounts("", &ancestors, |pubkey, _index| { scanned_keys.insert(*pubkey); }); assert_eq!(scanned_keys.len(), num_pubkeys); } #[test] fn test_scan_accounts() { run_test_scan_accounts(0); run_test_scan_accounts(1); run_test_scan_accounts(ITER_BATCH_SIZE * 10); run_test_scan_accounts(ITER_BATCH_SIZE * 10 - 1); run_test_scan_accounts(ITER_BATCH_SIZE * 10 + 1); } #[test] fn test_accounts_iter_finished() { let (index, _) = setup_accounts_index_keys(0); let mut iter = index.iter(None::<Range<Pubkey>>); assert!(iter.next().is_none()); let mut gc = vec![]; index.upsert( 0, &solana_sdk::pubkey::new_rand(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut gc, ); assert!(iter.next().is_none()); } #[test] fn test_is_root() { let index = AccountsIndex::<bool>::default(); assert!(!index.is_root(0)); index.add_root(0, false); assert!(index.is_root(0)); } #[test] fn test_insert_with_root() { let key = Keypair::new(); let index = AccountsIndex::<bool>::default(); let mut gc = Vec::new(); index.upsert( 0, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut gc, ); assert!(gc.is_empty()); index.add_root(0, false); let (list, idx) = index.get(&key.pubkey(), None, None).unwrap(); assert_eq!(list.slot_list()[idx], (0, true)); } #[test] fn test_clean_first() { let index = AccountsIndex::<bool>::default(); index.add_root(0, false); index.add_root(1, false); index.clean_dead_slot(0); assert!(index.is_root(1)); assert!(!index.is_root(0)); } #[test] fn test_clean_last() { //this behavior might be undefined, clean up should only occur on older slots let index = AccountsIndex::<bool>::default(); index.add_root(0, false); index.add_root(1, false); index.clean_dead_slot(1); assert!(!index.is_root(1)); assert!(index.is_root(0)); } #[test] fn test_clean_and_unclean_slot() { let index = AccountsIndex::<bool>::default(); assert_eq!(0, index.roots_tracker.read().unwrap().uncleaned_roots.len()); index.add_root(0, false); index.add_root(1, false); assert_eq!(2, index.roots_tracker.read().unwrap().uncleaned_roots.len()); assert_eq!( 0, index .roots_tracker .read() .unwrap() .previous_uncleaned_roots .len() ); index.reset_uncleaned_roots(None); assert_eq!(2, index.roots_tracker.read().unwrap().roots.len()); assert_eq!(0, index.roots_tracker.read().unwrap().uncleaned_roots.len()); assert_eq!( 2, index .roots_tracker .read() .unwrap() .previous_uncleaned_roots .len() ); index.add_root(2, false); index.add_root(3, false); assert_eq!(4, index.roots_tracker.read().unwrap().roots.len()); assert_eq!(2, index.roots_tracker.read().unwrap().uncleaned_roots.len()); assert_eq!( 2, index .roots_tracker .read() .unwrap() .previous_uncleaned_roots .len() ); index.clean_dead_slot(1); assert_eq!(3, index.roots_tracker.read().unwrap().roots.len()); assert_eq!(2, index.roots_tracker.read().unwrap().uncleaned_roots.len()); assert_eq!( 1, index .roots_tracker .read() .unwrap() .previous_uncleaned_roots .len() ); index.clean_dead_slot(2); assert_eq!(2, index.roots_tracker.read().unwrap().roots.len()); assert_eq!(1, index.roots_tracker.read().unwrap().uncleaned_roots.len()); assert_eq!( 1, index .roots_tracker .read() 
.unwrap() .previous_uncleaned_roots .len() ); } #[test] fn test_update_last_wins() { let key = Keypair::new(); let index = AccountsIndex::<bool>::default(); let ancestors = vec![(0, 0)].into_iter().collect(); let mut gc = Vec::new(); index.upsert( 0, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut gc, ); assert!(gc.is_empty()); let (list, idx) = index.get(&key.pubkey(), Some(&ancestors), None).unwrap(); assert_eq!(list.slot_list()[idx], (0, true)); drop(list); let mut gc = Vec::new(); index.upsert( 0, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), false, &mut gc, ); assert_eq!(gc, vec![(0, true)]); let (list, idx) = index.get(&key.pubkey(), Some(&ancestors), None).unwrap(); assert_eq!(list.slot_list()[idx], (0, false)); } #[test] fn test_update_new_slot() { solana_logger::setup(); let key = Keypair::new(); let index = AccountsIndex::<bool>::default(); let ancestors = vec![(0, 0)].into_iter().collect(); let mut gc = Vec::new(); index.upsert( 0, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut gc, ); assert!(gc.is_empty()); index.upsert( 1, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), false, &mut gc, ); assert!(gc.is_empty()); let (list, idx) = index.get(&key.pubkey(), Some(&ancestors), None).unwrap(); assert_eq!(list.slot_list()[idx], (0, true)); let ancestors = vec![(1, 0)].into_iter().collect(); let (list, idx) = index.get(&key.pubkey(), Some(&ancestors), None).unwrap(); assert_eq!(list.slot_list()[idx], (1, false)); } #[test] fn test_update_gc_purged_slot() { let key = Keypair::new(); let index = AccountsIndex::<bool>::default(); let mut gc = Vec::new(); index.upsert( 0, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut gc, ); assert!(gc.is_empty()); index.upsert( 1, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), false, &mut gc, ); index.upsert( 2, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut gc, ); index.upsert( 3, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut gc, ); index.add_root(0, false); index.add_root(1, false); index.add_root(3, false); index.upsert( 4, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), true, &mut gc, ); // Updating index should not purge older roots, only purges // previous updates within the same slot assert_eq!(gc, vec![]); let (list, idx) = index.get(&key.pubkey(), None, None).unwrap(); assert_eq!(list.slot_list()[idx], (3, true)); let mut num = 0; let mut found_key = false; index.unchecked_scan_accounts("", &Ancestors::default(), |pubkey, _index| { if pubkey == &key.pubkey() { found_key = true; assert_eq!(_index, (&true, 3)); }; num += 1 }); assert_eq!(num, 1); assert!(found_key); } #[test] fn test_purge() { let key = Keypair::new(); let index = AccountsIndex::<u64>::default(); let mut gc = Vec::new(); assert!(index.upsert( 1, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), 12, &mut gc )); assert!(!index.upsert( 1, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), 10, &mut gc )); let purges = index.purge_roots(&key.pubkey()); assert_eq!(purges, (vec![], false)); index.add_root(1, false); let purges = index.purge_roots(&key.pubkey()); assert_eq!(purges, (vec![(1, 10)], true)); assert!(!index.upsert( 1, &key.pubkey(), &Pubkey::default(), &[], &AccountSecondaryIndexes::default(), 9, &mut gc 
)); } #[test] fn test_latest_slot() { let slot_slice = vec![(0, true), (5, true), (3, true), (7, true)]; let index = AccountsIndex::<bool>::default(); // No ancestors, no root, should return None assert!(index.latest_slot(None, &slot_slice, None).is_none()); // Given a root, should return the root index.add_root(5, false); assert_eq!(index.latest_slot(None, &slot_slice, None).unwrap(), 1); // Given a max_root == root, should still return the root assert_eq!(index.latest_slot(None, &slot_slice, Some(5)).unwrap(), 1); // Given a max_root < root, should filter out the root assert!(index.latest_slot(None, &slot_slice, Some(4)).is_none()); // Given a max_root, should filter out roots < max_root, but specified // ancestors should not be affected let ancestors = vec![(3, 1), (7, 1)].into_iter().collect(); assert_eq!( index .latest_slot(Some(&ancestors), &slot_slice, Some(4)) .unwrap(), 3 ); assert_eq!( index .latest_slot(Some(&ancestors), &slot_slice, Some(7)) .unwrap(), 3 ); // Given no max_root, should just return the greatest ancestor or root assert_eq!( index .latest_slot(Some(&ancestors), &slot_slice, None) .unwrap(), 3 ); } fn run_test_purge_exact_secondary_index< SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send, >( index: &AccountsIndex<bool>, secondary_index: &SecondaryIndex<SecondaryIndexEntryType>, key_start: usize, key_end: usize, secondary_indexes: &AccountSecondaryIndexes, ) { // No roots, should be no reclaims let slots = vec![1, 2, 5, 9]; let index_key = Pubkey::new_unique(); let account_key = Pubkey::new_unique(); let mut account_data = vec![0; inline_spl_token_v2_0::state::Account::get_packed_len()]; account_data[key_start..key_end].clone_from_slice(&(index_key.to_bytes())); // Insert slots into secondary index for slot in &slots { index.upsert( *slot, &account_key, // Make sure these accounts are added to secondary index &inline_spl_token_v2_0::id(), &account_data, secondary_indexes, true, &mut vec![], ); } // Only one top level index entry exists assert_eq!(secondary_index.index.get(&index_key).unwrap().len(), 1); // In the reverse index, one account maps across multiple slots // to the same top level key assert_eq!( secondary_index .reverse_index .get(&account_key) .unwrap() .value() .read() .unwrap() .len(), 1 ); index.purge_exact( &account_key, &slots.into_iter().collect::<HashSet<Slot>>(), &mut vec![], ); index.handle_dead_keys(&[&account_key], secondary_indexes); assert!(secondary_index.index.is_empty()); assert!(secondary_index.reverse_index.is_empty()); } #[test] fn test_purge_exact_dashmap_secondary_index() { let (key_start, key_end, secondary_indexes) = create_dashmap_secondary_index_state(); let index = AccountsIndex::<bool>::default(); run_test_purge_exact_secondary_index( &index, &index.spl_token_mint_index, key_start, key_end, &secondary_indexes, ); } #[test] fn test_purge_exact_rwlock_secondary_index() { let (key_start, key_end, secondary_indexes) = create_rwlock_secondary_index_state(); let index = AccountsIndex::<bool>::default(); run_test_purge_exact_secondary_index( &index, &index.spl_token_owner_index, key_start, key_end, &secondary_indexes, ); } #[test] fn test_purge_older_root_entries() { // No roots, should be no reclaims let index = AccountsIndex::<bool>::default(); let mut slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; let mut reclaims = vec![]; index.purge_older_root_entries(&mut slot_list, &mut reclaims, None); assert!(reclaims.is_empty()); assert_eq!(slot_list, vec![(1, true), (2, true), (5, true), (9, true)]); // 
Add a later root, earlier slots should be reclaimed slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; index.add_root(1, false); // Note 2 is not a root index.add_root(5, false); reclaims = vec![]; index.purge_older_root_entries(&mut slot_list, &mut reclaims, None); assert_eq!(reclaims, vec![(1, true), (2, true)]); assert_eq!(slot_list, vec![(5, true), (9, true)]); // Add a later root that is not in the list, should not affect the outcome slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; index.add_root(6, false); reclaims = vec![]; index.purge_older_root_entries(&mut slot_list, &mut reclaims, None); assert_eq!(reclaims, vec![(1, true), (2, true)]); assert_eq!(slot_list, vec![(5, true), (9, true)]); // Pass a max root >= than any root in the slot list, should not affect // outcome slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; reclaims = vec![]; index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(6)); assert_eq!(reclaims, vec![(1, true), (2, true)]); assert_eq!(slot_list, vec![(5, true), (9, true)]); // Pass a max root, earlier slots should be reclaimed slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; reclaims = vec![]; index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(5)); assert_eq!(reclaims, vec![(1, true), (2, true)]); assert_eq!(slot_list, vec![(5, true), (9, true)]); // Pass a max root 2. This means the latest root < 2 is 1 because 2 is not a root // so nothing will be purged slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; reclaims = vec![]; index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(2)); assert!(reclaims.is_empty()); assert_eq!(slot_list, vec![(1, true), (2, true), (5, true), (9, true)]); // Pass a max root 1. This means the latest root < 3 is 1 because 2 is not a root // so nothing will be purged slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; reclaims = vec![]; index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(1)); assert!(reclaims.is_empty()); assert_eq!(slot_list, vec![(1, true), (2, true), (5, true), (9, true)]); // Pass a max root that doesn't exist in the list but is greater than // some of the roots in the list, shouldn't return those smaller roots slot_list = vec![(1, true), (2, true), (5, true), (9, true)]; reclaims = vec![]; index.purge_older_root_entries(&mut slot_list, &mut reclaims, Some(7)); assert_eq!(reclaims, vec![(1, true), (2, true)]); assert_eq!(slot_list, vec![(5, true), (9, true)]); } fn check_secondary_index_mapping_correct<SecondaryIndexEntryType>( secondary_index: &SecondaryIndex<SecondaryIndexEntryType>, secondary_index_keys: &[Pubkey], account_key: &Pubkey, ) where SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send, { // Check secondary index has unique mapping from secondary index key // to the account key and slot for secondary_index_key in secondary_index_keys { assert_eq!(secondary_index.index.len(), secondary_index_keys.len()); let account_key_map = secondary_index.get(secondary_index_key); assert_eq!(account_key_map.len(), 1); assert_eq!(account_key_map, vec![*account_key]); } // Check reverse index contains all of the `secondary_index_keys` let secondary_index_key_map = secondary_index.reverse_index.get(account_key).unwrap(); assert_eq!( &*secondary_index_key_map.value().read().unwrap(), secondary_index_keys ); } fn run_test_secondary_indexes< SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send, >( index: &AccountsIndex<bool>, secondary_index: 
&SecondaryIndex<SecondaryIndexEntryType>, key_start: usize, key_end: usize, secondary_indexes: &AccountSecondaryIndexes, ) { let mut secondary_indexes = secondary_indexes.clone(); let account_key = Pubkey::new_unique(); let index_key = Pubkey::new_unique(); let mut account_data = vec![0; inline_spl_token_v2_0::state::Account::get_packed_len()]; account_data[key_start..key_end].clone_from_slice(&(index_key.to_bytes())); // Wrong program id index.upsert( 0, &account_key, &Pubkey::default(), &account_data, &secondary_indexes, true, &mut vec![], ); assert!(secondary_index.index.is_empty()); assert!(secondary_index.reverse_index.is_empty()); // Wrong account data size index.upsert( 0, &account_key, &inline_spl_token_v2_0::id(), &account_data[1..], &secondary_indexes, true, &mut vec![], ); assert!(secondary_index.index.is_empty()); assert!(secondary_index.reverse_index.is_empty()); secondary_indexes.keys = None; // Just right. Inserting the same index multiple times should be ok for _ in 0..2 { index.update_secondary_indexes( &account_key, &inline_spl_token_v2_0::id(), &account_data, &secondary_indexes, ); check_secondary_index_mapping_correct(secondary_index, &[index_key], &account_key); } // included assert!(!secondary_index.index.is_empty()); assert!(!secondary_index.reverse_index.is_empty()); secondary_indexes.keys = Some(AccountSecondaryIndexesIncludeExclude { keys: [index_key].iter().cloned().collect::<HashSet<_>>(), exclude: false, }); secondary_index.index.clear(); secondary_index.reverse_index.clear(); index.update_secondary_indexes( &account_key, &inline_spl_token_v2_0::id(), &account_data, &secondary_indexes, ); assert!(!secondary_index.index.is_empty()); assert!(!secondary_index.reverse_index.is_empty()); check_secondary_index_mapping_correct(secondary_index, &[index_key], &account_key); // not-excluded secondary_indexes.keys = Some(AccountSecondaryIndexesIncludeExclude { keys: [].iter().cloned().collect::<HashSet<_>>(), exclude: true, }); secondary_index.index.clear(); secondary_index.reverse_index.clear(); index.update_secondary_indexes( &account_key, &inline_spl_token_v2_0::id(), &account_data, &secondary_indexes, ); assert!(!secondary_index.index.is_empty()); assert!(!secondary_index.reverse_index.is_empty()); check_secondary_index_mapping_correct(secondary_index, &[index_key], &account_key); secondary_indexes.keys = None; index .get_account_write_entry(&account_key) .unwrap() .slot_list_mut(|slot_list| slot_list.clear()); // Everything should be deleted index.handle_dead_keys(&[&account_key], &secondary_indexes); assert!(secondary_index.index.is_empty()); assert!(secondary_index.reverse_index.is_empty()); } #[test] fn test_dashmap_secondary_index() { let (key_start, key_end, secondary_indexes) = create_dashmap_secondary_index_state(); let index = AccountsIndex::<bool>::default(); run_test_secondary_indexes( &index, &index.spl_token_mint_index, key_start, key_end, &secondary_indexes, ); } #[test] fn test_rwlock_secondary_index() { let (key_start, key_end, secondary_indexes) = create_rwlock_secondary_index_state(); let index = AccountsIndex::<bool>::default(); run_test_secondary_indexes( &index, &index.spl_token_owner_index, key_start, key_end, &secondary_indexes, ); } fn run_test_secondary_indexes_same_slot_and_forks< SecondaryIndexEntryType: SecondaryIndexEntry + Default + Sync + Send, >( index: &AccountsIndex<bool>, secondary_index: &SecondaryIndex<SecondaryIndexEntryType>, index_key_start: usize, index_key_end: usize, secondary_indexes: &AccountSecondaryIndexes, ) { let 
account_key = Pubkey::new_unique(); let secondary_key1 = Pubkey::new_unique(); let secondary_key2 = Pubkey::new_unique(); let slot = 1; let mut account_data1 = vec![0; inline_spl_token_v2_0::state::Account::get_packed_len()]; account_data1[index_key_start..index_key_end] .clone_from_slice(&(secondary_key1.to_bytes())); let mut account_data2 = vec![0; inline_spl_token_v2_0::state::Account::get_packed_len()]; account_data2[index_key_start..index_key_end] .clone_from_slice(&(secondary_key2.to_bytes())); // First write one mint index index.upsert( slot, &account_key, &inline_spl_token_v2_0::id(), &account_data1, secondary_indexes, true, &mut vec![], ); // Now write a different mint index for the same account index.upsert( slot, &account_key, &inline_spl_token_v2_0::id(), &account_data2, secondary_indexes, true, &mut vec![], ); // Both pubkeys will now be present in the index check_secondary_index_mapping_correct( secondary_index, &[secondary_key1, secondary_key2], &account_key, ); // If a later slot also introduces secondary_key1, then it should still exist in the index let later_slot = slot + 1; index.upsert( later_slot, &account_key, &inline_spl_token_v2_0::id(), &account_data1, secondary_indexes, true, &mut vec![], ); assert_eq!(secondary_index.get(&secondary_key1), vec![account_key]); // If we set a root at `later_slot`, and clean, then even though the account with secondary_key1 // was outdated by the update in the later slot, the primary account key is still alive, // so both secondary keys will still be kept alive. index.add_root(later_slot, false); index .get_account_write_entry(&account_key) .unwrap() .slot_list_mut(|slot_list| { index.purge_older_root_entries(slot_list, &mut vec![], None) }); check_secondary_index_mapping_correct( secondary_index, &[secondary_key1, secondary_key2], &account_key, ); // Removing the remaining entry for this pubkey in the index should mark the // pubkey as dead and finally remove all the secondary indexes let mut reclaims = vec![]; index.purge_exact(&account_key, &later_slot, &mut reclaims); index.handle_dead_keys(&[&account_key], secondary_indexes); assert!(secondary_index.index.is_empty()); assert!(secondary_index.reverse_index.is_empty()); } #[test] fn test_dashmap_secondary_index_same_slot_and_forks() { let (key_start, key_end, account_index) = create_dashmap_secondary_index_state(); let index = AccountsIndex::<bool>::default(); run_test_secondary_indexes_same_slot_and_forks( &index, &index.spl_token_mint_index, key_start, key_end, &account_index, ); } #[test] fn test_rwlock_secondary_index_same_slot_and_forks() { let (key_start, key_end, account_index) = create_rwlock_secondary_index_state(); let index = AccountsIndex::<bool>::default(); run_test_secondary_indexes_same_slot_and_forks( &index, &index.spl_token_owner_index, key_start, key_end, &account_index, ); } impl ZeroLamport for bool { fn is_zero_lamport(&self) -> bool { false } } impl ZeroLamport for u64 { fn is_zero_lamport(&self) -> bool { false } } }
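// ---------------------------------------------------------------------------
// A minimal, self-contained sketch of the modulo-aliasing idea exercised by
// `test_bitfield_wrapping` and `bitfield_insert_and_test` above. This is NOT
// Solana's `RollingBitField`: it is an illustrative standalone toy (no
// `excess` map, no removal) showing why slot N and slot N + max_width share
// the same underlying bit, and why `contains` must range-check against
// min/max before looking at the bit. All names here are invented.
// ---------------------------------------------------------------------------
struct TinyRollingBitField {
    max_width: u64, // must be a power of two, as the real asserts require
    bits: Vec<bool>,
    min: u64,
    max: u64, // exclusive, mirroring the `max = slot + 1` convention above
    count: usize,
}

impl TinyRollingBitField {
    fn new(max_width: u64) -> Self {
        assert!(max_width > 0);
        assert!(max_width.is_power_of_two());
        Self {
            max_width,
            bits: vec![false; max_width as usize],
            min: 0,
            max: 0,
            count: 0,
        }
    }

    fn insert(&mut self, slot: u64) {
        let idx = (slot % self.max_width) as usize; // aliasing happens here
        if !self.bits[idx] {
            self.bits[idx] = true;
            self.count += 1;
        }
        if self.count == 1 {
            self.min = slot;
            self.max = slot + 1;
        } else {
            self.min = self.min.min(slot);
            self.max = self.max.max(slot + 1);
        }
    }

    fn contains(&self, slot: &u64) -> bool {
        // Without the range check, slot 0 would falsely match slot 4 below.
        *slot >= self.min && *slot < self.max && self.bits[(*slot % self.max_width) as usize]
    }
}

fn main() {
    let mut bitfield = TinyRollingBitField::new(4);
    bitfield.insert(2);
    bitfield.insert(3);
    bitfield.insert(4); // same underlying bit as slot 0
    assert!(bitfield.contains(&4));
    assert!(!bitfield.contains(&0)); // rejected by the min/max range check
    assert_eq!(bitfield.count, 3);
}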
34.971177
151
0.544564
23cee3a2681cb7e5e434908c18d4ddc40499e0da
9,467
#![feature(type_alias_impl_trait)] #![feature(is_sorted, thread_local, panic_info_message)] #![feature(slice_group_by)] #![feature(generic_associated_types)] #![feature(trait_alias)] #![feature(test)] #![feature(slice_partition_dedup)] #![feature(int_log)] // #![deny(warnings)] #![allow(dead_code)] extern crate alloc; extern crate test; mod assemble_pipeline; mod benchmarks; mod config; mod hashes; #[macro_use] mod utils; mod assembler; mod assembler_generic_dispatcher; mod cmd_utils; mod colors; mod io; mod pipeline_common; mod querier; mod querier_generic_dispatcher; mod query_pipeline; mod rolling; use backtrace::Backtrace; use std::cmp::max; use crate::assembler_generic_dispatcher::dispatch_assembler_hash_type; use crate::cmd_utils::{process_cmdutils, CmdUtilsArgs}; use crate::colors::default_colors_manager::DefaultColorsManager; use crate::colors::storage::run_length::RunLengthColorsSerializer; use crate::colors::storage::serializer::ColorsSerializer; use crate::colors::ColorIndexType; use crate::io::sequences_reader::FastaSequence; use crate::querier_generic_dispatcher::dispatch_querier_hash_type; use crate::utils::compressed_read::CompressedRead; use clap::arg_enum; use parallel_processor::enable_counters_logging; use parallel_processor::memory_data_size::MemoryDataSize; use rayon::ThreadPoolBuilder; use std::fs::create_dir_all; use std::io::Write; use std::panic; use std::path::PathBuf; use std::process::exit; use std::sync::atomic::{AtomicBool, Ordering}; use std::time::Duration; use structopt::StructOpt; arg_enum! { #[derive(Debug, PartialOrd, PartialEq)] pub enum AssemblerStartingStep { MinimizerBucketing = 0, KmersMerge = 1, HashesSorting = 2, LinksCompaction = 3, ReorganizeReads = 4, BuildUnitigs = 5 } } arg_enum! { #[derive(Debug, PartialOrd, PartialEq)] pub enum QuerierStartingStep { MinimizerBucketing = 0, KmersCounting = 1, } } arg_enum! 
{ #[derive(Copy, Clone, Debug, PartialOrd, PartialEq)] pub enum HashType { Auto = 0, SeqHash = 1, RabinKarp32 = 2, RabinKarp64 = 3, RabinKarp128 = 4 } } use crate::config::FLUSH_QUEUE_FACTOR; use crate::utils::compute_best_m; use parallel_processor::memory_fs::MemoryFs; use parallel_processor::phase_times_monitor::PHASES_TIMES_MONITOR; #[derive(StructOpt, Debug)] enum CliArgs { Build(AssemblerArgs), Matches(MatchesArgs), Query(QueryArgs), Utils(CmdUtilsArgs), } #[derive(StructOpt, Debug)] struct MatchesArgs { /// Input fasta file with associated colors file (in the same folder) input_file: PathBuf, /// Debug print matches of a color index match_color: ColorIndexType, } #[derive(StructOpt, Debug)] struct CommonArgs { /// Specifies the k-mers length #[structopt(short, default_value = "32")] pub klen: usize, /// Specifies the m-mers (minimizers) length, defaults to min(3, ceil((K + 2) / 3)) #[structopt(long)] pub mlen: Option<usize>, /// Directory for temporary files (default .temp_files) #[structopt(short = "t", long = "temp-dir", default_value = ".temp_files")] pub temp_dir: PathBuf, /// Keep intermediate temporary files for debugging purposes #[structopt(long = "keep-temp-files")] pub keep_temp_files: bool, #[structopt(short = "j", long, default_value = "16")] pub threads_count: usize, /// Hash type used to identify kmers #[structopt(short = "w", long, default_value = "Auto")] pub hash_type: HashType, /// Treats reverse complementary kmers as different #[structopt(short = "f", long)] pub forward_only: bool, /// Maximum memory usage (GB) #[structopt(short = "m", long, default_value = "2")] pub memory: f64, /// Use all the given memory before writing to disk #[structopt(short = "p", long = "prefer-memory")] pub prefer_memory: bool, /// The log2 of the number of buckets #[structopt(short = "b", long = "buckets-count-log")] pub buckets_count_log: Option<usize>, } #[derive(StructOpt, Debug)] struct AssemblerArgs { /// The input files pub input: Vec<PathBuf>, /// The lists of input files #[structopt(short = "l", long = "input-lists")] pub input_lists: Vec<PathBuf>, /// Enable colors #[structopt(short, long)] pub colors: bool, /// Minimum multiplicity required to keep a kmer #[structopt(short = "s", long = "min-multiplicity", default_value = "2")] pub min_multiplicity: usize, // /// Minimum correctness probability for each kmer (using fastq quality checks) // #[structopt(short = "q", long = "quality-threshold")] // pub quality_threshold: Option<f64>, #[structopt(short = "n", long, default_value = "0")] pub number: usize, #[structopt(short = "o", long = "output-file", default_value = "output.fasta.lz4")] pub output_file: PathBuf, #[structopt(long, default_value = "MinimizerBucketing")] pub step: AssemblerStartingStep, #[structopt(long = "last-step", default_value = "BuildUnitigs")] pub last_step: AssemblerStartingStep, #[structopt(flatten)] pub common_args: CommonArgs, } #[derive(StructOpt, Debug)] struct QueryArgs { /// The input graph pub input_graph: PathBuf, /// The input query as a .fasta file pub input_query: PathBuf, /// Enable colors #[structopt(short, long)] pub colors: bool, #[structopt(short = "o", long = "output-file", default_value = "output.csv")] pub output_file: PathBuf, #[structopt(short = "x", long, default_value = "MinimizerBucketing")] pub step: QuerierStartingStep, #[structopt(flatten)] pub common_args: CommonArgs, } static KEEP_FILES: AtomicBool = AtomicBool::new(false); static PREFER_MEMORY: AtomicBool = AtomicBool::new(false); // #[cfg(feature = "mem-analysis")] // use 
parallel_processor::debug_allocator::{debug_print_allocations, DebugAllocator}; // // #[cfg_attr(feature = "mem-analysis", global_allocator)] // #[cfg(feature = "mem-analysis")] // static DEBUG_ALLOCATOR: DebugAllocator = DebugAllocator::new(); fn initialize(args: &CommonArgs, out_file: &PathBuf) { // Increase the maximum allowed number of open files fdlimit::raise_fd_limit(); KEEP_FILES.store(args.keep_temp_files, Ordering::Relaxed); PREFER_MEMORY.store(args.prefer_memory, Ordering::Relaxed); ThreadPoolBuilder::new() .num_threads(args.threads_count) .thread_name(|i| format!("rayon-thread-{}", i)) .build_global() .unwrap(); create_dir_all(&args.temp_dir).unwrap(); enable_counters_logging( out_file.with_extension("stats.log"), Duration::from_millis(1000), |val| { val["phase"] = PHASES_TIMES_MONITOR.read().get_phase_desc().into(); }, ); MemoryFs::init( parallel_processor::memory_data_size::MemoryDataSize::from_bytes( (args.memory * (MemoryDataSize::OCTET_GIBIOCTET_FACTOR as f64)) as usize, ), FLUSH_QUEUE_FACTOR * args.threads_count, max(1, args.threads_count / 4), 32768, ); println!( "Using m: {} with k: {}", args.mlen.unwrap_or(compute_best_m(args.klen)), args.klen ) // #[cfg(feature = "mem-analysis")] // debug_print_allocations("/tmp/allocations", Duration::from_secs(5)); } fn main() { let args: CliArgs = CliArgs::from_args(); #[cfg(feature = "mem-analysis")] { parallel_processor::mem_tracker::init_memory_info(); parallel_processor::mem_tracker::start_info_logging(); } panic::set_hook(Box::new(move |info| { let stdout = std::io::stdout(); let mut _lock = stdout.lock(); let stderr = std::io::stderr(); let mut err_lock = stderr.lock(); let _ = writeln!( err_lock, "Thread panicked at location: {:?}", info.location() ); if let Some(message) = info.message() { let _ = writeln!(err_lock, "Error message: {}", message); } if let Some(s) = info.payload().downcast_ref::<&str>() { let _ = writeln!(err_lock, "Panic payload: {:?}", s); } println!("Backtrace: {:?}", Backtrace::new()); exit(1); })); match args { CliArgs::Build(args) => { initialize(&args.common_args, &args.output_file); if args.colors { dispatch_assembler_hash_type::<DefaultColorsManager>(args); } else { dispatch_assembler_hash_type::<()>(args); } } CliArgs::Matches(args) => { let colors_file = args.input_file.with_extension("colors.dat"); let colors = ColorsSerializer::<RunLengthColorsSerializer>::read_color( colors_file, args.match_color, ); for color in colors { println!("MATCH: {}", color); } } CliArgs::Query(args) => { initialize(&args.common_args, &args.output_file); if args.colors { dispatch_querier_hash_type::<DefaultColorsManager>(args); } else { dispatch_querier_hash_type::<()>(args); } } CliArgs::Utils(args) => { process_cmdutils(args); } } MemoryFs::terminate(); }
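// ---------------------------------------------------------------------------
// Illustrative standalone sketch (not part of this crate) of the StructOpt
// subcommand-dispatch pattern used by `CliArgs` above: per-subcommand
// variants, shared flags pulled in with `#[structopt(flatten)]`, and a single
// `match` in `main`. The GiB-to-bytes conversion mirrors the `MemoryFs::init`
// call, assuming `OCTET_GIBIOCTET_FACTOR` is 2^30.
// ---------------------------------------------------------------------------
use std::path::PathBuf;
use structopt::StructOpt;

#[derive(StructOpt, Debug)]
struct CommonOpts {
    /// Maximum memory usage (GB)
    #[structopt(short = "m", long, default_value = "2")]
    memory: f64,
}

#[derive(StructOpt, Debug)]
enum Cli {
    /// Build a graph from input files
    Build {
        input: Vec<PathBuf>,
        #[structopt(flatten)]
        common: CommonOpts,
    },
    /// Query an existing graph
    Query {
        input_graph: PathBuf,
        #[structopt(flatten)]
        common: CommonOpts,
    },
}

fn main() {
    match Cli::from_args() {
        Cli::Build { input, common } => {
            // GiB -> bytes, as passed to the memory file system at startup
            let bytes = (common.memory * (1u64 << 30) as f64) as usize;
            println!("building {:?} with a {} byte memory budget", input, bytes);
        }
        Cli::Query { input_graph, common } => {
            println!("querying {:?} ({} GB budget)", input_graph, common.memory);
        }
    }
}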
28.687879
87
0.64297
6a41aed7b37c7f92ee9133707190f72c7327efa1
1,267
use lazy_static::lazy_static;

use contract_ffi::value::U512;

use crate::{
    support::test_support::{ExecuteRequestBuilder, InMemoryWasmTestBuilder},
    test::{DEFAULT_ACCOUNT_ADDR, DEFAULT_GENESIS_CONFIG, DEFAULT_PAYMENT},
};

const CONTRACT_CHECK_SYSTEM_CONTRACT_UREFS_ACCESS_RIGHTS: &str =
    "check_system_contract_urefs_access_rights.wasm";
const CONTRACT_TRANSFER_PURSE_TO_ACCOUNT: &str = "transfer_purse_to_account.wasm";

const ACCOUNT_1_ADDR: [u8; 32] = [1u8; 32];

lazy_static! {
    static ref ACCOUNT_1_INITIAL_BALANCE: U512 = *DEFAULT_PAYMENT;
}

#[ignore]
#[test]
fn should_have_read_only_access_to_system_contract_urefs() {
    let mut builder = InMemoryWasmTestBuilder::default();

    let exec_request_1 = ExecuteRequestBuilder::standard(
        DEFAULT_ACCOUNT_ADDR,
        CONTRACT_TRANSFER_PURSE_TO_ACCOUNT,
        (ACCOUNT_1_ADDR, *ACCOUNT_1_INITIAL_BALANCE),
    )
    .build();

    let exec_request_2 = ExecuteRequestBuilder::standard(
        ACCOUNT_1_ADDR,
        CONTRACT_CHECK_SYSTEM_CONTRACT_UREFS_ACCESS_RIGHTS,
        (),
    )
    .build();

    builder
        .run_genesis(&DEFAULT_GENESIS_CONFIG)
        .exec(exec_request_1)
        .commit()
        .exec(exec_request_2)
        .commit()
        .expect_success();
}
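// ---------------------------------------------------------------------------
// Hedged sketch of the same builder chain reduced to a single deploy. Every
// call used here (`standard`, `build`, `run_genesis`, `exec`, `commit`,
// `expect_success`) appears in the test above; only the test name is
// hypothetical. The chain reads top to bottom: install genesis state,
// execute one deploy, commit its effects, then assert the deploy succeeded.
// ---------------------------------------------------------------------------
#[ignore]
#[test]
fn should_transfer_to_account_1_sketch() {
    let mut builder = InMemoryWasmTestBuilder::default();

    let transfer_request = ExecuteRequestBuilder::standard(
        DEFAULT_ACCOUNT_ADDR,
        CONTRACT_TRANSFER_PURSE_TO_ACCOUNT,
        (ACCOUNT_1_ADDR, *ACCOUNT_1_INITIAL_BALANCE),
    )
    .build();

    builder
        .run_genesis(&DEFAULT_GENESIS_CONFIG)
        .exec(transfer_request)
        .commit()
        .expect_success();
}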
27.543478
82
0.722968
03def465406ff2d321fd0df82a0a15307be94d53
71,786
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. #[derive(Debug)] pub(crate) struct Handle { pub(crate) client: aws_smithy_client::Client< aws_smithy_client::erase::DynConnector, aws_smithy_client::erase::DynMiddleware<aws_smithy_client::erase::DynConnector>, >, pub(crate) conf: crate::Config, } /// Client for AWS Savings Plans /// /// Client for invoking operations on AWS Savings Plans. Each operation on AWS Savings Plans is a method on this /// this struct. `.send()` MUST be invoked on the generated operations to dispatch the request to the service. /// /// # Examples /// **Constructing a client and invoking an operation** /// ```rust,no_run /// # async fn docs() { /// // create a shared configuration. This can be used & shared between multiple service clients. /// let shared_config = aws_config::load_from_env().await; /// let client = aws_sdk_savingsplans::Client::new(&shared_config); /// // invoke an operation /// /* let rsp = client /// .<operation_name>(). /// .<param>("some value") /// .send().await; */ /// # } /// ``` /// **Constructing a client with custom configuration** /// ```rust,no_run /// use aws_config::RetryConfig; /// # async fn docs() { /// let shared_config = aws_config::load_from_env().await; /// let config = aws_sdk_savingsplans::config::Builder::from(&shared_config) /// .retry_config(RetryConfig::disabled()) /// .build(); /// let client = aws_sdk_savingsplans::Client::from_conf(config); /// # } #[derive(std::fmt::Debug)] pub struct Client { handle: std::sync::Arc<Handle>, } impl std::clone::Clone for Client { fn clone(&self) -> Self { Self { handle: self.handle.clone(), } } } #[doc(inline)] pub use aws_smithy_client::Builder; impl From< aws_smithy_client::Client< aws_smithy_client::erase::DynConnector, aws_smithy_client::erase::DynMiddleware<aws_smithy_client::erase::DynConnector>, >, > for Client { fn from( client: aws_smithy_client::Client< aws_smithy_client::erase::DynConnector, aws_smithy_client::erase::DynMiddleware<aws_smithy_client::erase::DynConnector>, >, ) -> Self { Self::with_config(client, crate::Config::builder().build()) } } impl Client { /// Creates a client with the given service configuration. pub fn with_config( client: aws_smithy_client::Client< aws_smithy_client::erase::DynConnector, aws_smithy_client::erase::DynMiddleware<aws_smithy_client::erase::DynConnector>, >, conf: crate::Config, ) -> Self { Self { handle: std::sync::Arc::new(Handle { client, conf }), } } /// Returns the client's configuration. pub fn conf(&self) -> &crate::Config { &self.handle.conf } } impl Client { /// Constructs a fluent builder for the [`CreateSavingsPlan`](crate::client::fluent_builders::CreateSavingsPlan) operation. /// /// - The fluent builder is configurable: /// - [`savings_plan_offering_id(impl Into<String>)`](crate::client::fluent_builders::CreateSavingsPlan::savings_plan_offering_id) / [`set_savings_plan_offering_id(Option<String>)`](crate::client::fluent_builders::CreateSavingsPlan::set_savings_plan_offering_id): <p>The ID of the offering.</p> /// - [`commitment(impl Into<String>)`](crate::client::fluent_builders::CreateSavingsPlan::commitment) / [`set_commitment(Option<String>)`](crate::client::fluent_builders::CreateSavingsPlan::set_commitment): <p>The hourly commitment, in USD. This is a value between 0.001 and 1 million. 
You cannot specify more than five digits after the decimal point.</p> /// - [`upfront_payment_amount(impl Into<String>)`](crate::client::fluent_builders::CreateSavingsPlan::upfront_payment_amount) / [`set_upfront_payment_amount(Option<String>)`](crate::client::fluent_builders::CreateSavingsPlan::set_upfront_payment_amount): <p>The up-front payment amount. This is a whole number between 50 and 99 percent of the total value of the Savings Plan. This parameter is supported only if the payment option is <code>Partial Upfront</code>.</p> /// - [`purchase_time(DateTime)`](crate::client::fluent_builders::CreateSavingsPlan::purchase_time) / [`set_purchase_time(Option<DateTime>)`](crate::client::fluent_builders::CreateSavingsPlan::set_purchase_time): <p>The time at which to purchase the Savings Plan, in UTC format (YYYY-MM-DDTHH:MM:SSZ).</p> /// - [`client_token(impl Into<String>)`](crate::client::fluent_builders::CreateSavingsPlan::client_token) / [`set_client_token(Option<String>)`](crate::client::fluent_builders::CreateSavingsPlan::set_client_token): <p>Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.</p> /// - [`tags(HashMap<String, String>)`](crate::client::fluent_builders::CreateSavingsPlan::tags) / [`set_tags(Option<HashMap<String, String>>)`](crate::client::fluent_builders::CreateSavingsPlan::set_tags): <p>One or more tags.</p> /// - On success, responds with [`CreateSavingsPlanOutput`](crate::output::CreateSavingsPlanOutput) with field(s): /// - [`savings_plan_id(Option<String>)`](crate::output::CreateSavingsPlanOutput::savings_plan_id): <p>The ID of the Savings Plan.</p> /// - On failure, responds with [`SdkError<CreateSavingsPlanError>`](crate::error::CreateSavingsPlanError) pub fn create_savings_plan(&self) -> fluent_builders::CreateSavingsPlan { fluent_builders::CreateSavingsPlan::new(self.handle.clone()) } /// Constructs a fluent builder for the [`DeleteQueuedSavingsPlan`](crate::client::fluent_builders::DeleteQueuedSavingsPlan) operation. /// /// - The fluent builder is configurable: /// - [`savings_plan_id(impl Into<String>)`](crate::client::fluent_builders::DeleteQueuedSavingsPlan::savings_plan_id) / [`set_savings_plan_id(Option<String>)`](crate::client::fluent_builders::DeleteQueuedSavingsPlan::set_savings_plan_id): <p>The ID of the Savings Plan.</p> /// - On success, responds with [`DeleteQueuedSavingsPlanOutput`](crate::output::DeleteQueuedSavingsPlanOutput) /// - On failure, responds with [`SdkError<DeleteQueuedSavingsPlanError>`](crate::error::DeleteQueuedSavingsPlanError) pub fn delete_queued_savings_plan(&self) -> fluent_builders::DeleteQueuedSavingsPlan { fluent_builders::DeleteQueuedSavingsPlan::new(self.handle.clone()) } /// Constructs a fluent builder for the [`DescribeSavingsPlanRates`](crate::client::fluent_builders::DescribeSavingsPlanRates) operation. 
/// /// - The fluent builder is configurable: /// - [`savings_plan_id(impl Into<String>)`](crate::client::fluent_builders::DescribeSavingsPlanRates::savings_plan_id) / [`set_savings_plan_id(Option<String>)`](crate::client::fluent_builders::DescribeSavingsPlanRates::set_savings_plan_id): <p>The ID of the Savings Plan.</p> /// - [`filters(Vec<SavingsPlanRateFilter>)`](crate::client::fluent_builders::DescribeSavingsPlanRates::filters) / [`set_filters(Option<Vec<SavingsPlanRateFilter>>)`](crate::client::fluent_builders::DescribeSavingsPlanRates::set_filters): <p>The filters.</p> /// - [`next_token(impl Into<String>)`](crate::client::fluent_builders::DescribeSavingsPlanRates::next_token) / [`set_next_token(Option<String>)`](crate::client::fluent_builders::DescribeSavingsPlanRates::set_next_token): <p>The token for the next page of results.</p> /// - [`max_results(i32)`](crate::client::fluent_builders::DescribeSavingsPlanRates::max_results) / [`set_max_results(Option<i32>)`](crate::client::fluent_builders::DescribeSavingsPlanRates::set_max_results): <p>The maximum number of results to return with a single call. To retrieve additional results, make another call with the returned token value.</p> /// - On success, responds with [`DescribeSavingsPlanRatesOutput`](crate::output::DescribeSavingsPlanRatesOutput) with field(s): /// - [`savings_plan_id(Option<String>)`](crate::output::DescribeSavingsPlanRatesOutput::savings_plan_id): <p>The ID of the Savings Plan.</p> /// - [`search_results(Option<Vec<SavingsPlanRate>>)`](crate::output::DescribeSavingsPlanRatesOutput::search_results): <p>Information about the Savings Plans rates.</p> /// - [`next_token(Option<String>)`](crate::output::DescribeSavingsPlanRatesOutput::next_token): <p>The token to use to retrieve the next page of results. This value is null when there are no more results to return.</p> /// - On failure, responds with [`SdkError<DescribeSavingsPlanRatesError>`](crate::error::DescribeSavingsPlanRatesError) pub fn describe_savings_plan_rates(&self) -> fluent_builders::DescribeSavingsPlanRates { fluent_builders::DescribeSavingsPlanRates::new(self.handle.clone()) } /// Constructs a fluent builder for the [`DescribeSavingsPlans`](crate::client::fluent_builders::DescribeSavingsPlans) operation. /// /// - The fluent builder is configurable: /// - [`savings_plan_arns(Vec<String>)`](crate::client::fluent_builders::DescribeSavingsPlans::savings_plan_arns) / [`set_savings_plan_arns(Option<Vec<String>>)`](crate::client::fluent_builders::DescribeSavingsPlans::set_savings_plan_arns): <p>The Amazon Resource Names (ARN) of the Savings Plans.</p> /// - [`savings_plan_ids(Vec<String>)`](crate::client::fluent_builders::DescribeSavingsPlans::savings_plan_ids) / [`set_savings_plan_ids(Option<Vec<String>>)`](crate::client::fluent_builders::DescribeSavingsPlans::set_savings_plan_ids): <p>The IDs of the Savings Plans.</p> /// - [`next_token(impl Into<String>)`](crate::client::fluent_builders::DescribeSavingsPlans::next_token) / [`set_next_token(Option<String>)`](crate::client::fluent_builders::DescribeSavingsPlans::set_next_token): <p>The token for the next page of results.</p> /// - [`max_results(i32)`](crate::client::fluent_builders::DescribeSavingsPlans::max_results) / [`set_max_results(Option<i32>)`](crate::client::fluent_builders::DescribeSavingsPlans::set_max_results): <p>The maximum number of results to return with a single call. 
To retrieve additional results, make another call with the returned token value.</p> /// - [`states(Vec<SavingsPlanState>)`](crate::client::fluent_builders::DescribeSavingsPlans::states) / [`set_states(Option<Vec<SavingsPlanState>>)`](crate::client::fluent_builders::DescribeSavingsPlans::set_states): <p>The states.</p> /// - [`filters(Vec<SavingsPlanFilter>)`](crate::client::fluent_builders::DescribeSavingsPlans::filters) / [`set_filters(Option<Vec<SavingsPlanFilter>>)`](crate::client::fluent_builders::DescribeSavingsPlans::set_filters): <p>The filters.</p> /// - On success, responds with [`DescribeSavingsPlansOutput`](crate::output::DescribeSavingsPlansOutput) with field(s): /// - [`savings_plans(Option<Vec<SavingsPlan>>)`](crate::output::DescribeSavingsPlansOutput::savings_plans): <p>Information about the Savings Plans.</p> /// - [`next_token(Option<String>)`](crate::output::DescribeSavingsPlansOutput::next_token): <p>The token to use to retrieve the next page of results. This value is null when there are no more results to return.</p> /// - On failure, responds with [`SdkError<DescribeSavingsPlansError>`](crate::error::DescribeSavingsPlansError) pub fn describe_savings_plans(&self) -> fluent_builders::DescribeSavingsPlans { fluent_builders::DescribeSavingsPlans::new(self.handle.clone()) } /// Constructs a fluent builder for the [`DescribeSavingsPlansOfferingRates`](crate::client::fluent_builders::DescribeSavingsPlansOfferingRates) operation. /// /// - The fluent builder is configurable: /// - [`savings_plan_offering_ids(Vec<String>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferingRates::savings_plan_offering_ids) / [`set_savings_plan_offering_ids(Option<Vec<String>>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferingRates::set_savings_plan_offering_ids): <p>The IDs of the offerings.</p> /// - [`savings_plan_payment_options(Vec<SavingsPlanPaymentOption>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferingRates::savings_plan_payment_options) / [`set_savings_plan_payment_options(Option<Vec<SavingsPlanPaymentOption>>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferingRates::set_savings_plan_payment_options): <p>The payment options.</p> /// - [`savings_plan_types(Vec<SavingsPlanType>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferingRates::savings_plan_types) / [`set_savings_plan_types(Option<Vec<SavingsPlanType>>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferingRates::set_savings_plan_types): <p>The plan types.</p> /// - [`products(Vec<SavingsPlanProductType>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferingRates::products) / [`set_products(Option<Vec<SavingsPlanProductType>>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferingRates::set_products): <p>The AWS products.</p> /// - [`service_codes(Vec<SavingsPlanRateServiceCode>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferingRates::service_codes) / [`set_service_codes(Option<Vec<SavingsPlanRateServiceCode>>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferingRates::set_service_codes): <p>The services.</p> /// - [`usage_types(Vec<String>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferingRates::usage_types) / [`set_usage_types(Option<Vec<String>>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferingRates::set_usage_types): <p>The usage details of the line item in the billing report.</p> /// - [`operations(Vec<String>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferingRates::operations) 
/ [`set_operations(Option<Vec<String>>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferingRates::set_operations): <p>The specific AWS operation for the line item in the billing report.</p> /// - [`filters(Vec<SavingsPlanOfferingRateFilterElement>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferingRates::filters) / [`set_filters(Option<Vec<SavingsPlanOfferingRateFilterElement>>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferingRates::set_filters): <p>The filters.</p> /// - [`next_token(impl Into<String>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferingRates::next_token) / [`set_next_token(Option<String>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferingRates::set_next_token): <p>The token for the next page of results.</p> /// - [`max_results(i32)`](crate::client::fluent_builders::DescribeSavingsPlansOfferingRates::max_results) / [`set_max_results(Option<i32>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferingRates::set_max_results): <p>The maximum number of results to return with a single call. To retrieve additional results, make another call with the returned token value.</p> /// - On success, responds with [`DescribeSavingsPlansOfferingRatesOutput`](crate::output::DescribeSavingsPlansOfferingRatesOutput) with field(s): /// - [`search_results(Option<Vec<SavingsPlanOfferingRate>>)`](crate::output::DescribeSavingsPlansOfferingRatesOutput::search_results): <p>Information about the Savings Plans offering rates.</p> /// - [`next_token(Option<String>)`](crate::output::DescribeSavingsPlansOfferingRatesOutput::next_token): <p>The token to use to retrieve the next page of results. This value is null when there are no more results to return.</p> /// - On failure, responds with [`SdkError<DescribeSavingsPlansOfferingRatesError>`](crate::error::DescribeSavingsPlansOfferingRatesError) pub fn describe_savings_plans_offering_rates( &self, ) -> fluent_builders::DescribeSavingsPlansOfferingRates { fluent_builders::DescribeSavingsPlansOfferingRates::new(self.handle.clone()) } /// Constructs a fluent builder for the [`DescribeSavingsPlansOfferings`](crate::client::fluent_builders::DescribeSavingsPlansOfferings) operation.
/// /// - The fluent builder is configurable: /// - [`offering_ids(Vec<String>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::offering_ids) / [`set_offering_ids(Option<Vec<String>>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::set_offering_ids): <p>The IDs of the offerings.</p> /// - [`payment_options(Vec<SavingsPlanPaymentOption>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::payment_options) / [`set_payment_options(Option<Vec<SavingsPlanPaymentOption>>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::set_payment_options): <p>The payment options.</p> /// - [`product_type(SavingsPlanProductType)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::product_type) / [`set_product_type(Option<SavingsPlanProductType>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::set_product_type): <p>The product type.</p> /// - [`plan_types(Vec<SavingsPlanType>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::plan_types) / [`set_plan_types(Option<Vec<SavingsPlanType>>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::set_plan_types): <p>The plan type.</p> /// - [`durations(Vec<i64>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::durations) / [`set_durations(Option<Vec<i64>>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::set_durations): <p>The durations, in seconds.</p> /// - [`currencies(Vec<CurrencyCode>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::currencies) / [`set_currencies(Option<Vec<CurrencyCode>>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::set_currencies): <p>The currencies.</p> /// - [`descriptions(Vec<String>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::descriptions) / [`set_descriptions(Option<Vec<String>>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::set_descriptions): <p>The descriptions.</p> /// - [`service_codes(Vec<String>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::service_codes) / [`set_service_codes(Option<Vec<String>>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::set_service_codes): <p>The services.</p> /// - [`usage_types(Vec<String>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::usage_types) / [`set_usage_types(Option<Vec<String>>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::set_usage_types): <p>The usage details of the line item in the billing report.</p> /// - [`operations(Vec<String>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::operations) / [`set_operations(Option<Vec<String>>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::set_operations): <p>The specific AWS operation for the line item in the billing report.</p> /// - [`filters(Vec<SavingsPlanOfferingFilterElement>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::filters) / [`set_filters(Option<Vec<SavingsPlanOfferingFilterElement>>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::set_filters): <p>The filters.</p> /// - [`next_token(impl Into<String>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::next_token) / [`set_next_token(Option<String>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::set_next_token): <p>The token for the next page of results.</p> /// - [`max_results(i32)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::max_results) / 
[`set_max_results(Option<i32>)`](crate::client::fluent_builders::DescribeSavingsPlansOfferings::set_max_results): <p>The maximum number of results to return with a single call. To retrieve additional results, make another call with the returned token value.</p> /// - On success, responds with [`DescribeSavingsPlansOfferingsOutput`](crate::output::DescribeSavingsPlansOfferingsOutput) with field(s): /// - [`search_results(Option<Vec<SavingsPlanOffering>>)`](crate::output::DescribeSavingsPlansOfferingsOutput::search_results): <p>Information about the Savings Plans offerings.</p> /// - [`next_token(Option<String>)`](crate::output::DescribeSavingsPlansOfferingsOutput::next_token): <p>The token to use to retrieve the next page of results. This value is null when there are no more results to return.</p> /// - On failure, responds with [`SdkError<DescribeSavingsPlansOfferingsError>`](crate::error::DescribeSavingsPlansOfferingsError) pub fn describe_savings_plans_offerings( &self, ) -> fluent_builders::DescribeSavingsPlansOfferings { fluent_builders::DescribeSavingsPlansOfferings::new(self.handle.clone()) } /// Constructs a fluent builder for the [`ListTagsForResource`](crate::client::fluent_builders::ListTagsForResource) operation. /// /// - The fluent builder is configurable: /// - [`resource_arn(impl Into<String>)`](crate::client::fluent_builders::ListTagsForResource::resource_arn) / [`set_resource_arn(Option<String>)`](crate::client::fluent_builders::ListTagsForResource::set_resource_arn): <p>The Amazon Resource Name (ARN) of the resource.</p> /// - On success, responds with [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput) with field(s): /// - [`tags(Option<HashMap<String, String>>)`](crate::output::ListTagsForResourceOutput::tags): <p>Information about the tags.</p> /// - On failure, responds with [`SdkError<ListTagsForResourceError>`](crate::error::ListTagsForResourceError) pub fn list_tags_for_resource(&self) -> fluent_builders::ListTagsForResource { fluent_builders::ListTagsForResource::new(self.handle.clone()) } /// Constructs a fluent builder for the [`TagResource`](crate::client::fluent_builders::TagResource) operation. /// /// - The fluent builder is configurable: /// - [`resource_arn(impl Into<String>)`](crate::client::fluent_builders::TagResource::resource_arn) / [`set_resource_arn(Option<String>)`](crate::client::fluent_builders::TagResource::set_resource_arn): <p>The Amazon Resource Name (ARN) of the resource.</p> /// - [`tags(HashMap<String, String>)`](crate::client::fluent_builders::TagResource::tags) / [`set_tags(Option<HashMap<String, String>>)`](crate::client::fluent_builders::TagResource::set_tags): <p>One or more tags. For example, { "tags": {"key1":"value1", "key2":"value2"} }.</p> /// - On success, responds with [`TagResourceOutput`](crate::output::TagResourceOutput) /// - On failure, responds with [`SdkError<TagResourceError>`](crate::error::TagResourceError) pub fn tag_resource(&self) -> fluent_builders::TagResource { fluent_builders::TagResource::new(self.handle.clone()) } /// Constructs a fluent builder for the [`UntagResource`](crate::client::fluent_builders::UntagResource) operation.
/// /// - The fluent builder is configurable: /// - [`resource_arn(impl Into<String>)`](crate::client::fluent_builders::UntagResource::resource_arn) / [`set_resource_arn(Option<String>)`](crate::client::fluent_builders::UntagResource::set_resource_arn): <p>The Amazon Resource Name (ARN) of the resource.</p> /// - [`tag_keys(Vec<String>)`](crate::client::fluent_builders::UntagResource::tag_keys) / [`set_tag_keys(Option<Vec<String>>)`](crate::client::fluent_builders::UntagResource::set_tag_keys): <p>The tag keys.</p> /// - On success, responds with [`UntagResourceOutput`](crate::output::UntagResourceOutput) /// - On failure, responds with [`SdkError<UntagResourceError>`](crate::error::UntagResourceError) pub fn untag_resource(&self) -> fluent_builders::UntagResource { fluent_builders::UntagResource::new(self.handle.clone()) } } pub mod fluent_builders { //! //! Utilities to ergonomically construct a request to the service. //! //! Fluent builders are created through the [`Client`](crate::client::Client) by calling //! one of its operation methods. After parameters are set using the builder methods, //! the `send` method can be called to initiate the request. //! /// Fluent builder constructing a request to `CreateSavingsPlan`. /// /// <p>Creates a Savings Plan.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct CreateSavingsPlan { handle: std::sync::Arc<super::Handle>, inner: crate::input::create_savings_plan_input::Builder, } impl CreateSavingsPlan { /// Creates a new `CreateSavingsPlan`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::CreateSavingsPlanOutput, aws_smithy_http::result::SdkError<crate::error::CreateSavingsPlanError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The ID of the offering.</p> pub fn savings_plan_offering_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.savings_plan_offering_id(input.into()); self } /// <p>The ID of the offering.</p> pub fn set_savings_plan_offering_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_savings_plan_offering_id(input); self } /// <p>The hourly commitment, in USD. This is a value between 0.001 and 1 million. You cannot specify more than five digits after the decimal point.</p> pub fn commitment(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.commitment(input.into()); self } /// <p>The hourly commitment, in USD. This is a value between 0.001 and 1 million. You cannot specify more than five digits after the decimal point.</p> pub fn set_commitment(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_commitment(input); self } /// <p>The up-front payment amount.
This is a whole number between 50 and 99 percent of the total value of the Savings Plan. This parameter is supported only if the payment option is <code>Partial Upfront</code>.</p> pub fn upfront_payment_amount(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.upfront_payment_amount(input.into()); self } /// <p>The up-front payment amount. This is a whole number between 50 and 99 percent of the total value of the Savings Plan. This parameter is supported only if the payment option is <code>Partial Upfront</code>.</p> pub fn set_upfront_payment_amount( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_upfront_payment_amount(input); self } /// <p>The time at which to purchase the Savings Plan, in UTC format (YYYY-MM-DDTHH:MM:SSZ).</p> pub fn purchase_time(mut self, input: aws_smithy_types::DateTime) -> Self { self.inner = self.inner.purchase_time(input); self } /// <p>The time at which to purchase the Savings Plan, in UTC format (YYYY-MM-DDTHH:MM:SSZ).</p> pub fn set_purchase_time( mut self, input: std::option::Option<aws_smithy_types::DateTime>, ) -> Self { self.inner = self.inner.set_purchase_time(input); self } /// <p>Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.</p> pub fn client_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.client_token(input.into()); self } /// <p>Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.</p> pub fn set_client_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_client_token(input); self } /// Adds a key-value pair to `tags`. /// /// To override the contents of this collection use [`set_tags`](Self::set_tags). /// /// <p>One or more tags.</p> pub fn tags( mut self, k: impl Into<std::string::String>, v: impl Into<std::string::String>, ) -> Self { self.inner = self.inner.tags(k.into(), v.into()); self } /// <p>One or more tags.</p> pub fn set_tags( mut self, input: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, ) -> Self { self.inner = self.inner.set_tags(input); self } } /// Fluent builder constructing a request to `DeleteQueuedSavingsPlan`. /// /// <p>Deletes the queued purchase for the specified Savings Plan.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DeleteQueuedSavingsPlan { handle: std::sync::Arc<super::Handle>, inner: crate::input::delete_queued_savings_plan_input::Builder, } impl DeleteQueuedSavingsPlan { /// Creates a new `DeleteQueuedSavingsPlan`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::DeleteQueuedSavingsPlanOutput, aws_smithy_http::result::SdkError<crate::error::DeleteQueuedSavingsPlanError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? 
.make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The ID of the Savings Plan.</p> pub fn savings_plan_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.savings_plan_id(input.into()); self } /// <p>The ID of the Savings Plan.</p> pub fn set_savings_plan_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_savings_plan_id(input); self } } /// Fluent builder constructing a request to `DescribeSavingsPlanRates`. /// /// <p>Describes the specified Savings Plans rates.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DescribeSavingsPlanRates { handle: std::sync::Arc<super::Handle>, inner: crate::input::describe_savings_plan_rates_input::Builder, } impl DescribeSavingsPlanRates { /// Creates a new `DescribeSavingsPlanRates`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::DescribeSavingsPlanRatesOutput, aws_smithy_http::result::SdkError<crate::error::DescribeSavingsPlanRatesError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The ID of the Savings Plan.</p> pub fn savings_plan_id(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.savings_plan_id(input.into()); self } /// <p>The ID of the Savings Plan.</p> pub fn set_savings_plan_id( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_savings_plan_id(input); self } /// Appends an item to `filters`. /// /// To override the contents of this collection use [`set_filters`](Self::set_filters). /// /// <p>The filters.</p> pub fn filters(mut self, input: crate::model::SavingsPlanRateFilter) -> Self { self.inner = self.inner.filters(input); self } /// <p>The filters.</p> pub fn set_filters( mut self, input: std::option::Option<std::vec::Vec<crate::model::SavingsPlanRateFilter>>, ) -> Self { self.inner = self.inner.set_filters(input); self } /// <p>The token for the next page of results.</p> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_token(input.into()); self } /// <p>The token for the next page of results.</p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_token(input); self } /// <p>The maximum number of results to return with a single call. To retrieve additional results, make another call with the returned token value.</p> pub fn max_results(mut self, input: i32) -> Self { self.inner = self.inner.max_results(input); self } /// <p>The maximum number of results to return with a single call. 
To retrieve additional results, make another call with the returned token value.</p> pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_max_results(input); self } } /// Fluent builder constructing a request to `DescribeSavingsPlans`. /// /// <p>Describes the specified Savings Plans.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DescribeSavingsPlans { handle: std::sync::Arc<super::Handle>, inner: crate::input::describe_savings_plans_input::Builder, } impl DescribeSavingsPlans { /// Creates a new `DescribeSavingsPlans`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::DescribeSavingsPlansOutput, aws_smithy_http::result::SdkError<crate::error::DescribeSavingsPlansError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// Appends an item to `savingsPlanArns`. /// /// To override the contents of this collection use [`set_savings_plan_arns`](Self::set_savings_plan_arns). /// /// <p>The Amazon Resource Names (ARN) of the Savings Plans.</p> pub fn savings_plan_arns(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.savings_plan_arns(input.into()); self } /// <p>The Amazon Resource Names (ARN) of the Savings Plans.</p> pub fn set_savings_plan_arns( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.inner = self.inner.set_savings_plan_arns(input); self } /// Appends an item to `savingsPlanIds`. /// /// To override the contents of this collection use [`set_savings_plan_ids`](Self::set_savings_plan_ids). /// /// <p>The IDs of the Savings Plans.</p> pub fn savings_plan_ids(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.savings_plan_ids(input.into()); self } /// <p>The IDs of the Savings Plans.</p> pub fn set_savings_plan_ids( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.inner = self.inner.set_savings_plan_ids(input); self } /// <p>The token for the next page of results.</p> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_token(input.into()); self } /// <p>The token for the next page of results.</p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_token(input); self } /// <p>The maximum number of results to return with a single call. To retrieve additional results, make another call with the returned token value.</p> pub fn max_results(mut self, input: i32) -> Self { self.inner = self.inner.max_results(input); self } /// <p>The maximum number of results to return with a single call. 
To retrieve additional results, make another call with the returned token value.</p> pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_max_results(input); self } /// Appends an item to `states`. /// /// To override the contents of this collection use [`set_states`](Self::set_states). /// /// <p>The states.</p> pub fn states(mut self, input: crate::model::SavingsPlanState) -> Self { self.inner = self.inner.states(input); self } /// <p>The states.</p> pub fn set_states( mut self, input: std::option::Option<std::vec::Vec<crate::model::SavingsPlanState>>, ) -> Self { self.inner = self.inner.set_states(input); self } /// Appends an item to `filters`. /// /// To override the contents of this collection use [`set_filters`](Self::set_filters). /// /// <p>The filters.</p> pub fn filters(mut self, input: crate::model::SavingsPlanFilter) -> Self { self.inner = self.inner.filters(input); self } /// <p>The filters.</p> pub fn set_filters( mut self, input: std::option::Option<std::vec::Vec<crate::model::SavingsPlanFilter>>, ) -> Self { self.inner = self.inner.set_filters(input); self } } /// Fluent builder constructing a request to `DescribeSavingsPlansOfferingRates`. /// /// <p>Describes the specified Savings Plans offering rates.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DescribeSavingsPlansOfferingRates { handle: std::sync::Arc<super::Handle>, inner: crate::input::describe_savings_plans_offering_rates_input::Builder, } impl DescribeSavingsPlansOfferingRates { /// Creates a new `DescribeSavingsPlansOfferingRates`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::DescribeSavingsPlansOfferingRatesOutput, aws_smithy_http::result::SdkError<crate::error::DescribeSavingsPlansOfferingRatesError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// Appends an item to `savingsPlanOfferingIds`. /// /// To override the contents of this collection use [`set_savings_plan_offering_ids`](Self::set_savings_plan_offering_ids). /// /// <p>The IDs of the offerings.</p> pub fn savings_plan_offering_ids(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.savings_plan_offering_ids(input.into()); self } /// <p>The IDs of the offerings.</p> pub fn set_savings_plan_offering_ids( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.inner = self.inner.set_savings_plan_offering_ids(input); self } /// Appends an item to `savingsPlanPaymentOptions`. /// /// To override the contents of this collection use [`set_savings_plan_payment_options`](Self::set_savings_plan_payment_options). 
/// /// <p>The payment options.</p> pub fn savings_plan_payment_options( mut self, input: crate::model::SavingsPlanPaymentOption, ) -> Self { self.inner = self.inner.savings_plan_payment_options(input); self } /// <p>The payment options.</p> pub fn set_savings_plan_payment_options( mut self, input: std::option::Option<std::vec::Vec<crate::model::SavingsPlanPaymentOption>>, ) -> Self { self.inner = self.inner.set_savings_plan_payment_options(input); self } /// Appends an item to `savingsPlanTypes`. /// /// To override the contents of this collection use [`set_savings_plan_types`](Self::set_savings_plan_types). /// /// <p>The plan types.</p> pub fn savings_plan_types(mut self, input: crate::model::SavingsPlanType) -> Self { self.inner = self.inner.savings_plan_types(input); self } /// <p>The plan types.</p> pub fn set_savings_plan_types( mut self, input: std::option::Option<std::vec::Vec<crate::model::SavingsPlanType>>, ) -> Self { self.inner = self.inner.set_savings_plan_types(input); self } /// Appends an item to `products`. /// /// To override the contents of this collection use [`set_products`](Self::set_products). /// /// <p>The AWS products.</p> pub fn products(mut self, input: crate::model::SavingsPlanProductType) -> Self { self.inner = self.inner.products(input); self } /// <p>The AWS products.</p> pub fn set_products( mut self, input: std::option::Option<std::vec::Vec<crate::model::SavingsPlanProductType>>, ) -> Self { self.inner = self.inner.set_products(input); self } /// Appends an item to `serviceCodes`. /// /// To override the contents of this collection use [`set_service_codes`](Self::set_service_codes). /// /// <p>The services.</p> pub fn service_codes(mut self, input: crate::model::SavingsPlanRateServiceCode) -> Self { self.inner = self.inner.service_codes(input); self } /// <p>The services.</p> pub fn set_service_codes( mut self, input: std::option::Option<std::vec::Vec<crate::model::SavingsPlanRateServiceCode>>, ) -> Self { self.inner = self.inner.set_service_codes(input); self } /// Appends an item to `usageTypes`. /// /// To override the contents of this collection use [`set_usage_types`](Self::set_usage_types). /// /// <p>The usage details of the line item in the billing report.</p> pub fn usage_types(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.usage_types(input.into()); self } /// <p>The usage details of the line item in the billing report.</p> pub fn set_usage_types( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.inner = self.inner.set_usage_types(input); self } /// Appends an item to `operations`. /// /// To override the contents of this collection use [`set_operations`](Self::set_operations). /// /// <p>The specific AWS operation for the line item in the billing report.</p> pub fn operations(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.operations(input.into()); self } /// <p>The specific AWS operation for the line item in the billing report.</p> pub fn set_operations( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.inner = self.inner.set_operations(input); self } /// Appends an item to `filters`. /// /// To override the contents of this collection use [`set_filters`](Self::set_filters). 
/// /// <p>The filters.</p> pub fn filters( mut self, input: crate::model::SavingsPlanOfferingRateFilterElement, ) -> Self { self.inner = self.inner.filters(input); self } /// <p>The filters.</p> pub fn set_filters( mut self, input: std::option::Option< std::vec::Vec<crate::model::SavingsPlanOfferingRateFilterElement>, >, ) -> Self { self.inner = self.inner.set_filters(input); self } /// <p>The token for the next page of results.</p> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_token(input.into()); self } /// <p>The token for the next page of results.</p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_token(input); self } /// <p>The maximum number of results to return with a single call. To retrieve additional results, make another call with the returned token value.</p> pub fn max_results(mut self, input: i32) -> Self { self.inner = self.inner.max_results(input); self } /// <p>The maximum number of results to return with a single call. To retrieve additional results, make another call with the returned token value.</p> pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_max_results(input); self } } /// Fluent builder constructing a request to `DescribeSavingsPlansOfferings`. /// /// <p>Describes the specified Savings Plans offerings.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct DescribeSavingsPlansOfferings { handle: std::sync::Arc<super::Handle>, inner: crate::input::describe_savings_plans_offerings_input::Builder, } impl DescribeSavingsPlansOfferings { /// Creates a new `DescribeSavingsPlansOfferings`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::DescribeSavingsPlansOfferingsOutput, aws_smithy_http::result::SdkError<crate::error::DescribeSavingsPlansOfferingsError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// Appends an item to `offeringIds`. /// /// To override the contents of this collection use [`set_offering_ids`](Self::set_offering_ids). /// /// <p>The IDs of the offerings.</p> pub fn offering_ids(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.offering_ids(input.into()); self } /// <p>The IDs of the offerings.</p> pub fn set_offering_ids( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.inner = self.inner.set_offering_ids(input); self } /// Appends an item to `paymentOptions`. /// /// To override the contents of this collection use [`set_payment_options`](Self::set_payment_options). 
/// /// <p>The payment options.</p> pub fn payment_options(mut self, input: crate::model::SavingsPlanPaymentOption) -> Self { self.inner = self.inner.payment_options(input); self } /// <p>The payment options.</p> pub fn set_payment_options( mut self, input: std::option::Option<std::vec::Vec<crate::model::SavingsPlanPaymentOption>>, ) -> Self { self.inner = self.inner.set_payment_options(input); self } /// <p>The product type.</p> pub fn product_type(mut self, input: crate::model::SavingsPlanProductType) -> Self { self.inner = self.inner.product_type(input); self } /// <p>The product type.</p> pub fn set_product_type( mut self, input: std::option::Option<crate::model::SavingsPlanProductType>, ) -> Self { self.inner = self.inner.set_product_type(input); self } /// Appends an item to `planTypes`. /// /// To override the contents of this collection use [`set_plan_types`](Self::set_plan_types). /// /// <p>The plan type.</p> pub fn plan_types(mut self, input: crate::model::SavingsPlanType) -> Self { self.inner = self.inner.plan_types(input); self } /// <p>The plan type.</p> pub fn set_plan_types( mut self, input: std::option::Option<std::vec::Vec<crate::model::SavingsPlanType>>, ) -> Self { self.inner = self.inner.set_plan_types(input); self } /// Appends an item to `durations`. /// /// To override the contents of this collection use [`set_durations`](Self::set_durations). /// /// <p>The durations, in seconds.</p> pub fn durations(mut self, input: i64) -> Self { self.inner = self.inner.durations(input); self } /// <p>The durations, in seconds.</p> pub fn set_durations(mut self, input: std::option::Option<std::vec::Vec<i64>>) -> Self { self.inner = self.inner.set_durations(input); self } /// Appends an item to `currencies`. /// /// To override the contents of this collection use [`set_currencies`](Self::set_currencies). /// /// <p>The currencies.</p> pub fn currencies(mut self, input: crate::model::CurrencyCode) -> Self { self.inner = self.inner.currencies(input); self } /// <p>The currencies.</p> pub fn set_currencies( mut self, input: std::option::Option<std::vec::Vec<crate::model::CurrencyCode>>, ) -> Self { self.inner = self.inner.set_currencies(input); self } /// Appends an item to `descriptions`. /// /// To override the contents of this collection use [`set_descriptions`](Self::set_descriptions). /// /// <p>The descriptions.</p> pub fn descriptions(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.descriptions(input.into()); self } /// <p>The descriptions.</p> pub fn set_descriptions( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.inner = self.inner.set_descriptions(input); self } /// Appends an item to `serviceCodes`. /// /// To override the contents of this collection use [`set_service_codes`](Self::set_service_codes). /// /// <p>The services.</p> pub fn service_codes(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.service_codes(input.into()); self } /// <p>The services.</p> pub fn set_service_codes( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.inner = self.inner.set_service_codes(input); self } /// Appends an item to `usageTypes`. /// /// To override the contents of this collection use [`set_usage_types`](Self::set_usage_types). 
/// /// <p>The usage details of the line item in the billing report.</p> pub fn usage_types(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.usage_types(input.into()); self } /// <p>The usage details of the line item in the billing report.</p> pub fn set_usage_types( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.inner = self.inner.set_usage_types(input); self } /// Appends an item to `operations`. /// /// To override the contents of this collection use [`set_operations`](Self::set_operations). /// /// <p>The specific AWS operation for the line item in the billing report.</p> pub fn operations(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.operations(input.into()); self } /// <p>The specific AWS operation for the line item in the billing report.</p> pub fn set_operations( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.inner = self.inner.set_operations(input); self } /// Appends an item to `filters`. /// /// To override the contents of this collection use [`set_filters`](Self::set_filters). /// /// <p>The filters.</p> pub fn filters(mut self, input: crate::model::SavingsPlanOfferingFilterElement) -> Self { self.inner = self.inner.filters(input); self } /// <p>The filters.</p> pub fn set_filters( mut self, input: std::option::Option< std::vec::Vec<crate::model::SavingsPlanOfferingFilterElement>, >, ) -> Self { self.inner = self.inner.set_filters(input); self } /// <p>The token for the next page of results.</p> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_token(input.into()); self } /// <p>The token for the next page of results.</p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_token(input); self } /// <p>The maximum number of results to return with a single call. To retrieve additional results, make another call with the returned token value.</p> pub fn max_results(mut self, input: i32) -> Self { self.inner = self.inner.max_results(input); self } /// <p>The maximum number of results to return with a single call. To retrieve additional results, make another call with the returned token value.</p> pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_max_results(input); self } } /// Fluent builder constructing a request to `ListTagsForResource`. /// /// <p>Lists the tags for the specified resource.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct ListTagsForResource { handle: std::sync::Arc<super::Handle>, inner: crate::input::list_tags_for_resource_input::Builder, } impl ListTagsForResource { /// Creates a new `ListTagsForResource`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::ListTagsForResourceOutput, aws_smithy_http::result::SdkError<crate::error::ListTagsForResourceError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The Amazon Resource Name (ARN) of the resource.</p> pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.resource_arn(input.into()); self } /// <p>The Amazon Resource Name (ARN) of the resource.</p> pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_resource_arn(input); self } } /// Fluent builder constructing a request to `TagResource`. /// /// <p>Adds the specified tags to the specified resource.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct TagResource { handle: std::sync::Arc<super::Handle>, inner: crate::input::tag_resource_input::Builder, } impl TagResource { /// Creates a new `TagResource`. pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::TagResourceOutput, aws_smithy_http::result::SdkError<crate::error::TagResourceError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The Amazon Resource Name (ARN) of the resource.</p> pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.resource_arn(input.into()); self } /// <p>The Amazon Resource Name (ARN) of the resource.</p> pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_resource_arn(input); self } /// Adds a key-value pair to `tags`. /// /// To override the contents of this collection use [`set_tags`](Self::set_tags). /// /// <p>One or more tags. For example, { "tags": {"key1":"value1", "key2":"value2"} }.</p> pub fn tags( mut self, k: impl Into<std::string::String>, v: impl Into<std::string::String>, ) -> Self { self.inner = self.inner.tags(k.into(), v.into()); self } /// <p>One or more tags. For example, { "tags": {"key1":"value1", "key2":"value2"} }.</p> pub fn set_tags( mut self, input: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, ) -> Self { self.inner = self.inner.set_tags(input); self } } /// Fluent builder constructing a request to `UntagResource`. /// /// <p>Removes the specified tags from the specified resource.</p> #[derive(std::clone::Clone, std::fmt::Debug)] pub struct UntagResource { handle: std::sync::Arc<super::Handle>, inner: crate::input::untag_resource_input::Builder, } impl UntagResource { /// Creates a new `UntagResource`. 
pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::UntagResourceOutput, aws_smithy_http::result::SdkError<crate::error::UntagResourceError>, > { let op = self .inner .build() .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))? .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The Amazon Resource Name (ARN) of the resource.</p> pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.resource_arn(input.into()); self } /// <p>The Amazon Resource Name (ARN) of the resource.</p> pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_resource_arn(input); self } /// Appends an item to `tagKeys`. /// /// To override the contents of this collection use [`set_tag_keys`](Self::set_tag_keys). /// /// <p>The tag keys.</p> pub fn tag_keys(mut self, input: impl Into<std::string::String>) -> Self { self.inner = self.inner.tag_keys(input.into()); self } /// <p>The tag keys.</p> pub fn set_tag_keys( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.inner = self.inner.set_tag_keys(input); self } } } impl Client { /// Creates a client with the given service config and connector override. pub fn from_conf_conn<C, E>(conf: crate::Config, conn: C) -> Self where C: aws_smithy_client::bounds::SmithyConnector<Error = E> + Send + 'static, E: Into<aws_smithy_http::result::ConnectorError>, { let retry_config = conf.retry_config.as_ref().cloned().unwrap_or_default(); let timeout_config = conf.timeout_config.as_ref().cloned().unwrap_or_default(); let sleep_impl = conf.sleep_impl.clone(); let mut builder = aws_smithy_client::Builder::new() .connector(aws_smithy_client::erase::DynConnector::new(conn)) .middleware(aws_smithy_client::erase::DynMiddleware::new( crate::middleware::DefaultMiddleware::new(), )); builder.set_retry_config(retry_config.into()); builder.set_timeout_config(timeout_config); if let Some(sleep_impl) = sleep_impl { builder.set_sleep_impl(Some(sleep_impl)); } let client = builder.build(); Self { handle: std::sync::Arc::new(Handle { client, conf }), } } /// Creates a new client from a shared config. #[cfg(any(feature = "rustls", feature = "native-tls"))] pub fn new(config: &aws_types::config::Config) -> Self { Self::from_conf(config.into()) } /// Creates a new client from the service [`Config`](crate::Config). 
#[cfg(any(feature = "rustls", feature = "native-tls"))] pub fn from_conf(conf: crate::Config) -> Self { let retry_config = conf.retry_config.as_ref().cloned().unwrap_or_default(); let timeout_config = conf.timeout_config.as_ref().cloned().unwrap_or_default(); let sleep_impl = conf.sleep_impl.clone(); let mut builder = aws_smithy_client::Builder::dyn_https().middleware( aws_smithy_client::erase::DynMiddleware::new( crate::middleware::DefaultMiddleware::new(), ), ); builder.set_retry_config(retry_config.into()); builder.set_timeout_config(timeout_config); // the builder maintains a try-state. To avoid suppressing the warning when sleep is unset, // only set it if we actually have a sleep impl. if let Some(sleep_impl) = sleep_impl { builder.set_sleep_impl(Some(sleep_impl)); } let client = builder.build(); Self { handle: std::sync::Arc::new(Handle { client, conf }), } } }
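// Editor's sketch, not generated code: the doc comments above repeatedly describe
// token-based pagination ("make another call with the returned token value"). A
// minimal driver for that contract could look like the following. The helper name
// `list_all_savings_plans` is hypothetical, and the direct field access on the
// output assumes the documented fields (`savings_plans`, `next_token`) are public,
// as the field lists above suggest.
#[allow(dead_code)]
async fn list_all_savings_plans(
    client: &Client,
) -> std::result::Result<
    std::vec::Vec<crate::model::SavingsPlan>,
    aws_smithy_http::result::SdkError<crate::error::DescribeSavingsPlansError>,
> {
    let mut plans = std::vec::Vec::new();
    let mut next_token: std::option::Option<std::string::String> = None;
    loop {
        // Pass the token from the previous page; `None` requests the first page.
        let resp = client
            .describe_savings_plans()
            .set_next_token(next_token)
            .send()
            .await?;
        plans.extend(resp.savings_plans.unwrap_or_default());
        next_token = resp.next_token;
        // A null token means there are no more results to return.
        if next_token.is_none() {
            break;
        }
    }
    Ok(plans)
}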
avg_line_length: 53.056911
max_line_length: 474
alphanum_fraction: 0.622614
hexsha: 7550c8e473ec8e0473804df270a064f471a4d37c
size: 1,576
use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};
use ::RegType::*;
use ::instruction_def::*;
use ::Operand::*;
use ::Reg::*;
use ::RegScale::*;
use ::test::run_test;

#[test]
fn cvtpd2dq_1() {
    run_test(&Instruction { mnemonic: Mnemonic::CVTPD2DQ, operand1: Some(Direct(XMM2)), operand2: Some(Direct(XMM4)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[242, 15, 230, 212], OperandSize::Dword)
}

#[test]
fn cvtpd2dq_2() {
    run_test(&Instruction { mnemonic: Mnemonic::CVTPD2DQ, operand1: Some(Direct(XMM1)), operand2: Some(Indirect(EDI, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[242, 15, 230, 15], OperandSize::Dword)
}

#[test]
fn cvtpd2dq_3() {
    run_test(&Instruction { mnemonic: Mnemonic::CVTPD2DQ, operand1: Some(Direct(XMM1)), operand2: Some(Direct(XMM4)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[242, 15, 230, 204], OperandSize::Qword)
}

#[test]
fn cvtpd2dq_4() {
    run_test(&Instruction { mnemonic: Mnemonic::CVTPD2DQ, operand1: Some(Direct(XMM0)), operand2: Some(IndirectScaledIndexed(RCX, RCX, Four, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[242, 15, 230, 4, 137], OperandSize::Qword)
}
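// Editor's note, not part of the original file (assumes standard x86 encoding
// rules): the expected byte sequences follow the CVTPD2DQ encoding F2 0F E6 /r.
// In `cvtpd2dq_1`, for example, [242, 15, 230, 212] is F2 0F E6 D4, where the
// ModRM byte 0xD4 (mod=11, reg=010, rm=100) selects XMM2 as the destination
// and XMM4 as the source register.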
avg_line_length: 54.344828
max_line_length: 348
alphanum_fraction: 0.706218
hexsha: efde04ce5b75f51b4ab01bc68a47a27ce1c782c5
size: 977
use rusoto_credential::{InstanceMetadataProvider, ProvideAwsCredentials};
use std::time::Duration;

// This test is marked ignored because it requires special setup.
// It's run with the `credential_integration_test` Makefile target.
#[tokio::test]
#[ignore]
async fn it_fetches_basic_role() {
    // set env vars to point to local provider
    let mut provider = InstanceMetadataProvider::new();
    provider.set_timeout(Duration::from_secs(5));
    provider.set_ip_addr_with_port("127.0.0.1", "8080");
    let creds = provider.credentials().await.expect("credentials");
    assert_eq!(creds.aws_access_key_id(), "Access_key_id_value");
    assert_eq!(creds.aws_secret_access_key(), "Secret_access_key_value");
    assert_eq!(creds.token().as_ref(), Some(&"AAAAA".to_string()));
    let dt = match creds.expires_at().as_ref() {
        Some(d) => d.to_string(),
        None => panic!("Expiration should be present"),
    };
    assert_eq!(dt, "2015-08-04 06:32:37 UTC");
}
avg_line_length: 39.08
max_line_length: 73
alphanum_fraction: 0.70522
hexsha: 39a7e9988288d651f7f5271bed8f3da1e0824f22
size: 2,824
extern crate core_foundation;
extern crate videotoolbox_sys;

use core_foundation::base::{
    TCFType,
    kCFAllocatorDefault,
    CFIndexConvertible,
};
use core_foundation::dictionary::{
    CFDictionaryRef, CFDictionary, CFDictionaryCreate,
    kCFTypeDictionaryKeyCallBacks, kCFTypeDictionaryValueCallBacks,
};
use core_foundation::array::{
    CFArrayRef, CFArray, CFArrayCreate,
};
use core_foundation::string::CFString;
use videotoolbox_sys::utilities::{
    VTCopyVideoEncoderList,
    kVTVideoEncoderList_CodecType,
    kVTVideoEncoderList_EncoderID,
    kVTVideoEncoderList_CodecName,
    kVTVideoEncoderList_EncoderName,
    kVTVideoEncoderList_DisplayName,
};

use std::ptr;
use std::mem;

unsafe fn run() {
    println!("kVTVideoEncoderList_CodecType: {:?}", CFString::wrap_under_create_rule(kVTVideoEncoderList_CodecType));
    println!("kVTVideoEncoderList_EncoderID: {:?}", CFString::wrap_under_create_rule(kVTVideoEncoderList_EncoderID));
    println!("kVTVideoEncoderList_CodecName: {:?}", CFString::wrap_under_create_rule(kVTVideoEncoderList_CodecName));
    println!("kVTVideoEncoderList_EncoderName: {:?}", CFString::wrap_under_create_rule(kVTVideoEncoderList_EncoderName));
    println!("kVTVideoEncoderList_DisplayName: {:?}", CFString::wrap_under_create_rule(kVTVideoEncoderList_DisplayName));
    println!("\n\n\n");

    let keys: Vec<CFString> = vec![
        CFString::new("CodecName"),
        CFString::new("CodecType"),
        CFString::new("EncoderID"),
        CFString::new("EncoderName"),
        CFString::new("DisplayName"),
    ];
    let values: Vec<CFString> = vec![
        CFString::new(""),
        CFString::new(""),
        CFString::new(""),
        CFString::new(""),
        CFString::new(""),
    ];
    let opts_ref: CFDictionaryRef = CFDictionaryCreate(
        kCFAllocatorDefault,
        mem::transmute(keys.as_ptr()),
        mem::transmute(values.as_ptr()),
        keys.len().to_CFIndex(),
        &kCFTypeDictionaryKeyCallBacks,
        &kCFTypeDictionaryValueCallBacks,
    );
    let mut result_ref: CFArrayRef = CFArrayCreate(
        kCFAllocatorDefault,
        ptr::null_mut(),
        0.to_CFIndex(),
        ptr::null(),
    );

    let ret_code = VTCopyVideoEncoderList(opts_ref, &mut result_ref);

    println!("opts: {:?}", CFDictionary::wrap_under_create_rule(opts_ref));
    println!("ret_code: {:?}", ret_code);
    println!("result: {:?}", CFArray::wrap_under_create_rule(result_ref));
}

fn main() {
    unsafe {
        run();
    }
}
37.157895
121
0.61296
28e77e07a909e9fc7705a9ef72b1549f0032b8fe
1,152
use core::pin::Pin;
use futures_core::future::Future;
use futures_core::stream::Stream;
use futures_core::task::{Waker, Poll};
use pin_utils::unsafe_pinned;

/// A type which converts a `Future` into a `Stream`
/// containing a single element.
#[must_use = "futures do nothing unless polled"]
#[derive(Debug)]
pub struct IntoStream<Fut: Future> {
    future: Option<Fut>
}

impl<Fut: Future> IntoStream<Fut> {
    unsafe_pinned!(future: Option<Fut>);

    pub(super) fn new(future: Fut) -> IntoStream<Fut> {
        IntoStream {
            future: Some(future)
        }
    }
}

impl<Fut: Future> Stream for IntoStream<Fut> {
    type Item = Fut::Output;

    fn poll_next(mut self: Pin<&mut Self>, waker: &Waker) -> Poll<Option<Self::Item>> {
        let v = match self.as_mut().future().as_pin_mut() {
            Some(fut) => {
                match fut.poll(waker) {
                    Poll::Pending => return Poll::Pending,
                    Poll::Ready(v) => v
                }
            }
            None => return Poll::Ready(None),
        };

        Pin::set(&mut self.as_mut().future(), None);
        Poll::Ready(Some(v))
    }
}
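// A usage sketch (hedged; assumes the pre-1.0 `futures-preview` API this file
// is written against, where `poll_next` takes a `&Waker`):
//
//     let mut stream = IntoStream::new(future::ready(1));
//     // The first `poll_next` yields `Poll::Ready(Some(1))`; every later
//     // call yields `Poll::Ready(None)`, because the inner future is taken
//     // out of the `Option` after completing.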
26.790698
87
0.570313
267f736c5c42e46398ff107be64a3b193feacef7
7,562
use crate::Feerate;
use async_trait::async_trait;
use bitcoin::{Block, BlockHash, Network, Transaction};
use bitcoincore_rpc::bitcoincore_rpc_json::EstimateMode;
use bitcoincore_rpc::RpcApi;
use tracing::warn;

/// Trait that allows interacting with the Bitcoin blockchain
///
/// Functions may panic if the bitcoind node is not reachable.
#[async_trait]
pub trait BitcoindRpc: Send + Sync {
    /// Returns the Bitcoin network the node is connected to
    async fn get_network(&self) -> bitcoin::Network;

    /// Returns the current block height
    async fn get_block_height(&self) -> u64;

    /// Returns the block hash at a given height
    ///
    /// # Panics
    /// If the node does not know a block for that height. Make sure to only query blocks of a
    /// height less than or equal to the one returned by `Self::get_block_height`.
    ///
    /// While there is a corner case in which the blockchain shrinks between these two calls
    /// (through, on average, heavier blocks on a fork), this is prevented by only querying hashes
    /// for blocks tailing the chain tip by a certain number of blocks.
    async fn get_block_hash(&self, height: u64) -> BlockHash;

    /// Returns the block with the given hash
    ///
    /// # Panics
    /// If the block doesn't exist.
    async fn get_block(&self, hash: &BlockHash) -> bitcoin::Block;

    /// Estimates the fee rate for a given confirmation target. Make sure that all federation
    /// members use the same algorithm to avoid widely diverging results. If the node is not ready
    /// yet to return a fee rate estimation this function returns `None`.
    async fn get_fee_rate(&self, confirmation_target: u16) -> Option<Feerate>;

    /// Submits a transaction to the Bitcoin network
    ///
    /// # Panics
    /// If the transaction is deemed invalid by the node it was submitted to
    async fn submit_transaction(&self, transaction: Transaction);
}

#[async_trait]
impl BitcoindRpc for bitcoincore_rpc::Client {
    async fn get_network(&self) -> Network {
        let network = tokio::task::block_in_place(|| self.get_blockchain_info())
            .expect("Bitcoind returned an error");
        match network.chain.as_str() {
            "main" => Network::Bitcoin,
            "test" => Network::Testnet,
            "regtest" => Network::Regtest,
            _ => panic!("Unknown Network"),
        }
    }

    async fn get_block_height(&self) -> u64 {
        tokio::task::block_in_place(|| self.get_block_count()).expect("Bitcoind returned an error")
    }

    async fn get_block_hash(&self, height: u64) -> BlockHash {
        tokio::task::block_in_place(|| bitcoincore_rpc::RpcApi::get_block_hash(self, height))
            .expect("Bitcoind returned an error")
    }

    async fn get_block(&self, hash: &BlockHash) -> Block {
        tokio::task::block_in_place(|| bitcoincore_rpc::RpcApi::get_block(self, hash))
            .expect("Bitcoind returned an error")
    }

    async fn get_fee_rate(&self, confirmation_target: u16) -> Option<Feerate> {
        tokio::task::block_in_place(|| {
            self.estimate_smart_fee(confirmation_target, Some(EstimateMode::Conservative))
        })
        .expect("Bitcoind returned an error")
        // TODO: implement retry logic in case bitcoind is temporarily unreachable
        .fee_rate
        .map(|per_kb| Feerate {
            sats_per_kvb: per_kb.as_sat(),
        })
    }

    async fn submit_transaction(&self, transaction: Transaction) {
        if let Err(error) = tokio::task::block_in_place(|| self.send_raw_transaction(&transaction))
        {
            warn!(?error, "Submitting transaction failed");
        }
    }
}

#[allow(dead_code)]
pub mod test {
    use crate::bitcoind::BitcoindRpc;
    use crate::Feerate;
    use async_trait::async_trait;
    use bitcoin::hashes::Hash;
    use bitcoin::{Block, BlockHash, BlockHeader, Network, Transaction};
    use std::collections::{HashMap, VecDeque};
    use std::sync::Arc;
    use tokio::sync::Mutex;
    #[derive(Debug, Default)]
    pub struct FakeBitcoindRpcState {
        fee_rate: Option<Feerate>,
        block_height: u64,
        transactions: VecDeque<Transaction>,
        tx_in_blocks: HashMap<BlockHash, Vec<Transaction>>,
    }

    #[derive(Clone, Default)]
    pub struct FakeBitcoindRpc {
        state: Arc<Mutex<FakeBitcoindRpcState>>,
    }

    pub struct FakeBitcoindRpcController {
        pub state: Arc<Mutex<FakeBitcoindRpcState>>,
    }

    #[async_trait]
    impl BitcoindRpc for FakeBitcoindRpc {
        async fn get_network(&self) -> Network {
            bitcoin::Network::Regtest
        }

        async fn get_block_height(&self) -> u64 {
            self.state.lock().await.block_height
        }

        async fn get_block_hash(&self, height: u64) -> BlockHash {
            height_hash(height)
        }

        async fn get_block(&self, hash: &BlockHash) -> Block {
            let txdata = self
                .state
                .lock()
                .await
                .tx_in_blocks
                .get(hash)
                .cloned()
                .unwrap_or_default();
            Block {
                header: BlockHeader {
                    version: 0,
                    prev_blockhash: Default::default(),
                    merkle_root: Default::default(),
                    time: 0,
                    bits: 0,
                    nonce: 0,
                },
                txdata,
            }
        }

        async fn get_fee_rate(&self, _confirmation_target: u16) -> Option<Feerate> {
            self.state.lock().await.fee_rate
        }

        async fn submit_transaction(&self, transaction: Transaction) {
            self.state.lock().await.transactions.push_back(transaction);
        }
    }

    impl FakeBitcoindRpc {
        pub fn new() -> FakeBitcoindRpc {
            FakeBitcoindRpc::default()
        }

        pub fn controller(&self) -> FakeBitcoindRpcController {
            FakeBitcoindRpcController {
                state: self.state.clone(),
            }
        }
    }

    impl FakeBitcoindRpcController {
        pub async fn set_fee_rate(&self, fee_rate: Option<Feerate>) {
            self.state.lock().await.fee_rate = fee_rate;
        }

        pub async fn set_block_height(&self, block_height: u64) {
            self.state.lock().await.block_height = block_height
        }

        pub async fn is_btc_sent_to(
            &self,
            amount: bitcoin::Amount,
            recipient: bitcoin::Address,
        ) -> bool {
            self.state
                .lock()
                .await
                .transactions
                .iter()
                .flat_map(|tx| tx.output.iter())
                .any(|output| {
                    output.value == amount.as_sat()
                        && output.script_pubkey == recipient.script_pubkey()
                })
        }

        pub async fn add_pending_tx_to_block(&self, block: u64) {
            let block_hash = height_hash(block);
            let mut state = self.state.lock().await;
            #[allow(clippy::needless_collect)]
            let txns = state.transactions.drain(..).collect::<Vec<_>>();
            state
                .tx_in_blocks
                .entry(block_hash)
                .or_default()
                .extend(txns.into_iter());
        }
    }

    fn height_hash(height: u64) -> BlockHash {
        let mut bytes = [0u8; 32];
        bytes[..8].copy_from_slice(&height.to_le_bytes()[..]);
        BlockHash::from_inner(bytes)
    }
}
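// A hedged usage sketch (not part of the original file): exercising the fake
// RPC through its controller from a test. Assumes the `tokio` test macro is
// available, and relies only on items defined above (`Feerate` is constructed
// with its `sats_per_kvb` field exactly as `get_fee_rate` does).
#[cfg(test)]
mod fake_rpc_sketch {
    use super::test::FakeBitcoindRpc;
    use super::BitcoindRpc;
    use crate::Feerate;

    #[tokio::test]
    async fn fake_rpc_reports_configured_state() {
        let rpc = FakeBitcoindRpc::new();
        let controller = rpc.controller();

        // Drive the fake from the controller side...
        controller.set_block_height(101).await;
        controller
            .set_fee_rate(Some(Feerate { sats_per_kvb: 1000 }))
            .await;

        // ...and observe it through the `BitcoindRpc` trait.
        assert_eq!(rpc.get_block_height().await, 101);
        assert_eq!(rpc.get_fee_rate(1).await.map(|f| f.sats_per_kvb), Some(1000));
    }
}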
33.758929
120
0.589527
8f3ef4296f0cc25bdbd0189ba6f4532fc0084b3d
19,224
//! # Rustls - a modern TLS library
//! Rustls is a TLS library that aims to provide a good level of cryptographic security,
//! requires no configuration to achieve that security, and provides no unsafe features or
//! obsolete cryptography.
//!
//! ## Current features
//!
//! * TLS1.2 and TLS1.3.
//! * ECDSA, Ed25519 or RSA server authentication by clients.
//! * ECDSA, Ed25519 or RSA server authentication by servers.
//! * Forward secrecy using ECDHE; with curve25519, nistp256 or nistp384 curves.
//! * AES128-GCM and AES256-GCM bulk encryption, with safe nonces.
//! * ChaCha20-Poly1305 bulk encryption ([RFC7905](https://tools.ietf.org/html/rfc7905)).
//! * ALPN support.
//! * SNI support.
//! * Tunable fragment size to make TLS messages match size of underlying transport.
//! * Optional use of vectored IO to minimise system calls.
//! * TLS1.2 session resumption.
//! * TLS1.2 resumption via tickets ([RFC5077](https://tools.ietf.org/html/rfc5077)).
//! * TLS1.3 resumption via tickets or session storage.
//! * TLS1.3 0-RTT data for clients.
//! * Client authentication by clients.
//! * Client authentication by servers.
//! * Extended master secret support ([RFC7627](https://tools.ietf.org/html/rfc7627)).
//! * Exporters ([RFC5705](https://tools.ietf.org/html/rfc5705)).
//! * OCSP stapling by servers.
//! * SCT stapling by servers.
//! * SCT verification by clients.
//!
//! ## Possible future features
//!
//! * PSK support.
//! * OCSP verification by clients.
//! * Certificate pinning.
//!
//! ## Non-features
//!
//! For reasons [explained in the manual](manual),
//! rustls does not and will not support:
//!
//! * SSL1, SSL2, SSL3, TLS1 or TLS1.1.
//! * RC4.
//! * DES or triple DES.
//! * EXPORT ciphersuites.
//! * MAC-then-encrypt ciphersuites.
//! * Ciphersuites without forward secrecy.
//! * Renegotiation.
//! * Kerberos.
//! * Compression.
//! * Discrete-log Diffie-Hellman.
//! * Automatic protocol version downgrade.
//!
//! There are plenty of other libraries that provide these features should you
//! need them.
//!
//! ### Platform support
//!
//! Rustls uses [`ring`](https://crates.io/crates/ring) for implementing the
//! cryptography in TLS. As a result, rustls only runs on platforms
//! [supported by `ring`](https://github.com/briansmith/ring#online-automated-testing).
//! At the time of writing this means x86, x86-64, armv7, and aarch64.
//!
//! ## Design Overview
//! ### Rustls does not take care of network IO
//! It doesn't make or accept TCP connections, or do DNS, or read or write files.
//!
//! There's example client and server code which uses mio to do all needed network
//! IO.
//!
//! ### Rustls provides encrypted pipes
//! These are the [`ServerConnection`] and [`ClientConnection`] types. You supply raw TLS traffic
//! on the left (via the [`read_tls()`] and [`write_tls()`] methods) and then read/write the
//! plaintext on the right:
//!
//! [`read_tls()`]: Connection::read_tls
//! [`write_tls()`]: Connection::write_tls
//!
//! ```text
//!          TLS                                   Plaintext
//!          ===                                   =========
//!     read_tls()      +-----------------------+      reader() as io::Read
//!                     |                       |
//!           +--------->   ClientConnection    +--------->
//!                     |          or           |
//!           <---------+   ServerConnection    <---------+
//!                     |                       |
//!     write_tls()     +-----------------------+      writer() as io::Write
//! ```
//!
//! ### Rustls takes care of server certificate verification
//! You do not need to provide anything other than a set of root certificates to trust.
//! Certificate verification cannot be turned off or disabled in the main API.
//!
//! ## Getting started
//! This is the minimum you need to do to make a TLS client connection.
//!
//! First we load some root certificates. These are used to authenticate the server.
//! The recommended way is to depend on the `webpki_roots` crate which contains
//! the Mozilla set of root certificates.
//!
//! ```rust,no_run
//! let mut root_store = rustls::RootCertStore::empty();
//! root_store.add_server_trust_anchors(
//!     webpki_roots::TLS_SERVER_ROOTS
//!         .0
//!         .iter()
//!         .map(|ta| {
//!             rustls::OwnedTrustAnchor::from_subject_spki_name_constraints(
//!                 ta.subject,
//!                 ta.spki,
//!                 ta.name_constraints,
//!             )
//!         })
//! );
//! ```
//!
//! Next, we make a `ClientConfig`. You're likely to make one of these per process,
//! and use it for all connections made by that process.
//!
//! ```rust,no_run
//! # let root_store: rustls::RootCertStore = panic!();
//! let config = rustls::ClientConfig::builder()
//!     .with_safe_defaults()
//!     .with_root_certificates(root_store)
//!     .with_no_client_auth();
//! ```
//!
//! Now we can make a connection. You need to provide the server's hostname so we
//! know what to expect to find in the server's certificate.
//!
//! ```rust
//! # use rustls;
//! # use webpki;
//! # use std::sync::Arc;
//! # use std::convert::TryInto;
//! # let mut root_store = rustls::RootCertStore::empty();
//! # root_store.add_server_trust_anchors(
//! #     webpki_roots::TLS_SERVER_ROOTS
//! #         .0
//! #         .iter()
//! #         .map(|ta| {
//! #             rustls::OwnedTrustAnchor::from_subject_spki_name_constraints(
//! #                 ta.subject,
//! #                 ta.spki,
//! #                 ta.name_constraints,
//! #             )
//! #         })
//! # );
//! # let config = rustls::ClientConfig::builder()
//! #     .with_safe_defaults()
//! #     .with_root_certificates(root_store)
//! #     .with_no_client_auth();
//! let rc_config = Arc::new(config);
//! let example_com = "example.com".try_into().unwrap();
//! let mut client = rustls::ClientConnection::new(rc_config, example_com);
//! ```
//!
//! Now you should do appropriate IO for the `client` object. If `client.wants_read()` yields
//! true, you should call `client.read_tls()` when the underlying connection has data.
//! Likewise, if `client.wants_write()` yields true, you should call `client.write_tls()`
//! when the underlying connection is able to send data. You should continue doing this
//! as long as the connection is valid.
//!
//! The return types of `read_tls()` and `write_tls()` only tell you if the IO worked. No
//! parsing or processing of the TLS messages is done. After each `read_tls()` you should
//! therefore call `client.process_new_packets()` which parses and processes the messages.
//! Any error returned from `process_new_packets` is fatal to the connection, and will tell you
//! why. For example, if the server's certificate is expired `process_new_packets` will
//! return `Err(WebPkiError(CertExpired, ValidateServerCert))`. From this point on,
//! `process_new_packets` will not do any new work and will return that error continually.
//!
//! You can extract newly received data by calling `client.reader()` (which implements the
//! `io::Read` trait). You can send data to the peer by calling `client.writer()` (which
//! implements the `io::Write` trait). Note that `client.writer().write()` buffers data you
//! send if the TLS connection is not yet established: this is useful for writing (say) an
//! HTTP request, but since the data is buffered you should avoid writing large amounts of it.
//!
//! The following code uses a fictional socket IO API for illustration, and does not handle
//! errors.
//!
//! ```rust,no_run
//!
//! # let mut client = rustls::ClientConnection::new(panic!(), panic!()).unwrap();
//! # struct Socket { }
//! # impl Socket {
//! #     fn ready_for_write(&self) -> bool { false }
//! #     fn ready_for_read(&self) -> bool { false }
//! #     fn wait_for_something_to_happen(&self) { }
//! # }
//! #
//! # use std::io::{Read, Write, Result};
//! # impl Read for Socket {
//! #     fn read(&mut self, buf: &mut [u8]) -> Result<usize> { panic!() }
//! # }
//! # impl Write for Socket {
//! #     fn write(&mut self, buf: &[u8]) -> Result<usize> { panic!() }
//! #     fn flush(&mut self) -> Result<()> { panic!() }
//! # }
//! #
//! # fn connect(_address: &str, _port: u16) -> Socket {
//! #     panic!();
//! # }
//! use std::io;
//! use rustls::Connection;
//!
//! client.writer().write(b"GET / HTTP/1.0\r\n\r\n").unwrap();
//! let mut socket = connect("example.com", 443);
//! loop {
//!     if client.wants_read() && socket.ready_for_read() {
//!         client.read_tls(&mut socket).unwrap();
//!         client.process_new_packets().unwrap();
//!
//!         let mut plaintext = Vec::new();
//!         client.reader().read_to_end(&mut plaintext).unwrap();
//!         io::stdout().write(&plaintext).unwrap();
//!     }
//!
//!     if client.wants_write() && socket.ready_for_write() {
//!         client.write_tls(&mut socket).unwrap();
//!     }
//!
//!     socket.wait_for_something_to_happen();
//! }
//! ```
//!
//! # Examples
//! `tlsserver` and `tlsclient` are full worked examples. These both use mio.
//!
//! # Crate features
//! Here's a list of what features are exposed by the rustls crate and what
//! they mean.
//!
//! - `logging`: this makes the rustls crate depend on the `log` crate.
//!   rustls outputs interesting protocol-level messages at `trace!` and `debug!`
//!   level, and protocol-level errors at `warn!` and `error!` level. The log
//!   messages do not contain secret key data, and so are safe to archive without
//!   affecting session security. This feature is in the default set.
//!
//! - `dangerous_configuration`: this feature enables a `dangerous()` method on
//!   `ClientConfig` and `ServerConfig` that allows setting inadvisable options,
//!   such as replacing the certificate verification process. Applications
//!   requesting this feature should be reviewed carefully.
//!
//! - `quic`: this feature exposes additional constructors and functions
//!   for using rustls as a TLS library for QUIC. See the `quic` module for
//!   details of these. You will only need this if you're writing a QUIC
//!   implementation.
//!
//! - `tls12`: enables support for TLS version 1.2. This feature is in the default
//!   set. Note that, due to the additive nature of Cargo features and because it
//!   is enabled by default, other crates in your dependency graph could re-enable
//!   it for your application. If you want to disable TLS 1.2 for security reasons,
//!   consider explicitly enabling TLS 1.3 only in the config builder API.
//!
//! - `read_buf`: this nightly-only feature adds support for the unstable
//!   `std::io::ReadBuf` and related APIs. This reduces costs from initializing
//!   buffers.

// Require docs for public APIs, deny unsafe code, etc.
#![forbid(unsafe_code, unused_must_use)]
#![cfg_attr(not(feature = "read_buf"), forbid(unstable_features))]
#![deny(
    clippy::clone_on_ref_ptr,
    clippy::use_self,
    trivial_casts,
    trivial_numeric_casts,
    missing_docs,
    unreachable_pub,
    unused_import_braces,
    unused_extern_crates,
    unused_qualifications
)]
// Relax these clippy lints:
//  - ptr_arg: this triggers on references to type aliases that are Vec
//    underneath.
//  - too_many_arguments: some things just need a lot of state, wrapping it
//    doesn't necessarily make it easier to follow what's going on
//  - new_ret_no_self: we sometimes return `Arc<Self>`, which seems fine
//  - single_component_path_imports: our top-level `use log` import causes
//    a false positive, https://github.com/rust-lang/rust-clippy/issues/5210
//  - new_without_default: for internal constructors, the indirection is not
//    helpful
#![allow(
    clippy::too_many_arguments,
    clippy::new_ret_no_self,
    clippy::ptr_arg,
    clippy::single_component_path_imports,
    clippy::new_without_default
)]
// Enable documentation for all features on docs.rs
#![cfg_attr(docsrs, feature(doc_cfg))]
// Early testing of the read_buf nightly feature
#![cfg_attr(feature = "read_buf", feature(read_buf))]

// log for logging (optional).
#[cfg(feature = "logging")]
use log;

#[cfg(not(feature = "logging"))]
#[macro_use]
mod log {
    macro_rules! trace ( ($($tt:tt)*) => {{}} );
    macro_rules! debug ( ($($tt:tt)*) => {{}} );
    macro_rules! warn ( ($($tt:tt)*) => {{}} );
    macro_rules! error ( ($($tt:tt)*) => {{}} );
}

#[allow(missing_docs)]
#[macro_use]
mod msgs;

mod anchors;
mod cipher;
mod conn;
mod error;
mod hash_hs;
mod limited_cache;
mod rand;
mod record_layer;
mod stream;
#[cfg(feature = "tls12")]
mod tls12;
mod tls13;
mod vecbuf;
mod verify;
#[cfg(test)]
mod verifybench;
mod x509;
#[macro_use]
mod check;
mod bs_debug;
mod builder;
mod key;
mod keylog;
mod kx;
mod suites;
mod ticketer;
mod versions;

/// Internal classes which may be useful outside the library.
/// The contents of this section DO NOT form part of the stable interface.
pub mod internal {
    /// Low-level TLS message parsing and encoding functions.
    pub mod msgs {
        pub use crate::msgs::*;
    }
    /// Low-level TLS message decryption functions.
    pub mod cipher {
        pub use crate::cipher::MessageDecrypter;
    }
}

// The public interface is:
pub use crate::anchors::{OwnedTrustAnchor, RootCertStore};
pub use crate::builder::{
    ConfigBuilder, ConfigSide, WantsCipherSuites, WantsKxGroups, WantsVerifier, WantsVersions,
};
pub use crate::conn::{
    CommonState, Connection, ConnectionCommon, IoState, Reader, SideData, Writer,
};
pub use crate::error::Error;
pub use crate::key::{Certificate, PrivateKey};
pub use crate::keylog::{KeyLog, KeyLogFile, NoKeyLog};
pub use crate::kx::{SupportedKxGroup, ALL_KX_GROUPS};
pub use crate::msgs::enums::CipherSuite;
pub use crate::msgs::enums::ProtocolVersion;
pub use crate::msgs::enums::SignatureScheme;
pub use crate::msgs::handshake::DistinguishedNames;
pub use crate::stream::{Stream, StreamOwned};
pub use crate::suites::{
    BulkAlgorithm, SupportedCipherSuite, ALL_CIPHER_SUITES, DEFAULT_CIPHER_SUITES,
};
pub use crate::ticketer::Ticketer;
#[cfg(feature = "tls12")]
pub use crate::tls12::Tls12CipherSuite;
pub use crate::tls13::Tls13CipherSuite;
pub use crate::versions::{SupportedProtocolVersion, ALL_VERSIONS, DEFAULT_VERSIONS};

/// Items for use in a client.
pub mod client {
    pub(super) mod builder;
    mod client_conn;
    mod common;
    pub(super) mod handy;
    mod hs;
    #[cfg(feature = "tls12")]
    mod tls12;
    mod tls13;

    pub use builder::{WantsClientCert, WantsTransparencyPolicyOrClientCert};
    #[cfg(feature = "quic")]
    #[cfg_attr(docsrs, doc(cfg(feature = "quic")))]
    pub use client_conn::ClientQuicExt;
    pub use client_conn::InvalidDnsNameError;
    pub use client_conn::ResolvesClientCert;
    pub use client_conn::ServerName;
    pub use client_conn::StoresClientSessions;
    pub use client_conn::{ClientConfig, ClientConnection, ClientConnectionData, WriteEarlyData};
    pub use handy::{ClientSessionMemoryCache, NoClientSessionStorage};

    #[cfg(feature = "dangerous_configuration")]
    #[cfg_attr(docsrs, doc(cfg(feature = "dangerous_configuration")))]
    pub use crate::verify::{
        CertificateTransparencyPolicy, HandshakeSignatureValid, ServerCertVerified,
        ServerCertVerifier, WebPkiVerifier,
    };
    #[cfg(feature = "dangerous_configuration")]
    #[cfg_attr(docsrs, doc(cfg(feature = "dangerous_configuration")))]
    pub use client_conn::danger::DangerousClientConfig;
}

pub use client::{ClientConfig, ClientConnection, ServerName};

/// Items for use in a server.
pub mod server {
    pub(crate) mod builder;
    mod common;
    pub(crate) mod handy;
    mod hs;
    mod server_conn;
    #[cfg(feature = "tls12")]
    mod tls12;
    mod tls13;

    pub use crate::verify::{
        AllowAnyAnonymousOrAuthenticatedClient, AllowAnyAuthenticatedClient, NoClientAuth,
    };
    pub use builder::WantsServerCert;
    pub use handy::ResolvesServerCertUsingSni;
    pub use handy::{NoServerSessionStorage, ServerSessionMemoryCache};
    #[cfg(feature = "quic")]
    #[cfg_attr(docsrs, doc(cfg(feature = "quic")))]
    pub use server_conn::ServerQuicExt;
    pub use server_conn::StoresServerSessions;
    pub use server_conn::{
        Accepted, Acceptor, ServerConfig, ServerConnection, ServerConnectionData,
    };
    pub use server_conn::{ClientHello, ProducesTickets, ResolvesServerCert};

    #[cfg(feature = "dangerous_configuration")]
    #[cfg_attr(docsrs, doc(cfg(feature = "dangerous_configuration")))]
    pub use crate::verify::{ClientCertVerified, ClientCertVerifier, DnsName};
}

pub use server::{ServerConfig, ServerConnection};

/// All defined ciphersuites appear in this module.
///
/// [`ALL_CIPHER_SUITES`] is provided as an array of all of these values.
pub mod cipher_suite {
    #[cfg(feature = "tls12")]
    pub use crate::tls12::TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256;
    #[cfg(feature = "tls12")]
    pub use crate::tls12::TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384;
    #[cfg(feature = "tls12")]
    pub use crate::tls12::TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256;
    #[cfg(feature = "tls12")]
    pub use crate::tls12::TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256;
    #[cfg(feature = "tls12")]
    pub use crate::tls12::TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384;
    #[cfg(feature = "tls12")]
    pub use crate::tls12::TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256;
    pub use crate::tls13::TLS13_AES_128_GCM_SHA256;
    pub use crate::tls13::TLS13_AES_256_GCM_SHA384;
    pub use crate::tls13::TLS13_CHACHA20_POLY1305_SHA256;
}

/// All defined protocol versions appear in this module.
///
/// ALL_VERSIONS is provided as an array of all of these values.
pub mod version {
    #[cfg(feature = "tls12")]
    pub use crate::versions::TLS12;
    pub use crate::versions::TLS13;
}

/// All defined key exchange groups appear in this module.
///
/// ALL_KX_GROUPS is provided as an array of all of these values.
pub mod kx_group {
    pub use crate::kx::SECP256R1;
    pub use crate::kx::SECP384R1;
    pub use crate::kx::X25519;
}

/// Message signing interfaces and implementations.
pub mod sign;

#[cfg(feature = "quic")]
#[cfg_attr(docsrs, doc(cfg(feature = "quic")))]
/// APIs for implementing QUIC TLS
pub mod quic;

/// This is the rustls manual.
pub mod manual;

/** Type renames. */
#[allow(clippy::upper_case_acronyms)]
#[doc(hidden)]
#[deprecated(since = "0.20.0", note = "Use ResolvesServerCertUsingSni")]
pub type ResolvesServerCertUsingSNI = server::ResolvesServerCertUsingSni;

#[allow(clippy::upper_case_acronyms)]
#[cfg(feature = "dangerous_configuration")]
#[cfg_attr(docsrs, doc(cfg(feature = "dangerous_configuration")))]
#[doc(hidden)]
#[deprecated(since = "0.20.0", note = "Use client::WebPkiVerifier")]
pub type WebPKIVerifier = client::WebPkiVerifier;

#[allow(clippy::upper_case_acronyms)]
#[doc(hidden)]
#[deprecated(since = "0.20.0", note = "Use Error")]
pub type TLSError = Error;

#[doc(hidden)]
#[deprecated(since = "0.20.0", note = "Use ClientConnection")]
pub type ClientSession = ClientConnection;

#[doc(hidden)]
#[deprecated(since = "0.20.0", note = "Use ServerConnection")]
pub type ServerSession = ServerConnection;

/* Apologies: would make a trait alias here, but those remain unstable.
pub trait Session = Connection;
*/
36.969231
98
0.669528
870dda1bae52a0cb7fce57f7efb8498028b11bc3
9,987
// Copyright 2017 ETH Zurich. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! This module contains all requests (and the responses) to the coordinator, made by the
//! `strymon submit` tool, by submitted and running jobs, as well as by connecting executors.

pub mod catalog;

use std::io;

use num_traits::FromPrimitive;

use strymon_model::*;
use strymon_communication::rpc::{Name, Request};

/// The list of supported RPC methods at the coordinator.
#[derive(Primitive, Debug, PartialEq, Eq, Clone, Copy)]
#[repr(u8)]
pub enum CoordinatorRPC {
    /// Requests a new job submission.
    Submission = 1,
    /// Requests the termination of a running job.
    Termination = 2,
    /// Registers a new executor at the coordinator.
    AddExecutor = 3,
    /// Registers a spawned job worker group.
    AddWorkerGroup = 4,
    /// Subscribes to a topic in the catalog.
    Subscribe = 5,
    /// Unsubscribes from a topic.
    Unsubscribe = 6,
    /// Publishes a new topic in the catalog.
    Publish = 7,
    /// Unpublishes a topic.
    Unpublish = 8,
    /// Performs a topic lookup without subscribing to it.
    Lookup = 9,
}

impl Name for CoordinatorRPC {
    type Discriminant = u8;

    fn discriminant(&self) -> Self::Discriminant {
        *self as Self::Discriminant
    }

    fn from_discriminant(value: &Self::Discriminant) -> Option<Self> {
        FromPrimitive::from_u8(*value)
    }
}

/// Defines the placement of job workers on the available executors.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Placement {
    /// Randomly picks *(Number of Executors, Number of Worker Threads)* workers. The number of threads is per executor.
    Random(usize, usize), // (num executors, num workers)
    /// Spawns the specified number of worker threads on each of the selected executors.
    Fixed(Vec<ExecutorId>, usize), // (executors, num workers)
}

/// A new job submission.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Submission {
    /// Specifies the job executable.
    pub job: JobProgram,
    /// An optional human-readable description.
    pub name: Option<String>,
    /// The placement of workers in the cluster.
    pub placement: Placement,
}

/// The error type for failed job submissions.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum SubmissionError {
    /// The specified executor list or the requested number of executors is not available.
    ExecutorsNotFound,
    /// The coordinator was unable to reach a required executor.
    ExecutorUnreachable,
    /// An executor reported an error while spawning.
    SpawnError(::executor::SpawnError),
    /// The coordinator timed out waiting for worker groups to arrive
    TimedOut,
    /// An unknown error occurred.
    Other,
}

impl From<io::Error> for SubmissionError {
    fn from(_: io::Error) -> SubmissionError {
        SubmissionError::TimedOut
    }
}

impl Request<CoordinatorRPC> for Submission {
    type Success = JobId;
    type Error = SubmissionError;

    const NAME: CoordinatorRPC = CoordinatorRPC::Submission;
}

/// A job termination request.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Termination {
    /// Identifier of the job to terminate.
    pub job: JobId,
}

/// The error type for failed job termination requests.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum TerminationError {
    /// The specified job was not found
    NotFound,
    /// The coordinator was unable to reach a required executor.
    ExecutorUnreachable,
    /// An executor reported an error while terminating the job.
    TerminateError(::executor::TerminateError),
}

impl Request<CoordinatorRPC> for Termination {
    type Success = ();
    type Error = TerminationError;

    const NAME: CoordinatorRPC = CoordinatorRPC::Termination;
}

/// The message sent by new executors to register themselves at the coordinator.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct AddExecutor {
    /// The hostname of the machine on which the new executor is running.
    pub host: String,
    /// A range of ports to be assigned for the `timely_communication` channels.
    pub ports: (u16, u16),
    /// The format of the executables this executor can spawn.
    pub format: ExecutionFormat,
}

/// Error which occurs when the coordinator rejects the registration of a new executor.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ExecutorError;

impl Request<CoordinatorRPC> for AddExecutor {
    type Success = ExecutorId;
    type Error = ExecutorError;

    const NAME: CoordinatorRPC = CoordinatorRPC::AddExecutor;
}

/// An opaque token used by job worker groups to authenticate themselves at the coordinator.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub struct JobToken {
    /// The job identifier of the token owner.
    pub id: JobId,
    /// An opaque random number known only to the job process and the coordinator.
    pub auth: u64,
}

/// Registers a newly spawned worker group at the coordinator.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct AddWorkerGroup {
    /// The identifier of the job this group belongs to.
    pub job: JobId,
    /// The index of this group within the list of groups of the job.
    pub group: usize,
}

/// The error cause sent back to worker groups when job spawning fails.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum WorkerGroupError {
    /// The provided worker group meta-data was invalid.
    InvalidWorkerGroup,
    /// The spawning of the job has been aborted.
    SpawningAborted,
    /// A peer worker group caused this job submission to fail.
    PeerFailed,
}

impl Request<CoordinatorRPC> for AddWorkerGroup {
    type Success = JobToken;
    type Error = WorkerGroupError;

    const NAME: CoordinatorRPC = CoordinatorRPC::AddWorkerGroup;
}

/// A topic subscription request, sent by a spawned job to the coordinator.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Subscribe {
    /// The name of the topic this job would like to subscribe to.
    pub name: String,
    /// If `blocking` is true, the response is delayed until a topic with a matching name is published.
    /// Otherwise, an error message is returned indicating that the requested topic does not exist.
    pub blocking: bool,
    /// A token authenticating the submitter as a successfully spawned job.
    pub token: JobToken,
}

/// The error message sent back for unsuccessful subscription requests.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum SubscribeError {
    /// The requested topic does not exist.
    TopicNotFound,
    /// The provided authentication token was invalid.
    AuthenticationFailure,
}

impl Request<CoordinatorRPC> for Subscribe {
    type Success = Topic;
    type Error = SubscribeError;

    const NAME: CoordinatorRPC = CoordinatorRPC::Subscribe;
}

/// A request to unsubscribe from a topic.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Unsubscribe {
    /// The identifier of the subscribed topic.
    pub topic: TopicId,
    /// A token authenticating the submitter as a successfully spawned job.
    pub token: JobToken,
}

/// The error message sent back for failed unsubscription requests.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum UnsubscribeError {
    /// No subscription found for the requested topic.
    InvalidTopicId,
    /// The provided authentication token was invalid.
    AuthenticationFailure,
}

impl Request<CoordinatorRPC> for Unsubscribe {
    type Success = ();
    type Error = UnsubscribeError;

    const NAME: CoordinatorRPC = CoordinatorRPC::Unsubscribe;
}

/// A request to publish a topic.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Publish {
    /// The name of the topic to be created.
    pub name: String,
    /// A `strymon_communication` endpoint address on which subscribers can access the publication.
    pub addr: (String, u16),
    /// The kind of topic being published.
    pub schema: TopicSchema,
    /// A token authenticating the submitter as a successfully spawned job.
    pub token: JobToken,
}

/// The error message sent back for failed publication requests.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum PublishError {
    /// A topic with the same name already exists.
    TopicAlreadyExists,
    /// The provided authentication token was invalid.
    AuthenticationFailure,
}

impl Request<CoordinatorRPC> for Publish {
    type Success = Topic;
    type Error = PublishError;

    const NAME: CoordinatorRPC = CoordinatorRPC::Publish;
}

/// A request to unpublish a published topic.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Unpublish {
    /// The identifier of the topic to unpublish.
    pub topic: TopicId,
    /// A token authenticating the submitter as a successfully spawned job.
    pub token: JobToken,
}

/// The error message sent back for failed unpublication requests.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum UnpublishError {
    /// No publication found for the requested topic.
    InvalidTopicId,
    /// The provided authentication token was invalid.
    AuthenticationFailure,
}

impl Request<CoordinatorRPC> for Unpublish {
    type Success = ();
    type Error = UnpublishError;

    const NAME: CoordinatorRPC = CoordinatorRPC::Unpublish;
}

/// Looks up a topic at the coordinator without registering a subscription for it.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Lookup {
    /// The name of the topic to look up.
    pub name: String,
}

impl Request<CoordinatorRPC> for Lookup {
    type Success = Topic;
    type Error = ();

    const NAME: CoordinatorRPC = CoordinatorRPC::Lookup;
}
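// A hedged sanity-check sketch (not part of the original file): the `Name`
// implementation above should round-trip every discriminant through
// `from_discriminant`. This relies only on items defined in this module and
// on the `Name` trait imported at the top of the file.
#[cfg(test)]
mod discriminant_tests {
    use super::CoordinatorRPC;
    use strymon_communication::rpc::Name;

    #[test]
    fn discriminants_round_trip() {
        let method = CoordinatorRPC::Subscribe;
        let wire = method.discriminant();
        assert_eq!(CoordinatorRPC::from_discriminant(&wire), Some(method));
    }
}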
32.530945
120
0.713327
d72d74c659c032f841d3c8e03d90f2b5ed9c3447
6,998
use futures::channel::mpsc::Sender;
use futures::prelude::*;
use std::any::Any;
use std::sync::{Arc, Mutex};
use vulkano::buffer::BufferAccess;

use crate::runtime::buffer::vulkan::BufferEmpty;
use crate::runtime::buffer::vulkan::BufferFull;
use crate::runtime::buffer::BufferBuilder;
use crate::runtime::buffer::BufferReader;
use crate::runtime::buffer::BufferReaderCustom;
use crate::runtime::buffer::BufferWriter;
use crate::runtime::buffer::BufferWriterHost;
use crate::runtime::AsyncMessage;

#[derive(Debug, PartialEq, Hash)]
pub struct H2D;

impl Eq for H2D {}

impl H2D {
    pub fn new() -> H2D {
        H2D
    }
}

impl Default for H2D {
    fn default() -> Self {
        Self::new()
    }
}

impl BufferBuilder for H2D {
    fn build(
        &self,
        item_size: usize,
        writer_inbox: Sender<AsyncMessage>,
        writer_output_id: usize,
    ) -> BufferWriter {
        WriterH2D::new(item_size, writer_inbox, writer_output_id)
    }
}

// everything is measured in items, e.g., offsets, capacity, space available

// ====================== WRITER ============================
#[derive(Debug)]
pub struct WriterH2D {
    buffer: Option<CurrentBuffer>,
    inbound: Arc<Mutex<Vec<BufferEmpty>>>,
    outbound: Arc<Mutex<Vec<BufferFull>>>,
    item_size: usize,
    finished: bool,
    writer_inbox: Sender<AsyncMessage>,
    writer_output_id: usize,
    reader_inbox: Option<Sender<AsyncMessage>>,
    reader_input_id: Option<usize>,
}

#[derive(Debug)]
struct CurrentBuffer {
    buffer: BufferEmpty,
    offset: usize,
}

impl WriterH2D {
    pub fn new(
        item_size: usize,
        writer_inbox: Sender<AsyncMessage>,
        writer_output_id: usize,
    ) -> BufferWriter {
        debug!("H2D writer created");

        BufferWriter::Host(Box::new(WriterH2D {
            buffer: None,
            inbound: Arc::new(Mutex::new(Vec::new())),
            outbound: Arc::new(Mutex::new(Vec::new())),
            item_size,
            finished: false,
            writer_inbox,
            writer_output_id,
            reader_inbox: None,
            reader_input_id: None,
        }))
    }
}

#[async_trait]
impl BufferWriterHost for WriterH2D {
    fn add_reader(
        &mut self,
        reader_inbox: Sender<AsyncMessage>,
        reader_input_id: usize,
    ) -> BufferReader {
        debug!("H2D writer called add reader");
        debug_assert!(self.reader_inbox.is_none());
        debug_assert!(self.reader_input_id.is_none());

        self.reader_inbox = Some(reader_inbox);
        self.reader_input_id = Some(reader_input_id);
        debug_assert_eq!(reader_input_id, 0);

        BufferReader::Custom(Box::new(ReaderH2D {
            inbound: self.outbound.clone(),
            outbound: self.inbound.clone(),
            writer_inbox: self.writer_inbox.clone(),
            writer_output_id: self.writer_output_id,
            finished: false,
        }))
    }

    fn as_any(&mut self) -> &mut dyn Any {
        self
    }

    fn bytes(&mut self) -> (*mut u8, usize) {
        if self.buffer.is_none() {
            if let Some(b) = self.inbound.lock().unwrap().pop() {
                self.buffer = Some(CurrentBuffer {
                    buffer: b,
                    offset: 0,
                });
            } else {
                debug!("H2D writer called bytes, buff is none");
                return (std::ptr::null_mut::<u8>(), 0);
            }
        }

        debug!("H2D writer called bytes, buff is some");
        unsafe {
            let buffer = self.buffer.as_mut().unwrap();
            let capacity = buffer.buffer.buffer.size() as usize / self.item_size;

            let mut ret = buffer.buffer.buffer.write().unwrap();
            (
                ret.as_mut_ptr().add(buffer.offset * self.item_size),
                (capacity - buffer.offset) * self.item_size,
            )
        }
    }

    fn produce(&mut self, amount: usize) {
        debug!("H2D writer called produce {}", amount);

        let buffer = self.buffer.as_mut().unwrap();
        let capacity = buffer.buffer.buffer.size() as usize / self.item_size;
        debug_assert!(amount + buffer.offset <= capacity);

        buffer.offset += amount;
        if buffer.offset == capacity {
            let buffer = self.buffer.take().unwrap().buffer.buffer;
            self.outbound.lock().unwrap().push(BufferFull {
                buffer,
                used_bytes: capacity * self.item_size,
            });
            if let Some(b) = self.inbound.lock().unwrap().pop() {
                self.buffer = Some(CurrentBuffer {
                    buffer: b,
                    offset: 0,
                });
            }

            let _ = self
                .reader_inbox
                .as_mut()
                .unwrap()
                .try_send(AsyncMessage::Notify);
        }
    }

    async fn notify_finished(&mut self) {
        debug!("H2D writer called finish");
        if self.finished {
            return;
        }

        if let Some(CurrentBuffer { offset, buffer }) = self.buffer.take() {
            if offset > 0 {
                self.outbound.lock().unwrap().push(BufferFull {
                    buffer: buffer.buffer,
                    used_bytes: offset * self.item_size,
                });
            }
        }

        self.reader_inbox
            .as_mut()
            .unwrap()
            .send(AsyncMessage::StreamInputDone {
                input_id: self.reader_input_id.unwrap(),
            })
            .await
            .unwrap();
    }

    fn finish(&mut self) {
        self.finished = true;
    }

    fn finished(&self) -> bool {
        self.finished
    }
}

unsafe impl Send for WriterH2D {}

// ====================== READER ============================
#[derive(Debug)]
pub struct ReaderH2D {
    inbound: Arc<Mutex<Vec<BufferFull>>>,
    outbound: Arc<Mutex<Vec<BufferEmpty>>>,
    writer_output_id: usize,
    writer_inbox: Sender<AsyncMessage>,
    finished: bool,
}

impl ReaderH2D {
    pub fn submit(&mut self, buffer: BufferEmpty) {
        debug!("H2D reader handling empty buffer");
        self.outbound.lock().unwrap().push(buffer);
        let _ = self.writer_inbox.try_send(AsyncMessage::Notify);
    }

    pub fn buffers(&mut self) -> Vec<BufferFull> {
        let mut vec = self.inbound.lock().unwrap();
        std::mem::take(&mut vec)
    }
}

#[async_trait]
impl BufferReaderCustom for ReaderH2D {
    fn as_any(&mut self) -> &mut dyn Any {
        self
    }

    async fn notify_finished(&mut self) {
        debug!("H2D reader finish");
        if self.finished {
            return;
        }

        self.writer_inbox
            .send(AsyncMessage::StreamOutputDone {
                output_id: self.writer_output_id,
            })
            .await
            .unwrap();
    }

    fn finish(&mut self) {
        self.finished = true;
    }

    fn finished(&self) -> bool {
        self.finished
    }
}

unsafe impl Send for ReaderH2D {}
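// A worked example of the "everything is measured in items" convention above
// (a sketch with hypothetical numbers): for item_size = 4 (e.g. f32 samples)
// and a 4096-byte Vulkan buffer, `bytes()` reports a capacity of 1024 items;
// after `produce(256)` the write offset advances by 256 items, i.e. the next
// `bytes()` pointer starts 256 * 4 = 1024 bytes into the mapped region, and
// the buffer is handed to the reader once the offset reaches 1024 items.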
26.812261
81
0.548442
fbd60830feef1c29cf57d205d3a417076c0b05c9
2,137
use dasp_graph::{Buffer, Input, Node};
use super::super::{GlicolNodeData, NodeData, BoxedNodeSend, mono_node};

pub struct Pan {
    pan: f32
}

impl Pan {
    pub fn new(pan: f32) -> GlicolNodeData {
        mono_node!( Self { pan } )
    }
}

impl Node<128> for Pan {
    fn process(&mut self, inputs: &[Input<128>], output: &mut [Buffer<128>]) {
        // The first branch (pan driven by a modulation input) is currently
        // disabled via `if false`; only the fixed-pan branch below is live.
        if false {
            assert!(inputs.len() > 0);
            let mod_buf = &mut inputs[0].buffers();
            match inputs[0].buffers().len() {
                1 => {
                    output[0] = inputs[1].buffers()[0].clone();
                    output[1] = inputs[1].buffers()[0].clone();
                },
                2 => {
                    output[0] = inputs[1].buffers()[0].clone();
                    output[1] = inputs[1].buffers()[1].clone();
                },
                _ => { unimplemented!() }
            };
            for i in 0..128 {
                let p = mod_buf[0][i];
                output[0][i] *= 1.0 - (p+1.)/2.;
                output[1][i] *= (p+1.)/2.;
            }
        } else {
            // Linear pan law: the right gain is (pan+1)/2 and the left gain is
            // its complement, matching the modulated branch above.
            match inputs[0].buffers().len() {
                1 => {
                    let mut l = inputs[0].buffers()[0].clone();
                    let mut r = l.clone();
                    l.iter_mut().for_each(|s| *s = *s * (1.0 - (self.pan+1.)/2.));
                    r.iter_mut().for_each(|s| *s = *s * ((self.pan+1.)/2.));
                    output[0] = l;
                    output[1] = r;
                },
                2 => {
                    output[0] = inputs[0].buffers()[0].clone();
                    output[1] = inputs[0].buffers()[1].clone();
                    output[0].iter_mut().for_each(|s| *s = *s * (1.0 - (self.pan+1.)/2.));
                    output[1].iter_mut().for_each(|s| *s = *s * ((self.pan+1.)/2.));
                },
                _ => { panic!() }
            }
        }
    }
}

#[macro_export]
macro_rules! pan {
    () => { // controlled by modulator, no need for value
        Pan::new(0.5)
    };

    ($data: expr) => {
        Pan::new($data)
    };
}
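// Worked example of the pan law above (a sketch of the intended behaviour):
// with pan = -1.0 the right gain (pan+1)/2 is 0.0 and the left gain is 1.0
// (hard left); with pan = 0.0 both gains are 0.5 (centre); with pan = 1.0
// the right gain is 1.0 and the left gain is 0.0 (hard right).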
30.528571
89
0.386055
d6d31b6c2b4df52d98870dca70a614d3d3192fc3
1,227
use crate::data::{get_version_specific_file, ENTITIES_FILE};
use crate::models::entity::Entity;
use crate::models::version::Version;
use crate::DataResult;
use std::collections::HashMap;
use std::iter::FromIterator;
use std::sync::Arc;

pub struct Entities {
    version: Arc<Version>,
}

impl Entities {
    pub fn new(version: Arc<Version>) -> Self {
        Self { version }
    }

    /// Returns an unordered array of entities
    pub fn entities_array(&self) -> DataResult<Vec<Entity>> {
        let content = get_version_specific_file(&self.version, ENTITIES_FILE)?;
        let entities = serde_json::from_str(&content)?;
        Ok(entities)
    }

    /// Returns entities indexed by name
    pub fn entities_by_name(&self) -> DataResult<HashMap<String, Entity>> {
        let entities = self.entities_array()?;
        let entities_map = HashMap::from_iter(entities.into_iter().map(|e| (e.name.clone(), e)));
        Ok(entities_map)
    }

    /// Returns entities indexed by id
    pub fn entities(&self) -> DataResult<HashMap<u32, Entity>> {
        let entities = self.entities_array()?;
        let entities_map = HashMap::from_iter(entities.into_iter().map(|e| (e.id, e)));
        Ok(entities_map)
    }
}
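// A hedged usage sketch (hypothetical call site; assumes an `Arc<Version>` has
// been obtained elsewhere and that `Entity` exposes the `name` and `id` fields
// used by the index builders above):
//
//     let entities = Entities::new(version);
//     let by_name = entities.entities_by_name()?;
//     if let Some(entity) = by_name.get("creeper") {
//         println!("entity id: {}", entity.id);
//     }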
29.214286
97
0.654442
dd1b63c61c7270d7c485bcbaa3371c7e1a74db18
22,153
#![doc(html_playground_url = "https://play.rust-lang.org")]
//! # The ECMA 402 abstract API surface proposal for `Intl.ListFormat`
//!
//! *[email protected]\
//! Created: 2020-05-28\
//! Last updated: 2020-06-09*
//!
//! This proposal is an elaboration of the article [Meta-proposal: Towards common ECMA 402 API
//! surface for Rust][meta1]. It contains trait declarations for a rust-flavored
//! [ECMA-402 API][ecma402api].
//!
//! [ecma402api]: https://www.ecma-international.org/publications/standards/Ecma-402.htm
//! [meta1]: https://github.com/unicode-org/icu4x/blob/%6d%61%73%74%65%72/proposals/pr001-ecma402.md
//!
//! ## A note about presentation
//!
//! This proposal is deliberately written in the form of compilable rust code, and is perhaps best
//! consumed by looking at the output of the command `cargo doc --open` run at the top-level
//! directory. Such a presentation allows us to embed live code inline with the text, which can
//! be readily tested by clicking the "Run" button:
//!
//! ```rust
//! println!("Hello Unicode! ❤️!");
//! ```
//!
//! It's not quite [literate
//! programming](https://en.wikipedia.org/wiki/Literate_programming) but should be close enough for
//! getting a general feel for how the API will be used, and allows one to follow the text together
//! with the presentation.
//!
//! ## Approach
//!
//! As outlined in the [meta-proposal][meta1approach], we will first test the feasibility of an
//! ECMA-402 API with a minimal example. [`Intl.ListFormat`][listformat] was chosen. It is
//! a very small API surface with few configuration options compared to other members of the same
//! API family, while in general it is very similar to all other functions in the [`Intl`
//! library][intl].
//!
//! [meta1approach]: https://github.com/unicode-org/icu4x/blob/%6d%61%73%74%65%72/proposals/pr001-ecma402.md#approach
//! [listformat]:
//! https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Intl/ListFormat
//! [intl]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Intl
//!
//! # A closer look at `Intl.ListFormat`
//!
//! A [quick example of `Intl.ListFormat` use][lfquick] in JavaScript is shown below, for
//! completeness and as a baseline for comparison.
//!
//! [lfquick]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Intl/ListFormat/ListFormat
//!
//! ```js
//! const vehicles = ['Motorcycle', 'Bus', 'Car'];
//!
//! const formatter = new Intl.ListFormat('en', { style: 'long', type: 'conjunction' });
//! console.log(formatter.format(vehicles));
//! // expected output: "Motorcycle, Bus, and Car"
//! ```
//!
//! Per [MDN][lfquick], disregarding the JavaScript declaration overhead, the following functions
//! are the focus:
//!
//! | | Description |
//! |--|--|
//! | `Intl.supportedLocalesOf() => [String]` | Returns the subset of the given locales that is supported for list formatting |
//! | `Intl.ListFormat(locale..., options) => <ListFormat object>` | Creates a ListFormat object, initialized for the given locale and options set |
//! | `<ListFormat object>.format([element,...]) => <String>` | Formats the list of `element`s according to the formatting rules for `locale` that the `<ListFormat object>` was initialized with. |
//! | `<ListFormat object>.formatToParts([element,...]) => [String]` | Formats the list of `element`s according to the formatting rules for `locale` that the `<ListFormat object>` was initialized with. |
//!
//! The options are as follows (details omitted here, refer to [MDN][lfquick]):
//!
//! | | Values |
//! |--|--|
//! | `locale` | [BCP 47][bcp47] language tag for the locale actually used. |
//! | `style` | One of `long`, `short`, `narrow` |
//! | `type` | One of `conjunction`, `disjunction` |
//!
//! [bcp47]: https://tools.ietf.org/html/bcp47
//!
//! # `Intl.ListFormat` API proposal in Rust
//!
//! This section presents a condensed view of the implementation for `ListFormat` in Rust. Please
//! see the section [Considerations](#considerations) for the design rationale.
//!
//! Implementation fragments; consult each link for specifics on each of the items below:
//! - [Locale] trait (better name pending).
//! - [listformat] mod.
//! - [listformat::options] mod.
//! - [listformat::Format] trait.
//!
//! # Considerations
//!
//! ## Names, names
//!
//! Naming is always fun. This proposal is presented as a crate `ecma402_listformat`, but is
//! intended to be re-exported from a top-level crate with a name such as `ecma402::listformat`.
//!
//! This gives the following sample option uses, which can be lengthened to avoid ambiguity.
//!
//! ```rust ignore
//! use ecma402::listformat::options::In;
//! use ecma402::listformat::options::Style;
//! // ... etc
//! ```
//!
//! ## Options handling
//!
//! The configuration options may be the easy bits. The style and type are probably simple enough
//! to define in place, instead of delegating that to the implementor. We define them in a new
//! mod, to keep the `Style` and `Type` names free for reuse for other parts of the API surface.
//! Not all options can be packaged this way; some require an open-ended set of possible elements
//! (of which some may be well-formed but invalid, for example).
//!
//! `Style` and `Type` are concrete types, since those are fixed by the API.
//!
//! ```rust
//! /// Deliberately placed in a separate mod, to make for ergonomic naming. Short
//! /// names are a personal preference: `options::Style` reads better to me than
//! /// `OptionsStyle` or such. It does not seem in conflict with ECMA-402, since
//! /// it says nothing about types at all.
//! mod options {
//!     pub enum Style {
//!         Long, Short, Narrow,
//!     }
//!     pub enum Type {
//!         Conjunction, Disjunction,
//!     }
//!     /// These are the options that go in.
//!     /// Use as `options::In`. I happen to like short names on types that
//!     /// have the same name as the mod they are in, to reduce repetition.
//!     pub struct In {
//!         pub style: Style,
//!         /// `type` is a reserved word. Better suggestions welcome.
//!         pub in_type: Type,
//!     }
//! }
//! ```
//!
//! You would refer to them by `listformat::options::Style::Long` and the like, which gives us
//! non-repetitive and reusable names. `use` clauses can be used to shorten if needed:
//!
//! ```rust ignore
//! use listformat::options::Style;
//! // ...
//! ```
//!
//! ## Locale
//!
//! Passing in a locale specifier is the first nontrivial design decision. This is because locales
//! may be implemented in a number of different ways:
//!
//! 1. Is the locale data owned or borrowed?
//! 1. Are locales fallible or infallible? I.e. are they *well-formed* or always *valid*.
//! 1. Do locales come from a system repository or are they somehow provided by the end user?
//!
//! In Rust, each of these concerns seems to ask for different interfaces, making general
//! interfaces hard to formulate. I welcome a correction here if I'm wrong. The objective is to
//! unify as many as possible of the above use cases in a trait that can express all of them.
//!
//! ### "Stringly typed" API
//!
//! One possibility is to require a string-like type (`AsRef<str>`).
//!
//! Pros:
//!
//! * This is the closest to the JavaScript API surface
//! * It is the easiest one to implement.
//!
//! Cons:
//!
//! * It has the loosest guarantees as to the functionality it provides.
//! * It implies convertibility to a string-like object, which may require allocation.
//!
//! This approach is inferior compared to the Formatting API because it forces
//! the user's hand in string use. A better alternative is below.
//!
//! ### Formatting API
//!
//! The formatting API is just the following:
//!
//! ```rust
//! use std::fmt;
//! use std::hash;
//! pub trait Locale: fmt::Display + hash::Hash {
//!     // We may extend this with more functionality.
//! };
//! ```
//!
//! Pros:
//!
//! * Simplicity
//! * Defers the decision on how to format the `Locale` and delegates it to
//!   the user.
//!
//! Cons:
//!
//! * Assumes that on-screen display of a `Locale` is the same as string
//!   serialization of the locale.
//!
//! We believe that this conflation is not an issue today, as the same effective
//! approach is already being used in Gecko.
//!
//! ## Error handling
//!
//! It is conceivable that a method call could return an error. As error reporting is fairly cheap
//! in Rust, returning `Result<_, E>` for `E` being some error type, should be expected and
//! natural. To make this useful, `E` should probably be constrained to a standard "error
//! reporting" type such as `std::error::Error`.
//!
//! This suggests a general error handling approach:
//!
//! ```rust
//! use std::error;
//! pub trait Trait {
//!     type Error: error::Error;
//!     fn function() -> Result<(), Self::Error>;
//! }
//! ```
//!
//! Pros:
//!
//! - A standardized error reporting type.
//! - Allows the use of crates such as [`anyhow`](https://crates.io/crates/anyhow).
//!
//! Cons:
//!
//! - Requires `Trait::Error` to implement a potentially hefty trait `std::error::Error`.
//! - There can be only one implementation of `Trait` in a library. In this case it seems
//!   that may be enough.
//!
//! ### Fallibility
//!
//! The issue of fallibility in the API comes up because an implementor can decide to implement
//! lazy checking of the constructed collaborator types. A `LocaleIdentifier` type is one of
//! those types. While [`Locale`][lid] validates its inputs, [`rust_icu_uloc::ULoc`][uloc] does
//! not: in fact, almost all of its operations are fallible, and there does not seem to be a way to
//! validate `ULoc` eagerly, the way the underlying ICU4C API is defined.
//!
//! [lid]: https://github.com/unicode-org/icu4x/pull/47
//! [uloc]: https://docs.rs/rust_icu_uloc/0.2.2/rust_icu_uloc/struct.ULoc.html
//!
//! Now, since `Intl.supportedLocalesOf()` exists, one could say that any user will have
//! the chance to obtain a valid locale either by taking one from the list of supported locales,
//! or by language-matching the desired locale with the list of supported locales ahead of time.
//!
//! This means that an infallible API could work for the case of `Intl.ListFormat`. However,
//! we do not want to rely on a locale resolution protocol imposed by the end user. Furthermore,
//! not all combinations of input options will be valid across all constructors of `Intl.*` types.
//! With this in mind:
//!
//! > Note: Previous versions of this proposal had an infallible constructor `new`. This has been
//! determined to be infeasible, and a fallible constructor `try_new` has been put in place
//! instead.
//!
//! ```rust ignore
//! let loc = rust_icu_uloc::ULoc::try_from("nl-NL").expect("locale accepted");
//! let formatter = ecma402::listformat::Format::try_new(
//!     &loc, ecma402::listformat::options::In{ /* ... */ }).expect("formatter constructed");
//! ```
//!
//! ## Sequences as input parameters
//!
//! This section concerns input parameters that are sequences. The general approach is to
//! defer the forming of the sequence and use `IntoIterator` to pass the values in like so:
//!
//! ```rust
//! // This is the trait that our objects will implement.
//! pub trait Trait {}
//!
//! pub trait Api {
//!     fn function<'a, T>(input: impl IntoIterator<Item=&'a T>)
//!         where T: Trait + 'a;
//! }
//! ```
//!
//! This approach does not work for output parameters returned in a trait, however. An approach
//! for that is given below.
//!
//! ## Iteration over output parameters
//!
//! Next up, let's take a look at how output iteration (iterators as return types) may be
//! traitified.
//!
//! We are exploring this
//! because APIs that require iteration may naturally come out of data items that contain
//! sequences; such as the [variant subtags][vartags]. And since generic iteration may be
//! of more general interest, we explore it in a broader context.
//!
//! [vartags]: http://unicode.org/reports/tr35/#unicode_variant_subtag
//!
//! Generic iteration seems somewhat complicated to express in a Rust trait.
//! [`unic_langid::LanguageIdentifier`][ulangid], for example, has the following method:
//!
//! [ulangid]: https://docs.rs/unic-langid/0.9.0/unic_langid/struct.LanguageIdentifier.html#method.variants
//!
//! ```rust ignore
//! pub fn variants(&self) -> impl ExactSizeIterator
//! ```
//!
//! This expresses quite a natural need to iterate over the specified variants of a locale.
//! We would very much like to traitify this function so that different implementors could
//! contribute their own. The straightforward approach won't fly:
//!
//! ```rust
//! // This will not compile:
//! // "`impl Trait` not allowed outside of function and inherent method return types"
//! use core::iter::ExactSizeIterator;
//! pub trait Trait {
//!     fn variants() -> impl ExactSizeIterator;
//! }
//! ```
//!
//! A second attempt fails too:
//!
//! ```rust
//! // This will not compile:
//! // error[E0191]: the value of the associated type `Item`
//! //   (from trait `std::iter::Iterator`) must be specified
//! use core::iter::ExactSizeIterator;
//! pub trait Trait {
//!     fn variants() -> dyn ExactSizeIterator;
//! }
//! ```
//!
//! Of course this is all invalid Rust, but it was a naive attempt to express a seemingly natural
//! idea of "I'd like this trait to return me an iterator over some string-like objects".
//!
//! Here are more exhibits for the gallery of failed approaches:
//!
//! ```rust
//! // This will not compile:
//! // error[E0191]: the value of the associated type `Item`
//! //   (from trait `std::iter::Iterator`) must be specified
//! use core::iter::ExactSizeIterator;
//! pub trait Trait {
//!     fn variants() -> Box<dyn ExactSizeIterator>;
//! }
//! ```
//!
//! This has a couple of problems:
//!
//! 1. E0191, requiring a concrete associated type as an iterator `Item`, which we don't have.
//! 2. If we were to bind `Item` to a concrete type, we would have made that type obligatory
//!    for all the implementors.
//! 3. [Box] requires an allocation, which is not useful for `#![no_std]` (without alloc).
//!
Even if we disregard (3), then (2) will get us. The following snippet compiles, but //! forever fixes the associated iteration type to `String`. Any implementors that don't //! implement variants as strings are out of luck. //! //! ```rust //! use core::iter::ExactSizeIterator; //! pub trait Trait { //! // Oops... associated type is fixed now. //! fn variants() -> Box<dyn ExactSizeIterator<Item=String>>; //! } //! ``` //! //! Trying to be clever and genericize the type also gets us nowhere: the following compiles, but //! requires a `Box`, which in turn requires a change to the type signature of //! `LanguageIdentifier::variants()`. //! //! ```rust //! use core::iter::ExactSizeIterator; //! pub trait Trait { //! type IterItem; //! fn variants() -> Box<dyn ExactSizeIterator<Item=Self::IterItem>>; //! } //! ``` //! //! Getting to a generic iterator that doesn't require a box is a piece of gymnastics. Getting //! to an iterable of owned strings requires the iterating trait to be implemented over //! *a lifetime-scoped reference* of the implementing type. //! //! ```rust //! pub trait Variants { //! /// The type of the item yielded by the iterator returned by [Variants::variants]. Note //! /// that [Variants::Variant] may be a reference to the type stored in the iterator. //! type Variant; //! /// The type of the iterator returned by [Variants::variants]. //! type Iter: ExactSizeIterator<Item = Self::Variant>; //! fn variants(self) -> Self::Iter; //! } //! //! // Here's how to implement the trait when the underlying type is borrowed. //! //! pub struct BorrowedVariant { //! variants: Vec<&'static str>, //! } //! //! impl<'a> Variants for &'a BorrowedVariant { //! type Variant = &'a &'a str; //! type Iter = std::slice::Iter<'a, &'a str>; //! fn variants(self) -> Self::Iter { //! self.variants.iter() //! } //! } //! //! let borrowed = BorrowedVariant{ variants: vec!["a", "b"], }; //! assert_eq!( //! vec!["a", "b"], //! borrowed.variants() //! .map(|v| v.to_owned().to_owned()) //! .collect::<Vec<String>>(), //! ); //! //! // Here is how to implement the trait when the underlying type is owned. //! //! pub struct OwnedVariant { //! variants: Vec<String>, //! } //! //! impl<'a> Variants for &'a OwnedVariant { //! type Variant = &'a String; //! type Iter = std::slice::Iter<'a, String>; //! fn variants(self) -> Self::Iter { //! self.variants.iter() //! } //! } //! //! let owned = OwnedVariant{ variants: vec!["a".to_string(), "b".to_string()], }; //! assert_eq!( //! vec!["a", "b"], //! owned.variants() //! .map(|v| v.to_owned()) //! .collect::<Vec<String>>(), //! ); //! ``` use std::fmt; /// This trait contains the common features of the Locale object that must be shared among /// all the implementations. Every implementor of `listformat` should provide their /// own locale type, and should ensure that it implements [Locale] as /// specified here. /// /// For the time being we agreed that a [Locale] *must* be convertible into its string /// form, using `Display`. pub trait Locale: fmt::Display {}
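// A minimal sketch (hypothetical, not part of the proposed API): any type that
// knows how to display itself can act as a [Locale]. `SimpleLocale` is a
// placeholder name for illustration only.
#[cfg(test)]
mod locale_sketch {
    use super::Locale;
    use std::fmt;

    /// A toy locale backed by a plain language tag, e.g. "en-US".
    struct SimpleLocale(&'static str);

    impl fmt::Display for SimpleLocale {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.write_str(self.0)
        }
    }

    // The trait has no methods of its own; `Display` is the only obligation.
    impl Locale for SimpleLocale {}

    #[test]
    fn displays_as_its_tag() {
        assert_eq!("en-US", format!("{}", SimpleLocale("en-US")));
    }
}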
/// The [listformat] mod contains all the needed implementation bits for `Intl.ListFormat`. /// /// > Note: This is not yet the entire API. I'd like to get to a consensus on what has been /// defined, then use the patterns adopted here for the rest. pub mod listformat { /// Contains the API configuration as prescribed by ECMA 402. /// /// The meaning of the options is the same as in the similarly named /// options in the JS version. /// /// See [Options] for the contents of the options. See [Format::try_new] /// for the use of the options. pub mod options { /// Chooses the list formatting approach. #[derive(Eq, PartialEq, Debug, Clone)] pub enum Style { Long, Short, Narrow, } /// Chooses between "this, that and other", and "this, that or other". #[derive(Eq, PartialEq, Debug, Clone)] pub enum Type { /// "This, that and other". Conjunction, /// "This, that or other". Disjunction, } } /// The options set by the user at construction time. See discussion at the top level /// about the name choice. Provided as a "bag of options" since we don't expect any /// implementations to be attached to this struct. /// /// The default values of all the options are prescribed by the [TC39 report][tc39lf]. /// /// [tc39lf]: https://tc39.es/proposal-intl-list-format/#sec-Intl.ListFormat #[derive(Eq, PartialEq, Debug, Clone)] pub struct Options { /// Selects an [options::Style] for the formatted list. If unset, defaults /// to [options::Style::Long]. pub style: options::Style, /// Selects an [options::Type] for the formatted list. If unset, defaults to /// [options::Type::Conjunction]. pub in_type: options::Type, } /// Allows the use of `listformat::Format::try_new(..., Default::default())`. impl Default for Options { /// Gets the default values of [Options] if omitted at setup. The /// default values are prescribed by the [TC39 report][tc39lf]. /// /// [tc39lf]: https://tc39.es/proposal-intl-list-format/#sec-Intl.ListFormat fn default() -> Self { Options { style: options::Style::Long, in_type: options::Type::Conjunction, } } } use std::fmt; /// The package workhorse: formats supplied pieces of text into an ergonomically formatted /// list. /// /// While ECMA 402 originally exposes these functions under `Intl`, we probably want to /// obtain a separate factory from each implementor. /// /// Purposely omitted: /// /// - `supported_locales_of`. pub trait Format { /// The type of error reported, if any. type Error: std::error::Error; /// Creates a new [Format]. /// /// Creation may fail, for example, if the locale-specific data is not loaded, or if /// the supplied options are inconsistent. fn try_new(l: impl crate::Locale, opts: Options) -> Result<Self, Self::Error> where Self: std::marker::Sized; /// Formats `list` into the supplied standard `writer` [fmt::Write]. /// /// The original [ECMA 402 function][ecma402fmt] returns a string. This is likely the only /// reasonably generic option in JavaScript, so it is adequate. In Rust, however, it is /// possible to pass in a standard formatting strategy (through `writer`). /// /// [ecma402fmt]: /// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Intl/ListFormat/format /// /// This makes it unnecessary for [Format] to implement its own string conversion, and can /// completely avoid constructing any intermediate representation. This, in turn, /// allows the user to provide a purpose-built formatter, or a custom one if needed. /// /// A purpose-built formatter could be one that formats into a fixed-size buffer; or /// another that knows how to format strings into a DOM. If ECMA 402 compatibility is /// needed, the user can force formatting into a string by passing the appropriate /// formatter. /// /// > Note: /// > - Should there be a convenience method that prints to string specifically? /// > - Do we need `format_into_parts`? fn format<I, L, W>(self, list: L, writer: &mut W) -> fmt::Result where I: fmt::Display, L: IntoIterator<Item = I>, W: fmt::Write; } }
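// A minimal sketch (hypothetical, not part of the proposed API) of what an
// implementation and a call site could look like. `TestLocale` and
// `CommaFormat` are placeholder names; this formatter ignores the options and
// always joins with ", ".
#[cfg(test)]
mod format_sketch {
    use super::listformat;
    use std::fmt;

    struct TestLocale(&'static str);

    impl fmt::Display for TestLocale {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.write_str(self.0)
        }
    }

    impl crate::Locale for TestLocale {}

    struct CommaFormat;

    impl listformat::Format for CommaFormat {
        type Error = fmt::Error;

        // Construction cannot fail here; a real implementor would check the
        // locale data and the options and report errors.
        fn try_new(_locale: impl crate::Locale, _opts: listformat::Options) -> Result<Self, Self::Error> {
            Ok(CommaFormat)
        }

        fn format<I, L, W>(self, list: L, writer: &mut W) -> fmt::Result
        where
            I: fmt::Display,
            L: IntoIterator<Item = I>,
            W: fmt::Write,
        {
            // Write each item straight into `writer`: no intermediate string
            // is ever built.
            for (i, item) in list.into_iter().enumerate() {
                if i > 0 {
                    writer.write_str(", ")?;
                }
                write!(writer, "{}", item)?;
            }
            Ok(())
        }
    }

    #[test]
    fn joins_with_commas() {
        use listformat::Format;
        let formatter = CommaFormat::try_new(TestLocale("en-US"), Default::default())
            .expect("formatter constructed");
        let mut out = String::new();
        formatter.format(vec!["a", "b", "c"], &mut out).expect("formats");
        assert_eq!("a, b, c", out);
    }
}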
40.132246
203
0.649077
ac55e643f6578f5d7e9017f0b4b1c766e3d241ad
1,291
// Copyright 2021 IOTA Stiftung // SPDX-License-Identifier: Apache-2.0 use bee_tangle::{storage::StorageBackend, Tangle}; use crate::{types::LedgerIndex, workers::snapshot::config::SnapshotConfig}; /// Reasons for skipping snapshotting. #[derive(Debug)] pub enum SnapshottingSkipReason { /// Not enough data yet to create a snapshot. BelowThreshold { /// Number of milestones until the snapshot threshold is reached. reached_in: u32 }, /// Snapshotting is deferred to a later milestone. Deferred { /// Number of milestones until the next snapshot is due. next_in: u32 }, } /// Determines whether a snapshot should be taken at the given ledger index, or returns the /// reason why snapshotting should be skipped. pub(crate) fn should_snapshot<B: StorageBackend>( tangle: &Tangle<B>, ledger_index: LedgerIndex, snapshot_depth: u32, snapshot_config: &SnapshotConfig, ) -> Result<(), SnapshottingSkipReason> { let snapshot_index = *tangle.get_snapshot_index(); let snapshot_interval = if tangle.is_synced() { snapshot_config.interval_synced() } else { snapshot_config.interval_unsynced() }; if *ledger_index < snapshot_depth { Err(SnapshottingSkipReason::BelowThreshold { reached_in: snapshot_depth - *ledger_index, }) } else if *ledger_index < snapshot_index + snapshot_interval { Err(SnapshottingSkipReason::Deferred { next_in: (snapshot_index + snapshot_interval) - *ledger_index, }) } else { Ok(()) } }
30.023256
75
0.677769
b9ab0b3f30815b47d3bdc47990f40df38bd602e2
4,279
#![no_std] #![cfg_attr(target_os = "none", no_main)] use core::fmt; use farcri::black_box; use farcri::{criterion_group, criterion_main, BenchmarkGroup, BenchmarkId, Criterion}; use binary_search::{custom_binary_search_1, std_binary_search}; criterion_group!( benches, bench_binsearch, bench_binsearch_duplicates, bench_binsearch_worstcases, bench_random_sorted, ); criterion_main!(benches); #[derive(Copy, Clone)] enum Cache { L1, L2, L3, } impl Cache { fn size(&self) -> usize { match self { Cache::L1 => 1000, // 8kb Cache::L2 => 10_000, // 80kb Cache::L3 => 1_000_000, // 8Mb } } } impl fmt::Display for Cache { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let s = match self { Cache::L1 => "L1", Cache::L2 => "L2", Cache::L3 => "L3", }; f.write_str(s) } } static CACHE_LEVELS: &[Cache] = &[Cache::L1, Cache::L2, Cache::L3]; static mut POOL: [usize; 10_000] = [0; 10_000]; fn bench_binsearch(c: &mut Criterion) { let mut group = c.benchmark_group("Binary Search Increasing"); binsearch(&mut group, |i| i * 2); group.finish(); } fn bench_binsearch_duplicates(c: &mut Criterion) { let mut group = c.benchmark_group("Binary Search With Duplicates"); binsearch(&mut group, core::convert::identity); } fn bench_binsearch_worstcases(c: &mut Criterion) { let mut group = c.benchmark_group("Binary Search Worst cases"); for cache in CACHE_LEVELS { let size = cache.size(); let v = unsafe { if let Some(x) = POOL.get_mut(..size) { x } else { continue; } }; let i = 1; *(v.last_mut().unwrap()) = i; group.bench_with_input(BenchmarkId::new(&"std", cache), &i, |b, i| { b.iter(|| std_binary_search(&v, &i)) }); group.bench_with_input(BenchmarkId::new(&"custom_1", cache), &i, |b, i| { b.iter(|| custom_binary_search_1(&v, &i)) }); } group.finish(); } fn binsearch<F>(group: &mut BenchmarkGroup<'_, '_>, mapper: F) where F: Fn(usize) -> usize, { // LCG constants from https://en.wikipedia.org/wiki/Numerical_Recipes. let r = black_box(|| 0_usize.wrapping_mul(1664525).wrapping_add(1013904223)); let r = r(); for cache in CACHE_LEVELS { let size = cache.size(); let v = unsafe { if let Some(x) = POOL.get_mut(..size) { x } else { continue; } }; for (i, x) in v.iter_mut().enumerate() { *x = mapper(i); } group.bench_with_input(BenchmarkId::new(&"std", cache), &size, |b, size| { // Lookup the whole range to get 50% hits and 50% misses. let i = mapper(r % size); b.iter(|| std_binary_search(&v, &i)) }); group.bench_with_input(BenchmarkId::new(&"custom_1", cache), &size, |b, size| { let i = mapper(r % size); b.iter(|| custom_binary_search_1(&v, &i)) }); } } fn bench_random_sorted(c: &mut Criterion) { use rand::{Rng, SeedableRng}; // LCG constants from https://en.wikipedia.org/wiki/Numerical_Recipes. let r = black_box(|| 0_usize.wrapping_mul(1664525).wrapping_add(1013904223)); let r = r(); let mut rng = rand::rngs::StdRng::seed_from_u64(123456789876545); let mut group = c.benchmark_group("Binary Search With Random Elements Sorted"); for cache in CACHE_LEVELS { let size = cache.size(); let i = r % size; let v = unsafe { if let Some(x) = POOL.get_mut(..size) { x } else { continue; } }; for x in v.iter_mut() { *x = rng.gen_range(1_usize..=256); } v.sort_unstable(); group.bench_with_input(BenchmarkId::new(&"std", cache), &i, |b, i| { b.iter(|| std_binary_search(&v, &i)) }); group.bench_with_input(BenchmarkId::new(&"custom_1", cache), &i, |b, i| { b.iter(|| custom_binary_search_1(&v, &i)) }); } group.finish(); }
28.526667
87
0.548259
878f7899ea462195fa38c308793b875674f90bcb
297
foreigner_class!(class One { self_type One; private constructor = empty; }); foreigner_class!(class Two { self_type Two; private constructor = empty; }); foreigner_class!(class Foo { self_type Foo; private constructor = empty; method Foo::f(&self) -> (One, Two); });
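// A minimal sketch (an assumption, not part of the original test input) of the
// Rust types the foreigner_class! declarations above would bind against: the
// declarations require `One`, `Two`, and `Foo` with `Foo::f(&self) -> (One, Two)`.
pub struct One;
pub struct Two;
pub struct Foo;

impl Foo {
    pub fn f(&self) -> (One, Two) {
        (One, Two)
    }
}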
18.5625
39
0.653199
71a3948d6535fe5698275c43d167b8696fb7040d
139
pub struct DisplayOptions { pub hide_collections: bool, pub display_only_size: bool, pub filter_boxes: Option<Vec<String>>, }
19.857143
42
0.719424
09f0d9bc4cb8df2db5d5594f63b500c74d9bd3fe
2,944
#[doc = "Reader of register FORCEOFF"] pub type R = crate::R<u32, super::FORCEOFF>; #[doc = "Writer for register FORCEOFF"] pub type W = crate::W<u32, super::FORCEOFF>; #[doc = "Register FORCEOFF `reset()`'s with value 0x01"] impl crate::ResetValue for super::FORCEOFF { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0x01 } } #[doc = "Force off power and clock in Network core\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FORCEOFF_A { #[doc = "0: Release force off signal"] RELEASE, #[doc = "1: Hold force off signal"] HOLD, } impl From<FORCEOFF_A> for bool { #[inline(always)] fn from(variant: FORCEOFF_A) -> Self { match variant { FORCEOFF_A::RELEASE => false, FORCEOFF_A::HOLD => true, } } } #[doc = "Reader of field `FORCEOFF`"] pub type FORCEOFF_R = crate::R<bool, FORCEOFF_A>; impl FORCEOFF_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FORCEOFF_A { match self.bits { false => FORCEOFF_A::RELEASE, true => FORCEOFF_A::HOLD, } } #[doc = "Checks if the value of the field is `RELEASE`"] #[inline(always)] pub fn is_release(&self) -> bool { *self == FORCEOFF_A::RELEASE } #[doc = "Checks if the value of the field is `HOLD`"] #[inline(always)] pub fn is_hold(&self) -> bool { *self == FORCEOFF_A::HOLD } } #[doc = "Write proxy for field `FORCEOFF`"] pub struct FORCEOFF_W<'a> { w: &'a mut W, } impl<'a> FORCEOFF_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FORCEOFF_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Release force off signal"] #[inline(always)] pub fn release(self) -> &'a mut W { self.variant(FORCEOFF_A::RELEASE) } #[doc = "Hold force off signal"] #[inline(always)] pub fn hold(self) -> &'a mut W { self.variant(FORCEOFF_A::HOLD) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01); self.w } } impl R { #[doc = "Bit 0 - Force off power and clock in Network core"] #[inline(always)] pub fn forceoff(&self) -> FORCEOFF_R { FORCEOFF_R::new((self.bits & 0x01) != 0) } } impl W { #[doc = "Bit 0 - Force off power and clock in Network core"] #[inline(always)] pub fn forceoff(&mut self) -> FORCEOFF_W { FORCEOFF_W { w: self } } }
28.038095
73
0.564198
0373e59bf885fdd6b20a036f7e72249c63d8b2c0
40
pub mod c1; pub mod c2; pub mod c2_mod;
10
15
0.7
62f7d1030aa34c7674f0c285a467ca71e744f973
3,896
use crate::errors::{AppError, AppErrorType::*}; use crate::models::{TodoItem, TodoList}; use deadpool_postgres::Client; use tokio_pg_mapper::FromTokioPostgresRow; pub async fn create_todo(client: &Client, title: String) -> Result<TodoList, AppError> { let statement = client .prepare("insert into todo_list (title) values ($1) returning id, title") .await?; client .query(&statement, &[&title]) .await? .iter() .map(|row| TodoList::from_row_ref(&row).unwrap()) .collect::<Vec<TodoList>>() .pop() .ok_or(AppError { message: Some("Error creating TODO list".to_string()), cause: Some("Unknown error.".to_string()), error_type: DbError, }) } pub async fn get_todos(client: &Client) -> Result<Vec<TodoList>, AppError> { let statement = client .prepare("select * from todo_list order by id desc") .await?; let todos = client .query(&statement, &[]) .await? .iter() .map(|row| TodoList::from_row_ref(row).unwrap()) .collect::<Vec<TodoList>>(); Ok(todos) } pub async fn get_todo(client: &Client, list_id: i32) -> Result<TodoList, AppError> { let statement = client .prepare("select * from todo_list where id = $1") .await?; let maybe_todo = client .query_opt(&statement, &[&list_id]) .await? .map(|row| TodoList::from_row_ref(&row).unwrap()); match maybe_todo { Some(todo) => Ok(todo), None => Err(AppError { error_type: NotFoundError, cause: None, message: Some(format!("Todo list {} not found.", list_id)), }) } } pub async fn create_item( client: &Client, list_id: i32, title: String, ) -> Result<TodoItem, AppError> { let statement = client .prepare("insert into todo_item (list_id, title) values ($1, $2) returning id, list_id, title, checked") .await?; client .query(&statement, &[&list_id, &title]) .await? .iter() .map(|row| TodoItem::from_row_ref(row).unwrap()) .collect::<Vec<TodoItem>>() .pop() .ok_or(AppError { message: Some("Error creating TODO item".to_string()), cause: Some("Unknown error".to_string()), error_type: DbError, }) } pub async fn get_items(client: &Client, list_id: i32) -> Result<Vec<TodoItem>, AppError> { let statement = client .prepare("select * from todo_item where list_id = $1 order by id") .await?; let items = client .query(&statement, &[&list_id]) .await? .iter() .map(|row| TodoItem::from_row_ref(row).unwrap()) .collect::<Vec<TodoItem>>(); Ok(items) } pub async fn get_item(client: &Client, list_id: i32, item_id: i32) -> Result<TodoItem, AppError> { let statement = client .prepare("select * from todo_item where list_id = $1 and id = $2") .await?; let maybe_item = client .query_opt(&statement, &[&list_id, &item_id]) .await? .map(|row| TodoItem::from_row_ref(&row).unwrap()); match maybe_item { Some(item) => Ok(item), None => Err(AppError { error_type: NotFoundError, cause: None, message: Some(format!("Todo item {} from list {} not found.", item_id, list_id)), }), } } pub async fn check_todo(client: &Client, list_id: i32, item_id: i32) -> Result<bool, AppError> { let statement = client .prepare("update todo_item set checked = true where list_id = $1 and id = $2 and checked = false") .await?; let result = client .execute(&statement, &[&list_id, &item_id]) .await?; match result { ref updated if *updated == 1 => Ok(true), _ => Ok(false), } }
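// A minimal sketch (not part of the original module) of how the helpers above
// compose. Assumptions: a configured `deadpool_postgres::Pool` is available,
// and the `TodoList`/`TodoItem` models expose `id: i32` fields (consistent
// with the `returning id, ...` queries above).
#[allow(dead_code)]
async fn example(pool: &deadpool_postgres::Pool) {
    let client = pool.get().await.expect("db connection");
    let list = create_todo(&client, "groceries".to_string())
        .await
        .expect("list created");
    let item = create_item(&client, list.id, "milk".to_string())
        .await
        .expect("item created");
    // Marks the item checked; returns false if it was already checked.
    assert!(check_todo(&client, list.id, item.id).await.expect("update ran"));
}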
29.740458
112
0.572125
d6b49f5940607a2742e48e73501c9821d861e824
288,437
#![doc = "generated by AutoRust 0.1.0"] #![allow(unused_mut)] #![allow(unused_variables)] #![allow(unused_imports)] use crate::models::*; use snafu::{ResultExt, Snafu}; pub mod account_filters { use crate::models::*; use snafu::{ResultExt, Snafu}; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<AccountFilterCollection, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/accountFilters", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(list::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(list::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: AccountFilterCollection = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; list::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, filter_name: &str, ) -> std::result::Result<AccountFilter, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/accountFilters/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, filter_name ); let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(get::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", 
token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(get::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: AccountFilter = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } http::StatusCode::NOT_FOUND => get::NotFound404 {}.fail(), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; get::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod get { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { NotFound404 {}, DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, filter_name: &str, parameters: &AccountFilter, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/accountFilters/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, filter_name ); let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(create_or_update::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = azure_core::to_json(parameters).context(create_or_update::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .context(create_or_update::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: AccountFilter = serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?; Ok(create_or_update::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: AccountFilter = serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?; Ok(create_or_update::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = 
serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?; create_or_update::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod create_or_update { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(AccountFilter), Created201(AccountFilter), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, filter_name: &str, parameters: &AccountFilter, ) -> std::result::Result<AccountFilter, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/accountFilters/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, filter_name ); let mut url = url::Url::parse(url_str).context(update::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(update::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = azure_core::to_json(parameters).context(update::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(update::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(update::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: AccountFilter = serde_json::from_slice(rsp_body).context(update::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(update::DeserializeError { body: rsp_body.clone() })?; update::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod update { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, filter_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = 
operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/accountFilters/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, filter_name ); let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(delete::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(delete::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(delete::DeserializeError { body: rsp_body.clone() })?; delete::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod delete { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } } pub mod operations { use crate::models::*; use snafu::{ResultExt, Snafu}; pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<OperationCollection, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!("{}/providers/Microsoft.Media/operations", operation_config.base_path(),); let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(list::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(list::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: OperationCollection = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } 
status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; list::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } } pub mod mediaservices { use crate::models::*; use snafu::{ResultExt, Snafu}; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, ) -> std::result::Result<MediaServiceCollection, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices", operation_config.base_path(), subscription_id, resource_group_name ); let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(list::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(list::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: MediaServiceCollection = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; list::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<MediaService, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}", 
operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(get::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(get::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: MediaService = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; get::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod get { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, parameters: &MediaService, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(create_or_update::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = azure_core::to_json(parameters).context(create_or_update::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .context(create_or_update::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: MediaService = serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { 
body: rsp_body.clone() })?; Ok(create_or_update::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: MediaService = serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?; Ok(create_or_update::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?; create_or_update::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod create_or_update { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(MediaService), Created201(MediaService), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, parameters: &MediaService, ) -> std::result::Result<MediaService, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).context(update::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(update::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = azure_core::to_json(parameters).context(update::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(update::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(update::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: MediaService = serde_json::from_slice(rsp_body).context(update::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(update::DeserializeError { body: rsp_body.clone() })?; update::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod update { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, 
body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(delete::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(delete::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(delete::DeserializeError { body: rsp_body.clone() })?; delete::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod delete { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn sync_storage_keys( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, parameters: &SyncStorageKeysInput, ) -> std::result::Result<(), sync_storage_keys::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/syncStorageKeys", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).context(sync_storage_keys::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(sync_storage_keys::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = 
azure_core::to_json(parameters).context(sync_storage_keys::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(sync_storage_keys::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .context(sync_storage_keys::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(()), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(sync_storage_keys::DeserializeError { body: rsp_body.clone() })?; sync_storage_keys::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod sync_storage_keys { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn list_edge_policies( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, parameters: &ListEdgePoliciesInput, ) -> std::result::Result<EdgePolicies, list_edge_policies::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/listEdgePolicies", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).context(list_edge_policies::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(list_edge_policies::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = azure_core::to_json(parameters).context(list_edge_policies::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(list_edge_policies::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .context(list_edge_policies::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: EdgePolicies = serde_json::from_slice(rsp_body).context(list_edge_policies::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(list_edge_policies::DeserializeError { body: rsp_body.clone() })?; list_edge_policies::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list_edge_policies { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, 
SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn list_by_subscription( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<MediaServiceCollection, list_by_subscription::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Media/mediaservices", operation_config.base_path(), subscription_id ); let mut url = url::Url::parse(url_str).context(list_by_subscription::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(list_by_subscription::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(list_by_subscription::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .context(list_by_subscription::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: MediaServiceCollection = serde_json::from_slice(rsp_body).context(list_by_subscription::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(list_by_subscription::DeserializeError { body: rsp_body.clone() })?; list_by_subscription::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list_by_subscription { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn get_by_subscription( operation_config: &crate::OperationConfig, subscription_id: &str, account_name: &str, ) -> std::result::Result<MediaService, get_by_subscription::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Media/mediaservices/{}", operation_config.base_path(), subscription_id, account_name ); let mut url = url::Url::parse(url_str).context(get_by_subscription::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(get_by_subscription::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } 
    pub async fn get_by_subscription(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        account_name: &str,
    ) -> std::result::Result<MediaService, get_by_subscription::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.Media/mediaservices/{}",
            operation_config.base_path(),
            subscription_id,
            account_name
        );
        let mut url = url::Url::parse(url_str).context(get_by_subscription::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(get_by_subscription::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(get_by_subscription::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(get_by_subscription::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: MediaService = serde_json::from_slice(rsp_body).context(get_by_subscription::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(get_by_subscription::DeserializeError { body: rsp_body.clone() })?;
                get_by_subscription::DefaultResponse { status_code, value: rsp_value }.fail()
            }
        }
    }
    pub mod get_by_subscription {
        use crate::{models, models::*};
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: http::StatusCode, value: models::ApiError },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod private_link_resources {
    use crate::models::*;
    use snafu::{ResultExt, Snafu};
    pub async fn list(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
    ) -> std::result::Result<PrivateLinkResourceListResult, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/privateLinkResources",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name
        );
        let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(list::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: PrivateLinkResourceListResult = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
                list::DefaultResponse { status_code, value: rsp_value }.fail()
            }
        }
    }
    pub mod list {
        use crate::{models, models::*};
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: http::StatusCode, value: models::ApiError },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        name: &str,
    ) -> std::result::Result<PrivateLinkResource, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/privateLinkResources/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            name
        );
        let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(get::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: PrivateLinkResource = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
                get::DefaultResponse { status_code, value: rsp_value }.fail()
            }
        }
    }
    pub mod get {
        use crate::{models, models::*};
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: http::StatusCode, value: models::ApiError },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod private_endpoint_connections {
    use crate::models::*;
    use snafu::{ResultExt, Snafu};
    pub async fn list(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
    ) -> std::result::Result<PrivateEndpointConnectionListResult, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/privateEndpointConnections",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name
        );
        let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(list::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: PrivateEndpointConnectionListResult = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
                list::DefaultResponse { status_code, value: rsp_value }.fail()
            }
        }
    }
    pub mod list {
        use crate::{models, models::*};
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: http::StatusCode, value: models::ApiError },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        name: &str,
    ) -> std::result::Result<PrivateEndpointConnection, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/privateEndpointConnections/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            name
        );
        let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(get::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: PrivateEndpointConnection = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
                get::DefaultResponse { status_code, value: rsp_value }.fail()
            }
        }
    }
    pub mod get {
        use crate::{models, models::*};
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: http::StatusCode, value: models::ApiError },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
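    // Illustrative only -- not generated. Fetching a single private endpoint connection;
    // all ids and names below are placeholders and `operation_config` construction is
    // assumed to happen elsewhere.
    pub async fn example_get_connection(
        operation_config: &crate::OperationConfig,
    ) -> Result<PrivateEndpointConnection, Box<dyn std::error::Error>> {
        // `?` boxes any of the generated error variants (URL, token, transport, deserialize).
        let connection = get(
            operation_config,
            "<subscription-id>",
            "<resource-group>",
            "<account-name>",
            "<connection-name>",
        )
        .await?;
        Ok(connection)
    }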
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        name: &str,
        parameters: &PrivateEndpointConnection,
    ) -> std::result::Result<PrivateEndpointConnection, create_or_update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/privateEndpointConnections/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            name
        );
        let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(create_or_update::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = azure_core::to_json(parameters).context(create_or_update::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(create_or_update::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: PrivateEndpointConnection = serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
                create_or_update::DefaultResponse { status_code, value: rsp_value }.fail()
            }
        }
    }
    pub mod create_or_update {
        use crate::{models, models::*};
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: http::StatusCode, value: models::ApiError },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        name: &str,
    ) -> std::result::Result<(), delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/privateEndpointConnections/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            name
        );
        let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(delete::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(delete::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(()),
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(delete::DeserializeError { body: rsp_body.clone() })?;
                delete::DefaultResponse { status_code, value: rsp_value }.fail()
            }
        }
    }
    pub mod delete {
        use crate::{models, models::*};
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: http::StatusCode, value: models::ApiError },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
}
pub mod locations {
    use crate::models::*;
    use snafu::{ResultExt, Snafu};
    pub async fn check_name_availability(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        location_name: &str,
        parameters: &CheckNameAvailabilityInput,
    ) -> std::result::Result<EntityNameAvailabilityCheckOutput, check_name_availability::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.Media/locations/{}/checkNameAvailability",
            operation_config.base_path(),
            subscription_id,
            location_name
        );
        let mut url = url::Url::parse(url_str).context(check_name_availability::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(check_name_availability::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = azure_core::to_json(parameters).context(check_name_availability::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(check_name_availability::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(check_name_availability::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: EntityNameAvailabilityCheckOutput = serde_json::from_slice(rsp_body).context(check_name_availability::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(check_name_availability::DeserializeError { body: rsp_body.clone() })?;
                check_name_availability::DefaultResponse { status_code, value: rsp_value }.fail()
            }
        }
    }
    pub mod check_name_availability {
        use crate::{models, models::*};
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: http::StatusCode, value: models::ApiError },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
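    // Illustrative only -- not generated. Checking whether a Media Services account name
    // is available in a region. The fields used to populate `CheckNameAvailabilityInput`
    // below are an assumption based on its role and are not taken from this file, so the
    // sketch is left commented out:
    //
    //     let input = CheckNameAvailabilityInput {
    //         name: Some("contosomedia".to_string()),          // assumed field
    //         type_: Some("mediaservices".to_string()),        // assumed field
    //     };
    //     let output = check_name_availability(operation_config, "<subscription-id>", "westus2", &input).await?;
    //     // `EntityNameAvailabilityCheckOutput` reports availability and, if taken, the reason.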
}
pub mod assets {
    use crate::models::*;
    use snafu::{ResultExt, Snafu};
    pub async fn list(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        filter: Option<&str>,
        top: Option<i64>,
        orderby: Option<&str>,
    ) -> std::result::Result<AssetCollection, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/assets",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name
        );
        let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        if let Some(filter) = filter {
            url.query_pairs_mut().append_pair("$filter", filter);
        }
        if let Some(top) = top {
            url.query_pairs_mut().append_pair("$top", top.to_string().as_str());
        }
        if let Some(orderby) = orderby {
            url.query_pairs_mut().append_pair("$orderby", orderby);
        }
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(list::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: AssetCollection = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
                list::DefaultResponse { status_code, value: rsp_value }.fail()
            }
        }
    }
    pub mod list {
        use crate::{models, models::*};
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: http::StatusCode, value: models::ApiError },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
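    // Illustrative only -- not generated. Listing assets with the optional OData
    // parameters; passing `None` for all three lists every asset in the account.
    // Ids, names, and the `$orderby` expression are placeholders.
    pub async fn example_list_recent_assets(
        operation_config: &crate::OperationConfig,
    ) -> Result<AssetCollection, Box<dyn std::error::Error>> {
        // `$top` limits the page size; `$orderby` is evaluated server-side.
        let assets = list(
            operation_config,
            "<subscription-id>",
            "<resource-group>",
            "<account-name>",
            None,
            Some(10),
            Some("properties/created desc"),
        )
        .await?;
        Ok(assets)
    }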
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        asset_name: &str,
    ) -> std::result::Result<Asset, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/assets/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            asset_name
        );
        let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(get::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: Asset = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            http::StatusCode::NOT_FOUND => get::NotFound404 {}.fail(),
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
                get::DefaultResponse { status_code, value: rsp_value }.fail()
            }
        }
    }
    pub mod get {
        use crate::{models, models::*};
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            NotFound404 {},
            DefaultResponse { status_code: http::StatusCode, value: models::ApiError },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        asset_name: &str,
        parameters: &Asset,
    ) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/assets/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            asset_name
        );
        let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(create_or_update::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = azure_core::to_json(parameters).context(create_or_update::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(create_or_update::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: Asset = serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
                Ok(create_or_update::Response::Ok200(rsp_value))
            }
            http::StatusCode::CREATED => {
                let rsp_body = rsp.body();
                let rsp_value: Asset = serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
                Ok(create_or_update::Response::Created201(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
                create_or_update::DefaultResponse { status_code, value: rsp_value }.fail()
            }
        }
    }
    pub mod create_or_update {
        use crate::{models, models::*};
        use snafu::Snafu;
        #[derive(Debug)]
        pub enum Response {
            Ok200(Asset),
            Created201(Asset),
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: http::StatusCode, value: models::ApiError },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    pub async fn update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        asset_name: &str,
        parameters: &Asset,
    ) -> std::result::Result<Asset, update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/assets/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            asset_name
        );
        let mut url = url::Url::parse(url_str).context(update::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PATCH);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(update::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = azure_core::to_json(parameters).context(update::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(update::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(update::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: Asset = serde_json::from_slice(rsp_body).context(update::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(update::DeserializeError { body: rsp_body.clone() })?;
                update::DefaultResponse { status_code, value: rsp_value }.fail()
            }
        }
    }
    pub mod update {
        use crate::{models, models::*};
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: http::StatusCode, value: models::ApiError },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        asset_name: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/assets/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            asset_name
        );
        let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(delete::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(delete::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(delete::Response::Ok200),
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(delete::DeserializeError { body: rsp_body.clone() })?;
                delete::DefaultResponse { status_code, value: rsp_value }.fail()
            }
        }
    }
    pub mod delete {
        use crate::{models, models::*};
        use snafu::Snafu;
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            NoContent204,
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: http::StatusCode, value: models::ApiError },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    pub async fn list_container_sas(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        asset_name: &str,
        parameters: &ListContainerSasInput,
    ) -> std::result::Result<AssetContainerSas, list_container_sas::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/assets/{}/listContainerSas",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            asset_name
        );
        let mut url = url::Url::parse(url_str).context(list_container_sas::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(list_container_sas::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = azure_core::to_json(parameters).context(list_container_sas::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(list_container_sas::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(list_container_sas::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: AssetContainerSas = serde_json::from_slice(rsp_body).context(list_container_sas::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(list_container_sas::DeserializeError { body: rsp_body.clone() })?;
                list_container_sas::DefaultResponse { status_code, value: rsp_value }.fail()
            }
        }
    }
    pub mod list_container_sas {
        use crate::{models, models::*};
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: http::StatusCode, value: models::ApiError },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    pub async fn get_encryption_key(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        asset_name: &str,
    ) -> std::result::Result<StorageEncryptedAssetDecryptionData, get_encryption_key::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/assets/{}/getEncryptionKey",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            asset_name
        );
        let mut url = url::Url::parse(url_str).context(get_encryption_key::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(get_encryption_key::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(get_encryption_key::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(get_encryption_key::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: StorageEncryptedAssetDecryptionData = serde_json::from_slice(rsp_body).context(get_encryption_key::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(get_encryption_key::DeserializeError { body: rsp_body.clone() })?;
                get_encryption_key::DefaultResponse { status_code, value: rsp_value }.fail()
            }
        }
    }
    pub mod get_encryption_key {
        use crate::{models, models::*};
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: http::StatusCode, value: models::ApiError },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    pub async fn list_streaming_locators(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        asset_name: &str,
    ) -> std::result::Result<ListStreamingLocatorsResponse, list_streaming_locators::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/assets/{}/listStreamingLocators",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            asset_name
        );
        let mut url = url::Url::parse(url_str).context(list_streaming_locators::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(list_streaming_locators::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(list_streaming_locators::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(list_streaming_locators::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: ListStreamingLocatorsResponse = serde_json::from_slice(rsp_body).context(list_streaming_locators::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(list_streaming_locators::DeserializeError { body: rsp_body.clone() })?;
                list_streaming_locators::DefaultResponse { status_code, value: rsp_value }.fail()
            }
        }
    }
    pub mod list_streaming_locators {
        use crate::{models, models::*};
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: http::StatusCode, value: models::ApiError },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
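    // Illustrative only -- not generated. Retrieving the storage decryption key material
    // for a storage-encrypted asset. Note that the generated call POSTs an empty body and
    // sets an explicit `Content-Length: 0` header. Placeholders as in the examples above.
    pub async fn example_get_asset_key(
        operation_config: &crate::OperationConfig,
    ) -> Result<StorageEncryptedAssetDecryptionData, Box<dyn std::error::Error>> {
        let key_data = get_encryption_key(
            operation_config,
            "<subscription-id>",
            "<resource-group>",
            "<account-name>",
            "<asset-name>",
        )
        .await?;
        // The returned value contains key material; avoid logging it.
        Ok(key_data)
    }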
}
pub mod asset_filters {
    use crate::models::*;
    use snafu::{ResultExt, Snafu};
    pub async fn list(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        asset_name: &str,
    ) -> std::result::Result<AssetFilterCollection, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/assets/{}/assetFilters",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            asset_name
        );
        let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(list::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(list::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: AssetFilterCollection = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?;
                list::DefaultResponse { status_code, value: rsp_value }.fail()
            }
        }
    }
    pub mod list {
        use crate::{models, models::*};
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: http::StatusCode, value: models::ApiError },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        asset_name: &str,
        filter_name: &str,
    ) -> std::result::Result<AssetFilter, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/assets/{}/assetFilters/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            asset_name,
            filter_name
        );
        let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(get::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(get::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: AssetFilter = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            http::StatusCode::NOT_FOUND => get::NotFound404 {}.fail(),
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?;
                get::DefaultResponse { status_code, value: rsp_value }.fail()
            }
        }
    }
    pub mod get {
        use crate::{models, models::*};
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            NotFound404 {},
            DefaultResponse { status_code: http::StatusCode, value: models::ApiError },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        asset_name: &str,
        filter_name: &str,
        parameters: &AssetFilter,
    ) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/assets/{}/assetFilters/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            asset_name,
            filter_name
        );
        let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(create_or_update::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = azure_core::to_json(parameters).context(create_or_update::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(create_or_update::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: AssetFilter = serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
                Ok(create_or_update::Response::Ok200(rsp_value))
            }
            http::StatusCode::CREATED => {
                let rsp_body = rsp.body();
                let rsp_value: AssetFilter = serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
                Ok(create_or_update::Response::Created201(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?;
                create_or_update::DefaultResponse { status_code, value: rsp_value }.fail()
            }
        }
    }
    pub mod create_or_update {
        use crate::{models, models::*};
        use snafu::Snafu;
        #[derive(Debug)]
        pub enum Response {
            Ok200(AssetFilter),
            Created201(AssetFilter),
        }
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: http::StatusCode, value: models::ApiError },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
    pub async fn update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        asset_name: &str,
        filter_name: &str,
        parameters: &AssetFilter,
    ) -> std::result::Result<AssetFilter, update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/assets/{}/assetFilters/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            asset_name,
            filter_name
        );
        let mut url = url::Url::parse(url_str).context(update::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PATCH);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .context(update::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = azure_core::to_json(parameters).context(update::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).context(update::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.context(update::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: AssetFilter = serde_json::from_slice(rsp_body).context(update::DeserializeError { body: rsp_body.clone() })?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(update::DeserializeError { body: rsp_body.clone() })?;
                update::DefaultResponse { status_code, value: rsp_value }.fail()
            }
        }
    }
    pub mod update {
        use crate::{models, models::*};
        use snafu::Snafu;
        #[derive(Debug, Snafu)]
        #[snafu(visibility(pub(crate)))]
        pub enum Error {
            DefaultResponse { status_code: http::StatusCode, value: models::ApiError },
            ParseUrlError { source: url::ParseError },
            BuildRequestError { source: http::Error },
            ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> },
            SerializeError { source: Box<dyn std::error::Error + Sync + Send> },
            DeserializeError { source: serde_json::Error, body: bytes::Bytes },
            GetTokenError { source: azure_core::errors::AzureError },
        }
    }
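    // Illustrative only -- not generated. `create_or_update` distinguishes a fresh
    // creation (201) from an overwrite of an existing filter (200) through its
    // `Response` enum; this sketch surfaces that as a boolean. Placeholders as above.
    pub async fn example_upsert_filter(
        operation_config: &crate::OperationConfig,
        filter: &AssetFilter,
    ) -> Result<bool, Box<dyn std::error::Error>> {
        let created = match create_or_update(
            operation_config,
            "<subscription-id>",
            "<resource-group>",
            "<account-name>",
            "<asset-name>",
            "<filter-name>",
            filter,
        )
        .await?
        {
            create_or_update::Response::Created201(_) => true,
            create_or_update::Response::Ok200(_) => false,
        };
        Ok(created)
    }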
resource_group_name: &str, account_name: &str, asset_name: &str, filter_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/assets/{}/assetFilters/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, asset_name, filter_name ); let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(delete::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(delete::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(delete::DeserializeError { body: rsp_body.clone() })?; delete::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod delete { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } } pub mod content_key_policies { use crate::models::*; use snafu::{ResultExt, Snafu}; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, filter: Option<&str>, top: Option<i64>, orderby: Option<&str>, ) -> std::result::Result<ContentKeyPolicyCollection, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/contentKeyPolicies", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(list::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); if let Some(filter) = filter { 
url.query_pairs_mut().append_pair("$filter", filter); } if let Some(top) = top { url.query_pairs_mut().append_pair("$top", top.to_string().as_str()); } if let Some(orderby) = orderby { url.query_pairs_mut().append_pair("$orderby", orderby); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(list::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ContentKeyPolicyCollection = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; list::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, content_key_policy_name: &str, ) -> std::result::Result<ContentKeyPolicy, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/contentKeyPolicies/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, content_key_policy_name ); let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(get::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(get::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ContentKeyPolicy = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } http::StatusCode::NOT_FOUND => get::NotFound404 {}.fail(), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; get::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod get { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error 
{ NotFound404 {}, DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, content_key_policy_name: &str, parameters: &ContentKeyPolicy, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/contentKeyPolicies/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, content_key_policy_name ); let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(create_or_update::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = azure_core::to_json(parameters).context(create_or_update::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .context(create_or_update::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ContentKeyPolicy = serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?; Ok(create_or_update::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: ContentKeyPolicy = serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?; Ok(create_or_update::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?; create_or_update::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod create_or_update { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(ContentKeyPolicy), Created201(ContentKeyPolicy), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: 
&str, content_key_policy_name: &str, parameters: &ContentKeyPolicy, ) -> std::result::Result<ContentKeyPolicy, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/contentKeyPolicies/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, content_key_policy_name ); let mut url = url::Url::parse(url_str).context(update::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(update::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = azure_core::to_json(parameters).context(update::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(update::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(update::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ContentKeyPolicy = serde_json::from_slice(rsp_body).context(update::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(update::DeserializeError { body: rsp_body.clone() })?; update::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod update { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, content_key_policy_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/contentKeyPolicies/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, content_key_policy_name ); let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(delete::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = 
req_builder.body(req_body).context(delete::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(delete::DeserializeError { body: rsp_body.clone() })?; delete::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod delete { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn get_policy_properties_with_secrets( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, content_key_policy_name: &str, ) -> std::result::Result<ContentKeyPolicyProperties, get_policy_properties_with_secrets::Error> { let http_client = operation_config.http_client(); let url_str = &format!("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/contentKeyPolicies/{}/getPolicyPropertiesWithSecrets", operation_config.base_path(), subscription_id, resource_group_name, account_name, content_key_policy_name); let mut url = url::Url::parse(url_str).context(get_policy_properties_with_secrets::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(get_policy_properties_with_secrets::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .context(get_policy_properties_with_secrets::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .context(get_policy_properties_with_secrets::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ContentKeyPolicyProperties = serde_json::from_slice(rsp_body) .context(get_policy_properties_with_secrets::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } http::StatusCode::NOT_FOUND => get_policy_properties_with_secrets::NotFound404 {}.fail(), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body) .context(get_policy_properties_with_secrets::DeserializeError { body: rsp_body.clone() })?; get_policy_properties_with_secrets::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod get_policy_properties_with_secrets { use crate::{models, 
models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { NotFound404 {}, DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } } pub mod transforms { use crate::models::*; use snafu::{ResultExt, Snafu}; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, filter: Option<&str>, orderby: Option<&str>, ) -> std::result::Result<TransformCollection, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/transforms", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(list::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } if let Some(orderby) = orderby { url.query_pairs_mut().append_pair("$orderby", orderby); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(list::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: TransformCollection = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; list::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, transform_name: &str, ) -> std::result::Result<Transform, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( 
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/transforms/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, transform_name ); let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(get::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(get::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: Transform = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } http::StatusCode::NOT_FOUND => get::NotFound404 {}.fail(), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; get::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod get { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { NotFound404 {}, DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, transform_name: &str, parameters: &Transform, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/transforms/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, transform_name ); let mut url = url::Url::parse(url_str).context(create_or_update::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(create_or_update::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = azure_core::to_json(parameters).context(create_or_update::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(create_or_update::BuildRequestError)?; let rsp = http_client .execute_request(req) 
.await .context(create_or_update::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: Transform = serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?; Ok(create_or_update::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: Transform = serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?; Ok(create_or_update::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(create_or_update::DeserializeError { body: rsp_body.clone() })?; create_or_update::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod create_or_update { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(Transform), Created201(Transform), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, transform_name: &str, parameters: &Transform, ) -> std::result::Result<Transform, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/transforms/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, transform_name ); let mut url = url::Url::parse(url_str).context(update::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(update::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = azure_core::to_json(parameters).context(update::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(update::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(update::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: Transform = serde_json::from_slice(rsp_body).context(update::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(update::DeserializeError { body: rsp_body.clone() })?; update::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod update { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, 
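// The variants below wrap failures from each stage of the request pipeline, in order:
// URL parsing, request construction, transport, body (de)serialization, and token acquisition.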
ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, transform_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/transforms/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, transform_name ); let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(delete::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(delete::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(delete::DeserializeError { body: rsp_body.clone() })?; delete::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod delete { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } } pub mod jobs { use crate::models::*; use snafu::{ResultExt, Snafu}; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, transform_name: &str, filter: Option<&str>, orderby: Option<&str>, ) -> std::result::Result<JobCollection, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/transforms/{}/jobs", operation_config.base_path(), subscription_id, resource_group_name, account_name, transform_name ); let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = 
operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(list::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } if let Some(orderby) = orderby { url.query_pairs_mut().append_pair("$orderby", orderby); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(list::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: JobCollection = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; list::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, transform_name: &str, job_name: &str, ) -> std::result::Result<Job, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/transforms/{}/jobs/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, transform_name, job_name ); let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(get::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(get::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: Job = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } http::StatusCode::NOT_FOUND => get::NotFound404 {}.fail(), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = 
serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; get::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod get { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { NotFound404 {}, DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn create( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, transform_name: &str, job_name: &str, parameters: &Job, ) -> std::result::Result<Job, create::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/transforms/{}/jobs/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, transform_name, job_name ); let mut url = url::Url::parse(url_str).context(create::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(create::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = azure_core::to_json(parameters).context(create::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(create::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(create::ExecuteRequestError)?; match rsp.status() { http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: Job = serde_json::from_slice(rsp_body).context(create::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(create::DeserializeError { body: rsp_body.clone() })?; create::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod create { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, transform_name: &str, job_name: &str, parameters: &Job, ) -> std::result::Result<Job, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( 
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/transforms/{}/jobs/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, transform_name, job_name ); let mut url = url::Url::parse(url_str).context(update::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(update::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = azure_core::to_json(parameters).context(update::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(update::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(update::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: Job = serde_json::from_slice(rsp_body).context(update::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(update::DeserializeError { body: rsp_body.clone() })?; update::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod update { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, transform_name: &str, job_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/transforms/{}/jobs/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, transform_name, job_name ); let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(delete::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(delete::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => 
Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(delete::DeserializeError { body: rsp_body.clone() })?; delete::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod delete { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn cancel_job( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, transform_name: &str, job_name: &str, ) -> std::result::Result<(), cancel_job::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/transforms/{}/jobs/{}/cancelJob", operation_config.base_path(), subscription_id, resource_group_name, account_name, transform_name, job_name ); let mut url = url::Url::parse(url_str).context(cancel_job::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(cancel_job::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(cancel_job::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(cancel_job::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(()), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(cancel_job::DeserializeError { body: rsp_body.clone() })?; cancel_job::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod cancel_job { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } } pub mod streaming_policies { use crate::models::*; use snafu::{ResultExt, Snafu}; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, 
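// Optional OData-style query options; when `None`, the corresponding
// $filter/$top/$orderby pair is simply omitted from the request URL.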
filter: Option<&str>, top: Option<i64>, orderby: Option<&str>, ) -> std::result::Result<StreamingPolicyCollection, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/streamingPolicies", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(list::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } if let Some(top) = top { url.query_pairs_mut().append_pair("$top", top.to_string().as_str()); } if let Some(orderby) = orderby { url.query_pairs_mut().append_pair("$orderby", orderby); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(list::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: StreamingPolicyCollection = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; list::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, streaming_policy_name: &str, ) -> std::result::Result<StreamingPolicy, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/streamingPolicies/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, streaming_policy_name ); let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(get::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } 
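// The api-version pair is taken from the shared OperationConfig, so every
// operation in this client targets the same service API version.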
url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(get::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: StreamingPolicy = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } http::StatusCode::NOT_FOUND => get::NotFound404 {}.fail(), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; get::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod get { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { NotFound404 {}, DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn create( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, streaming_policy_name: &str, parameters: &StreamingPolicy, ) -> std::result::Result<StreamingPolicy, create::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/streamingPolicies/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, streaming_policy_name ); let mut url = url::Url::parse(url_str).context(create::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(create::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = azure_core::to_json(parameters).context(create::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(create::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(create::ExecuteRequestError)?; match rsp.status() { http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: StreamingPolicy = serde_json::from_slice(rsp_body).context(create::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(create::DeserializeError { body: rsp_body.clone() })?; create::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod create { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, 
ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, streaming_policy_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/streamingPolicies/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, streaming_policy_name ); let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(delete::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(delete::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(delete::DeserializeError { body: rsp_body.clone() })?; delete::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod delete { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } } pub mod streaming_locators { use crate::models::*; use snafu::{ResultExt, Snafu}; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, filter: Option<&str>, top: Option<i64>, orderby: Option<&str>, ) -> std::result::Result<StreamingLocatorCollection, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/streamingLocators", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let 
Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(list::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } if let Some(top) = top { url.query_pairs_mut().append_pair("$top", top.to_string().as_str()); } if let Some(orderby) = orderby { url.query_pairs_mut().append_pair("$orderby", orderby); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(list::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: StreamingLocatorCollection = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; list::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, streaming_locator_name: &str, ) -> std::result::Result<StreamingLocator, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/streamingLocators/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, streaming_locator_name ); let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(get::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(get::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: StreamingLocator = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } 
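// A 404 is surfaced as the dedicated NotFound404 variant; any other unexpected
// status is decoded as an ApiError and returned via DefaultResponse.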
http::StatusCode::NOT_FOUND => get::NotFound404 {}.fail(), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; get::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod get { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { NotFound404 {}, DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn create( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, streaming_locator_name: &str, parameters: &StreamingLocator, ) -> std::result::Result<StreamingLocator, create::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/streamingLocators/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, streaming_locator_name ); let mut url = url::Url::parse(url_str).context(create::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(create::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = azure_core::to_json(parameters).context(create::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(create::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(create::ExecuteRequestError)?; match rsp.status() { http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: StreamingLocator = serde_json::from_slice(rsp_body).context(create::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(create::DeserializeError { body: rsp_body.clone() })?; create::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod create { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, streaming_locator_name: &str, ) -> 
std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/streamingLocators/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, streaming_locator_name ); let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(delete::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(delete::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(delete::DeserializeError { body: rsp_body.clone() })?; delete::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod delete { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn list_content_keys( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, streaming_locator_name: &str, ) -> std::result::Result<ListContentKeysResponse, list_content_keys::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/streamingLocators/{}/listContentKeys", operation_config.base_path(), subscription_id, resource_group_name, account_name, streaming_locator_name ); let mut url = url::Url::parse(url_str).context(list_content_keys::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(list_content_keys::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = 
req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(list_content_keys::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .context(list_content_keys::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ListContentKeysResponse = serde_json::from_slice(rsp_body).context(list_content_keys::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(list_content_keys::DeserializeError { body: rsp_body.clone() })?; list_content_keys::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list_content_keys { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn list_paths( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, streaming_locator_name: &str, ) -> std::result::Result<ListPathsResponse, list_paths::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaServices/{}/streamingLocators/{}/listPaths", operation_config.base_path(), subscription_id, resource_group_name, account_name, streaming_locator_name ); let mut url = url::Url::parse(url_str).context(list_paths::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(list_paths::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(list_paths::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(list_paths::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ListPathsResponse = serde_json::from_slice(rsp_body).context(list_paths::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(list_paths::DeserializeError { body: rsp_body.clone() })?; list_paths::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list_paths { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: 
http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } } pub mod live_events { use crate::models::*; use snafu::{ResultExt, Snafu}; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<LiveEventListResult, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/liveEvents", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(list::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(list::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: LiveEventListResult = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; list::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, live_event_name: &str, ) -> std::result::Result<LiveEvent, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/liveEvents/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, live_event_name ); let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await 
.context(get::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(get::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: LiveEvent = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } http::StatusCode::NOT_FOUND => get::NotFound404 {}.fail(), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; get::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod get { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { NotFound404 {}, DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn create( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, live_event_name: &str, auto_start: Option<bool>, parameters: &LiveEvent, ) -> std::result::Result<create::Response, create::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/liveEvents/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, live_event_name ); let mut url = url::Url::parse(url_str).context(create::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(create::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); if let Some(auto_start) = auto_start { url.query_pairs_mut().append_pair("autoStart", auto_start.to_string().as_str()); } let req_body = azure_core::to_json(parameters).context(create::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(create::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(create::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: LiveEvent = serde_json::from_slice(rsp_body).context(create::DeserializeError { body: rsp_body.clone() })?; Ok(create::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: LiveEvent = serde_json::from_slice(rsp_body).context(create::DeserializeError { body: rsp_body.clone() })?; 
Ok(create::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(create::DeserializeError { body: rsp_body.clone() })?; create::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod create { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(LiveEvent), Created201(LiveEvent), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, live_event_name: &str, parameters: &LiveEvent, ) -> std::result::Result<update::Response, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/liveEvents/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, live_event_name ); let mut url = url::Url::parse(url_str).context(update::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(update::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = azure_core::to_json(parameters).context(update::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(update::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(update::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: LiveEvent = serde_json::from_slice(rsp_body).context(update::DeserializeError { body: rsp_body.clone() })?; Ok(update::Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => { let rsp_body = rsp.body(); let rsp_value: LiveEvent = serde_json::from_slice(rsp_body).context(update::DeserializeError { body: rsp_body.clone() })?; Ok(update::Response::Accepted202(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(update::DeserializeError { body: rsp_body.clone() })?; update::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod update { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(LiveEvent), Accepted202(LiveEvent), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error 
+ Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, live_event_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/liveEvents/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, live_event_name ); let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(delete::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(delete::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(delete::DeserializeError { body: rsp_body.clone() })?; delete::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod delete { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn allocate( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, live_event_name: &str, ) -> std::result::Result<allocate::Response, allocate::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/liveEvents/{}/allocate", operation_config.base_path(), subscription_id, resource_group_name, account_name, live_event_name ); let mut url = url::Url::parse(url_str).context(allocate::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(allocate::GetTokenError)?; req_builder = 
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(allocate::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(allocate::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(allocate::Response::Ok200), http::StatusCode::ACCEPTED => Ok(allocate::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(allocate::DeserializeError { body: rsp_body.clone() })?; allocate::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod allocate { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn start( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, live_event_name: &str, ) -> std::result::Result<start::Response, start::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/liveEvents/{}/start", operation_config.base_path(), subscription_id, resource_group_name, account_name, live_event_name ); let mut url = url::Url::parse(url_str).context(start::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(start::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(start::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(start::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(start::Response::Ok200), http::StatusCode::ACCEPTED => Ok(start::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(start::DeserializeError { body: rsp_body.clone() })?; start::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod start { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { 
DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn stop( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, live_event_name: &str, parameters: &LiveEventActionInput, ) -> std::result::Result<stop::Response, stop::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/liveEvents/{}/stop", operation_config.base_path(), subscription_id, resource_group_name, account_name, live_event_name ); let mut url = url::Url::parse(url_str).context(stop::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(stop::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = azure_core::to_json(parameters).context(stop::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(stop::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(stop::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(stop::Response::Ok200), http::StatusCode::ACCEPTED => Ok(stop::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(stop::DeserializeError { body: rsp_body.clone() })?; stop::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod stop { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn reset( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, live_event_name: &str, ) -> std::result::Result<reset::Response, reset::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/liveEvents/{}/reset", operation_config.base_path(), subscription_id, resource_group_name, account_name, live_event_name ); let mut url = url::Url::parse(url_str).context(reset::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = 
operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(reset::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(reset::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(reset::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(reset::Response::Ok200), http::StatusCode::ACCEPTED => Ok(reset::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(reset::DeserializeError { body: rsp_body.clone() })?; reset::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod reset { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } } pub mod live_outputs { use crate::models::*; use snafu::{ResultExt, Snafu}; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, live_event_name: &str, ) -> std::result::Result<LiveOutputListResult, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/liveEvents/{}/liveOutputs", operation_config.base_path(), subscription_id, resource_group_name, account_name, live_event_name ); let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(list::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(list::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: LiveOutputListResult = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; 
list::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, live_event_name: &str, live_output_name: &str, ) -> std::result::Result<LiveOutput, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/liveEvents/{}/liveOutputs/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, live_event_name, live_output_name ); let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(get::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(get::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: LiveOutput = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } http::StatusCode::NOT_FOUND => get::NotFound404 {}.fail(), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; get::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod get { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { NotFound404 {}, DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn create( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, live_event_name: &str, live_output_name: &str, parameters: &LiveOutput, ) -> std::result::Result<create::Response, create::Error> { let http_client = operation_config.http_client(); let url_str = &format!( 
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/liveEvents/{}/liveOutputs/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, live_event_name, live_output_name ); let mut url = url::Url::parse(url_str).context(create::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(create::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = azure_core::to_json(parameters).context(create::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(create::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(create::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: LiveOutput = serde_json::from_slice(rsp_body).context(create::DeserializeError { body: rsp_body.clone() })?; Ok(create::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: LiveOutput = serde_json::from_slice(rsp_body).context(create::DeserializeError { body: rsp_body.clone() })?; Ok(create::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(create::DeserializeError { body: rsp_body.clone() })?; create::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod create { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(LiveOutput), Created201(LiveOutput), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, live_event_name: &str, live_output_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/liveEvents/{}/liveOutputs/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, live_event_name, live_output_name ); let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(delete::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } 
url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(delete::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(delete::DeserializeError { body: rsp_body.clone() })?; delete::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod delete { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } } pub mod streaming_endpoints { use crate::models::*; use snafu::{ResultExt, Snafu}; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<StreamingEndpointListResult, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/streamingEndpoints", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).context(list::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(list::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(list::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(list::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: StreamingEndpointListResult = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(list::DeserializeError { body: rsp_body.clone() })?; list::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { 
source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, streaming_endpoint_name: &str, ) -> std::result::Result<StreamingEndpoint, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/streamingEndpoints/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, streaming_endpoint_name ); let mut url = url::Url::parse(url_str).context(get::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(get::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(get::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(get::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: StreamingEndpoint = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; Ok(rsp_value) } http::StatusCode::NOT_FOUND => get::NotFound404 {}.fail(), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(get::DeserializeError { body: rsp_body.clone() })?; get::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod get { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { NotFound404 {}, DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn create( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, streaming_endpoint_name: &str, auto_start: Option<bool>, parameters: &StreamingEndpoint, ) -> std::result::Result<create::Response, create::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/streamingEndpoints/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, streaming_endpoint_name ); let mut url = url::Url::parse(url_str).context(create::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = 
req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(create::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); if let Some(auto_start) = auto_start { url.query_pairs_mut().append_pair("autoStart", auto_start.to_string().as_str()); } let req_body = azure_core::to_json(parameters).context(create::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(create::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(create::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: StreamingEndpoint = serde_json::from_slice(rsp_body).context(create::DeserializeError { body: rsp_body.clone() })?; Ok(create::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: StreamingEndpoint = serde_json::from_slice(rsp_body).context(create::DeserializeError { body: rsp_body.clone() })?; Ok(create::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(create::DeserializeError { body: rsp_body.clone() })?; create::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod create { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(StreamingEndpoint), Created201(StreamingEndpoint), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, streaming_endpoint_name: &str, parameters: &StreamingEndpoint, ) -> std::result::Result<update::Response, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/streamingEndpoints/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, streaming_endpoint_name ); let mut url = url::Url::parse(url_str).context(update::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(update::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = azure_core::to_json(parameters).context(update::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = 
req_builder.body(req_body).context(update::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(update::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: StreamingEndpoint = serde_json::from_slice(rsp_body).context(update::DeserializeError { body: rsp_body.clone() })?; Ok(update::Response::Ok200(rsp_value)) } http::StatusCode::ACCEPTED => { let rsp_body = rsp.body(); let rsp_value: StreamingEndpoint = serde_json::from_slice(rsp_body).context(update::DeserializeError { body: rsp_body.clone() })?; Ok(update::Response::Accepted202(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(update::DeserializeError { body: rsp_body.clone() })?; update::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod update { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(StreamingEndpoint), Accepted202(StreamingEndpoint), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, streaming_endpoint_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/streamingEndpoints/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, streaming_endpoint_name ); let mut url = url::Url::parse(url_str).context(delete::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(delete::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(delete::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(delete::DeserializeError { body: rsp_body.clone() })?; delete::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod delete { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, Snafu)] 
#[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn start( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, streaming_endpoint_name: &str, ) -> std::result::Result<start::Response, start::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/streamingEndpoints/{}/start", operation_config.base_path(), subscription_id, resource_group_name, account_name, streaming_endpoint_name ); let mut url = url::Url::parse(url_str).context(start::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(start::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(start::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(start::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(start::Response::Ok200), http::StatusCode::ACCEPTED => Ok(start::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(start::DeserializeError { body: rsp_body.clone() })?; start::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod start { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn stop( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, streaming_endpoint_name: &str, ) -> std::result::Result<stop::Response, stop::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/streamingEndpoints/{}/stop", operation_config.base_path(), subscription_id, resource_group_name, account_name, streaming_endpoint_name ); let mut url = url::Url::parse(url_str).context(stop::ParseUrlError)?; let mut req_builder = 
http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(stop::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(stop::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(stop::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(stop::Response::Ok200), http::StatusCode::ACCEPTED => Ok(stop::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(stop::DeserializeError { body: rsp_body.clone() })?; stop::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod stop { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn scale( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, streaming_endpoint_name: &str, parameters: &StreamingEntityScaleUnit, ) -> std::result::Result<scale::Response, scale::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Media/mediaservices/{}/streamingEndpoints/{}/scale", operation_config.base_path(), subscription_id, resource_group_name, account_name, streaming_endpoint_name ); let mut url = url::Url::parse(url_str).context(scale::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .context(scale::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = azure_core::to_json(parameters).context(scale::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).context(scale::BuildRequestError)?; let rsp = http_client.execute_request(req).await.context(scale::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(scale::Response::Ok200), http::StatusCode::ACCEPTED => Ok(scale::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: ApiError = serde_json::from_slice(rsp_body).context(scale::DeserializeError { body: 
rsp_body.clone() })?; scale::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod scale { use crate::{models, models::*}; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: http::StatusCode, value: models::ApiError, }, ParseUrlError { source: url::ParseError, }, BuildRequestError { source: http::Error, }, ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send>, }, SerializeError { source: Box<dyn std::error::Error + Sync + Send>, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } }
41.270139
287
0.549538
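Every operation in the generated client above follows one template: format the ARM resource URL, attach a bearer token when a credential is configured, append the api-version query pair, dispatch through the OperationConfig's HTTP client, then map each documented status code either to a typed Response enum or to that operation's snafu error module (anything undocumented lands in DefaultResponse). A hedged caller sketch, written as if it sat beside these modules: OperationConfig construction is not shown in this excerpt, so it is taken as a parameter, and every resource identifier is a placeholder.

// Hypothetical caller (identifiers are placeholders, not real resources).
pub async fn start_live_event_sketch(
    operation_config: &crate::OperationConfig,
) -> Result<(), live_events::start::Error> {
    match live_events::start(
        operation_config,
        "00000000-0000-0000-0000-000000000000", // subscription_id
        "example-rg",                           // resource_group_name
        "examplemediaacct",                     // account_name
        "example-live-event",                   // live_event_name
    )
    .await?
    {
        // 200: the event transitioned synchronously.
        live_events::start::Response::Ok200 => {}
        // 202: ARM accepted the request; the transition completes asynchronously.
        live_events::start::Response::Accepted202 => {}
    }
    Ok(())
}

The same shape applies to stop, reset, allocate, scale, and the streaming_endpoints operations; only the URL suffix, HTTP method, request body, and Response variants differ.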
507c6ad671a67ed67d2a9f64fd89fd383d3574d9
2,789
// SPDX-FileCopyrightText: 2021 Open Energy Solutions Inc
//
// SPDX-License-Identifier: Apache-2.0

use std::str::FromStr;

use metermodule::MeterReadingProfile;
use openfmb_messages::{
    commonmodule::{MessageInfo, ReadingMessageInfo},
    *,
};
use snafu::{OptionExt, ResultExt};
use uuid::Uuid;

use crate::{error::*, OpenFMBExt, OpenFMBExtReading, ReadingProfileExt};

impl OpenFMBExt for MeterReadingProfile {
    fn device_state(&self) -> OpenFMBResult<String> {
        Ok(self
            .meter_reading
            .as_ref()
            .context(NoMeterReading)?
            .reading_mmxu
            .as_ref()
            .context(NoReadingMmxu)?
            .w
            .as_ref()
            .context(NoW)?
            .net
            .as_ref()
            .context(NoNet)?
            .c_val
            .as_ref()
            .context(NoCVal)?
            .mag
            .to_string())
    }

    fn message_info(&self) -> OpenFMBResult<&MessageInfo> {
        Ok(self
            .reading_message_info
            .as_ref()
            .context(NoReadingMessageInfo)?
            .message_info
            .as_ref()
            .context(NoMessageInfo)?)
    }

    fn message_type(&self) -> OpenFMBResult<String> {
        Ok("MeterReadingProfile".to_string())
    }

    fn device_mrid(&self) -> OpenFMBResult<Uuid> {
        Ok(Uuid::from_str(
            &self
                .meter
                .as_ref()
                .context(NoMeter)?
                .conducting_equipment
                .as_ref()
                .context(NoConductingEquipment)?
                .m_rid,
        )
        .context(UuidError)?)
    }

    fn device_name(&self) -> OpenFMBResult<String> {
        Ok(self
            .meter
            .as_ref()
            .context(NoMeter)?
            .conducting_equipment
            .as_ref()
            .context(NoConductingEquipment)?
            .named_object
            .as_ref()
            .context(NoNamedObject)?
            .name
            .clone()
            .context(NoName)?)
    }
}

impl OpenFMBExtReading for MeterReadingProfile {
    fn reading_message_info(&self) -> OpenFMBResult<&ReadingMessageInfo> {
        Ok(self
            .reading_message_info
            .as_ref()
            .context(NoStatusMessageInfo)?)
    }
}

pub trait MeterReadingExt: ReadingProfileExt {
    /// Net real power magnitude (reading_mmxu.w.net.c_val.mag).
    fn meter_reading(&self) -> f64;
}

impl MeterReadingExt for MeterReadingProfile {
    // Unlike the fallible accessors above, this unwraps each optional link in
    // the reading tree and panics if any of them is absent.
    fn meter_reading(&self) -> f64 {
        self.meter_reading
            .clone()
            .unwrap()
            .reading_mmxu
            .unwrap()
            .w
            .unwrap()
            .net
            .unwrap()
            .c_val
            .unwrap()
            .mag
    }
}

impl ReadingProfileExt for MeterReadingProfile {}
24.252174
74
0.520258
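A hedged usage sketch (not part of the source): the extension traits above give a uniform way to summarize a decoded profile. It is written as if appended to this same file, so the traits and the crate::error selectors are already in scope, and `summarize` is a hypothetical helper name.

// Hypothetical helper. device_mrid/device_name return OpenFMBResult, while
// meter_reading() unwraps internally and panics on missing fields, so only
// call it on a profile known to carry a full reading tree.
fn summarize(profile: &MeterReadingProfile) -> OpenFMBResult<String> {
    let mrid = profile.device_mrid()?; // mRID of the conducting equipment
    let name = profile.device_name()?; // named_object.name
    let watts = profile.meter_reading(); // net instantaneous power magnitude
    Ok(format!("{} ({}): {} W", name, mrid, watts))
}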
759b45e5b68187d51b2abc42b23c3bbd2d705832
1,420
use ptr::Ptr;

// LCD control and background registers.
pub const REG_GRAPHICS_MODE: Ptr<u16> = unsafe { Ptr::from_u32(0x04000000) }; // DISPCNT
pub const REG_BG_AFFINE: Ptr<u16> = unsafe { Ptr::from_u32(0x04000020) }; // BG2PA: first BG2/BG3 affine parameter register
pub const REG_VCOUNT: Ptr<u16> = unsafe { Ptr::from_u32(0x04000006) }; // VCOUNT
pub const REG_BGCNT: Ptr<u16> = unsafe { Ptr::from_u32(0x04000008) }; // BG0CNT
pub const REG_BG_OFS: Ptr<u16> = unsafe { Ptr::from_u32(0x04000010) }; // BG0HOFS
pub const REG_BG_VOFS: Ptr<u16> = unsafe { Ptr::from_u32(0x04000012) }; // BG0VOFS

// Serial communication registers.
pub const REG_DATA_IN0: Ptr<u16> = unsafe { Ptr::from_u32(0x04000120) }; // SIOMULTI0
pub const REG_DATA_IN1: Ptr<u16> = unsafe { Ptr::from_u32(0x04000122) }; // SIOMULTI1
pub const REG_DATA_IN2: Ptr<u16> = unsafe { Ptr::from_u32(0x04000124) }; // SIOMULTI2
pub const REG_DATA_IN3: Ptr<u16> = unsafe { Ptr::from_u32(0x04000126) }; // SIOMULTI3
pub const REG_SIOCNT: Ptr<u16> = unsafe { Ptr::from_u32(0x04000128) }; // SIOCNT
pub const REG_DATA_OUT: Ptr<u16> = unsafe { Ptr::from_u32(0x0400012A) }; // SIOMLT_SEND

// Keypad, mode selection, and interrupt control.
pub const REG_KEY_INPUT: Ptr<u16> = unsafe { Ptr::from_u32(0x04000130) }; // KEYINPUT (active low)
pub const REG_RCNT: Ptr<u16> = unsafe { Ptr::from_u32(0x04000134) }; // RCNT
pub const REG_IE: Ptr<u16> = unsafe { Ptr::from_u32(0x04000200) }; // IE
pub const REG_IME: Ptr<u16> = unsafe { Ptr::from_u32(0x04000208) }; // IME

// Memory regions.
pub const VRAM: Ptr<u16> = unsafe { Ptr::from_u32(0x06000000) };
pub const OAM: Ptr<u32> = unsafe { Ptr::from_u32(0x07000000) };
64.545455
77
0.653521
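The Ptr wrapper's read/write surface is defined elsewhere and not shown in this file, so as a hedged illustration here is the conventional volatile access these memory-mapped constants stand for, written against the raw KEYINPUT address; MMIO reads must go through read_volatile so the compiler cannot cache or elide them.

/// Hedged sketch: returns the keys currently held, as a bitmask with 1 = pressed.
pub fn keys_pressed() -> u16 {
    // KEYINPUT (0x04000130) is active low: hardware clears a bit while its
    // key is held down.
    let raw = unsafe { core::ptr::read_volatile(0x0400_0130 as *const u16) };
    // Invert and keep the ten defined key bits
    // (A, B, Select, Start, Right, Left, Up, Down, R, L).
    !raw & 0x03FF
}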
f5b63561424c22930a9c7b70f8b8a92bbad396bc
45,412
#![allow(clippy::enum_glob_use)]

use std::fmt::{self, Debug, Display};

use glutin::event::VirtualKeyCode::*;
use glutin::event::{ModifiersState, MouseButton, VirtualKeyCode};
use serde::de::Error as SerdeError;
use serde::de::{self, MapAccess, Unexpected, Visitor};
use serde::{Deserialize, Deserializer};
use serde_yaml::Value as SerdeValue;

use alacritty_terminal::config::Program;
use alacritty_terminal::term::TermMode;
use alacritty_terminal::vi_mode::ViMotion;

/// Describes a state and action to take in that state.
///
/// This is the shared component of `MouseBinding` and `KeyBinding`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Binding<T> {
    /// Modifier keys required to activate binding.
    pub mods: ModifiersState,

    /// String to send to PTY if mods and mode match.
    pub action: Action,

    /// Terminal mode required to activate binding.
    pub mode: TermMode,

    /// Excluded terminal modes where the binding won't be activated.
    pub notmode: TermMode,

    /// This property is used as part of the trigger detection code.
    ///
    /// For example, this might be a key like "G", or a mouse button.
    pub trigger: T,
}

/// Bindings that are triggered by a keyboard key.
pub type KeyBinding = Binding<Key>;

/// Bindings that are triggered by a mouse button.
pub type MouseBinding = Binding<MouseButton>;

impl<T: Eq> Binding<T> {
    #[inline]
    pub fn is_triggered_by(&self, mode: TermMode, mods: ModifiersState, input: &T) -> bool {
        // Check input first since bindings are stored in one big list. This is
        // the most likely item to fail so prioritizing it here allows more
        // checks to be short circuited.
        self.trigger == *input
            && self.mods == mods
            && mode.contains(self.mode)
            && !mode.intersects(self.notmode)
    }

    #[inline]
    pub fn triggers_match(&self, binding: &Binding<T>) -> bool {
        // Check the binding's key and modifiers.
        if self.trigger != binding.trigger || self.mods != binding.mods {
            return false;
        }

        let selfmode = if self.mode.is_empty() { TermMode::ANY } else { self.mode };
        let bindingmode = if binding.mode.is_empty() { TermMode::ANY } else { binding.mode };

        if !selfmode.intersects(bindingmode) {
            return false;
        }

        // The bindings are never active at the same time when the required
        // modes of one binding are part of the forbidden modes of the other.
        if self.mode.intersects(binding.notmode) || binding.mode.intersects(self.notmode) {
            return false;
        }

        true
    }
}

#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
pub enum Action {
    /// Write an escape sequence.
    #[serde(skip)]
    Esc(String),

    /// Run given command.
    #[serde(skip)]
    Command(Program),

    /// Move vi mode cursor.
    #[serde(skip)]
    ViMotion(ViMotion),

    /// Perform vi mode action.
    #[serde(skip)]
    ViAction(ViAction),

    /// Paste contents of system clipboard.
    Paste,

    /// Store current selection into clipboard.
    Copy,

    #[cfg(not(any(target_os = "macos", windows)))]
    /// Store current selection into selection buffer.
    CopySelection,

    /// Paste contents of selection buffer.
    PasteSelection,

    /// Increase font size.
    IncreaseFontSize,

    /// Decrease font size.
    DecreaseFontSize,

    /// Reset font size to the config value.
    ResetFontSize,

    /// Scroll exactly one page up.
    ScrollPageUp,

    /// Scroll exactly one page down.
    ScrollPageDown,

    /// Scroll half a page up.
    ScrollHalfPageUp,

    /// Scroll half a page down.
    ScrollHalfPageDown,

    /// Scroll one line up.
    ScrollLineUp,

    /// Scroll one line down.
    ScrollLineDown,

    /// Scroll all the way to the top.
    ScrollToTop,

    /// Scroll all the way to the bottom.
    ScrollToBottom,

    /// Clear the display buffer(s) to remove history.
    ClearHistory,

    /// Hide the Alacritty window.
    Hide,

    /// Minimize the Alacritty window.
    Minimize,

    /// Quit Alacritty.
    Quit,

    /// Clear warning and error notices.
    ClearLogNotice,

    /// Spawn a new instance of Alacritty.
    SpawnNewInstance,

    /// Toggle fullscreen.
    ToggleFullscreen,

    /// Toggle simple fullscreen on macOS.
    #[cfg(target_os = "macos")]
    ToggleSimpleFullscreen,

    /// Clear active selection.
    ClearSelection,

    /// Toggle vi mode.
    ToggleViMode,

    /// Allow receiving char input.
    ReceiveChar,

    /// Start a forward buffer search.
    SearchForward,

    /// Start a backward buffer search.
    SearchBackward,

    /// No action.
    None,
}

impl From<&'static str> for Action {
    fn from(s: &'static str) -> Action {
        Action::Esc(s.into())
    }
}

/// Display trait used for error logging.
impl Display for Action {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Action::ViMotion(motion) => motion.fmt(f),
            Action::ViAction(action) => action.fmt(f),
            _ => write!(f, "{:?}", self),
        }
    }
}

/// Vi mode specific actions.
#[derive(Deserialize, Debug, Copy, Clone, PartialEq, Eq)]
pub enum ViAction {
    /// Toggle normal vi selection.
    ToggleNormalSelection,

    /// Toggle line vi selection.
    ToggleLineSelection,

    /// Toggle block vi selection.
    ToggleBlockSelection,

    /// Toggle semantic vi selection.
    ToggleSemanticSelection,

    /// Jump to the beginning of the next match.
    SearchNext,

    /// Jump to the beginning of the previous match.
    SearchPrevious,

    /// Jump to the next start of a match to the left of the origin.
    SearchStart,

    /// Jump to the next end of a match to the right of the origin.
    SearchEnd,

    /// Launch the URL below the vi mode cursor.
    Open,
}

impl From<ViAction> for Action {
    fn from(action: ViAction) -> Self {
        Self::ViAction(action)
    }
}

impl From<ViMotion> for Action {
    fn from(motion: ViMotion) -> Self {
        Self::ViMotion(motion)
    }
}

macro_rules! bindings {
    (
        KeyBinding;
        $(
            $key:ident
            $(,$mods:expr)*
            $(,+$mode:expr)*
            $(,~$notmode:expr)*
            ;$action:expr
        );*
        $(;)*
    ) => {{
        bindings!(
            KeyBinding;
            $(
                Key::Keycode($key)
                $(,$mods)*
                $(,+$mode)*
                $(,~$notmode)*
                ;$action
            );*
        )
    }};
    (
        $ty:ident;
        $(
            $key:expr
            $(,$mods:expr)*
            $(,+$mode:expr)*
            $(,~$notmode:expr)*
            ;$action:expr
        );*
        $(;)*
    ) => {{
        let mut v = Vec::new();

        $(
            let mut _mods = ModifiersState::empty();
            $(_mods = $mods;)*
            let mut _mode = TermMode::empty();
            $(_mode.insert($mode);)*
            let mut _notmode = TermMode::empty();
            $(_notmode.insert($notmode);)*

            v.push($ty {
                trigger: $key,
                mods: _mods,
                mode: _mode,
                notmode: _notmode,
                action: $action.into(),
            });
        )*

        v
    }};
}

pub fn default_mouse_bindings() -> Vec<MouseBinding> {
    bindings!(
        MouseBinding;
        MouseButton::Middle, ~TermMode::VI; Action::PasteSelection;
    )
}

pub fn default_key_bindings() -> Vec<KeyBinding> {
    let mut bindings = bindings!(
        KeyBinding;
        Copy; Action::Copy;
        Copy, +TermMode::VI; Action::ClearSelection;
        Paste, ~TermMode::VI; Action::Paste;
        L, ModifiersState::CTRL; Action::ClearLogNotice;
        L, ModifiersState::CTRL, ~TermMode::VI; Action::Esc("\x0c".into());
        Tab, ModifiersState::SHIFT, ~TermMode::VI; Action::Esc("\x1b[Z".into());
        Back, ModifiersState::ALT, ~TermMode::VI; Action::Esc("\x1b\x7f".into());
        Back, ModifiersState::SHIFT, ~TermMode::VI; Action::Esc("\x7f".into());
        Home, ModifiersState::SHIFT, ~TermMode::ALT_SCREEN; Action::ScrollToTop;
        End, ModifiersState::SHIFT, ~TermMode::ALT_SCREEN; Action::ScrollToBottom;
        PageUp, ModifiersState::SHIFT, ~TermMode::ALT_SCREEN; Action::ScrollPageUp;
        PageDown, ModifiersState::SHIFT, ~TermMode::ALT_SCREEN; Action::ScrollPageDown;
        Home, ModifiersState::SHIFT, +TermMode::ALT_SCREEN, ~TermMode::VI; Action::Esc("\x1b[1;2H".into());
        End, ModifiersState::SHIFT, +TermMode::ALT_SCREEN, ~TermMode::VI; Action::Esc("\x1b[1;2F".into());
        PageUp, ModifiersState::SHIFT, +TermMode::ALT_SCREEN, ~TermMode::VI; Action::Esc("\x1b[5;2~".into());
        PageDown, ModifiersState::SHIFT, +TermMode::ALT_SCREEN, ~TermMode::VI; Action::Esc("\x1b[6;2~".into());
        Home, +TermMode::APP_CURSOR, ~TermMode::VI; Action::Esc("\x1bOH".into());
        Home, ~TermMode::APP_CURSOR, ~TermMode::VI; Action::Esc("\x1b[H".into());
        End, +TermMode::APP_CURSOR, ~TermMode::VI; Action::Esc("\x1bOF".into());
        End, ~TermMode::APP_CURSOR, ~TermMode::VI; Action::Esc("\x1b[F".into());
        Up, +TermMode::APP_CURSOR, ~TermMode::VI; Action::Esc("\x1bOA".into());
        Up, ~TermMode::APP_CURSOR, ~TermMode::VI; Action::Esc("\x1b[A".into());
        Down, +TermMode::APP_CURSOR, ~TermMode::VI; Action::Esc("\x1bOB".into());
        Down, ~TermMode::APP_CURSOR, ~TermMode::VI; Action::Esc("\x1b[B".into());
        Right, +TermMode::APP_CURSOR, ~TermMode::VI; Action::Esc("\x1bOC".into());
        Right, ~TermMode::APP_CURSOR, ~TermMode::VI; Action::Esc("\x1b[C".into());
        Left, +TermMode::APP_CURSOR, ~TermMode::VI; Action::Esc("\x1bOD".into());
        Left, ~TermMode::APP_CURSOR, ~TermMode::VI; Action::Esc("\x1b[D".into());
        Back, ~TermMode::VI; Action::Esc("\x7f".into());
        Insert, ~TermMode::VI; Action::Esc("\x1b[2~".into());
        Delete, ~TermMode::VI; Action::Esc("\x1b[3~".into());
        PageUp, ~TermMode::VI; Action::Esc("\x1b[5~".into());
        PageDown, ~TermMode::VI; Action::Esc("\x1b[6~".into());
        F1, ~TermMode::VI; Action::Esc("\x1bOP".into());
        F2, ~TermMode::VI; Action::Esc("\x1bOQ".into());
        F3, ~TermMode::VI; Action::Esc("\x1bOR".into());
        F4, ~TermMode::VI; Action::Esc("\x1bOS".into());
        F5, ~TermMode::VI; Action::Esc("\x1b[15~".into());
        F6, ~TermMode::VI; Action::Esc("\x1b[17~".into());
        F7, ~TermMode::VI; Action::Esc("\x1b[18~".into());
        F8,
~TermMode::VI; Action::Esc("\x1b[19~".into()); F9, ~TermMode::VI; Action::Esc("\x1b[20~".into()); F10, ~TermMode::VI; Action::Esc("\x1b[21~".into()); F11, ~TermMode::VI; Action::Esc("\x1b[23~".into()); F12, ~TermMode::VI; Action::Esc("\x1b[24~".into()); F13, ~TermMode::VI; Action::Esc("\x1b[25~".into()); F14, ~TermMode::VI; Action::Esc("\x1b[26~".into()); F15, ~TermMode::VI; Action::Esc("\x1b[28~".into()); F16, ~TermMode::VI; Action::Esc("\x1b[29~".into()); F17, ~TermMode::VI; Action::Esc("\x1b[31~".into()); F18, ~TermMode::VI; Action::Esc("\x1b[32~".into()); F19, ~TermMode::VI; Action::Esc("\x1b[33~".into()); F20, ~TermMode::VI; Action::Esc("\x1b[34~".into()); NumpadEnter, ~TermMode::VI; Action::Esc("\n".into()); Space, ModifiersState::SHIFT | ModifiersState::CTRL, +TermMode::VI; Action::ScrollToBottom; Space, ModifiersState::SHIFT | ModifiersState::CTRL; Action::ToggleViMode; Escape, +TermMode::VI; Action::ClearSelection; I, +TermMode::VI; Action::ScrollToBottom; I, +TermMode::VI; Action::ToggleViMode; C, ModifiersState::CTRL, +TermMode::VI; Action::ToggleViMode; Y, ModifiersState::CTRL, +TermMode::VI; Action::ScrollLineUp; E, ModifiersState::CTRL, +TermMode::VI; Action::ScrollLineDown; G, +TermMode::VI; Action::ScrollToTop; G, ModifiersState::SHIFT, +TermMode::VI; Action::ScrollToBottom; B, ModifiersState::CTRL, +TermMode::VI; Action::ScrollPageUp; F, ModifiersState::CTRL, +TermMode::VI; Action::ScrollPageDown; U, ModifiersState::CTRL, +TermMode::VI; Action::ScrollHalfPageUp; D, ModifiersState::CTRL, +TermMode::VI; Action::ScrollHalfPageDown; Y, +TermMode::VI; Action::Copy; Y, +TermMode::VI; Action::ClearSelection; Slash, +TermMode::VI; Action::SearchForward; Slash, ModifiersState::SHIFT, +TermMode::VI; Action::SearchBackward; V, +TermMode::VI; ViAction::ToggleNormalSelection; V, ModifiersState::SHIFT, +TermMode::VI; ViAction::ToggleLineSelection; V, ModifiersState::CTRL, +TermMode::VI; ViAction::ToggleBlockSelection; V, ModifiersState::ALT, +TermMode::VI; ViAction::ToggleSemanticSelection; N, +TermMode::VI; ViAction::SearchNext; N, ModifiersState::SHIFT, +TermMode::VI; ViAction::SearchPrevious; Return, +TermMode::VI; ViAction::Open; K, +TermMode::VI; ViMotion::Up; J, +TermMode::VI; ViMotion::Down; H, +TermMode::VI; ViMotion::Left; L, +TermMode::VI; ViMotion::Right; Up, +TermMode::VI; ViMotion::Up; Down, +TermMode::VI; ViMotion::Down; Left, +TermMode::VI; ViMotion::Left; Right, +TermMode::VI; ViMotion::Right; Key0, +TermMode::VI; ViMotion::First; Key4, ModifiersState::SHIFT, +TermMode::VI; ViMotion::Last; Key6, ModifiersState::SHIFT, +TermMode::VI; ViMotion::FirstOccupied; H, ModifiersState::SHIFT, +TermMode::VI; ViMotion::High; M, ModifiersState::SHIFT, +TermMode::VI; ViMotion::Middle; L, ModifiersState::SHIFT, +TermMode::VI; ViMotion::Low; B, +TermMode::VI; ViMotion::SemanticLeft; W, +TermMode::VI; ViMotion::SemanticRight; E, +TermMode::VI; ViMotion::SemanticRightEnd; B, ModifiersState::SHIFT, +TermMode::VI; ViMotion::WordLeft; W, ModifiersState::SHIFT, +TermMode::VI; ViMotion::WordRight; E, ModifiersState::SHIFT, +TermMode::VI; ViMotion::WordRightEnd; Key5, ModifiersState::SHIFT, +TermMode::VI; ViMotion::Bracket; ); // Code Modifiers // ---------+--------------------------- // 2 | Shift // 3 | Alt // 4 | Shift + Alt // 5 | Control // 6 | Shift + Control // 7 | Alt + Control // 8 | Shift + Alt + Control // ---------+--------------------------- // // from: https://invisible-island.net/xterm/ctlseqs/ctlseqs.html#h2-PC-Style-Function-Keys let mut modifiers = vec![ ModifiersState::SHIFT, 
ModifiersState::ALT, ModifiersState::SHIFT | ModifiersState::ALT, ModifiersState::CTRL, ModifiersState::SHIFT | ModifiersState::CTRL, ModifiersState::ALT | ModifiersState::CTRL, ModifiersState::SHIFT | ModifiersState::ALT | ModifiersState::CTRL, ]; for (index, mods) in modifiers.drain(..).enumerate() { let modifiers_code = index + 2; bindings.extend(bindings!( KeyBinding; Delete, mods, ~TermMode::VI; Action::Esc(format!("\x1b[3;{}~", modifiers_code)); Up, mods, ~TermMode::VI; Action::Esc(format!("\x1b[1;{}A", modifiers_code)); Down, mods, ~TermMode::VI; Action::Esc(format!("\x1b[1;{}B", modifiers_code)); Right, mods, ~TermMode::VI; Action::Esc(format!("\x1b[1;{}C", modifiers_code)); Left, mods, ~TermMode::VI; Action::Esc(format!("\x1b[1;{}D", modifiers_code)); F1, mods, ~TermMode::VI; Action::Esc(format!("\x1b[1;{}P", modifiers_code)); F2, mods, ~TermMode::VI; Action::Esc(format!("\x1b[1;{}Q", modifiers_code)); F3, mods, ~TermMode::VI; Action::Esc(format!("\x1b[1;{}R", modifiers_code)); F4, mods, ~TermMode::VI; Action::Esc(format!("\x1b[1;{}S", modifiers_code)); F5, mods, ~TermMode::VI; Action::Esc(format!("\x1b[15;{}~", modifiers_code)); F6, mods, ~TermMode::VI; Action::Esc(format!("\x1b[17;{}~", modifiers_code)); F7, mods, ~TermMode::VI; Action::Esc(format!("\x1b[18;{}~", modifiers_code)); F8, mods, ~TermMode::VI; Action::Esc(format!("\x1b[19;{}~", modifiers_code)); F9, mods, ~TermMode::VI; Action::Esc(format!("\x1b[20;{}~", modifiers_code)); F10, mods, ~TermMode::VI; Action::Esc(format!("\x1b[21;{}~", modifiers_code)); F11, mods, ~TermMode::VI; Action::Esc(format!("\x1b[23;{}~", modifiers_code)); F12, mods, ~TermMode::VI; Action::Esc(format!("\x1b[24;{}~", modifiers_code)); F13, mods, ~TermMode::VI; Action::Esc(format!("\x1b[25;{}~", modifiers_code)); F14, mods, ~TermMode::VI; Action::Esc(format!("\x1b[26;{}~", modifiers_code)); F15, mods, ~TermMode::VI; Action::Esc(format!("\x1b[28;{}~", modifiers_code)); F16, mods, ~TermMode::VI; Action::Esc(format!("\x1b[29;{}~", modifiers_code)); F17, mods, ~TermMode::VI; Action::Esc(format!("\x1b[31;{}~", modifiers_code)); F18, mods, ~TermMode::VI; Action::Esc(format!("\x1b[32;{}~", modifiers_code)); F19, mods, ~TermMode::VI; Action::Esc(format!("\x1b[33;{}~", modifiers_code)); F20, mods, ~TermMode::VI; Action::Esc(format!("\x1b[34;{}~", modifiers_code)); )); // We're adding the following bindings with `Shift` manually above, so skipping them here. 
if modifiers_code != 2 { bindings.extend(bindings!( KeyBinding; Insert, mods, ~TermMode::VI; Action::Esc(format!("\x1b[2;{}~", modifiers_code)); PageUp, mods, ~TermMode::VI; Action::Esc(format!("\x1b[5;{}~", modifiers_code)); PageDown, mods, ~TermMode::VI; Action::Esc(format!("\x1b[6;{}~", modifiers_code)); End, mods, ~TermMode::VI; Action::Esc(format!("\x1b[1;{}F", modifiers_code)); Home, mods, ~TermMode::VI; Action::Esc(format!("\x1b[1;{}H", modifiers_code)); )); } } bindings.extend(platform_key_bindings()); bindings } #[cfg(not(any(target_os = "macos", test)))] fn common_keybindings() -> Vec<KeyBinding> { bindings!( KeyBinding; V, ModifiersState::CTRL | ModifiersState::SHIFT, ~TermMode::VI; Action::Paste; C, ModifiersState::CTRL | ModifiersState::SHIFT; Action::Copy; F, ModifiersState::CTRL | ModifiersState::SHIFT; Action::SearchForward; B, ModifiersState::CTRL | ModifiersState::SHIFT; Action::SearchBackward; C, ModifiersState::CTRL | ModifiersState::SHIFT, +TermMode::VI; Action::ClearSelection; Insert, ModifiersState::SHIFT, ~TermMode::VI; Action::PasteSelection; Key0, ModifiersState::CTRL; Action::ResetFontSize; Equals, ModifiersState::CTRL; Action::IncreaseFontSize; Plus, ModifiersState::CTRL; Action::IncreaseFontSize; NumpadAdd, ModifiersState::CTRL; Action::IncreaseFontSize; Minus, ModifiersState::CTRL; Action::DecreaseFontSize; NumpadSubtract, ModifiersState::CTRL; Action::DecreaseFontSize; ) } #[cfg(not(any(target_os = "macos", target_os = "windows", test)))] pub fn platform_key_bindings() -> Vec<KeyBinding> { common_keybindings() } #[cfg(all(target_os = "windows", not(test)))] pub fn platform_key_bindings() -> Vec<KeyBinding> { let mut bindings = bindings!( KeyBinding; Return, ModifiersState::ALT; Action::ToggleFullscreen; ); bindings.extend(common_keybindings()); bindings } #[cfg(all(target_os = "macos", not(test)))] pub fn platform_key_bindings() -> Vec<KeyBinding> { bindings!( KeyBinding; Key0, ModifiersState::LOGO; Action::ResetFontSize; Equals, ModifiersState::LOGO; Action::IncreaseFontSize; Plus, ModifiersState::LOGO; Action::IncreaseFontSize; NumpadAdd, ModifiersState::LOGO; Action::IncreaseFontSize; Minus, ModifiersState::LOGO; Action::DecreaseFontSize; NumpadSubtract, ModifiersState::LOGO; Action::DecreaseFontSize; Insert, ModifiersState::SHIFT, ~TermMode::VI; Action::Esc("\x1b[2;2~".into()); K, ModifiersState::LOGO, ~TermMode::VI; Action::Esc("\x0c".into()); V, ModifiersState::LOGO, ~TermMode::VI; Action::Paste; N, ModifiersState::LOGO; Action::SpawnNewInstance; F, ModifiersState::CTRL | ModifiersState::LOGO; Action::ToggleFullscreen; K, ModifiersState::LOGO; Action::ClearHistory; C, ModifiersState::LOGO; Action::Copy; C, ModifiersState::LOGO, +TermMode::VI; Action::ClearSelection; H, ModifiersState::LOGO; Action::Hide; M, ModifiersState::LOGO; Action::Minimize; Q, ModifiersState::LOGO; Action::Quit; W, ModifiersState::LOGO; Action::Quit; F, ModifiersState::LOGO; Action::SearchForward; B, ModifiersState::LOGO; Action::SearchBackward; ) } // Don't return any bindings for tests since they are commented-out by default. 
#[cfg(test)] pub fn platform_key_bindings() -> Vec<KeyBinding> { vec![] } #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] pub enum Key { Scancode(u32), Keycode(VirtualKeyCode), } impl<'a> Deserialize<'a> for Key { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'a>, { let value = SerdeValue::deserialize(deserializer)?; match u32::deserialize(value.clone()) { Ok(scancode) => Ok(Key::Scancode(scancode)), Err(_) => { let keycode = VirtualKeyCode::deserialize(value).map_err(D::Error::custom)?; Ok(Key::Keycode(keycode)) }, } } } struct ModeWrapper { pub mode: TermMode, pub not_mode: TermMode, } impl<'a> Deserialize<'a> for ModeWrapper { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'a>, { struct ModeVisitor; impl<'a> Visitor<'a> for ModeVisitor { type Value = ModeWrapper; fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str( "a combination of AppCursor | AppKeypad | Alt | Vi, possibly with negation (~)", ) } fn visit_str<E>(self, value: &str) -> Result<ModeWrapper, E> where E: de::Error, { let mut res = ModeWrapper { mode: TermMode::empty(), not_mode: TermMode::empty() }; for modifier in value.split('|') { match modifier.trim().to_lowercase().as_str() { "appcursor" => res.mode |= TermMode::APP_CURSOR, "~appcursor" => res.not_mode |= TermMode::APP_CURSOR, "appkeypad" => res.mode |= TermMode::APP_KEYPAD, "~appkeypad" => res.not_mode |= TermMode::APP_KEYPAD, "alt" => res.mode |= TermMode::ALT_SCREEN, "~alt" => res.not_mode |= TermMode::ALT_SCREEN, "vi" => res.mode |= TermMode::VI, "~vi" => res.not_mode |= TermMode::VI, _ => return Err(E::invalid_value(Unexpected::Str(modifier), &self)), } } Ok(res) } } deserializer.deserialize_str(ModeVisitor) } } struct MouseButtonWrapper(MouseButton); impl MouseButtonWrapper { fn into_inner(self) -> MouseButton { self.0 } } impl<'a> Deserialize<'a> for MouseButtonWrapper { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'a>, { struct MouseButtonVisitor; impl<'a> Visitor<'a> for MouseButtonVisitor { type Value = MouseButtonWrapper; fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("Left, Right, Middle, or a number from 0 to 65535") } fn visit_u64<E>(self, value: u64) -> Result<MouseButtonWrapper, E> where E: de::Error, { match value { 0..=65535 => Ok(MouseButtonWrapper(MouseButton::Other(value as u16))), _ => Err(E::invalid_value(Unexpected::Unsigned(value), &self)), } } fn visit_str<E>(self, value: &str) -> Result<MouseButtonWrapper, E> where E: de::Error, { match value { "Left" => Ok(MouseButtonWrapper(MouseButton::Left)), "Right" => Ok(MouseButtonWrapper(MouseButton::Right)), "Middle" => Ok(MouseButtonWrapper(MouseButton::Middle)), _ => Err(E::invalid_value(Unexpected::Str(value), &self)), } } } deserializer.deserialize_any(MouseButtonVisitor) } } /// Bindings are deserialized into a `RawBinding` before being parsed as a /// `KeyBinding` or `MouseBinding`.
#[derive(PartialEq, Eq)] struct RawBinding { key: Option<Key>, mouse: Option<MouseButton>, mods: ModifiersState, mode: TermMode, notmode: TermMode, action: Action, } impl RawBinding { fn into_mouse_binding(self) -> Result<MouseBinding, Self> { if let Some(mouse) = self.mouse { Ok(Binding { trigger: mouse, mods: self.mods, action: self.action, mode: self.mode, notmode: self.notmode, }) } else { Err(self) } } fn into_key_binding(self) -> Result<KeyBinding, Self> { if let Some(key) = self.key { Ok(KeyBinding { trigger: key, mods: self.mods, action: self.action, mode: self.mode, notmode: self.notmode, }) } else { Err(self) } } } impl<'a> Deserialize<'a> for RawBinding { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'a>, { const FIELDS: &[&str] = &["key", "mods", "mode", "action", "chars", "mouse", "command"]; enum Field { Key, Mods, Mode, Action, Chars, Mouse, Command, } impl<'a> Deserialize<'a> for Field { fn deserialize<D>(deserializer: D) -> Result<Field, D::Error> where D: Deserializer<'a>, { struct FieldVisitor; impl<'a> Visitor<'a> for FieldVisitor { type Value = Field; fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("binding fields") } fn visit_str<E>(self, value: &str) -> Result<Field, E> where E: de::Error, { match value { "key" => Ok(Field::Key), "mods" => Ok(Field::Mods), "mode" => Ok(Field::Mode), "action" => Ok(Field::Action), "chars" => Ok(Field::Chars), "mouse" => Ok(Field::Mouse), "command" => Ok(Field::Command), _ => Err(E::unknown_field(value, FIELDS)), } } } deserializer.deserialize_str(FieldVisitor) } } struct RawBindingVisitor; impl<'a> Visitor<'a> for RawBindingVisitor { type Value = RawBinding; fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("binding specification") } fn visit_map<V>(self, mut map: V) -> Result<RawBinding, V::Error> where V: MapAccess<'a>, { let mut mods: Option<ModifiersState> = None; let mut key: Option<Key> = None; let mut chars: Option<String> = None; let mut action: Option<Action> = None; let mut mode: Option<TermMode> = None; let mut not_mode: Option<TermMode> = None; let mut mouse: Option<MouseButton> = None; let mut command: Option<Program> = None; use de::Error; while let Some(struct_key) = map.next_key::<Field>()? 
{ match struct_key { Field::Key => { if key.is_some() { return Err(<V::Error as Error>::duplicate_field("key")); } let val = map.next_value::<SerdeValue>()?; if val.is_u64() { let scancode = val.as_u64().unwrap(); if scancode > u64::from(std::u32::MAX) { return Err(<V::Error as Error>::custom(format!( "Invalid key binding, scancode too big: {}", scancode ))); } key = Some(Key::Scancode(scancode as u32)); } else { let k = Key::deserialize(val).map_err(V::Error::custom)?; key = Some(k); } }, Field::Mods => { if mods.is_some() { return Err(<V::Error as Error>::duplicate_field("mods")); } mods = Some(map.next_value::<ModsWrapper>()?.into_inner()); }, Field::Mode => { if mode.is_some() { return Err(<V::Error as Error>::duplicate_field("mode")); } let mode_deserializer = map.next_value::<ModeWrapper>()?; mode = Some(mode_deserializer.mode); not_mode = Some(mode_deserializer.not_mode); }, Field::Action => { if action.is_some() { return Err(<V::Error as Error>::duplicate_field("action")); } let value = map.next_value::<SerdeValue>()?; action = if let Ok(vi_action) = ViAction::deserialize(value.clone()) { Some(vi_action.into()) } else if let Ok(vi_motion) = ViMotion::deserialize(value.clone()) { Some(vi_motion.into()) } else { match Action::deserialize(value.clone()).map_err(V::Error::custom) { Ok(action) => Some(action), Err(err) => { let value = match value { SerdeValue::String(string) => string, SerdeValue::Mapping(map) if map.len() == 1 => { match map.into_iter().next() { Some(( SerdeValue::String(string), SerdeValue::Null, )) => string, _ => return Err(err), } }, _ => return Err(err), }; return Err(V::Error::custom(format!( "unknown keyboard action `{}`", value ))); }, } }; }, Field::Chars => { if chars.is_some() { return Err(<V::Error as Error>::duplicate_field("chars")); } chars = Some(map.next_value()?); }, Field::Mouse => { if mouse.is_some() { return Err(<V::Error as Error>::duplicate_field("mouse")); } mouse = Some(map.next_value::<MouseButtonWrapper>()?.into_inner()); }, Field::Command => { if command.is_some() { return Err(<V::Error as Error>::duplicate_field("command")); } command = Some(map.next_value::<Program>()?); }, } } let mode = mode.unwrap_or_else(TermMode::empty); let not_mode = not_mode.unwrap_or_else(TermMode::empty); let mods = mods.unwrap_or_else(ModifiersState::default); let action = match (action, chars, command) { (Some(action @ Action::ViMotion(_)), None, None) | (Some(action @ Action::ViAction(_)), None, None) => { if !mode.intersects(TermMode::VI) || not_mode.intersects(TermMode::VI) { return Err(V::Error::custom(format!( "action `{}` is only available in vi mode, try adding `mode: Vi`", action, ))); } action }, (Some(action), None, None) => action, (None, Some(chars), None) => Action::Esc(chars), (None, None, Some(cmd)) => Action::Command(cmd), _ => { return Err(V::Error::custom( "must specify exactly one of chars, action or command", )) }, }; if mouse.is_none() && key.is_none() { return Err(V::Error::custom("bindings require mouse button or key")); } Ok(RawBinding { mode, notmode: not_mode, action, key, mouse, mods }) } } deserializer.deserialize_struct("RawBinding", FIELDS, RawBindingVisitor) } } impl<'a> Deserialize<'a> for MouseBinding { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'a>, { let raw = RawBinding::deserialize(deserializer)?; raw.into_mouse_binding() .map_err(|_| D::Error::custom("expected mouse binding, got key binding")) } } impl<'a> Deserialize<'a> for KeyBinding { fn deserialize<D>(deserializer: D) ->
Result<Self, D::Error> where D: Deserializer<'a>, { let raw = RawBinding::deserialize(deserializer)?; raw.into_key_binding() .map_err(|_| D::Error::custom("expected key binding, got mouse binding")) } } /// Newtype for implementing deserialize on glutin Mods. /// /// Our deserialize impl wouldn't be covered by a derive(Deserialize); see the /// impl below. #[derive(Debug, Copy, Clone, Hash, Default, Eq, PartialEq)] pub struct ModsWrapper(ModifiersState); impl ModsWrapper { pub fn into_inner(self) -> ModifiersState { self.0 } } impl<'a> de::Deserialize<'a> for ModsWrapper { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: de::Deserializer<'a>, { struct ModsVisitor; impl<'a> Visitor<'a> for ModsVisitor { type Value = ModsWrapper; fn expecting(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("a subset of Shift|Control|Super|Command|Alt|Option") } fn visit_str<E>(self, value: &str) -> Result<ModsWrapper, E> where E: de::Error, { let mut res = ModifiersState::empty(); for modifier in value.split('|') { match modifier.trim().to_lowercase().as_str() { "command" | "super" => res.insert(ModifiersState::LOGO), "shift" => res.insert(ModifiersState::SHIFT), "alt" | "option" => res.insert(ModifiersState::ALT), "control" => res.insert(ModifiersState::CTRL), "none" => (), _ => return Err(E::invalid_value(Unexpected::Str(modifier), &self)), } } Ok(ModsWrapper(res)) } } deserializer.deserialize_str(ModsVisitor) } } #[cfg(test)] mod tests { use glutin::event::ModifiersState; use alacritty_terminal::term::TermMode; use crate::config::{Action, Binding}; type MockBinding = Binding<usize>; impl Default for MockBinding { fn default() -> Self { Self { mods: Default::default(), action: Action::None, mode: TermMode::empty(), notmode: TermMode::empty(), trigger: Default::default(), } } } #[test] fn binding_matches_itself() { let binding = MockBinding::default(); let identical_binding = MockBinding::default(); assert!(binding.triggers_match(&identical_binding)); assert!(identical_binding.triggers_match(&binding)); } #[test] fn binding_matches_different_action() { let binding = MockBinding::default(); let mut different_action = MockBinding::default(); different_action.action = Action::ClearHistory; assert!(binding.triggers_match(&different_action)); assert!(different_action.triggers_match(&binding)); } #[test] fn mods_binding_requires_strict_match() { let mut superset_mods = MockBinding::default(); superset_mods.mods = ModifiersState::all(); let mut subset_mods = MockBinding::default(); subset_mods.mods = ModifiersState::ALT; assert!(!superset_mods.triggers_match(&subset_mods)); assert!(!subset_mods.triggers_match(&superset_mods)); } #[test] fn binding_matches_identical_mode() { let mut b1 = MockBinding::default(); b1.mode = TermMode::ALT_SCREEN; let mut b2 = MockBinding::default(); b2.mode = TermMode::ALT_SCREEN; assert!(b1.triggers_match(&b2)); assert!(b2.triggers_match(&b1)); } #[test] fn binding_without_mode_matches_any_mode() { let b1 = MockBinding::default(); let mut b2 = MockBinding::default(); b2.mode = TermMode::APP_KEYPAD; b2.notmode = TermMode::ALT_SCREEN; assert!(b1.triggers_match(&b2)); } #[test] fn binding_with_mode_matches_empty_mode() { let mut b1 = MockBinding::default(); b1.mode = TermMode::APP_KEYPAD; b1.notmode = TermMode::ALT_SCREEN; let b2 = MockBinding::default(); assert!(b1.triggers_match(&b2)); assert!(b2.triggers_match(&b1)); } #[test] fn binding_matches_modes() { let mut b1 = MockBinding::default(); b1.mode = TermMode::ALT_SCREEN | TermMode::APP_KEYPAD; 
let mut b2 = MockBinding::default(); b2.mode = TermMode::APP_KEYPAD; assert!(b1.triggers_match(&b2)); assert!(b2.triggers_match(&b1)); } #[test] fn binding_matches_partial_intersection() { let mut b1 = MockBinding::default(); b1.mode = TermMode::ALT_SCREEN | TermMode::APP_KEYPAD; let mut b2 = MockBinding::default(); b2.mode = TermMode::APP_KEYPAD | TermMode::APP_CURSOR; assert!(b1.triggers_match(&b2)); assert!(b2.triggers_match(&b1)); } #[test] fn binding_mismatches_notmode() { let mut b1 = MockBinding::default(); b1.mode = TermMode::ALT_SCREEN; let mut b2 = MockBinding::default(); b2.notmode = TermMode::ALT_SCREEN; assert!(!b1.triggers_match(&b2)); assert!(!b2.triggers_match(&b1)); } #[test] fn binding_mismatches_unrelated() { let mut b1 = MockBinding::default(); b1.mode = TermMode::ALT_SCREEN; let mut b2 = MockBinding::default(); b2.mode = TermMode::APP_KEYPAD; assert!(!b1.triggers_match(&b2)); assert!(!b2.triggers_match(&b1)); } #[test] fn binding_matches_notmodes() { let mut subset_notmodes = MockBinding::default(); let mut superset_notmodes = MockBinding::default(); subset_notmodes.notmode = TermMode::VI | TermMode::APP_CURSOR; superset_notmodes.notmode = TermMode::APP_CURSOR; assert!(subset_notmodes.triggers_match(&superset_notmodes)); assert!(superset_notmodes.triggers_match(&subset_notmodes)); } #[test] fn binding_matches_mode_notmode() { let mut b1 = MockBinding::default(); let mut b2 = MockBinding::default(); b1.mode = TermMode::VI; b1.notmode = TermMode::APP_CURSOR; b2.notmode = TermMode::APP_CURSOR; assert!(b1.triggers_match(&b2)); assert!(b2.triggers_match(&b1)); } #[test] fn binding_trigger_input() { let mut binding = MockBinding::default(); binding.trigger = 13; let mods = binding.mods; let mode = binding.mode; assert!(binding.is_triggered_by(mode, mods, &13)); assert!(!binding.is_triggered_by(mode, mods, &32)); } #[test] fn binding_trigger_mods() { let mut binding = MockBinding::default(); binding.mods = ModifiersState::ALT | ModifiersState::LOGO; let superset_mods = ModifiersState::all(); let subset_mods = ModifiersState::empty(); let t = binding.trigger; let mode = binding.mode; assert!(binding.is_triggered_by(mode, binding.mods, &t)); assert!(!binding.is_triggered_by(mode, superset_mods, &t)); assert!(!binding.is_triggered_by(mode, subset_mods, &t)); } #[test] fn binding_trigger_modes() { let mut binding = MockBinding::default(); binding.mode = TermMode::ALT_SCREEN; let t = binding.trigger; let mods = binding.mods; assert!(!binding.is_triggered_by(TermMode::INSERT, mods, &t)); assert!(binding.is_triggered_by(TermMode::ALT_SCREEN, mods, &t)); assert!(binding.is_triggered_by(TermMode::ALT_SCREEN | TermMode::INSERT, mods, &t)); } #[test] fn binding_trigger_notmodes() { let mut binding = MockBinding::default(); binding.notmode = TermMode::ALT_SCREEN; let t = binding.trigger; let mods = binding.mods; assert!(binding.is_triggered_by(TermMode::INSERT, mods, &t)); assert!(!binding.is_triggered_by(TermMode::ALT_SCREEN, mods, &t)); assert!(!binding.is_triggered_by(TermMode::ALT_SCREEN | TermMode::INSERT, mods, &t)); } }
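The `mode`/`notmode` pair above does all of the context gating: a binding fires only when the active `TermMode` contains every required flag and intersects none of the forbidden ones. A minimal standalone sketch of that check, with plain `u8` masks standing in for the real `TermMode`/`ModifiersState` types (the names here are illustrative, not Alacritty's):

```rust
// Minimal sketch of the mode/notmode matching used by `Binding::is_triggered_by`.
// `Mode` is a stubbed bitmask; the real types come from alacritty_terminal/glutin.
#[derive(Clone, Copy)]
struct Mode(u8);
const NONE: Mode = Mode(0);
const VI: Mode = Mode(0b01);
const ALT_SCREEN: Mode = Mode(0b10);

impl Mode {
    fn contains(self, other: Mode) -> bool { self.0 & other.0 == other.0 }
    fn intersects(self, other: Mode) -> bool { self.0 & other.0 != 0 }
}

struct Binding { trigger: char, mode: Mode, notmode: Mode }

impl Binding {
    fn is_triggered_by(&self, mode: Mode, input: char) -> bool {
        // Same short-circuit order as the real code: the trigger comparison
        // is the most selective check, so it runs first.
        self.trigger == input && mode.contains(self.mode) && !mode.intersects(self.notmode)
    }
}

fn main() {
    // `k` is bound only in vi mode; `v` is masked out while vi mode is set.
    let vi_up = Binding { trigger: 'k', mode: VI, notmode: NONE };
    let paste = Binding { trigger: 'v', mode: NONE, notmode: VI };
    assert!(vi_up.is_triggered_by(VI, 'k'));
    assert!(!vi_up.is_triggered_by(ALT_SCREEN, 'k'));
    assert!(paste.is_triggered_by(ALT_SCREEN, 'v')); // empty `mode` matches any mode
    assert!(!paste.is_triggered_by(VI, 'v'));
    println!("mode gating behaves as expected");
}
```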
37.811823
102
0.521184
18cfbb240aef1898bbcf339e826fcc0b99da4e24
3,346
extern crate specs;
use specs::prelude::*;
use super::{EntityMoved, Position, EntryTrigger, Hidden, Map, Name, gamelog::GameLog,
    InflictsDamage, particle_system::ParticleBuilder, SufferDamage, SingleActivation};

pub struct TriggerSystem {}

impl<'a> System<'a> for TriggerSystem {
    #[allow(clippy::type_complexity)]
    type SystemData = ( ReadExpect<'a, Map>,
                        WriteStorage<'a, EntityMoved>,
                        ReadStorage<'a, Position>,
                        ReadStorage<'a, EntryTrigger>,
                        WriteStorage<'a, Hidden>,
                        ReadStorage<'a, Name>,
                        Entities<'a>,
                        WriteExpect<'a, GameLog>,
                        ReadStorage<'a, InflictsDamage>,
                        WriteExpect<'a, ParticleBuilder>,
                        WriteStorage<'a, SufferDamage>,
                        ReadStorage<'a, SingleActivation>);

    fn run(&mut self, data : Self::SystemData) {
        let (map, mut entity_moved, position, entry_trigger, mut hidden, names, entities,
            mut log, inflicts_damage, mut particle_builder, mut suffer_damage,
            single_activation) = data;

        // Iterate the entities that moved and their final position
        let mut remove_entities : Vec<Entity> = Vec::new();
        for (entity, mut _entity_moved, pos) in (&entities, &mut entity_moved, &position).join() {
            let idx = map.xy_idx(pos.x, pos.y);
            for entity_id in map.tile_content[idx].iter() {
                if entity != *entity_id { // Do not bother to check yourself for being a trap!
                    let maybe_trigger = entry_trigger.get(*entity_id);
                    match maybe_trigger {
                        None => {},
                        Some(_trigger) => {
                            // We triggered it
                            let name = names.get(*entity_id);
                            if let Some(name) = name {
                                log.entries.push(format!("{} triggers!", &name.name));
                            }

                            hidden.remove(*entity_id); // The trap is no longer hidden

                            // If the trap is damage inflicting, do it
                            let damage = inflicts_damage.get(*entity_id);
                            if let Some(damage) = damage {
                                particle_builder.request(pos.x, pos.y, rltk::RGB::named(rltk::ORANGE), rltk::RGB::named(rltk::BLACK), rltk::to_cp437('‼'), 200.0);
                                SufferDamage::new_damage(&mut suffer_damage, entity, damage.damage);
                            }

                            // If it is single activation, it needs to be removed
                            let sa = single_activation.get(*entity_id);
                            if let Some(_sa) = sa {
                                remove_entities.push(*entity_id);
                            }
                        }
                    }
                }
            }
        }

        // Remove any single activation traps
        for trap in remove_entities.iter() {
            entities.delete(*trap).expect("Unable to delete trap");
        }

        // Remove all entity movement markers
        entity_moved.clear();
    }
}
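`run` consumes `EntityMoved` markers and clears the whole storage at the end of the pass, so each movement is processed exactly once. A minimal sketch of that marker-clearing pattern wired into specs (assuming specs ~0.16; the component and system names below are stand-ins, not from the source):

```rust
use specs::prelude::*;

// Marker component: set when an entity moves, cleared by the trigger pass.
#[derive(Default)]
struct Moved;
impl Component for Moved {
    type Storage = NullStorage<Self>;
}

struct ClearMoved;
impl<'a> System<'a> for ClearMoved {
    type SystemData = WriteStorage<'a, Moved>;
    fn run(&mut self, mut moved: Self::SystemData) {
        // Same end-of-pass cleanup as TriggerSystem::run's `entity_moved.clear()`.
        moved.clear();
    }
}

fn main() {
    let mut world = World::new();
    world.register::<Moved>();
    let e = world.create_entity().with(Moved).build();

    let mut sys = ClearMoved;
    sys.run_now(&world);

    // The marker is gone, so the entity won't be re-processed next tick.
    assert!(world.read_storage::<Moved>().get(e).is_none());
    println!("movement markers cleared");
}
```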
45.835616
162
0.494023
095edbeb894c4f6fcc365087b5641a2ac5d7116c
1,575
#[cfg(test)]
mod reqtrans_test;

use super::*;
use stun::attributes::*;
use stun::checks::*;
use stun::message::*;
use util::Error;

use std::fmt;

/// RequestedTransport represents REQUESTED-TRANSPORT attribute.
///
/// This attribute is used by the client to request a specific transport
/// protocol for the allocated transport address. RFC 5766 only allows the use of
/// codepoint 17 (User Datagram Protocol).
///
/// RFC 5766 Section 14.7
#[derive(Default, Debug, PartialEq)]
pub struct RequestedTransport {
    pub protocol: Protocol,
}

impl fmt::Display for RequestedTransport {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "protocol: {}", self.protocol)
    }
}

const REQUESTED_TRANSPORT_SIZE: usize = 4;

impl Setter for RequestedTransport {
    /// Adds REQUESTED-TRANSPORT to message.
    fn add_to(&self, m: &mut Message) -> Result<(), Error> {
        let mut v = vec![0; REQUESTED_TRANSPORT_SIZE];
        v[0] = self.protocol.0;
        // v[1..4] is RFFU = 0.
        // The RFFU field MUST be set to zero on transmission and MUST be
        // ignored on reception. It is reserved for future uses.
        m.add(ATTR_REQUESTED_TRANSPORT, &v);
        Ok(())
    }
}

impl Getter for RequestedTransport {
    /// Decodes REQUESTED-TRANSPORT from message.
    fn get_from(&mut self, m: &Message) -> Result<(), Error> {
        let v = m.get(ATTR_REQUESTED_TRANSPORT)?;
        check_size(ATTR_REQUESTED_TRANSPORT, v.len(), REQUESTED_TRANSPORT_SIZE)?;
        self.protocol = Protocol(v[0]);
        Ok(())
    }
}
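Per RFC 5766 §14.7 the attribute value is exactly four bytes: the protocol codepoint followed by three RFFU bytes that are zeroed on send and ignored on receive. A dependency-free sketch of that wire layout (the helper names are illustrative, not part of the crate):

```rust
// Standalone sketch of the 4-byte REQUESTED-TRANSPORT value laid out by add_to():
// one protocol codepoint followed by three RFFU bytes that must be zero.
const PROTO_UDP: u8 = 17; // the only codepoint RFC 5766 allows

fn encode(protocol: u8) -> [u8; 4] {
    [protocol, 0, 0, 0] // v[0] = protocol, v[1..4] = RFFU
}

fn decode(v: &[u8]) -> Result<u8, String> {
    if v.len() != 4 {
        return Err(format!("bad REQUESTED-TRANSPORT size: {}", v.len()));
    }
    Ok(v[0]) // RFFU bytes are ignored on reception
}

fn main() {
    let wire = encode(PROTO_UDP);
    assert_eq!(wire, [17, 0, 0, 0]);
    assert_eq!(decode(&wire), Ok(17));
    println!("round-trip ok");
}
```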
27.631579
81
0.658413
03dd61c7938e6577e7c4a736870cb03448c0ef88
7,757
use std::collections::HashMap; use crate::basic::error::TardisError; use crate::basic::result::TardisResult; use crate::log::{debug, info}; use crate::{FrameworkConfig, TardisFuns, TardisWebClient}; /// Distributed search handle / 分布式搜索操作 /// /// Encapsulates common elasticsearch operations. /// /// 封装了Elasticsearch的常用操作. /// /// # Steps to use / 使用步骤 /// /// 1. Create the search configuration / 创建搜索配置, @see [SearchConfig](crate::basic::config::SearchConfig) /// /// 2. Use `TardisSearchClient` to operate search / 使用 `TardisSearchClient` 操作搜索, E.g: /// ```ignore /// use tardis::TardisFuns; /// TardisFuns::search().create_index("test_index").await.unwrap(); /// let id = TardisFuns::search().create_record("test_index", r#"{"user":{"id":1,"name":"张三","open":false}}"#).await.unwrap(); /// assert_eq!(TardisFuns::search().get_record("test_index", &id).await.unwrap(), r#"{"user":{"id":1,"name":"张三","open":false}}"#); /// TardisFuns::search().simple_search("test_index", "张三").await.unwrap(); /// ``` pub struct TardisSearchClient { client: TardisWebClient, server_url: String, } impl TardisSearchClient { /// Initialize configuration from the search configuration object / 从搜索配置对象中初始化配置 pub fn init_by_conf(conf: &FrameworkConfig) -> TardisResult<HashMap<String, TardisSearchClient>> { let mut clients = HashMap::new(); clients.insert("".to_string(), TardisSearchClient::init(&conf.search.url, conf.search.timeout_sec)?); for (k, v) in &conf.search.modules { clients.insert(k.to_string(), TardisSearchClient::init(&v.url, v.timeout_sec)?); } Ok(clients) } /// Initialize configuration / 初始化配置 pub fn init(str_url: &str, timeout_sec: u64) -> TardisResult<TardisSearchClient> { info!("[Tardis.SearchClient] Initializing"); let mut client = TardisWebClient::init(timeout_sec)?; client.set_default_header("Content-Type", "application/json"); info!("[Tardis.SearchClient] Initialized"); TardisResult::Ok(TardisSearchClient { client, server_url: str_url.to_string(), }) } /// Create index / 创建索引 /// /// # Arguments /// /// * `index_name` - index name / 索引名称 /// /// # Examples /// ```ignore /// use tardis::TardisFuns; /// TardisFuns::search().create_index("test_index").await.unwrap(); /// ``` pub async fn create_index(&self, index_name: &str) -> TardisResult<()> { info!("[Tardis.SearchClient] Create index {}", index_name); let url = format!("{}/{}", self.server_url, index_name); let resp = self.client.put_str_to_str(&url, "", None).await?; if let Some(err) = TardisError::new(resp.code, resp.body.as_ref().unwrap_or(&"".to_string())) { Err(err) } else { Ok(()) } } /// Create record and return primary key value / 创建记录并返回主键值 /// /// # Arguments /// /// * `index_name` - index name / 索引名称 /// * `data` - record content / 记录内容 /// /// # Examples /// ```ignore /// use tardis::TardisFuns; /// let id = TardisFuns::search().create_record("test_index", r#"{"user":{"id":1,"name":"张三","open":false}}"#).await.unwrap(); /// ``` pub async fn create_record(&self, index_name: &str, data: &str) -> TardisResult<String> { debug!("[Tardis.SearchClient] Create record in index {}", index_name); let url = format!("{}/{}/_doc/", self.server_url, index_name); let resp = self.client.post_str_to_str(&url, data, None).await?; if let Some(err) = TardisError::new(resp.code, resp.body.as_ref().unwrap_or(&"".to_string())) { Err(err) } else { let result = TardisFuns::json.str_to_json(&resp.body.unwrap_or_else(|| "".to_string()))?; Ok(result["_id"].as_str().ok_or_else(|| TardisError::FormatError("[Tardis.SearchClient] [_id] structure not found".to_string()))?.to_string())
} } /// Get a record / 获取一条记录 /// /// # Arguments /// /// * `index_name` - index name / 索引名称 /// * `id` - record primary key value / 记录主键值 /// /// # Examples /// ```ignore /// use tardis::TardisFuns; /// TardisFuns::search().get_record("test_index", "xxxx").await.unwrap(); /// ``` pub async fn get_record(&self, index_name: &str, id: &str) -> TardisResult<String> { let url = format!("{}/{}/_doc/{}", self.server_url, index_name, id); let resp = self.client.get_to_str(&url, None).await?; if let Some(err) = TardisError::new(resp.code, resp.body.as_ref().unwrap_or(&"".to_string())) { Err(err) } else { let result = TardisFuns::json.str_to_json(&resp.body.unwrap_or_else(|| "".to_string()))?; Ok(result["_source"].to_string()) } } /// Simple (global) search / 简单(全局)搜索 /// /// # Arguments /// /// * `index_name` - index name / 索引名称 /// * `q` - keyword / 搜索关键字 /// /// # Examples /// ```ignore /// use tardis::TardisFuns; /// TardisFuns::search().simple_search("test_index", "张三").await.unwrap(); /// ``` pub async fn simple_search(&self, index_name: &str, q: &str) -> TardisResult<Vec<String>> { let url = format!("{}/{}/_search?q={}", self.server_url, index_name, q); let resp = self.client.get_to_str(&url, None).await?; if let Some(err) = TardisError::new(resp.code, resp.body.as_ref().unwrap_or(&"".to_string())) { Err(err) } else { Self::parse_search_result(&resp.body.unwrap_or_else(|| "".to_string())) } } /// Specified fields search / 指定字段搜索 /// /// # Arguments /// /// * `index_name` - index name / 索引名称 /// * `q` - search fields / 搜索的字段集合 /// /// The format of the search field: key = field name , value = field value, exact match, key supports multi-level operations of Json. /// /// 搜索字段的格式: key = 字段名 , value = 字段值,精确匹配,key支持Json的多级操作. /// /// # Examples /// ```ignore /// use std::collections::HashMap; /// use tardis::TardisFuns; /// TardisFuns::search().multi_search(index_name, HashMap::from([("user.id", "1"), ("user.name", "李四")])).await.unwrap(); /// ``` pub async fn multi_search(&self, index_name: &str, q: HashMap<&str, &str>) -> TardisResult<Vec<String>> { let q = q.into_iter().map(|(k, v)| format!(r#"{{"match": {{"{}": "{}"}}}}"#, k, v)).collect::<Vec<String>>().join(","); let q = format!(r#"{{ "query": {{ "bool": {{ "must": [{}]}}}}}}"#, q); self.raw_search(index_name, &q).await } /// Search using native format / 使用原生格式搜索 /// /// # Arguments /// /// * `index_name` - index name / 索引名称 /// * `q` - native format / 原生格式 /// pub async fn raw_search(&self, index_name: &str, q: &str) -> TardisResult<Vec<String>> { let url = format!("{}/{}/_search", self.server_url, index_name); let resp = self.client.post_str_to_str(&url, q, None).await?; if let Some(err) = TardisError::new(resp.code, resp.body.as_ref().unwrap_or(&"".to_string())) { Err(err) } else { Self::parse_search_result(&resp.body.unwrap_or_else(|| "".to_string())) } } fn parse_search_result(result: &str) -> TardisResult<Vec<String>> { let json = TardisFuns::json.str_to_json(result)?; let json = json["hits"]["hits"] .as_array() .ok_or_else(|| TardisError::FormatError("[Tardis.SearchClient] [hit.hit] structure not found".to_string()))? .iter() .map(|x| x["_source"].to_string()) .collect(); Ok(json) } }
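`multi_search` builds its Elasticsearch body purely by string formatting: each field/value pair becomes a `match` clause inside a `bool`/`must` array. A standalone sketch of just that assembly step (no HTTP; a single pair is used so map iteration order cannot change the output):

```rust
// Standalone sketch of the query that multi_search() assembles.
use std::collections::HashMap;

fn build_query(q: HashMap<&str, &str>) -> String {
    let clauses = q
        .into_iter()
        .map(|(k, v)| format!(r#"{{"match": {{"{}": "{}"}}}}"#, k, v))
        .collect::<Vec<String>>()
        .join(",");
    // Every clause must match: an AND across the given fields.
    format!(r#"{{ "query": {{ "bool": {{ "must": [{}]}}}}}}"#, clauses)
}

fn main() {
    let q = build_query(HashMap::from([("user.id", "1")]));
    assert_eq!(
        q,
        r#"{ "query": { "bool": { "must": [{"match": {"user.id": "1"}}]}}}"#
    );
    println!("{}", q);
}
```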
39.576531
154
0.578445
ab8e22c63bb439a6c40a4932117d51a8adaf592a
1,078
use crate::{
    model::{JavaValue, RuntimeResult},
    Classpath, InvokeType, JniEnv,
};

#[allow(non_snake_case)]
fn Java_java_security_AccessController_doPrivileged(env: &JniEnv) -> RuntimeResult<Option<JavaValue>> {
    let action = match env.parameters[0].as_object().unwrap() {
        Some(id) => id,
        None => return Err(env.throw_exception("java/lang/NullPointerException", None)),
    };

    Ok(Some(
        env.invoke_instance_method(
            InvokeType::Virtual,
            action,
            env.get_class_id("java/security/PrivilegedAction")?,
            "run",
            "()Ljava/lang/Object;",
            &[],
        )?
        .unwrap(),
    ))
}

#[allow(non_snake_case)]
fn Java_java_security_AccessController_getStackAccessControlContext(_: &JniEnv) -> RuntimeResult<Option<JavaValue>> {
    Ok(Some(JavaValue::Object(None)))
}

pub fn initialize(cp: &mut Classpath) {
    register_jni!(
        cp,
        Java_java_security_AccessController_doPrivileged,
        Java_java_security_AccessController_getStackAccessControlContext
    );
}
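Each binding's symbol name follows the JNI convention `Java_<package>_<Class>_<method>`, with the `/` separators of the internal class name replaced by `_`. A tiny illustrative check of that mapping (the `mangle` helper is hypothetical, not part of this runtime):

```rust
// Sketch of the Java_<package>_<Class>_<method> naming convention the
// bindings above rely on. `mangle` is illustrative only.
fn mangle(class: &str, method: &str) -> String {
    format!("Java_{}_{}", class.replace('/', "_"), method)
}

fn main() {
    assert_eq!(
        mangle("java/security/AccessController", "doPrivileged"),
        "Java_java_security_AccessController_doPrivileged"
    );
    println!("symbol name matches the function defined above");
}
```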
29.135135
117
0.650278
61448aa4f18353964a41cd7762901f7b21563759
5,828
#[allow(dead_code)] use aoc_runner_derive::{aoc, aoc_generator}; #[allow(unused_imports)] use std::cmp::Ordering; use std::collections::hash_map::DefaultHasher; use std::collections::{HashMap, HashSet, VecDeque}; use std::hash::Hash; use std::hash::Hasher; #[aoc_generator(day22)] fn parse_input(input: &str) -> (Deck, Deck) { let mut player1 = Deck::new(); let mut player2 = Deck::new(); let mut p1 = true; for line in input.lines() { if let Ok(n) = line.parse::<usize>() { match p1 { true => player1.push_back(n), false => player2.push_back(n), } } else if line == "Player 2:" { p1 = false; } } (player1, player2) } #[aoc(day22, part1)] fn part1((player1, player2): &(Deck, Deck)) -> usize { let mut player1 = player1.clone(); let mut player2 = player2.clone(); loop { if player1.len() == 0 || player2.len() == 0 { break; } let n1 = player1.pop_front().unwrap(); let n2 = player2.pop_front().unwrap(); if n1 > n2 { player1.push_back(n1); player1.push_back(n2); } else { player2.push_back(n2); player2.push_back(n1); } } if player1.len() != 0 { calculate_score(&mut player1) } else { calculate_score(&mut player2) } } type Cache = HashMap<u64, HashSet<u64>>; type Deck = VecDeque<usize>; #[aoc(day22, part2)] fn part2((player1, player2): &(Deck, Deck)) -> usize { let mut player1 = player1.clone(); let mut player2 = player2.clone(); let p1_win = play_game(&mut player1, &mut player2, 1); //println!("P1:{:?}", player1); //println!("P2:{:?}", player2); //println!("WINNER P1:{:?}", p1_win); if p1_win { calculate_score(&mut player1) } else { calculate_score(&mut player2) } } fn play_game(player1: &mut Deck, player2: &mut Deck, game: usize) -> bool { let mut played_rounds = Cache::new(); let mut round = 0; loop { if player1.len() == 0 || player2.len() == 0 { break; } round = round + 1; //println!("\n-- Round {:?} (Game {:?}) --", round, game); //println!("Player 1's deck:{:?}", player1); //println!("Player 2's deck:{:?}", player2); { let h1 = calculate_hash(&player1); let h2 = calculate_hash(&player2); let h2rounds = played_rounds.entry(h1).or_insert(HashSet::new()); if h2rounds.contains(&h2) { //println!("...already played ... 
player 1 wins"); return true; //player1 wins } else { h2rounds.insert(h2); } } //play let n1 = player1.pop_front().unwrap(); let n2 = player2.pop_front().unwrap(); //println!("Player 1's plays:{:?}", n1); //println!("Player 2's plays:{:?}", n2); #[allow(unused_assignments)] let mut won_by_player1 = false; if player1.len() < n1 || player2.len() < n2 { //Winner determined by highest number won_by_player1 = n1 > n2; } else { //Recursive Game let mut p1sub = player1.iter().take(n1).fold(VecDeque::new(), |mut acc, v| { acc.push_back(*v); acc }); let mut p2sub = player2.iter().take(n2).fold(VecDeque::new(), |mut acc, v| { acc.push_back(*v); acc }); //println!("------ going into sub game -------",); won_by_player1 = play_game(&mut p1sub, &mut p2sub, game + 1); //println!("------ leaving sub game -------",); } if won_by_player1 { player1.push_back(n1); player1.push_back(n2); //println!("Player 1 wins game {:?} round {:?}", game, round); } else { player2.push_back(n2); player2.push_back(n1); //println!("Player 2 wins game {:?} round {:?}", game, round); } } /*match player1.len() != 0 { true => //println!("The winner of game {:?} is player 1!", game), false => //println!("The winner of game {:?} is player 2!", game), }*/ //println!(""); player1.len() != 0 } fn calculate_score(player: &mut Deck) -> usize { let mut result = 0; while let Some(x) = player.pop_front() { result += x * (player.len() + 1); } result } fn calculate_hash<T: Hash>(t: &T) -> u64 { let mut s = DefaultHasher::new(); t.hash(&mut s); s.finish() } #[cfg(test)] mod tests { use super::*; #[test] fn test_part2() { let data = parse_input(&SAMPLE_DATA); assert_eq!(291, part2(&data)); } #[test] fn test_part2_stalemate() { let data = parse_input(&STALEMATE_DATA); assert_eq!(105, part2(&data)); } #[test] fn test_part1() { let data = parse_input(&SAMPLE_DATA); assert_eq!(306, part1(&data)); } #[test] fn test_parse_input() { let (player1, player2) = parse_input(&SAMPLE_DATA); assert_eq!(5, player1.len()); assert_eq!(player1, [9, 2, 6, 3, 1]); assert_eq!(5, player2.len()); assert_eq!(player2, [5, 8, 4, 7, 10]); } lazy_static! { static ref STALEMATE_DATA: String = ["Player 1:", "43", "19", "", "Player 2:", "2", "29", "14",].join("\n"); static ref SAMPLE_DATA: String = [ "Player 1", "9", "2", "6", "3", "1", "", "Player 2:", "5", "8", "4", "7", "10", ] .join("\n"); } }
27.620853
88
0.491935
9b1c9b4074154360b31b3f83f0375bf6a54106dc
46
pub mod options;
pub mod rpc;
pub mod server;
11.5
16
0.73913
f5fdea9897c243ff077376ced9636b76ff079770
8,596
//Copyright 2020 WHTCORPS INC // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use // this file except in compliance with the License. You may obtain a copy of the // License at http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. const TORUS_NUMBER_LIMIT: usize = 1024; const TORUS_SIZE_LIMIT_MB: u64 = 512; pub struct Checker { tori: Vec<Vec<u8>>, cur_torus_size: u64, each_torus_size: u64, policy: CheckPolicy, } impl Checker { fn new(each_torus_size: u64, policy: CheckPolicy) -> Checker { Checker { each_torus_size, cur_torus_size: 0, tori: vec![], policy, } } } impl<E> SplitChecker<E> for Checker where E: KvEngine, { fn on_kv(&mut self, _: &mut ObserverContext<'_>, entry: &KeyEntry) -> bool { if self.tori.is_empty() || self.cur_torus_size >= self.each_torus_size { self.tori.push(entry.key().to_vec()); self.cur_torus_size = 0; } self.cur_torus_size += entry.entry_size() as u64; false } fn split_keys(&mut self) -> Vec<Vec<u8>> { let mid = self.tori.len() / 2; if mid == 0 { vec![] } else { let data_key = self.tori.swap_remove(mid); let key = keys::origin_key(&data_key).to_vec(); vec![key] } } fn approximate_split_keys(&mut self, region: &Region, engine: &E) -> Result<Vec<Vec<u8>>> { let ks = box_try!(get_region_approximate_middle(engine, region) .map(|keys| keys.map_or(vec![], |key| vec![key]))); Ok(ks) } fn policy(&self) -> CheckPolicy { self.policy } } #[derive(Clone)] pub struct HalfCheckObserver; impl Coprocessor for HalfCheckObserver {} impl<E> SplitCheckObserver<E> for HalfCheckObserver where E: KvEngine, { fn add_checker( &self, _: &mut ObserverContext<'_>, host: &mut Host<'_, E>, _: &E, policy: CheckPolicy, ) { if host.auto_split() { return; } host.add_checker(Box::new(Checker::new( half_split_torus_size(host.cfg.region_max_size.0), policy, ))) } } fn half_split_torus_size(region_max_size: u64) -> u64 { let mut half_split_torus_size = region_max_size / TORUS_NUMBER_LIMIT as u64; let torus_size_limit = ReadableSize::mb(TORUS_SIZE_LIMIT_MB).0; if half_split_torus_size == 0 { half_split_torus_size = 1; } else if half_split_torus_size > torus_size_limit { half_split_torus_size = torus_size_limit; } half_split_torus_size } /// Get region approximate middle key based on default and write brane size. 
pub fn get_region_approximate_middle( db: &impl KvEngine, region: &Region, ) -> Result<Option<Vec<u8>>> { let start_key = keys::enc_start_key(region); let end_key = keys::enc_end_key(region); let range = Range::new(&start_key, &end_key); Ok(box_try!( db.get_range_approximate_middle(range, region.get_id()) )) } #[cfg(test)] fn get_region_approximate_middle_cf( db: &impl KvEngine, cfname: &str, region: &Region, ) -> Result<Option<Vec<u8>>> { let start_key = keys::enc_start_key(region); let end_key = keys::enc_end_key(region); let range = Range::new(&start_key, &end_key); Ok(box_try!(db.get_range_approximate_middle_cf( cfname, range, region.get_id() ))) } #[cfg(test)] mod tests { use std::iter; use std::sync::mpsc; use std::sync::Arc; use lmdb::raw::Writable; use lmdb::raw::{BraneOptions, DBOptions}; use lmdb::raw_util::{new_engine_opt, BRANEOptions}; use lmdb::Compat; use engine_traits::{ALL_BRANES, BRANE_DEFAULT, LARGE_BRANES}; use ekvproto::metapb::Peer; use ekvproto::metapb::Region; use ekvproto::pdpb::CheckPolicy; use tempfile::Builder; use crate::store::{SplitCheckRunner, SplitCheckTask}; use lmdb::properties::RangePropertiesCollectorFactory; use einsteindb_util::config::ReadableSize; use einsteindb_util::escape; use einsteindb_util::worker::Runnable; use txn_types::Key; use super::super::size::tests::must_split_at; use super::*; use crate::coprocessor::{Config, CoprocessorHost}; #[test] fn test_split_check() { let path = Builder::new().prefix("test-violetabftstore").tempdir().unwrap(); let path_str = path.path().to_str().unwrap(); let db_opts = DBOptions::new(); let cfs_opts = ALL_BRANES .iter() .map(|brane| { let mut cf_opts = BraneOptions::new(); let f = Box::new(RangePropertiesCollectorFactory::default()); cf_opts.add_table_properties_collector_factory("einsteindb.size-collector", f); BRANEOptions::new(brane, cf_opts) }) .collect(); let engine = Arc::new(new_engine_opt(path_str, db_opts, cfs_opts).unwrap()); let mut region = Region::default(); region.set_id(1); region.mut_peers().push(Peer::default()); region.mut_region_epoch().set_version(2); region.mut_region_epoch().set_conf_ver(5); let (tx, rx) = mpsc::sync_channel(100); let mut cfg = Config::default(); cfg.region_max_size = ReadableSize(TORUS_NUMBER_LIMIT as u64); let mut runnable = SplitCheckRunner::new( engine.c().clone(), tx.clone(), CoprocessorHost::new(tx), cfg, ); // so split key will be z0005 let cf_handle = engine.cf_handle(BRANE_DEFAULT).unwrap(); for i in 0..11 { let k = format!("{:04}", i).into_bytes(); let k = keys::data_key(Key::from_raw(&k).as_encoded()); engine.put_cf(cf_handle, &k, &k).unwrap(); // Flush for every key so that we can know the exact middle key. 
engine.flush_cf(cf_handle, true).unwrap(); } runnable.run(SplitCheckTask::split_check( region.clone(), false, CheckPolicy::Scan, )); let split_key = Key::from_raw(b"0005"); must_split_at(&rx, &region, vec![split_key.clone().into_encoded()]); runnable.run(SplitCheckTask::split_check( region.clone(), false, CheckPolicy::Approximate, )); must_split_at(&rx, &region, vec![split_key.into_encoded()]); } #[test] fn test_get_region_approximate_middle_cf() { let tmp = Builder::new() .prefix("test_violetabftstore_util") .tempdir() .unwrap(); let path = tmp.path().to_str().unwrap(); let db_opts = DBOptions::new(); let mut cf_opts = BraneOptions::new(); cf_opts.set_level_zero_file_num_compaction_trigger(10); let f = Box::new(RangePropertiesCollectorFactory::default()); cf_opts.add_table_properties_collector_factory("einsteindb.size-collector", f); let cfs_opts = LARGE_BRANES .iter() .map(|brane| BRANEOptions::new(brane, cf_opts.clone())) .collect(); let engine = Arc::new(lmdb::raw_util::new_engine_opt(path, db_opts, cfs_opts).unwrap()); let cf_handle = engine.cf_handle(BRANE_DEFAULT).unwrap(); let mut big_value = Vec::with_capacity(256); big_value.extend(iter::repeat(b'v').take(256)); for i in 0..100 { let k = format!("key_{:03}", i).into_bytes(); let k = keys::data_key(Key::from_raw(&k).as_encoded()); engine.put_cf(cf_handle, &k, &big_value).unwrap(); // Flush for every key so that we can know the exact middle key. engine.flush_cf(cf_handle, true).unwrap(); } let mut region = Region::default(); region.mut_peers().push(Peer::default()); let middle_key = get_region_approximate_middle_cf(engine.c(), BRANE_DEFAULT, &region) .unwrap() .unwrap(); let middle_key = Key::from_encoded_slice(keys::origin_key(&middle_key)) .into_raw() .unwrap(); assert_eq!(escape(&middle_key), "key_049"); } }
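`half_split_torus_size` spreads the region's maximum size across `TORUS_NUMBER_LIMIT` buckets, then clamps the bucket size into `[1, 512 MiB]`. An equivalent standalone restatement using `Ord::clamp`, with the two limits inlined (a sketch, not the crate's code):

```rust
// Standalone restatement of half_split_torus_size()'s clamping behaviour.
const TORUS_NUMBER_LIMIT: u64 = 1024;
const TORUS_SIZE_LIMIT: u64 = 512 * 1024 * 1024; // ReadableSize::mb(512)

fn half_split_torus_size(region_max_size: u64) -> u64 {
    (region_max_size / TORUS_NUMBER_LIMIT).clamp(1, TORUS_SIZE_LIMIT)
}

fn main() {
    assert_eq!(half_split_torus_size(0), 1); // never a zero-sized bucket
    assert_eq!(half_split_torus_size(1024), 1);
    assert_eq!(half_split_torus_size(96 * 1024 * 1024), 96 * 1024); // 96 MiB region
    assert_eq!(half_split_torus_size(u64::MAX), TORUS_SIZE_LIMIT); // capped at 512 MiB
    println!("clamping ok");
}
```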
32.934866
95
0.608772
9b768d9f873db2258c880b1b357dcb6f23ec4ebb
53
#[derive(Default)]
pub struct SystemActionManager {}
17.666667
33
0.773585
33f0b6af7ebe0c5f4d8426c5f41329ac7fb3b81d
4,397
/// A runtime module template with necessary imports

/// Feel free to remove or edit this file as needed.
/// If you change the name of this file, make sure to update its references in runtime/src/lib.rs
/// If you remove this file, you can remove those references

/// For more guidance on Substrate modules, see the example module
/// https://github.com/paritytech/substrate/blob/master/frame/example/src/lib.rs

use support::{decl_module, decl_storage, decl_event, dispatch::Result};
use system::ensure_signed;

/// The module's configuration trait.
pub trait Trait: system::Trait {
	// TODO: Add other types and constants required to configure this module.

	/// The overarching event type.
	type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
}

// This module's storage items.
decl_storage! {
	trait Store for Module<T: Trait> as TemplateModule {
		// Just a dummy storage item.
		// Here we are declaring a StorageValue, `Something` as a Option<u32>
		// `get(fn something)` is the default getter which returns either the stored `u32` or `None` if nothing stored
		Something get(fn something): Option<u32>;
	}
}

// The module's dispatchable functions.
decl_module! {
	/// The module declaration.
	pub struct Module<T: Trait> for enum Call where origin: T::Origin {
		// Initializing events
		// this is needed only if you are using events in your module
		fn deposit_event() = default;

		// Just a dummy entry point.
		// function that can be called by the external world as an extrinsics call
		// takes a parameter of the type `AccountId`, stores it and emits an event
		pub fn do_something(origin, something: u32) -> Result {
			// TODO: You only need this if you want to check it was signed.
			let who = ensure_signed(origin)?;

			// TODO: Code to execute when something calls this.
			// For example: the following line stores the passed in u32 in the storage
			Something::put(something);

			// here we are raising the Something event
			Self::deposit_event(RawEvent::SomethingStored(something, who));
			Ok(())
		}
	}
}

decl_event!(
	pub enum Event<T> where AccountId = <T as system::Trait>::AccountId {
		// Just a dummy event.
		// Event `Something` is declared with a parameter of the type `u32` and `AccountId`
		// To emit this event, we call the deposit function, from our runtime functions
		SomethingStored(u32, AccountId),
	}
);

/// tests for this module
#[cfg(test)]
mod tests {
	use super::*;

	use primitives::H256;
	use support::{impl_outer_origin, assert_ok, parameter_types, weights::Weight};
	use sr_primitives::{
		traits::{BlakeTwo256, IdentityLookup}, testing::Header, Perbill,
	};

	impl_outer_origin! {
		pub enum Origin for Test {}
	}

	// For testing the module, we construct most of a mock runtime. This means
	// first constructing a configuration type (`Test`) which `impl`s each of the
	// configuration traits of modules we want to use.
	#[derive(Clone, Eq, PartialEq)]
	pub struct Test;
	parameter_types! {
		pub const BlockHashCount: u64 = 250;
		pub const MaximumBlockWeight: Weight = 1024;
		pub const MaximumBlockLength: u32 = 2 * 1024;
		pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75);
	}
	impl system::Trait for Test {
		type Origin = Origin;
		type Call = ();
		type Index = u64;
		type BlockNumber = u64;
		type Hash = H256;
		type Hashing = BlakeTwo256;
		type AccountId = u64;
		type Lookup = IdentityLookup<Self::AccountId>;
		type Header = Header;
		type Event = ();
		type BlockHashCount = BlockHashCount;
		type MaximumBlockWeight = MaximumBlockWeight;
		type MaximumBlockLength = MaximumBlockLength;
		type AvailableBlockRatio = AvailableBlockRatio;
		type Version = ();
	}
	impl Trait for Test {
		type Event = ();
	}
	type TemplateModule = Module<Test>;

	// This function basically just builds a genesis storage key/value store according to
	// our desired mockup.
	fn new_test_ext() -> runtime_io::TestExternalities {
		system::GenesisConfig::default().build_storage::<Test>().unwrap().into()
	}

	#[test]
	fn it_works_for_default_value() {
		new_test_ext().execute_with(|| {
			// Just a dummy test for the dummy function `do_something`
			// calling the `do_something` function with a value 42
			assert_ok!(TemplateModule::do_something(Origin::signed(1), 42));
			// asserting that the stored value is equal to what we stored
			assert_eq!(TemplateModule::something(), Some(42));
		});
	}
}
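Under the same mock runtime, a further test in the template's style shows that `Something` is a single `Option<u32>` slot, so a second call simply overwrites the first. This is a hypothetical addition that would sit alongside `it_works_for_default_value` inside the `tests` module:

```rust
#[test]
fn it_overwrites_previous_value() {
	new_test_ext().execute_with(|| {
		assert_ok!(TemplateModule::do_something(Origin::signed(1), 42));
		assert_ok!(TemplateModule::do_something(Origin::signed(2), 7));
		// `Something` is a single StorageValue; the last write wins.
		assert_eq!(TemplateModule::something(), Some(7));
	});
}
```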
33.310606
112
0.718444
b94e39ba4cbe0445211fa09caa9975e17d282454
68,334
#[macro_use] extern crate lazy_static; mod client; use { crate::client::*, clap::{ crate_description, crate_name, crate_version, value_t, value_t_or_exit, App, AppSettings, Arg, ArgGroup, SubCommand, }, solana_clap_utils::{ input_parsers::{keypair_of, pubkey_of}, input_validators::{ is_amount, is_keypair, is_keypair_or_ask_keyword, is_parsable, is_pubkey, is_url, }, keypair::signer_from_path, }, solana_client::rpc_client::RpcClient, solana_program::{ borsh::{get_instance_packed_len, get_packed_len}, instruction::Instruction, program_pack::Pack, pubkey::Pubkey, }, solana_sdk::{ commitment_config::CommitmentConfig, native_token::{self, Sol}, signature::{Keypair, Signer}, system_instruction, transaction::Transaction, }, spl_associated_token_account::{create_associated_token_account, get_associated_token_address}, spl_stake_pool::{ self, find_stake_program_address, find_withdraw_authority_program_address, instruction::PreferredValidatorType, stake_program::{self, StakeState}, state::{Fee, StakePool, ValidatorList}, }, std::process::exit, }; struct Config { rpc_client: RpcClient, verbose: bool, manager: Box<dyn Signer>, staker: Box<dyn Signer>, depositor: Option<Box<dyn Signer>>, token_owner: Box<dyn Signer>, fee_payer: Box<dyn Signer>, dry_run: bool, no_update: bool, } type Error = Box<dyn std::error::Error>; type CommandResult = Result<(), Error>; const STAKE_STATE_LEN: usize = 200; lazy_static! { static ref MIN_STAKE_BALANCE: u64 = native_token::sol_to_lamports(1.0); } macro_rules! unique_signers { ($vec:ident) => { $vec.sort_by_key(|l| l.pubkey()); $vec.dedup(); }; } fn check_fee_payer_balance(config: &Config, required_balance: u64) -> Result<(), Error> { let balance = config.rpc_client.get_balance(&config.fee_payer.pubkey())?; if balance < required_balance { Err(format!( "Fee payer, {}, has insufficient balance: {} required, {} available", config.fee_payer.pubkey(), Sol(required_balance), Sol(balance) ) .into()) } else { Ok(()) } } fn send_transaction( config: &Config, transaction: Transaction, ) -> solana_client::client_error::Result<()> { if config.dry_run { let result = config.rpc_client.simulate_transaction(&transaction)?; println!("Simulate result: {:?}", result); } else { let signature = config .rpc_client .send_and_confirm_transaction_with_spinner(&transaction)?; println!("Signature: {}", signature); } Ok(()) } fn command_create_pool( config: &Config, deposit_authority: Option<Pubkey>, fee: Fee, max_validators: u32, stake_pool_keypair: Option<Keypair>, mint_keypair: Option<Keypair>, ) -> CommandResult { let reserve_stake = Keypair::new(); println!("Creating reserve stake {}", reserve_stake.pubkey()); let mint_keypair = mint_keypair.unwrap_or_else(Keypair::new); println!("Creating mint {}", mint_keypair.pubkey()); let pool_fee_account = Keypair::new(); println!( "Creating pool fee collection account {}", pool_fee_account.pubkey() ); let stake_pool_keypair = stake_pool_keypair.unwrap_or_else(Keypair::new); let validator_list = Keypair::new(); let reserve_stake_balance = config .rpc_client .get_minimum_balance_for_rent_exemption(STAKE_STATE_LEN)? 
+ 1; let mint_account_balance = config .rpc_client .get_minimum_balance_for_rent_exemption(spl_token::state::Mint::LEN)?; let pool_fee_account_balance = config .rpc_client .get_minimum_balance_for_rent_exemption(spl_token::state::Account::LEN)?; let stake_pool_account_lamports = config .rpc_client .get_minimum_balance_for_rent_exemption(get_packed_len::<StakePool>())?; let empty_validator_list = ValidatorList::new(max_validators); let validator_list_size = get_instance_packed_len(&empty_validator_list)?; let validator_list_balance = config .rpc_client .get_minimum_balance_for_rent_exemption(validator_list_size)?; let total_rent_free_balances = reserve_stake_balance + mint_account_balance + pool_fee_account_balance + stake_pool_account_lamports + validator_list_balance; let default_decimals = spl_token::native_mint::DECIMALS; // Calculate withdraw authority used for minting pool tokens let (withdraw_authority, _) = find_withdraw_authority_program_address( &spl_stake_pool::id(), &stake_pool_keypair.pubkey(), ); if config.verbose { println!("Stake pool withdraw authority {}", withdraw_authority); } let mut setup_transaction = Transaction::new_with_payer( &[ // Account for the stake pool reserve system_instruction::create_account( &config.fee_payer.pubkey(), &reserve_stake.pubkey(), reserve_stake_balance, STAKE_STATE_LEN as u64, &stake_program::id(), ), stake_program::initialize( &reserve_stake.pubkey(), &stake_program::Authorized { staker: withdraw_authority, withdrawer: withdraw_authority, }, &stake_program::Lockup::default(), ), // Account for the stake pool mint system_instruction::create_account( &config.fee_payer.pubkey(), &mint_keypair.pubkey(), mint_account_balance, spl_token::state::Mint::LEN as u64, &spl_token::id(), ), // Account for the pool fee accumulation system_instruction::create_account( &config.fee_payer.pubkey(), &pool_fee_account.pubkey(), pool_fee_account_balance, spl_token::state::Account::LEN as u64, &spl_token::id(), ), // Initialize pool token mint account spl_token::instruction::initialize_mint( &spl_token::id(), &mint_keypair.pubkey(), &withdraw_authority, None, default_decimals, )?, // Initialize fee receiver account spl_token::instruction::initialize_account( &spl_token::id(), &pool_fee_account.pubkey(), &mint_keypair.pubkey(), &config.manager.pubkey(), )?, ], Some(&config.fee_payer.pubkey()), ); let mut initialize_transaction = Transaction::new_with_payer( &[ // Validator stake account list storage system_instruction::create_account( &config.fee_payer.pubkey(), &validator_list.pubkey(), validator_list_balance, validator_list_size as u64, &spl_stake_pool::id(), ), // Account for the stake pool system_instruction::create_account( &config.fee_payer.pubkey(), &stake_pool_keypair.pubkey(), stake_pool_account_lamports, get_packed_len::<StakePool>() as u64, &spl_stake_pool::id(), ), // Initialize stake pool spl_stake_pool::instruction::initialize( &spl_stake_pool::id(), &stake_pool_keypair.pubkey(), &config.manager.pubkey(), &config.staker.pubkey(), &validator_list.pubkey(), &reserve_stake.pubkey(), &mint_keypair.pubkey(), &pool_fee_account.pubkey(), &spl_token::id(), deposit_authority, fee, max_validators, ), ], Some(&config.fee_payer.pubkey()), ); let (recent_blockhash, fee_calculator) = config.rpc_client.get_recent_blockhash()?; check_fee_payer_balance( config, total_rent_free_balances + fee_calculator.calculate_fee(&setup_transaction.message()) + fee_calculator.calculate_fee(&initialize_transaction.message()), )?; let mut setup_signers = vec![ 
config.fee_payer.as_ref(), &mint_keypair, &pool_fee_account, &reserve_stake, ]; unique_signers!(setup_signers); setup_transaction.sign(&setup_signers, recent_blockhash); send_transaction(&config, setup_transaction)?; println!("Creating stake pool {}", stake_pool_keypair.pubkey()); let mut initialize_signers = vec![ config.fee_payer.as_ref(), &stake_pool_keypair, &validator_list, config.manager.as_ref(), ]; unique_signers!(initialize_signers); initialize_transaction.sign(&initialize_signers, recent_blockhash); send_transaction(&config, initialize_transaction)?; Ok(()) } fn command_vsa_create( config: &Config, stake_pool_address: &Pubkey, vote_account: &Pubkey, ) -> CommandResult { println!("Creating stake account on {}", vote_account); let mut transaction = Transaction::new_with_payer( &[ // Create new validator stake account address spl_stake_pool::instruction::create_validator_stake_account_with_vote( &spl_stake_pool::id(), &stake_pool_address, &config.staker.pubkey(), &config.fee_payer.pubkey(), &vote_account, ), ], Some(&config.fee_payer.pubkey()), ); let (recent_blockhash, fee_calculator) = config.rpc_client.get_recent_blockhash()?; check_fee_payer_balance(config, fee_calculator.calculate_fee(&transaction.message()))?; transaction.sign( &[config.fee_payer.as_ref(), config.staker.as_ref()], recent_blockhash, ); send_transaction(&config, transaction)?; Ok(()) } fn command_vsa_add( config: &Config, stake_pool_address: &Pubkey, vote_account: &Pubkey, ) -> CommandResult { let (stake_account_address, _) = find_stake_program_address(&spl_stake_pool::id(), vote_account, stake_pool_address); let stake_pool = get_stake_pool(&config.rpc_client, stake_pool_address)?; let validator_list = get_validator_list(&config.rpc_client, &stake_pool.validator_list)?; if validator_list.contains(vote_account) { println!( "Stake pool already contains validator {}, ignoring", vote_account ); return Ok(()); } let stake_state = get_stake_state(&config.rpc_client, &stake_account_address)?; if let stake_program::StakeState::Stake(meta, _stake) = stake_state { if meta.authorized.withdrawer != config.staker.pubkey() { let error = format!( "Stake account withdraw authority must be the staker {}, actual {}", config.staker.pubkey(), meta.authorized.withdrawer ); return Err(error.into()); } } else { return Err("Stake account is not active.".into()); } if !config.no_update { command_update(config, stake_pool_address, false, false)?; } let instruction = spl_stake_pool::instruction::add_validator_to_pool_with_vote( &spl_stake_pool::id(), &stake_pool, &stake_pool_address, &vote_account, ); let mut transaction = Transaction::new_with_payer(&[instruction], Some(&config.fee_payer.pubkey())); let (recent_blockhash, fee_calculator) = config.rpc_client.get_recent_blockhash()?; check_fee_payer_balance(config, fee_calculator.calculate_fee(&transaction.message()))?; let mut signers = vec![config.fee_payer.as_ref(), config.staker.as_ref()]; unique_signers!(signers); transaction.sign(&signers, recent_blockhash); send_transaction(&config, transaction)?; Ok(()) } fn command_vsa_remove( config: &Config, stake_pool_address: &Pubkey, vote_account: &Pubkey, new_authority: &Option<Pubkey>, ) -> CommandResult { if !config.no_update { command_update(config, stake_pool_address, false, false)?; } let stake_pool = get_stake_pool(&config.rpc_client, stake_pool_address)?; let staker_pubkey = config.staker.pubkey(); let new_authority = new_authority.as_ref().unwrap_or(&staker_pubkey); let mut transaction = Transaction::new_with_payer( &[ // Create new 
validator stake account address spl_stake_pool::instruction::remove_validator_from_pool_with_vote( &spl_stake_pool::id(), &stake_pool, stake_pool_address, vote_account, new_authority, ), ], Some(&config.fee_payer.pubkey()), ); let (recent_blockhash, fee_calculator) = config.rpc_client.get_recent_blockhash()?; check_fee_payer_balance(config, fee_calculator.calculate_fee(&transaction.message()))?; transaction.sign( &[config.fee_payer.as_ref(), config.staker.as_ref()], recent_blockhash, ); send_transaction(&config, transaction)?; Ok(()) } fn command_increase_validator_stake( config: &Config, stake_pool_address: &Pubkey, vote_account: &Pubkey, amount: f64, ) -> CommandResult { let lamports = native_token::sol_to_lamports(amount); if !config.no_update { command_update(config, stake_pool_address, false, false)?; } let stake_pool = get_stake_pool(&config.rpc_client, stake_pool_address)?; let instruction = spl_stake_pool::instruction::increase_validator_stake_with_vote( &spl_stake_pool::id(), &stake_pool, stake_pool_address, vote_account, lamports, ); let mut transaction = Transaction::new_with_payer(&[instruction], Some(&config.fee_payer.pubkey())); let (recent_blockhash, fee_calculator) = config.rpc_client.get_recent_blockhash()?; check_fee_payer_balance(config, fee_calculator.calculate_fee(&transaction.message()))?; transaction.sign( &[config.fee_payer.as_ref(), config.staker.as_ref()], recent_blockhash, ); send_transaction(&config, transaction)?; Ok(()) } fn command_decrease_validator_stake( config: &Config, stake_pool_address: &Pubkey, vote_account: &Pubkey, amount: f64, ) -> CommandResult { let lamports = native_token::sol_to_lamports(amount); if !config.no_update { command_update(config, stake_pool_address, false, false)?; } let stake_pool = get_stake_pool(&config.rpc_client, stake_pool_address)?; let instruction = spl_stake_pool::instruction::decrease_validator_stake_with_vote( &spl_stake_pool::id(), &stake_pool, stake_pool_address, vote_account, lamports, ); let mut transaction = Transaction::new_with_payer(&[instruction], Some(&config.fee_payer.pubkey())); let (recent_blockhash, fee_calculator) = config.rpc_client.get_recent_blockhash()?; check_fee_payer_balance(config, fee_calculator.calculate_fee(&transaction.message()))?; transaction.sign( &[config.fee_payer.as_ref(), config.staker.as_ref()], recent_blockhash, ); send_transaction(&config, transaction)?; Ok(()) } fn command_set_preferred_validator( config: &Config, stake_pool_address: &Pubkey, preferred_type: PreferredValidatorType, vote_address: Option<Pubkey>, ) -> CommandResult { let stake_pool = get_stake_pool(&config.rpc_client, stake_pool_address)?; let mut transaction = Transaction::new_with_payer( &[spl_stake_pool::instruction::set_preferred_validator( &spl_stake_pool::id(), &stake_pool_address, &config.staker.pubkey(), &stake_pool.validator_list, preferred_type, vote_address, )], Some(&config.fee_payer.pubkey()), ); let (recent_blockhash, fee_calculator) = config.rpc_client.get_recent_blockhash()?; check_fee_payer_balance(config, fee_calculator.calculate_fee(&transaction.message()))?; let mut signers = vec![config.fee_payer.as_ref(), config.staker.as_ref()]; unique_signers!(signers); transaction.sign(&signers, recent_blockhash); send_transaction(&config, transaction)?; Ok(()) } fn add_associated_token_account( config: &Config, mint: &Pubkey, instructions: &mut Vec<Instruction>, rent_free_balances: &mut u64, ) -> Pubkey { // Account for tokens not specified, creating one let account = 
get_associated_token_address(&config.fee_payer.pubkey(), mint); if get_token_account(&config.rpc_client, &account, mint).is_err() { println!("Creating account to receive tokens {}", account); let min_account_balance = config .rpc_client .get_minimum_balance_for_rent_exemption(spl_token::state::Account::LEN) .unwrap(); instructions.push(create_associated_token_account( &config.fee_payer.pubkey(), &config.fee_payer.pubkey(), mint, )); *rent_free_balances += min_account_balance; } account } fn command_deposit( config: &Config, stake_pool_address: &Pubkey, stake: &Pubkey, token_receiver: &Option<Pubkey>, ) -> CommandResult { if !config.no_update { command_update(config, stake_pool_address, false, false)?; } let stake_pool = get_stake_pool(&config.rpc_client, stake_pool_address)?; let stake_state = get_stake_state(&config.rpc_client, &stake)?; if config.verbose { println!("Depositing stake account {:?}", stake_state); } let vote_account = match stake_state { StakeState::Stake(_, stake) => Ok(stake.delegation.voter_pubkey), _ => Err("Wrong stake account state, must be delegated to validator"), }?; // Check if this vote account has staking account in the pool let validator_list = get_validator_list(&config.rpc_client, &stake_pool.validator_list)?; if !validator_list.contains(&vote_account) { return Err("Stake account for this validator does not exist in the pool.".into()); } // Calculate validator stake account address linked to the pool let (validator_stake_account, _) = find_stake_program_address(&spl_stake_pool::id(), &vote_account, stake_pool_address); let validator_stake_state = get_stake_state(&config.rpc_client, &validator_stake_account)?; println!("Depositing into stake account {}", validator_stake_account); if config.verbose { println!("{:?}", validator_stake_state); } let mut instructions: Vec<Instruction> = vec![]; let mut signers = vec![config.fee_payer.as_ref(), config.staker.as_ref()]; let mut total_rent_free_balances: u64 = 0; // Create token account if not specified let token_receiver = token_receiver.unwrap_or(add_associated_token_account( &config, &stake_pool.pool_mint, &mut instructions, &mut total_rent_free_balances, )); let pool_withdraw_authority = find_withdraw_authority_program_address(&spl_stake_pool::id(), stake_pool_address).0; let mut deposit_instructions = if let Some(deposit_authority) = config.depositor.as_ref() { signers.push(deposit_authority.as_ref()); if deposit_authority.pubkey() != stake_pool.deposit_authority { let error = format!( "Invalid deposit authority specified, expected {}, received {}", stake_pool.deposit_authority, deposit_authority.pubkey() ); return Err(error.into()); } spl_stake_pool::instruction::deposit_with_authority( &spl_stake_pool::id(), &stake_pool_address, &stake_pool.validator_list, &deposit_authority.pubkey(), &pool_withdraw_authority, &stake, &config.staker.pubkey(), &validator_stake_account, &token_receiver, &stake_pool.pool_mint, &spl_token::id(), ) } else { spl_stake_pool::instruction::deposit( &spl_stake_pool::id(), &stake_pool_address, &stake_pool.validator_list, &pool_withdraw_authority, &stake, &config.staker.pubkey(), &validator_stake_account, &token_receiver, &stake_pool.pool_mint, &spl_token::id(), ) }; instructions.append(&mut deposit_instructions); let mut transaction = Transaction::new_with_payer(&instructions, Some(&config.fee_payer.pubkey())); let (recent_blockhash, fee_calculator) = config.rpc_client.get_recent_blockhash()?; check_fee_payer_balance( config, total_rent_free_balances + 
fee_calculator.calculate_fee(&transaction.message()), )?; unique_signers!(signers); transaction.sign(&signers, recent_blockhash); send_transaction(&config, transaction)?; Ok(()) } fn command_list(config: &Config, stake_pool_address: &Pubkey) -> CommandResult { let stake_pool = get_stake_pool(&config.rpc_client, stake_pool_address)?; let validator_list = get_validator_list(&config.rpc_client, &stake_pool.validator_list)?; let pool_mint = get_token_mint(&config.rpc_client, &stake_pool.pool_mint)?; let epoch_info = config.rpc_client.get_epoch_info()?; for validator in validator_list.validators { println!( "Validator Vote Account: {}\tBalance: {}\tLast Update Epoch: {}{}", validator.vote_account_address, Sol(validator.stake_lamports()), validator.last_update_epoch, if validator.last_update_epoch != epoch_info.epoch { " [UPDATE REQUIRED]" } else { "" } ); } println!( "Total Pool Stake: {}{}", Sol(stake_pool.total_stake_lamports), if stake_pool.last_update_epoch != epoch_info.epoch { " [UPDATE REQUIRED]" } else { "" } ); println!( "Total Pool Tokens: {}", spl_token::amount_to_ui_amount(stake_pool.pool_token_supply, pool_mint.decimals) ); if config.verbose { println!(); let pool_withdraw_authority = find_withdraw_authority_program_address(&spl_stake_pool::id(), stake_pool_address).0; let accounts = get_stake_accounts_by_withdraw_authority(&config.rpc_client, &pool_withdraw_authority)?; if accounts.is_empty() { return Err(format!("No stake accounts found for {}", pool_withdraw_authority).into()); } let mut total_stake_lamports: u64 = 0; for (pubkey, stake_lamports, stake_state) in accounts { total_stake_lamports += stake_lamports; println!( "Stake Account: {}\tVote Account: {}\t{}", pubkey, stake_state.delegation().expect("delegation").voter_pubkey, Sol(stake_lamports) ); } println!("Total Stake Account Balance: {}", Sol(total_stake_lamports)); if pool_mint.supply != stake_pool.pool_token_supply { println!( "BUG! Pool Tokens supply mismatch. 
Pool mint reports {}", spl_token::amount_to_ui_amount(pool_mint.supply, pool_mint.decimals) ); } } Ok(()) } fn command_update( config: &Config, stake_pool_address: &Pubkey, force: bool, no_merge: bool, ) -> CommandResult { let stake_pool = get_stake_pool(&config.rpc_client, stake_pool_address)?; let epoch_info = config.rpc_client.get_epoch_info()?; if stake_pool.last_update_epoch == epoch_info.epoch { if force { println!("Update not required, but --force flag specified, so doing it anyway"); } else { println!("Update not required"); return Ok(()); } } let validator_list = get_validator_list(&config.rpc_client, &stake_pool.validator_list)?; let instructions = spl_stake_pool::instruction::update_stake_pool( &spl_stake_pool::id(), &stake_pool, &validator_list, stake_pool_address, no_merge, ); // TODO: A faster solution would be to send all the `update_validator_list_balance` instructions concurrently for instruction in instructions { let mut transaction = Transaction::new_with_payer(&[instruction], Some(&config.fee_payer.pubkey())); let (recent_blockhash, fee_calculator) = config.rpc_client.get_recent_blockhash()?; check_fee_payer_balance(config, fee_calculator.calculate_fee(&transaction.message()))?; transaction.sign(&[config.fee_payer.as_ref()], recent_blockhash); send_transaction(&config, transaction)?; } Ok(()) } #[derive(PartialEq, Debug)] struct WithdrawAccount { address: Pubkey, pool_amount: u64, } fn prepare_withdraw_accounts( rpc_client: &RpcClient, stake_pool: &StakePool, pool_withdraw_authority: &Pubkey, pool_amount: u64, ) -> Result<Vec<WithdrawAccount>, Error> { let mut accounts = get_stake_accounts_by_withdraw_authority(rpc_client, &pool_withdraw_authority)?; if accounts.is_empty() { return Err("No accounts found.".to_string().into()); } let min_balance = rpc_client.get_minimum_balance_for_rent_exemption(STAKE_STATE_LEN)? 
+ 1; let pool_mint = get_token_mint(rpc_client, &stake_pool.pool_mint)?; // Sort from highest to lowest balance accounts.sort_by(|a, b| b.1.cmp(&a.1)); // Prepare the list of accounts to withdraw from let mut withdraw_from: Vec<WithdrawAccount> = vec![]; let mut remaining_amount = pool_amount; // Go through available accounts and withdraw from largest to smallest for (address, lamports, _) in accounts { if lamports <= min_balance { continue; } let available_for_withdrawal = stake_pool .calc_lamports_withdraw_amount(lamports - *MIN_STAKE_BALANCE) .unwrap(); let pool_amount = u64::min(available_for_withdrawal, remaining_amount); // Those accounts will be withdrawn completely with `claim` instruction withdraw_from.push(WithdrawAccount { address, pool_amount, }); remaining_amount -= pool_amount; if remaining_amount == 0 { break; } } // Not enough stake to withdraw the specified amount if remaining_amount > 0 { return Err(format!( "No stake accounts found in this pool with enough balance to withdraw {} pool tokens.", spl_token::amount_to_ui_amount(pool_amount, pool_mint.decimals) ) .into()); } Ok(withdraw_from) } fn command_withdraw( config: &Config, stake_pool_address: &Pubkey, use_reserve: bool, vote_account_address: &Option<Pubkey>, stake_receiver_param: &Option<Pubkey>, pool_token_account: &Option<Pubkey>, pool_amount: f64, ) -> CommandResult { if !config.no_update { command_update(config, stake_pool_address, false, false)?; } let stake_pool = get_stake_pool(&config.rpc_client, stake_pool_address)?; let pool_mint = get_token_mint(&config.rpc_client, &stake_pool.pool_mint)?; let pool_amount = spl_token::ui_amount_to_amount(pool_amount, pool_mint.decimals); let pool_withdraw_authority = find_withdraw_authority_program_address(&spl_stake_pool::id(), stake_pool_address).0; let pool_token_account = pool_token_account.unwrap_or(get_associated_token_address( &config.fee_payer.pubkey(), &stake_pool.pool_mint, )); let token_account = get_token_account( &config.rpc_client, &pool_token_account, &stake_pool.pool_mint, )?; // Check withdraw_from balance if token_account.amount < pool_amount { return Err(format!( "Not enough token balance to withdraw {} pool tokens.\nMaximum withdraw amount is {} pool tokens.", spl_token::amount_to_ui_amount(pool_amount, pool_mint.decimals), spl_token::amount_to_ui_amount(token_account.amount, pool_mint.decimals) ) .into()); } let withdraw_accounts = if use_reserve { vec![WithdrawAccount { address: stake_pool.reserve_stake, pool_amount, }] } else if let Some(vote_account_address) = vote_account_address { let (stake_account_address, _) = find_stake_program_address( &spl_stake_pool::id(), &vote_account_address, stake_pool_address, ); let stake_account = config.rpc_client.get_account(&stake_account_address)?; let available_for_withdrawal = stake_pool .calc_lamports_withdraw_amount(stake_account.lamports - *MIN_STAKE_BALANCE) .unwrap(); if available_for_withdrawal < pool_amount { return Err(format!( "Not enough lamports available for withdrawal from {}, {} asked, {} available", stake_account_address, pool_amount, available_for_withdrawal ) .into()); } vec![WithdrawAccount { address: stake_account_address, pool_amount, }] } else { // Get the list of accounts to withdraw from prepare_withdraw_accounts( &config.rpc_client, &stake_pool, &pool_withdraw_authority, pool_amount, )? 
}; // Construct transaction to withdraw from withdraw_accounts account list let mut instructions: Vec<Instruction> = vec![]; let user_transfer_authority = Keypair::new(); // ephemeral keypair just to do the transfer let mut signers = vec![ config.fee_payer.as_ref(), config.token_owner.as_ref(), &user_transfer_authority, ]; let stake_receiver_account = Keypair::new(); // Will be added to signers if creating new account instructions.push( // Approve spending token spl_token::instruction::approve( &spl_token::id(), &pool_token_account, &user_transfer_authority.pubkey(), &config.token_owner.pubkey(), &[], pool_amount, )?, ); // Use separate mutable variable because withdraw might create a new account let mut stake_receiver: Option<Pubkey> = *stake_receiver_param; let mut total_rent_free_balances = 0; // Go through prepared accounts and withdraw/claim them for withdraw_account in withdraw_accounts { // Convert pool tokens amount to lamports let sol_withdraw_amount = stake_pool .calc_lamports_withdraw_amount(withdraw_account.pool_amount) .unwrap(); println!( "Withdrawing from account {}, amount {}, {} pool tokens", withdraw_account.address, Sol(sol_withdraw_amount), spl_token::amount_to_ui_amount(withdraw_account.pool_amount, pool_mint.decimals), ); if stake_receiver.is_none() { // Account for tokens not specified, creating one println!( "Creating account to receive stake {}", stake_receiver_account.pubkey() ); let stake_receiver_account_balance = config .rpc_client .get_minimum_balance_for_rent_exemption(STAKE_STATE_LEN)?; instructions.push( // Creating new account system_instruction::create_account( &config.fee_payer.pubkey(), &stake_receiver_account.pubkey(), stake_receiver_account_balance, STAKE_STATE_LEN as u64, &stake_program::id(), ), ); signers.push(&stake_receiver_account); total_rent_free_balances += stake_receiver_account_balance; stake_receiver = Some(stake_receiver_account.pubkey()); } instructions.push(spl_stake_pool::instruction::withdraw( &spl_stake_pool::id(), &stake_pool_address, &stake_pool.validator_list, &pool_withdraw_authority, &withdraw_account.address, &stake_receiver.unwrap(), // Cannot be none at this point &config.staker.pubkey(), &user_transfer_authority.pubkey(), &pool_token_account, &stake_pool.pool_mint, &spl_token::id(), withdraw_account.pool_amount, )); } let mut transaction = Transaction::new_with_payer(&instructions, Some(&config.fee_payer.pubkey())); let (recent_blockhash, fee_calculator) = config.rpc_client.get_recent_blockhash()?; check_fee_payer_balance( config, total_rent_free_balances + fee_calculator.calculate_fee(&transaction.message()), )?; unique_signers!(signers); transaction.sign(&signers, recent_blockhash); send_transaction(&config, transaction)?; Ok(()) } fn command_set_manager( config: &Config, stake_pool_address: &Pubkey, new_manager: &Option<Pubkey>, new_fee_receiver: &Option<Pubkey>, ) -> CommandResult { let stake_pool = get_stake_pool(&config.rpc_client, stake_pool_address)?; // If new accounts are missing in the arguments use the old ones let new_manager = match new_manager { None => stake_pool.manager, Some(value) => *value, }; let new_fee_receiver = match new_fee_receiver { None => stake_pool.manager_fee_account, Some(value) => { // Check for fee receiver being a valid token account and have to same mint as the stake pool let token_account = get_token_account(&config.rpc_client, value, &stake_pool.pool_mint)?; if token_account.mint != stake_pool.pool_mint { return Err("Fee receiver account belongs to a different mint" .to_string() 
.into()); } *value } }; let mut transaction = Transaction::new_with_payer( &[spl_stake_pool::instruction::set_manager( &spl_stake_pool::id(), &stake_pool_address, &config.manager.pubkey(), &new_manager, &new_fee_receiver, )], Some(&config.fee_payer.pubkey()), ); let (recent_blockhash, fee_calculator) = config.rpc_client.get_recent_blockhash()?; check_fee_payer_balance(config, fee_calculator.calculate_fee(&transaction.message()))?; let mut signers = vec![config.fee_payer.as_ref(), config.manager.as_ref()]; unique_signers!(signers); transaction.sign(&signers, recent_blockhash); send_transaction(&config, transaction)?; Ok(()) } fn command_set_staker( config: &Config, stake_pool_address: &Pubkey, new_staker: &Pubkey, ) -> CommandResult { let mut transaction = Transaction::new_with_payer( &[spl_stake_pool::instruction::set_staker( &spl_stake_pool::id(), &stake_pool_address, &config.manager.pubkey(), &new_staker, )], Some(&config.fee_payer.pubkey()), ); let (recent_blockhash, fee_calculator) = config.rpc_client.get_recent_blockhash()?; check_fee_payer_balance(config, fee_calculator.calculate_fee(&transaction.message()))?; let mut signers = vec![config.fee_payer.as_ref(), config.manager.as_ref()]; unique_signers!(signers); transaction.sign(&signers, recent_blockhash); send_transaction(&config, transaction)?; Ok(()) } fn command_set_fee(config: &Config, stake_pool_address: &Pubkey, new_fee: Fee) -> CommandResult { let mut transaction = Transaction::new_with_payer( &[spl_stake_pool::instruction::set_fee( &spl_stake_pool::id(), &stake_pool_address, &config.manager.pubkey(), new_fee, )], Some(&config.fee_payer.pubkey()), ); let (recent_blockhash, fee_calculator) = config.rpc_client.get_recent_blockhash()?; check_fee_payer_balance(config, fee_calculator.calculate_fee(&transaction.message()))?; let mut signers = vec![config.fee_payer.as_ref(), config.manager.as_ref()]; unique_signers!(signers); transaction.sign(&signers, recent_blockhash); send_transaction(&config, transaction)?; Ok(()) } fn main() { solana_logger::setup_with_default("solana=info"); let matches = App::new(crate_name!()) .about(crate_description!()) .version(crate_version!()) .setting(AppSettings::SubcommandRequiredElseHelp) .arg({ let arg = Arg::with_name("config_file") .short("C") .long("config") .value_name("PATH") .takes_value(true) .global(true) .help("Configuration file to use"); if let Some(ref config_file) = *solana_cli_config::CONFIG_FILE { arg.default_value(&config_file) } else { arg } }) .arg( Arg::with_name("verbose") .long("verbose") .short("v") .takes_value(false) .global(true) .help("Show additional information"), ) .arg( Arg::with_name("dry_run") .long("dry-run") .takes_value(false) .global(true) .help("Simulate transaction instead of executing"), ) .arg( Arg::with_name("no_update") .long("no-update") .takes_value(false) .global(true) .help("Do not automatically update the stake pool if needed"), ) .arg( Arg::with_name("json_rpc_url") .long("url") .value_name("URL") .takes_value(true) .validator(is_url) .help("JSON RPC URL for the cluster. Default from the configuration file."), ) .arg( Arg::with_name("staker") .long("staker") .value_name("KEYPAIR") .validator(is_keypair) .takes_value(true) .help( "Specify the stake pool staker. \ This may be a keypair file, the ASK keyword. \ Defaults to the client keypair.", ), ) .arg( Arg::with_name("manager") .long("manager") .value_name("KEYPAIR") .validator(is_keypair) .takes_value(true) .help( "Specify the stake pool manager. \ This may be a keypair file, the ASK keyword. 
\ Defaults to the client keypair.", ), ) .arg( Arg::with_name("depositor") .long("depositor") .value_name("KEYPAIR") .validator(is_keypair) .takes_value(true) .help( "Specify the stake pool depositor. \ This may be a keypair file, the ASK keyword.", ), ) .arg( Arg::with_name("token_owner") .long("token-owner") .value_name("KEYPAIR") .validator(is_keypair) .takes_value(true) .help( "Specify the owner of the pool token account. \ This may be a keypair file, the ASK keyword. \ Defaults to the client keypair.", ), ) .arg( Arg::with_name("fee_payer") .long("fee-payer") .value_name("KEYPAIR") .validator(is_keypair) .takes_value(true) .help( "Specify the fee-payer account. \ This may be a keypair file, the ASK keyword. \ Defaults to the client keypair.", ), ) .subcommand(SubCommand::with_name("create-pool") .about("Create a new stake pool") .arg( Arg::with_name("fee_numerator") .long("fee-numerator") .short("n") .validator(is_parsable::<u64>) .value_name("NUMERATOR") .takes_value(true) .required(true) .help("Fee numerator, fee amount is numerator divided by denominator."), ) .arg( Arg::with_name("fee_denominator") .long("fee-denominator") .short("d") .validator(is_parsable::<u64>) .value_name("DENOMINATOR") .takes_value(true) .required(true) .help("Fee denominator, fee amount is numerator divided by denominator."), ) .arg( Arg::with_name("max_validators") .long("max-validators") .short("m") .validator(is_parsable::<u32>) .value_name("NUMBER") .takes_value(true) .required(true) .help("Max number of validators included in the stake pool"), ) .arg( Arg::with_name("deposit_authority") .long("deposit-authority") .short("a") .validator(is_pubkey) .value_name("DEPOSIT_AUTHORITY_ADDRESS") .takes_value(true) .help("Deposit authority required to sign all deposits into the stake pool"), ) .arg( Arg::with_name("pool_keypair") .long("pool-keypair") .short("p") .validator(is_keypair_or_ask_keyword) .value_name("PATH") .takes_value(true) .help("Stake pool keypair [default: new keypair]"), ) .arg( Arg::with_name("mint_keypair") .long("mint-keypair") .validator(is_keypair_or_ask_keyword) .value_name("PATH") .takes_value(true) .help("Stake pool mint keypair [default: new keypair]"), ) ) .subcommand(SubCommand::with_name("create-validator-stake") .about("Create a new stake account to use with the pool. Must be signed by the pool staker.") .arg( Arg::with_name("pool") .index(1) .validator(is_pubkey) .value_name("POOL_ADDRESS") .takes_value(true) .required(true) .help("Stake pool address"), ) .arg( Arg::with_name("vote_account") .index(2) .validator(is_pubkey) .value_name("VOTE_ACCOUNT_ADDRESS") .takes_value(true) .required(true) .help("The validator vote account that this stake will be delegated to"), ) ) .subcommand(SubCommand::with_name("add-validator") .about("Add validator account to the stake pool. Must be signed by the pool staker.") .arg( Arg::with_name("pool") .index(1) .validator(is_pubkey) .value_name("POOL_ADDRESS") .takes_value(true) .required(true) .help("Stake pool address"), ) .arg( Arg::with_name("vote_account") .index(2) .validator(is_pubkey) .value_name("VOTE_ACCOUNT_ADDRESS") .takes_value(true) .required(true) .help("The validator vote account that the stake is delegated to"), ) ) .subcommand(SubCommand::with_name("remove-validator") .about("Remove validator account from the stake pool. 
Must be signed by the pool staker.") .arg( Arg::with_name("pool") .index(1) .validator(is_pubkey) .value_name("POOL_ADDRESS") .takes_value(true) .required(true) .help("Stake pool address"), ) .arg( Arg::with_name("vote_account") .index(2) .validator(is_pubkey) .value_name("VOTE_ACCOUNT_ADDRESS") .takes_value(true) .required(true) .help("Vote account for the validator to remove from the pool"), ) .arg( Arg::with_name("new_authority") .long("new-authority") .validator(is_pubkey) .value_name("ADDRESS") .takes_value(true) .help("New authority to set as Staker and Withdrawer in the stake account removed from the pool. Defaults to the wallet owner pubkey."), ) ) .subcommand(SubCommand::with_name("increase-validator-stake") .about("Increase stake to a validator, drawing from the stake pool reserve. Must be signed by the pool staker.") .arg( Arg::with_name("pool") .index(1) .validator(is_pubkey) .value_name("POOL_ADDRESS") .takes_value(true) .required(true) .help("Stake pool address"), ) .arg( Arg::with_name("vote_account") .index(2) .validator(is_pubkey) .value_name("VOTE_ACCOUNT_ADDRESS") .takes_value(true) .required(true) .help("Vote account for the validator to increase stake to"), ) .arg( Arg::with_name("amount") .index(3) .validator(is_amount) .value_name("AMOUNT") .takes_value(true) .help("Amount in SOL to add to the validator stake account. Must be at least the rent-exempt amount for a stake plus 1 SOL for merging."), ) ) .subcommand(SubCommand::with_name("decrease-validator-stake") .about("Decrease stake to a validator, splitting from the active stake. Must be signed by the pool staker.") .arg( Arg::with_name("pool") .index(1) .validator(is_pubkey) .value_name("POOL_ADDRESS") .takes_value(true) .required(true) .help("Stake pool address"), ) .arg( Arg::with_name("vote_account") .index(2) .validator(is_pubkey) .value_name("VOTE_ACCOUNT_ADDRESS") .takes_value(true) .required(true) .help("Vote account for the validator to decrease stake from"), ) .arg( Arg::with_name("amount") .index(3) .validator(is_amount) .value_name("AMOUNT") .takes_value(true) .help("Amount in lamports to remove from the validator stake account. Must be at least the rent-exempt amount for a stake."), ) ) .subcommand(SubCommand::with_name("set-preferred-validator") .about("Set the preferred validator for deposits or withdrawals. 
Must be signed by the pool staker.") .arg( Arg::with_name("pool") .index(1) .validator(is_pubkey) .value_name("POOL_ADDRESS") .takes_value(true) .required(true) .help("Stake pool address"), ) .arg( Arg::with_name("preferred_type") .index(2) .value_name("OPERATION") .possible_values(&["deposit", "withdraw"]) // PreferredValidatorType enum .takes_value(true) .required(true) .help("Operation for which to restrict the validator"), ) .arg( Arg::with_name("vote_account") .long("vote-account") .validator(is_pubkey) .value_name("VOTE_ACCOUNT_ADDRESS") .takes_value(true) .help("Vote account for the validator that users must deposit into."), ) .arg( Arg::with_name("unset") .long("unset") .takes_value(false) .help("Unset the preferred validator."), ) .group(ArgGroup::with_name("validator") .arg("vote_account") .arg("unset") .required(true) ) ) .subcommand(SubCommand::with_name("deposit") .about("Add stake account to the stake pool") .arg( Arg::with_name("pool") .index(1) .validator(is_pubkey) .value_name("POOL_ADDRESS") .takes_value(true) .required(true) .help("Stake pool address"), ) .arg( Arg::with_name("stake_account") .index(2) .validator(is_pubkey) .value_name("STAKE_ACCOUNT_ADDRESS") .takes_value(true) .required(true) .help("Stake address to join the pool"), ) .arg( Arg::with_name("token_receiver") .long("token-receiver") .validator(is_pubkey) .value_name("ADDRESS") .takes_value(true) .help("Account to receive pool token. Must be initialized account of the stake pool token. \ Defaults to the fee payer's associated pool token account."), ) ) .subcommand(SubCommand::with_name("list") .about("List stake accounts managed by this pool") .arg( Arg::with_name("pool") .index(1) .validator(is_pubkey) .value_name("POOL_ADDRESS") .takes_value(true) .required(true) .help("Stake pool address."), ) ) .subcommand(SubCommand::with_name("update") .about("Updates all balances in the pool after validator stake accounts receive rewards.") .arg( Arg::with_name("pool") .index(1) .validator(is_pubkey) .value_name("POOL_ADDRESS") .takes_value(true) .required(true) .help("Stake pool address."), ) .arg( Arg::with_name("force") .long("force") .takes_value(false) .help("Update all balances, even if it has already been performed this epoch."), ) .arg( Arg::with_name("no_merge") .long("no-merge") .takes_value(false) .help("Do not automatically merge transient stakes. Useful if the stake pool is in an expected state, but the balances still need to be updated."), ) ) .subcommand(SubCommand::with_name("withdraw") .about("Withdraw amount from the stake pool") .arg( Arg::with_name("pool") .index(1) .validator(is_pubkey) .value_name("POOL_ADDRESS") .takes_value(true) .required(true) .help("Stake pool address."), ) .arg( Arg::with_name("amount") .index(2) .validator(is_amount) .value_name("AMOUNT") .takes_value(true) .required(true) .help("Amount of pool tokens to withdraw for activated stake."), ) .arg( Arg::with_name("pool_account") .long("pool-account") .validator(is_pubkey) .value_name("ADDRESS") .takes_value(true) .help("Pool token account to withdraw tokens from. Must be owned by the fee payer. Defaults to their associated token account."), ) .arg( Arg::with_name("stake_receiver") .long("stake-receiver") .validator(is_pubkey) .value_name("STAKE_ACCOUNT_ADDRESS") .takes_value(true) .help("Stake account to receive SOL from the stake pool. 
Defaults to a new stake account."), ) .arg( Arg::with_name("vote_account") .long("vote-account") .validator(is_pubkey) .value_name("VOTE_ACCOUNT_ADDRESS") .takes_value(true) .help("Validator to withdraw from. Defaults to the largest validator stakes in the pool."), ) .arg( Arg::with_name("use_reserve") .long("use-reserve") .takes_value(false) .help("Withdraw from the stake pool's reserve. Only possible if all validator stakes are at the minimum possible amount."), ) .group(ArgGroup::with_name("withdraw_from") .arg("use_reserve") .arg("vote_account") ) ) .subcommand(SubCommand::with_name("set-manager") .about("Change manager or fee receiver account for the stake pool. Must be signed by the current manager.") .arg( Arg::with_name("pool") .index(1) .validator(is_pubkey) .value_name("POOL_ADDRESS") .takes_value(true) .required(true) .help("Stake pool address."), ) .arg( Arg::with_name("new_manager") .long("new-manager") .validator(is_pubkey) .value_name("ADDRESS") .takes_value(true) .help("Public key for the new stake pool manager."), ) .arg( Arg::with_name("new_fee_receiver") .long("new-fee-receiver") .validator(is_pubkey) .value_name("ADDRESS") .takes_value(true) .help("Public key for the new account to set as the stake pool fee receiver."), ) .group(ArgGroup::with_name("new_accounts") .arg("new_manager") .arg("new_fee_receiver") .required(true) .multiple(true) ) ) .subcommand(SubCommand::with_name("set-staker") .about("Change staker account for the stake pool. Must be signed by the manager or current staker.") .arg( Arg::with_name("pool") .index(1) .validator(is_pubkey) .value_name("POOL_ADDRESS") .takes_value(true) .required(true) .help("Stake pool address."), ) .arg( Arg::with_name("new_staker") .index(2) .validator(is_pubkey) .value_name("ADDRESS") .takes_value(true) .help("Public key for the new stake pool staker."), ) ) .subcommand(SubCommand::with_name("set-fee") .about("Change the fee assessed by the stake pool. 
Must be signed by the manager.") .arg( Arg::with_name("pool") .index(1) .validator(is_pubkey) .value_name("POOL_ADDRESS") .takes_value(true) .required(true) .help("Stake pool address."), ) .arg( Arg::with_name("fee_numerator") .index(2) .validator(is_parsable::<u64>) .value_name("NUMERATOR") .takes_value(true) .required(true) .help("Fee numerator, fee amount is numerator divided by denominator."), ) .arg( Arg::with_name("fee_denominator") .index(3) .validator(is_parsable::<u64>) .value_name("DENOMINATOR") .takes_value(true) .required(true) .help("Fee denominator, fee amount is numerator divided by denominator."), ) ) .get_matches(); let mut wallet_manager = None; let config = { let cli_config = if let Some(config_file) = matches.value_of("config_file") { solana_cli_config::Config::load(config_file).unwrap_or_default() } else { solana_cli_config::Config::default() }; let json_rpc_url = value_t!(matches, "json_rpc_url", String) .unwrap_or_else(|_| cli_config.json_rpc_url.clone()); let staker = signer_from_path( &matches, &cli_config.keypair_path, "staker", &mut wallet_manager, ) .unwrap_or_else(|e| { eprintln!("error: {}", e); exit(1); }); let depositor = if matches.is_present("depositor") { Some( signer_from_path( &matches, &cli_config.keypair_path, "depositor", &mut wallet_manager, ) .unwrap_or_else(|e| { eprintln!("error: {}", e); exit(1); }), ) } else { None }; let manager = signer_from_path( &matches, &cli_config.keypair_path, "manager", &mut wallet_manager, ) .unwrap_or_else(|e| { eprintln!("error: {}", e); exit(1); }); let token_owner = signer_from_path( &matches, &cli_config.keypair_path, "token_owner", &mut wallet_manager, ) .unwrap_or_else(|e| { eprintln!("error: {}", e); exit(1); }); let fee_payer = signer_from_path( &matches, &cli_config.keypair_path, "fee_payer", &mut wallet_manager, ) .unwrap_or_else(|e| { eprintln!("error: {}", e); exit(1); }); let verbose = matches.is_present("verbose"); let dry_run = matches.is_present("dry_run"); let no_update = matches.is_present("no_update"); Config { rpc_client: RpcClient::new_with_commitment(json_rpc_url, CommitmentConfig::confirmed()), verbose, manager, staker, depositor, token_owner, fee_payer, dry_run, no_update, } }; let _ = match matches.subcommand() { ("create-pool", Some(arg_matches)) => { let deposit_authority = pubkey_of(arg_matches, "deposit_authority"); let numerator = value_t_or_exit!(arg_matches, "fee_numerator", u64); let denominator = value_t_or_exit!(arg_matches, "fee_denominator", u64); let max_validators = value_t_or_exit!(arg_matches, "max_validators", u32); let pool_keypair = keypair_of(arg_matches, "pool_keypair"); let mint_keypair = keypair_of(arg_matches, "mint_keypair"); command_create_pool( &config, deposit_authority, Fee { denominator, numerator, }, max_validators, pool_keypair, mint_keypair, ) } ("create-validator-stake", Some(arg_matches)) => { let stake_pool_address = pubkey_of(arg_matches, "pool").unwrap(); let vote_account_address = pubkey_of(arg_matches, "vote_account").unwrap(); command_vsa_create(&config, &stake_pool_address, &vote_account_address) } ("add-validator", Some(arg_matches)) => { let stake_pool_address = pubkey_of(arg_matches, "pool").unwrap(); let vote_account_address = pubkey_of(arg_matches, "vote_account").unwrap(); command_vsa_add(&config, &stake_pool_address, &vote_account_address) } ("remove-validator", Some(arg_matches)) => { let stake_pool_address = pubkey_of(arg_matches, "pool").unwrap(); let vote_account = pubkey_of(arg_matches, "vote_account").unwrap(); let new_authority: 
Option<Pubkey> = pubkey_of(arg_matches, "new_authority"); command_vsa_remove(&config, &stake_pool_address, &vote_account, &new_authority) } ("increase-validator-stake", Some(arg_matches)) => { let stake_pool_address = pubkey_of(arg_matches, "pool").unwrap(); let vote_account = pubkey_of(arg_matches, "vote_account").unwrap(); let amount = value_t_or_exit!(arg_matches, "amount", f64); command_increase_validator_stake(&config, &stake_pool_address, &vote_account, amount) } ("decrease-validator-stake", Some(arg_matches)) => { let stake_pool_address = pubkey_of(arg_matches, "pool").unwrap(); let vote_account = pubkey_of(arg_matches, "vote_account").unwrap(); let amount = value_t_or_exit!(arg_matches, "amount", f64); command_decrease_validator_stake(&config, &stake_pool_address, &vote_account, amount) } ("set-preferred-validator", Some(arg_matches)) => { let stake_pool_address = pubkey_of(arg_matches, "pool").unwrap(); let preferred_type = match arg_matches.value_of("preferred_type").unwrap() { "deposit" => PreferredValidatorType::Deposit, "withdraw" => PreferredValidatorType::Withdraw, _ => unreachable!(), }; let vote_account = pubkey_of(arg_matches, "vote_account"); let _unset = arg_matches.is_present("unset"); // since unset and vote_account can't both be set, if unset is set // then vote_account will be None, which is valid for the program command_set_preferred_validator( &config, &stake_pool_address, preferred_type, vote_account, ) } ("deposit", Some(arg_matches)) => { let stake_pool_address = pubkey_of(arg_matches, "pool").unwrap(); let stake_account = pubkey_of(arg_matches, "stake_account").unwrap(); let token_receiver: Option<Pubkey> = pubkey_of(arg_matches, "token_receiver"); command_deposit( &config, &stake_pool_address, &stake_account, &token_receiver, ) } ("list", Some(arg_matches)) => { let stake_pool_address = pubkey_of(arg_matches, "pool").unwrap(); command_list(&config, &stake_pool_address) } ("update", Some(arg_matches)) => { let stake_pool_address = pubkey_of(arg_matches, "pool").unwrap(); let no_merge = arg_matches.is_present("no_merge"); let force = arg_matches.is_present("force"); command_update(&config, &stake_pool_address, force, no_merge) } ("withdraw", Some(arg_matches)) => { let stake_pool_address = pubkey_of(arg_matches, "pool").unwrap(); let vote_account = pubkey_of(arg_matches, "vote_account"); let pool_account = pubkey_of(arg_matches, "pool_account"); let pool_amount = value_t_or_exit!(arg_matches, "amount", f64); let stake_receiver = pubkey_of(arg_matches, "stake_receiver"); let use_reserve = arg_matches.is_present("use_reserve"); command_withdraw( &config, &stake_pool_address, use_reserve, &vote_account, &stake_receiver, &pool_account, pool_amount, ) } ("set-manager", Some(arg_matches)) => { let stake_pool_address = pubkey_of(arg_matches, "pool").unwrap(); let new_manager: Option<Pubkey> = pubkey_of(arg_matches, "new_manager"); let new_fee_receiver: Option<Pubkey> = pubkey_of(arg_matches, "new_fee_receiver"); command_set_manager( &config, &stake_pool_address, &new_manager, &new_fee_receiver, ) } ("set-staker", Some(arg_matches)) => { let stake_pool_address = pubkey_of(arg_matches, "pool").unwrap(); let new_staker = pubkey_of(arg_matches, "new_staker").unwrap(); command_set_staker(&config, &stake_pool_address, &new_staker) } ("set-fee", Some(arg_matches)) => { let stake_pool_address = pubkey_of(arg_matches, "pool").unwrap(); let numerator = value_t_or_exit!(arg_matches, "fee_numerator", u64); let denominator = value_t_or_exit!(arg_matches, "fee_denominator", u64); 
let new_fee = Fee { denominator, numerator, }; command_set_fee(&config, &stake_pool_address, new_fee) } _ => unreachable!(), } .map_err(|err| { eprintln!("{}", err); exit(1); }); }
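// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original CLI): the pattern behind the
// `unique_signers!` macro above -- sort signers by pubkey, then drop
// duplicates so a keypair filling several roles (e.g. fee payer and staker)
// only signs once. `dedup_by_key` is used here instead of the macro's
// `dedup` purely so the sketch needs no equality on the trait object itself.
#[cfg(test)]
mod unique_signers_sketch {
    use solana_sdk::signature::{Keypair, Signer};

    #[test]
    fn one_signature_slot_per_pubkey() {
        let payer = Keypair::new();
        // The same keypair appears twice, as both fee payer and staker.
        let mut signers: Vec<&dyn Signer> = vec![&payer, &payer];
        signers.sort_by_key(|s| s.pubkey());
        signers.dedup_by_key(|s| s.pubkey());
        assert_eq!(signers.len(), 1);
    }
}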
37.138043
167
0.561609
142aed079b20a0efa13d3199e6b59af2ca0f2039
18,475
use postgis::ewkb::AsEwkbPoint; use postgis::ewkb::EwkbWrite; use regex::{Regex, RegexSet}; use crate::{hecate, types::name::InputName, Context, Name, Names, Source}; /// A representation of a single Address #[derive(Debug)] pub struct Address { /// An optional identifier for the address pub id: Option<i64>, pub version: i64, /// The address number, can be numeric or semi-numeric (100 vs 100a) pub number: String, /// Vector of all street name synonyms pub names: Names, /// String source/provider/timestamp for the given data pub source: String, /// Should the feature be output pub output: bool, /// Should the address feature be used to generate interpolation pub interpolate: bool, /// JSON representation of properties pub props: serde_json::Map<String, serde_json::Value>, /// Simple representation of Lng/Lat geometry pub geom: geojson::PointType, } impl Address { pub fn new(feat: geojson::GeoJson, context: &Context) -> Result<Self, String> { let feat = match feat { geojson::GeoJson::Feature(feat) => feat, _ => { return Err(String::from("Not a GeoJSON Feature")); } }; let mut props = match feat.properties { Some(props) => props, None => { return Err(String::from("Feature has no properties")); } }; let number = get_number(&mut props)?; let version = match feat.foreign_members { Some(mut props) => get_version(&mut props)?, None => 0, }; let source = get_source(&mut props)?; let interpolate = get_interpolate(&mut props)?; let output = get_output(&mut props)?; let geom = match feat.geometry { Some(geom) => match geom.value { geojson::Value::Point(pt) => { if pt.len() != 2 { return Err(String::from("Geometry must have 2 coordinates")); } if pt[0] < -180.0 || pt[0] > 180.0 { return Err(String::from("Geometry exceeds +/-180deg coord bounds")); } else if pt[1] < -85.0 || pt[1] > 85.0 { return Err(String::from("Geometry exceeds +/-85deg coord bounds")); } pt } _ => { return Err(String::from("Addresses must have Point geometry")); } }, None => { return Err(String::from("Addresses must have geometry")); } }; lazy_static! { static ref STREET_KEY: String = String::from("street"); } let street = match props.remove(&*STREET_KEY) { Some(street) => { props.insert(STREET_KEY.clone(), street.clone()); Some(street) } None => None, }; let names = Names::from_value(street, Some(Source::Address), &context)?; if names.names.len() == 0 { return Err(String::from("Feature has no valid non-whitespace name")); } let mut addr = Address { id: match feat.id { Some(geojson::feature::Id::Number(id)) => id.as_i64(), _ => None, }, number: number, version: version, names: names, output: output, source: source, interpolate: interpolate, props: props, geom: geom, }; addr.std()?; Ok(addr) } /// /// Construct an address instance via a Row JSON Value /// pub fn from_value(value: serde_json::Value) -> Result<Self, String> { let mut value = match value { serde_json::Value::Object(obj) => obj, _ => { return Err(String::from( "Address::from_value value must be JSON Object", )); } }; lazy_static! { static ref NAMES_KEY: String = String::from("names"); } let names: Names = match value.remove(&*NAMES_KEY) { Some(names) => { let names: Vec<Name> = match serde_json::from_value(names) { Ok(names) => names, Err(err) => { return Err(format!("Names Conversion Error: {}", err.to_string())); } }; Names { names: names } } None => { return Err(String::from("names key/value is required")); } }; lazy_static! 
{ static ref PROPS_KEY: String = String::from("props"); } let props = match value.remove(&*PROPS_KEY) { Some(props) => match props { serde_json::Value::Object(obj) => obj, _ => { return Err(String::from( "Address::from_value value must be JSON Object", )); } }, None => { return Err(String::from("props key/value is required")); } }; lazy_static! { static ref GEOM_KEY: String = String::from("geom"); } let geom = match value.remove(&*GEOM_KEY) { Some(geom) => match geom { serde_json::value::Value::String(geom) => match geom.parse::<geojson::GeoJson>() { Ok(geom) => match geom { geojson::GeoJson::Geometry(geom) => match geom.value { geojson::Value::Point(pt) => pt, _ => { return Err(String::from("Geometry must be point type")); } }, _ => { return Err(String::from("Geometry must be point type")); } }, Err(err) => { return Err(format!("geom parse error: {}", err.to_string())); } }, _ => { return Err(String::from("geom only supports TEXT type")); } }, None => { return Err(String::from("geom key/value is required")); } }; Ok(Address { id: get_id(&mut value)?, number: get_number(&mut value)?, version: get_version(&mut value)?, names: names, output: get_output(&mut value)?, source: get_source(&mut value)?, interpolate: get_interpolate(&mut value)?, props: props, geom: geom, }) } pub fn std(&mut self) -> Result<(), String> { self.number = self.number.to_lowercase(); // Remove 1/2 Numbers from addresses as they are not currently supported lazy_static! { static ref HALF: Regex = Regex::new(r"\s1/2$").unwrap(); static ref UNIT: Regex = Regex::new(r"^(?P<num>\d+)\s(?P<unit>[a-z])$").unwrap(); static ref SUPPORTED: RegexSet = RegexSet::new(&[ r"^\d+[a-z]?$", r"^(\d+)-(\d+)[a-z]?$", r"^(\d+)([nsew])(\d+)[a-z]?$", r"^([nesw])(\d+)([nesw]\d+)?$", r"^\d+(к\d+)?(с\d+)?$" ]) .unwrap(); } self.number = HALF.replace(self.number.as_str(), "").to_string(); // Transform '123 B' = '123B' so it is supported self.number = UNIT.replace(self.number.as_str(), "$num$unit").to_string(); if !SUPPORTED.is_match(self.number.as_str()) { return Err(String::from("Number is not a supported address/unit type")); } if self.number.len() > 10 { return Err(String::from("Number should not exceed 10 chars")); } Ok(()) } /// ///Return a PG Copyable String of the feature /// ///name, number, source, props, geom /// pub fn to_tsv(self) -> String { let geom = postgis::ewkb::Point::new(self.geom[0], self.geom[1], Some(4326)) .as_ewkb() .to_hex_ewkb(); format!( "{id}\t{version}\t{names}\t{number}\t{source}\t{output}\t{interpolate}\t{props}\t{geom}\n", id = match self.id { None => String::from(""), Some(id) => id.to_string(), }, version = self.version, names = serde_json::to_string(&self.names.names).unwrap_or(String::from("")), output = self.output, interpolate = self.interpolate, number = self.number, source = self.source, props = serde_json::value::Value::from(self.props), geom = geom ) } /// ///Insert an address into a given database /// ///Only use this function for a small number or address ///features or if they are being infrequently written. 
///to_tsv with a copy stream is far more efficient /// pub fn to_db( &self, conn: &impl postgres::GenericConnection, table: impl ToString, ) -> Result<(), postgres::error::Error> { conn.execute( format!( " INSERT INTO {table} ( id, version, names, number, source, output, props, geom ) VALUES ( $1, $2, $3, $4, $5, $6, $7, ST_SetSRID(ST_MakePoint($8, $9), 4326) ) ", table = table.to_string() ) .as_str(), &[ &self.id, &self.version, &serde_json::to_value(&self.names.names).unwrap(), &self.number, &self.source, &self.output, &serde_json::value::Value::from(self.props.clone()), &self.geom[0], &self.geom[1], ], )?; Ok(()) } /// /// Outputs Hecate Compatible GeoJSON feature, /// omitting PT2ITP specific properties /// /// action: Hecate action to conditionally attach to output geojson feature /// generated: Should generated synonyms be output /// pub fn to_geojson(mut self, action: hecate::Action, generated: bool) -> geojson::Feature { let mut members: serde_json::map::Map<String, serde_json::Value> = serde_json::map::Map::new(); if action != hecate::Action::None { members.insert( String::from("version"), serde_json::value::Value::Number(serde_json::Number::from(self.version)), ); } match action { hecate::Action::Create => { members.insert( String::from("action"), serde_json::value::Value::String("create".to_string()), ); members.remove(&String::from("version")); } hecate::Action::Modify => { members.insert( String::from("action"), serde_json::value::Value::String("modify".to_string()), ); } hecate::Action::Delete => { members.insert( String::from("action"), serde_json::value::Value::String("delete".to_string()), ); } hecate::Action::Restore => { members.insert( String::from("action"), serde_json::value::Value::String("restore".to_string()), ); } _ => (), }; let names: Vec<InputName> = self .names .names .into_iter() .filter(|name| { if !generated { name.source != Some(Source::Generated) } else { true } }) .map(|name| InputName::from(name)) .collect(); self.props .insert(String::from("street"), serde_json::to_value(names).unwrap()); if self.source != String::from("") { self.props.insert( String::from("source"), serde_json::value::Value::String(self.source), ); } self.props.insert( String::from("number"), serde_json::value::Value::String(self.number), ); geojson::Feature { id: match self.id { None => None, Some(id) => Some(geojson::feature::Id::Number(serde_json::Number::from(id))), }, bbox: None, geometry: Some(geojson::Geometry { bbox: None, value: geojson::Value::Point(self.geom), foreign_members: None, }), properties: Some(self.props), foreign_members: Some(members), } } } fn get_id(map: &mut serde_json::Map<String, serde_json::Value>) -> Result<Option<i64>, String> { lazy_static! { static ref ID_KEY: String = String::from("id"); } match map.remove(&*ID_KEY) { Some(id) => match id.as_i64() { Some(id) => Ok(Some(id)), None => Err(String::from("ID must be numeric")), }, None => Ok(None), } } fn get_number(map: &mut serde_json::Map<String, serde_json::Value>) -> Result<String, String> { lazy_static! { static ref NUMBER_KEY: String = String::from("number"); } match map.get(&*NUMBER_KEY) { Some(number) => match number.clone() { serde_json::value::Value::Number(num) => Ok(String::from(num.to_string())), serde_json::value::Value::String(num) => Ok(num), _ => Err(String::from("Number property must be String or Numeric")), }, None => Err(String::from("Number property required")), } } fn get_version(map: &mut serde_json::Map<String, serde_json::Value>) -> Result<i64, String> { lazy_static! 
{ static ref VERSION_KEY: String = String::from("version"); } match map.remove(&*VERSION_KEY) { Some(version) => match version.as_i64() { Some(version) => Ok(version), _ => Err(String::from("Version must be numeric")), }, None => Ok(0), } } fn get_source(map: &mut serde_json::Map<String, serde_json::Value>) -> Result<String, String> { lazy_static! { static ref SOURCE_KEY: String = String::from("source"); } match map.get(&*SOURCE_KEY) { Some(source) => match source.clone() { serde_json::value::Value::String(source) => Ok(source), _ => Ok(String::from("")), }, None => Ok(String::from("")), } } fn get_output(map: &mut serde_json::Map<String, serde_json::Value>) -> Result<bool, String> { lazy_static! { static ref OUTPUT_KEY: String = String::from("output"); } match map.remove(&*OUTPUT_KEY) { Some(output) => match output.as_bool() { None => Ok(true), Some(output) => Ok(output), }, None => Ok(true), } } fn get_interpolate(map: &mut serde_json::Map<String, serde_json::Value>) -> Result<bool, String> { lazy_static! { static ref INTERPOLATE_KEY: String = String::from("interpolate"); } match map.remove(&*INTERPOLATE_KEY) { Some(itp) => match itp.as_bool() { None => Ok(true), Some(itp) => Ok(itp), }, None => Ok(true), } } mod tests { use super::*; use crate::Tokens; #[test] fn test_address_simple_geom() { // street value is object { let feat: geojson::GeoJson = String::from(r#"{"id":80614173,"key":null,"type":"Feature","version":3,"geometry":{"type":"Point","coordinates":[-84.7395102,39.1618162]},"properties":{"type":"residential","number":"726","source":"hamilton","street":[{"display":"Rosewynne Ct","priority":0}],"accuracy":"rooftop","override:postcode":"45002"}}"#).parse().unwrap(); let context = Context::new( String::from("us"), Some(String::from("mn")), Tokens::generate(vec![String::from("en")]), ); let addr = Address::new(feat, &context).unwrap(); assert_eq!(addr.to_tsv(), "80614173\t3\t[{\"display\":\"Rosewynne Ct\",\"priority\":-1,\"source\":\"Address\",\"tokenized\":[{\"token\":\"rosewynne\",\"token_type\":null},{\"token\":\"ct\",\"token_type\":\"Way\"}],\"freq\":1}]\t726\thamilton\ttrue\ttrue\t{\"accuracy\":\"rooftop\",\"number\":\"726\",\"override:postcode\":\"45002\",\"source\":\"hamilton\",\"street\":[{\"display\":\"Rosewynne Ct\",\"priority\":0}],\"type\":\"residential\"}\t0101000020E6100000BD039722542F55C0437BAB64B6944340\n"); } // street value is string { let feat: geojson::GeoJson = String::from(r#"{"type":"Feature","properties":{"street":"Hickory Hills Dr","number":1272,"source":"TIGER-2016","output":false},"interpolate":true, "geometry":{"type":"Point","coordinates":[-84.21414376368934,39.21812703085023]}}"#).parse().unwrap(); let context = Context::new( String::from("us"), Some(String::from("mn")), Tokens::generate(vec![String::from("en")]), ); let addr = Address::new(feat, &context).unwrap(); assert_eq!(addr.to_tsv(), "\t0\t[{\"display\":\"Hickory Hills Dr\",\"priority\":-1,\"source\":\"Address\",\"tokenized\":[{\"token\":\"hickory\",\"token_type\":null},{\"token\":\"hls\",\"token_type\":null},{\"token\":\"dr\",\"token_type\":\"Way\"}],\"freq\":1}]\t1272\tTIGER-2016\tfalse\ttrue\t{\"number\":1272,\"source\":\"TIGER-2016\",\"street\":\"Hickory Hills Dr\"}\t0101000020E6100000096C0B88B40D55C00BF02796EB9B4340\n"); } } }
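// --- Editor's sketch (not part of the original file) ---
// A worked example of what `Address::std` does to house numbers, assuming
// an `Address` value named `addr` built via `Address::new` whose fields are
// accessible from this module:
//
//     addr.number = String::from("123 B");
//     addr.std().unwrap();
//     assert_eq!(addr.number, "123b"); // lowercased first, then UNIT collapses "123 b"
//
//     addr.number = String::from("45 1/2");
//     addr.std().unwrap();
//     assert_eq!(addr.number, "45"); // HALF strips the " 1/2" suffix
//
// Numbers matching none of the SUPPORTED patterns, or longer than 10
// characters, make `std` return an Err instead.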
34.212963
509
0.485575
e255c4d47a7069db173fc31eedf3add2852d81bc
1,210
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // run-pass #![allow(stable_features)] #![allow(non_camel_case_types)] // Test that removed LLVM SIMD intrinsics continue // to work via the "AutoUpgrade" mechanism. #![feature(cfg_target_feature, repr_simd)] #![feature(platform_intrinsics, stmt_expr_attributes)] #[repr(simd)] #[derive(PartialEq, Debug)] struct i16x8(i16, i16, i16, i16, i16, i16, i16, i16); fn main() { #[cfg(target_feature = "sse2")] unsafe { extern "platform-intrinsic" { fn x86_mm_min_epi16(x: i16x8, y: i16x8) -> i16x8; } assert_eq!(x86_mm_min_epi16(i16x8(0, 1, 2, 3, 4, 5, 6, 7), i16x8(7, 6, 5, 4, 3, 2, 1, 0)), i16x8(0, 1, 2, 3, 3, 2, 1, 0)); }; }
34.571429
68
0.63719
18c1e207835cc60081387e0450c676cf86d9c31c
4,122
#![cfg_attr(feature = "strict", deny(warnings))]

extern crate text_io;
extern crate tokio;

use std::env;

use wrangler::cli::{exec, Cli, Command};
use wrangler::commands;
use wrangler::installer;
use wrangler::reporter;
use wrangler::terminal::message::{Message, StdOut};
use wrangler::terminal::styles;
use wrangler::version::background_check_for_updates;

use anyhow::Result;
use structopt::StructOpt;

fn main() -> Result<()> {
    if !cfg!(debug_assertions) {
        reporter::init();
    }
    env_logger::init();

    let latest_version_receiver = background_check_for_updates();
    if let Ok(me) = env::current_exe() {
        // If we're actually running as the installer then execute our
        // self-installation, otherwise just continue as usual.
        if me
            .file_stem()
            .and_then(|s| s.to_str())
            .expect("executable should have a filename")
            .starts_with("wrangler-init")
        {
            installer::install()?;
        }
    }
    run()?;
    if let Ok(latest_version) = latest_version_receiver.try_recv() {
        let latest_version = styles::highlight(latest_version.to_string());
        let new_version_available = format!(
            "A new version of Wrangler ({}) is available!",
            latest_version
        );
        let update_message = "You can learn more about updating here:".to_string();
        let update_docs_url = styles::url(
            "https://developers.cloudflare.com/workers/cli-wrangler/install-update#update",
        );

        StdOut::billboard(&format!(
            "{}\n{}\n{}",
            new_version_available, update_message, update_docs_url
        ));
    }
    Ok(())
}

fn run() -> Result<()> {
    let cli = Cli::from_args();
    let cli_params = cli.clone();

    match cli.command {
        Command::Config { api_key, no_verify } => exec::configure(api_key, no_verify),
        Command::Generate {
            name,
            site,
            template,
            target_type,
        } => exec::generate(name, site, template, target_type),
        Command::Init {
            name,
            site,
            target_type,
        } => exec::init(name, site, target_type),
        Command::Build => exec::build(&cli_params),
        Command::Preview {
            method,
            url,
            body,
            watch,
            headless,
        } => exec::preview(method, url, body, watch, headless, &cli_params),
        Command::Dev {
            host,
            ip,
            port,
            local_protocol,
            upstream_protocol,
        } => exec::dev(
            host,
            ip,
            port,
            local_protocol,
            upstream_protocol,
            &cli_params,
        ),
        Command::Whoami => exec::whoami(),
        Command::Publish {
            release,
            output,
            migration,
        } => exec::publish(release, output, migration, &cli_params),
        Command::Subdomain { name } => exec::subdomain(name, &cli_params),
        Command::Route(route) => exec::route(route, &cli_params),
        Command::Secret(secret) => exec::secret(secret, &cli_params),
        Command::KvNamespace(namespace) => exec::kv_namespace(namespace, &cli_params),
        Command::KvKey(key) => exec::kv_key(key, &cli_params),
        Command::KvBulk(bulk) => exec::kv_bulk(bulk, &cli_params),
        Command::Tail {
            name,
            url,
            format,
            once,
            sampling_rate,
            status,
            method,
            header,
            ip_address,
            search,
            ..
        } => exec::tail(
            name,
            url,
            format,
            once,
            sampling_rate,
            status,
            method,
            header,
            ip_address,
            search,
            &cli_params,
        ),
        Command::Login => commands::login::run(),
        Command::Report { log } => commands::report::run(log.as_deref()).map(|_| {
            eprintln!("Report submission successful. Thank you!");
        }),
    }
}
29.442857
91
0.531295
759a471948c3dbea2c354f8161245bc8745d9608
1,807
mod with_big_integer_augend; mod with_float_augend; mod with_small_integer_augend; use std::sync::Arc; use proptest::arbitrary::any; use proptest::strategy::{Just, Strategy}; use proptest::{prop_assert, prop_assert_eq}; use liblumen_alloc::erts::process::Process; use liblumen_alloc::erts::term::prelude::*; use crate::otp::erlang::add_2::native; use crate::scheduler::with_process; use crate::test::{run, strategy}; #[test] fn without_number_augend_errors_badarith() { run!( |arc_process| { ( Just(arc_process.clone()), strategy::term::is_not_number(arc_process.clone()), strategy::term::is_number(arc_process.clone()), ) }, |(arc_process, augend, addend)| { prop_assert_badarith!( native(&arc_process, augend, addend), format!( "augend ({}) and addend ({}) aren't both numbers", augend, addend ) ); Ok(()) }, ); } fn without_number_addend_errors_badarith<F, S>(source_file: &'static str, augend_strategy: F) where F: Fn(Arc<Process>) -> S, S: Strategy<Value = Term>, { run( source_file, |arc_process| { ( Just(arc_process.clone()), augend_strategy(arc_process.clone()), strategy::term::is_not_number(arc_process), ) }, |(arc_process, augend, addend)| { prop_assert_badarith!( native(&arc_process, augend, addend), format!( "augend ({}) and addend ({}) aren't both numbers", augend, addend ) ); Ok(()) }, ); }
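// --- Editor's sketch (not part of the original file) ---
// How the submodules declared at the top (with_big_integer_augend,
// with_float_augend, with_small_integer_augend) would be expected to reuse
// the generic helper above. `strategy::term::float` is illustrative only;
// the real strategy constructors live in the shared test-support module:
//
//     #[test]
//     fn without_number_addend_errors_badarith() {
//         super::without_number_addend_errors_badarith(file!(), |arc_process| {
//             strategy::term::float(arc_process)
//         });
//     }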
26.188406
93
0.526287
1c9bb401c19c031b44bec953d4c83d9d4c0104ed
7,348
// Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //! Defines the structures needed for saving/restoring balloon devices. use std::sync::atomic::AtomicUsize; use std::sync::Arc; use std::time::Duration; use timerfd::{SetTimeFlags, TimerState}; use snapshot::Persist; use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize_derive::Versionize; use vm_memory::GuestMemoryMmap; use super::*; use crate::virtio::balloon::device::{BalloonStats, ConfigSpace}; use crate::virtio::persist::VirtioDeviceState; use crate::virtio::{DeviceState, TYPE_BALLOON}; #[derive(Clone, Versionize)] // NOTICE: Any changes to this structure require a snapshot version bump. pub struct BalloonConfigSpaceState { num_pages: u32, actual_pages: u32, } #[derive(Clone, Versionize)] // NOTICE: Any changes to this structure require a snapshot version bump. pub struct BalloonStatsState { swap_in: Option<u64>, swap_out: Option<u64>, major_faults: Option<u64>, minor_faults: Option<u64>, free_memory: Option<u64>, total_memory: Option<u64>, available_memory: Option<u64>, disk_caches: Option<u64>, hugetlb_allocations: Option<u64>, hugetlb_failures: Option<u64>, } impl BalloonStatsState { fn from_stats(stats: &BalloonStats) -> Self { Self { swap_in: stats.swap_in, swap_out: stats.swap_out, major_faults: stats.major_faults, minor_faults: stats.minor_faults, free_memory: stats.free_memory, total_memory: stats.total_memory, available_memory: stats.available_memory, disk_caches: stats.disk_caches, hugetlb_allocations: stats.hugetlb_allocations, hugetlb_failures: stats.hugetlb_failures, } } fn create_stats(&self) -> BalloonStats { BalloonStats { target_pages: 0, actual_pages: 0, target_mb: 0, actual_mb: 0, swap_in: self.swap_in, swap_out: self.swap_out, major_faults: self.major_faults, minor_faults: self.minor_faults, free_memory: self.free_memory, total_memory: self.total_memory, available_memory: self.available_memory, disk_caches: self.disk_caches, hugetlb_allocations: self.hugetlb_allocations, hugetlb_failures: self.hugetlb_failures, } } } #[derive(Clone, Versionize)] // NOTICE: Any changes to this structure require a snapshot version bump. pub struct BalloonState { stats_polling_interval_s: u16, stats_desc_index: Option<u16>, latest_stats: BalloonStatsState, config_space: BalloonConfigSpaceState, virtio_state: VirtioDeviceState, } pub struct BalloonConstructorArgs { pub mem: GuestMemoryMmap, } impl Persist<'_> for Balloon { type State = BalloonState; type ConstructorArgs = BalloonConstructorArgs; type Error = super::Error; fn save(&self) -> Self::State { BalloonState { stats_polling_interval_s: self.stats_polling_interval_s, stats_desc_index: self.stats_desc_index, latest_stats: BalloonStatsState::from_stats(&self.latest_stats), config_space: BalloonConfigSpaceState { num_pages: self.config_space.num_pages, actual_pages: self.config_space.actual_pages, }, virtio_state: VirtioDeviceState::from_device(self), } } fn restore( constructor_args: Self::ConstructorArgs, state: &Self::State, ) -> std::result::Result<Self, Self::Error> { // We can safely create the balloon with arbitrary flags and // num_pages because we will overwrite them after. let mut balloon = Balloon::new(0, false, state.stats_polling_interval_s, true)?; let mut num_queues = NUM_QUEUES; // As per the virtio 1.1 specification, the statistics queue // should not exist if the statistics are not enabled. 
if state.stats_polling_interval_s == 0 { num_queues -= 1; } balloon.queues = state .virtio_state .build_queues_checked(&constructor_args.mem, TYPE_BALLOON, num_queues, QUEUE_SIZE) .map_err(|_| Self::Error::QueueRestoreError)?; balloon.interrupt_status = Arc::new(AtomicUsize::new(state.virtio_state.interrupt_status)); balloon.avail_features = state.virtio_state.avail_features; balloon.acked_features = state.virtio_state.acked_features; balloon.latest_stats = state.latest_stats.create_stats(); balloon.config_space = ConfigSpace { num_pages: state.config_space.num_pages, actual_pages: state.config_space.actual_pages, }; if state.virtio_state.activated { balloon.device_state = DeviceState::Activated(constructor_args.mem); // Restart timer if needed. if balloon.stats_enabled() { let timer_state = TimerState::Periodic { current: Duration::from_secs(state.stats_polling_interval_s as u64), interval: Duration::from_secs(state.stats_polling_interval_s as u64), }; balloon .stats_timer .set_state(timer_state, SetTimeFlags::Default); } } Ok(balloon) } } #[cfg(test)] mod tests { use super::*; use crate::virtio::device::VirtioDevice; use crate::virtio::TYPE_BALLOON; use crate::virtio::test_utils::default_mem; use std::sync::atomic::Ordering; #[test] fn test_persistence() { let guest_mem = default_mem(); let mut mem = vec![0; 4096]; let version_map = VersionMap::new(); // Create and save the balloon device. let balloon = Balloon::new(0x42, false, 2, false).unwrap(); <Balloon as Persist>::save(&balloon) .serialize(&mut mem.as_mut_slice(), &version_map, 1) .unwrap(); // Deserialize and restore the balloon device. let restored_balloon = Balloon::restore( BalloonConstructorArgs { mem: guest_mem }, &BalloonState::deserialize(&mut mem.as_slice(), &version_map, 1).unwrap(), ) .unwrap(); assert_eq!(restored_balloon.device_type(), TYPE_BALLOON); assert!(restored_balloon.restored); assert_eq!(restored_balloon.acked_features, balloon.acked_features); assert_eq!(restored_balloon.avail_features, balloon.avail_features); assert_eq!(restored_balloon.config_space, balloon.config_space); assert_eq!(restored_balloon.queues(), balloon.queues()); assert_eq!( restored_balloon.interrupt_status().load(Ordering::Relaxed), balloon.interrupt_status().load(Ordering::Relaxed) ); assert_eq!(restored_balloon.is_activated(), balloon.is_activated()); assert_eq!( restored_balloon.stats_polling_interval_s, balloon.stats_polling_interval_s ); assert_eq!(restored_balloon.stats_desc_index, balloon.stats_desc_index); assert_eq!(restored_balloon.latest_stats, balloon.latest_stats); } }
35.157895
99
0.653919
694d98bb5f1272e4ddee79dbcafdc599e224582c
3,498
use alloc::vec::*;

use crate::parse::*;
use crate::FontResult;

// Color palette information used by the COLR and sometimes SVG tables
// Microsoft: https://docs.microsoft.com/en-us/typography/opentype/spec/cpal
#[derive(Debug)]
pub struct TableCpal {
    pub header: Header,
    pub color_records: Vec<BGRA8Color>,
}

#[derive(Copy, Clone, Debug)]
pub struct BGRA8Color {
    pub b: u8,
    pub g: u8,
    pub r: u8,
    pub a: u8,
}

#[derive(Clone, Debug)]
pub struct Header {
    pub version: u16,
    pub num_palette_entries: u16,
    pub num_palettes: u16,
    pub num_color_records: u16,
    pub offset_first_color_record: u32,
    pub color_record_indicies: Vec<u16>,
}

impl TableCpal {
    pub fn new(cpal: &[u8]) -> FontResult<TableCpal> {
        let mut stream = Stream::new(cpal);
        let version = stream.read_u16();
        let header = match version {
            0x0000 | 0x0001 => Self::read_header(&mut stream, version),
            _ => return Err("Font.cpal: Unsupported cpal table version."),
        };
        let color_records = Self::read_color_records(
            &mut stream,
            header.offset_first_color_record,
            header.num_color_records,
        );
        Ok(TableCpal {
            header,
            color_records,
        })
    }

    fn read_header(stream: &mut Stream, version: u16) -> Header {
        let num_palette_entries = stream.read_u16();
        let num_palettes = stream.read_u16();
        let num_color_records = stream.read_u16();
        let offset_first_color_record = stream.read_u32();
        let mut color_record_indicies = Vec::with_capacity(num_palettes as usize);
        for _ in 0..num_palettes {
            color_record_indicies.push(stream.read_u16());
        }
        // Version 1 then has an offset palette type array, an offset palette label array, and
        // an offset palette entry label array. The latter two just provide UI names for colors
        // and palettes, none of which matters for rasterizing. The first provides flags for
        // whether a palette is usable on light and/or dark backgrounds... which could be
        // useful, maybe.
        Header {
            version,
            num_palette_entries,
            num_palettes,
            num_color_records,
            offset_first_color_record,
            color_record_indicies,
        }
    }

    fn read_color_records(
        stream: &mut Stream,
        offset_first_color_record: u32,
        num_color_records: u16,
    ) -> Vec<BGRA8Color> {
        stream.seek(offset_first_color_record as usize);
        let mut color_records = Vec::with_capacity(num_color_records as usize);
        for _ in 0..num_color_records {
            color_records.push(BGRA8Color {
                b: stream.read_u8(),
                g: stream.read_u8(),
                r: stream.read_u8(),
                a: stream.read_u8(),
            });
        }
        color_records
    }

    /// Gets the color at the given index from palette zero (which can be useful if you don't
    /// want to intelligently choose a palette)
    pub fn get_color(&self, color_index: u16) -> BGRA8Color {
        self.get_color_from_palette(0, color_index)
    }

    /// Gets the color at a given index from a specific palette
    pub fn get_color_from_palette(&self, palette: u16, color_index: u16) -> BGRA8Color {
        self.color_records
            [(self.header.color_record_indicies[palette as usize] + color_index) as usize]
    }
}
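// --- Editor's worked example (comments only, not part of the original file) ---
// How the palette indexing above plays out: with num_palettes = 2 and
// num_palette_entries = 3, `color_record_indicies` would typically be [0, 3]
// (the values come from the font file) and `color_records` would hold 6
// BGRA8Color entries. Then:
//
//     get_color_from_palette(1, 2)  // reads color_records[3 + 2], the last
//                                   // entry of the second palette
//     get_color(2)                  // reads color_records[0 + 2], i.e. the
//                                   // third entry of palette zero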
35.693878
134
0.625786
b972da65d6458c3e46506abba1f9e384b69dd93d
6,144
use super::*; use crate::providers::info::SafeAppInfo; use serde::Serialize; /// TransactionSummary - object returned for [TransactionListItem::Transaction] /// /// <details> /// <summary>Sample 1: History of executed transactions with date labels per day</summary> /// /// ```json /// { /// "next": null, /// "previous": null, /// "results": [ /// { /// "type": "DATE_LABEL", /// "timestamp": 1604620800000 /// }, /// { /// "type": "TRANSACTION", /// "transaction": { /// "id": "multisig_0x126ab4d9e87b5cba98Ddeb75Df703E83500b6B7f_0x28b4cc29c036c2df40a1ba8d684cdab736abaf7d5cb84b217428462a2b4e3318", /// "timestamp": 1604700419000, /// "txStatus": "SUCCESS", /// "txInfo": { /// "type": "Custom", /// "to": { /// "value": "0x8D29bE29923b68abfDD21e541b9374737B49cdAD", /// "name": "Gnosis Safe: Multi Send 1.1.1", /// "logoUri": "https://safe-transaction-assets.staging.gnosisdev.com/contracts/logos/0x8D29bE29923b68abfDD21e541b9374737B49cdAD.png" /// }, /// "dataSize": "580", /// "value": "0", /// "methodName": "multiSend", /// "actionCount": 6, /// "isCancellation": false /// }, /// "executionInfo": { /// "type": "MULTISIG_EXECUTION_INFO", /// "nonce": 2, /// "confirmationsRequired": 1, /// "confirmationsSubmitted": 1 /// } /// }, /// "conflictType": "None" /// }, /// { /// "type": "DATE_LABEL", /// "timestamp": 1604448000000 /// }, /// { /// "type": "TRANSACTION", /// "transaction": { /// "id": "module_0x1230B3d59858296A31053C1b8562Ecf89A2f888b_0xcd10b23687bf336d0f4c0a3383590d3d1722aaa99a41fd0d289a5f69a8266c8f_0x53b6e88b578a6313", /// "timestamp": 1604533603000, /// "txStatus": "SUCCESS", /// "txInfo": { /// "type": "Custom", /// "to": "0x8D29bE29923b68abfDD21e541b9374737B49cdAD", /// "dataSize": "260", /// "value": "0", /// "methodName": "multiSend" /// }, /// "executionInfo": { /// "type" : "MODULE_EXECUTION_INFO", /// "address": { /// "value": "0xCFbFaC74C26F8647cBDb8c5caf80BB5b32E43134" /// } /// } /// }, /// "conflictType": "None" /// }, /// { /// "type": "TRANSACTION", /// "transaction": { /// "id": "ethereum_0x126ab4d9e87b5cba98Ddeb75Df703E83500b6B7f_0x7e95b9df8b1c1385665d0bccfbd5d6f913e18915750395d84dd490c7d9be9940_0xbf9e8a462afc9675", /// "timestamp": 1604531696000, /// "txStatus": "SUCCESS", /// "txInfo": { /// "type": "Transfer", /// "sender": { /// "value": "0x05c85Ab5B09Eb8A55020d72daf6091E04e264af9" /// }, /// "recipient": { /// "value": "0x126ab4d9e87b5cba98Ddeb75Df703E83500b6B7f" /// }, /// "direction": "INCOMING", /// "transferInfo": { /// "type": "NATIVE_COIN", /// "value": "100000000000000000" /// } /// } /// }, /// "conflictType": "None" /// }, /// { /// "type": "TRANSACTION", /// "transaction": { /// "id": "creation_0x126ab4d9e87b5cba98Ddeb75Df703E83500b6B7f", /// "timestamp": 1604531396000, /// "txStatus": "SUCCESS", /// "txInfo": { /// "type": "Creation", /// "creator": { /// "value": "0x05c85Ab5B09Eb8A55020d72daf6091E04e264af9" /// }, /// "transactionHash": "0xbfe5f021d0cfaf98ec445f757802be9e86b818301e2d892bcf3a9ee5e688d37f", /// "implementation": { /// "value": "0x34CfAC646f301356fAa8B21e94227e3583Fe3F5F", /// "name": "Gnosis Safe: Mastercopy 1.1.1", /// "logoUri": "https://safe-transaction-assets.staging.gnosisdev.com/contracts/logos/0x34CfAC646f301356fAa8B21e94227e3583Fe3F5F.png" /// }, /// "factory": { /// "value": "0x76E2cFc1F5Fa8F6a5b3fC4c8F4788F0116861F9B" /// } /// } /// }, /// "conflictType": "None" /// } /// ] /// } /// ``` /// /// </details> #[derive(Serialize, Debug)] #[serde(rename_all = "camelCase")] #[cfg_attr(test, derive(PartialEq))] pub struct 
TransactionSummary { pub id: String, pub timestamp: i64, pub tx_status: TransactionStatus, pub tx_info: TransactionInfo, #[serde(skip_serializing_if = "Option::is_none")] pub execution_info: Option<ExecutionInfo>, #[serde(skip_serializing_if = "Option::is_none")] pub safe_app_info: Option<SafeAppInfo>, } #[derive(Serialize, Debug)] #[serde(tag = "type", rename_all = "SCREAMING_SNAKE_CASE")] #[cfg_attr(test, derive(PartialEq))] pub enum ExecutionInfo { Multisig(MultisigExecutionInfo), Module(ModuleExecutionInfo), } #[derive(Serialize, Debug)] #[serde(rename_all = "camelCase")] #[cfg_attr(test, derive(PartialEq))] pub struct MultisigExecutionInfo { pub nonce: u64, pub confirmations_required: u64, pub confirmations_submitted: u64, #[serde(skip_serializing_if = "Option::is_none")] pub missing_signers: Option<Vec<AddressEx>>, } #[derive(Serialize, Debug)] #[serde(rename_all = "camelCase")] #[cfg_attr(test, derive(PartialEq))] pub struct ModuleExecutionInfo { pub address: AddressEx, } #[derive(Serialize, Debug)] #[serde(tag = "type")] #[serde(rename_all = "SCREAMING_SNAKE_CASE")] #[cfg_attr(test, derive(PartialEq))] pub enum TransactionListItem { #[serde(rename_all = "camelCase")] Transaction { transaction: TransactionSummary, conflict_type: ConflictType, }, DateLabel { timestamp: i64, }, Label { label: Label, }, ConflictHeader { nonce: u64, }, } #[derive(Serialize, Debug)] #[cfg_attr(test, derive(PartialEq))] pub enum Label { Next, Queued, } #[derive(Serialize, Debug, PartialEq, Clone)] pub enum ConflictType { None, HasNext, End, }
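// --- Editor's sketch (comments only, not part of the original file) ---
// The serde attributes above produce exactly the tagged objects shown in the
// JSON sample in the doc comment. For instance,
//
//     TransactionListItem::DateLabel { timestamp: 1604620800000 }
//
// serializes to
//
//     {"type":"DATE_LABEL","timestamp":1604620800000}
//
// because `tag = "type"` injects the variant name and SCREAMING_SNAKE_CASE
// renames DateLabel to DATE_LABEL; the camelCase rename on the Transaction
// variant likewise turns `conflict_type` into "conflictType".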
30.874372
158
0.564616
e64cfe669ddeb8d6d2ab1438457834dd67a2e0a8
47,207
// Copyright 2018 TiKV Project Authors. Licensed under Apache-2.0.

use super::{Error, Result};

/// Memory-comparable encoding and decoding utility for bytes.
pub struct MemComparableByteCodec;

const MEMCMP_GROUP_SIZE: usize = 8;
const MEMCMP_PAD_BYTE: u8 = 0;

impl MemComparableByteCodec {
    /// Calculates the length of the value after encoding.
    #[inline]
    pub fn encoded_len(src_len: usize) -> usize {
        (src_len / MEMCMP_GROUP_SIZE + 1) * (MEMCMP_GROUP_SIZE + 1)
    }

    /// Encodes all bytes in the `src` into `dest` in ascending memory-comparable format.
    ///
    /// Returns the number of bytes encoded.
    ///
    /// `dest` must not overlap `src`, otherwise encoded results will be incorrect.
    ///
    /// # Panics
    ///
    /// Panics if there is not enough space in `dest` for writing encoded bytes.
    ///
    /// You can calculate required space size via `encoded_len`.
    pub fn encode_all(src: &[u8], dest: &mut [u8]) -> usize {
        // Refer: https://github.com/facebook/mysql-5.6/wiki/MyRocks-record-format#memcomparable-format
        unsafe {
            let src_len = src.len();
            let dest_len = dest.len();
            assert!(dest_len >= Self::encoded_len(src_len));

            let mut src_ptr = src.as_ptr();
            let mut dest_ptr = dest.as_mut_ptr();
            let src_ptr_end = src_ptr.add(src_len);

            // There must be 0 or more zero-padding groups and exactly 1
            // non-zero-padding group in the output.
            let zero_padding_groups = src.len() / MEMCMP_GROUP_SIZE;

            // Let's first write these zero-padding groups.
            for _ in 0..zero_padding_groups {
                std::ptr::copy_nonoverlapping(src_ptr, dest_ptr, MEMCMP_GROUP_SIZE);
                src_ptr = src_ptr.add(MEMCMP_GROUP_SIZE);
                dest_ptr = dest_ptr.add(MEMCMP_GROUP_SIZE);
                dest_ptr.write(!0);
                dest_ptr = dest_ptr.add(1);
            }

            // Then, write the last group, which should never be zero padding.
            let remaining_size = src_ptr_end.offset_from(src_ptr) as usize;
            let padding_size = MEMCMP_GROUP_SIZE - remaining_size;
            let padding_marker = !(padding_size as u8);
            std::ptr::copy_nonoverlapping(src_ptr, dest_ptr, remaining_size);
            std::ptr::write_bytes(dest_ptr.add(remaining_size), MEMCMP_PAD_BYTE, padding_size);
            dest_ptr = dest_ptr.add(MEMCMP_GROUP_SIZE);
            dest_ptr.write(padding_marker);

            (dest_ptr.offset_from(dest.as_mut_ptr()) + 1) as usize
        }
    }

    /// Performs an in-place bitwise NOT for the specified memory region.
    ///
    /// # Panics
    ///
    /// Panics if `len` exceeds `src.len()`.
    #[inline]
    fn flip_bytes_in_place(src: &mut [u8], len: usize) {
        // This is already super efficient after compiling.
        // It is even faster than a manually written "flip by 64bit".
        for k in &mut src[0..len] {
            *k = !*k;
        }
    }

    /// Encodes all bytes in the `src` into `dest` in descending memory-comparable format.
    ///
    /// Returns the number of bytes encoded.
    ///
    /// `dest` must not overlap `src`, otherwise encoded results will be incorrect.
    ///
    /// # Panics
    ///
    /// Panics if there is not enough space in `dest` for writing encoded bytes.
    ///
    /// You can calculate required space size via `encoded_len`.
    pub fn encode_all_desc(src: &[u8], dest: &mut [u8]) -> usize {
        let encoded_len = Self::encode_all(src, dest);
        Self::flip_bytes_in_place(dest, encoded_len);
        encoded_len
    }

    /// Decodes bytes in ascending memory-comparable format in the `src` into `dest`.
    ///
    /// If there are multiple encoded byte slices in `src`, only the first one will be decoded.
    ///
    /// Returns `(read_bytes, written_bytes)` where `read_bytes` is the number of bytes read in
    /// `src` and `written_bytes` is the number of bytes written in `dest`.
    ///
    /// Note that actual written data may be larger than `written_bytes`. Bytes more than
    /// `written_bytes` are junk and should be ignored.
    ///
    /// If `src == dest`, please use `try_decode_first_in_place`.
    ///
    /// # Panics
    ///
    /// Panics if `dest.len() < src.len()`, although actual written data may be less.
    ///
    /// When there is a panic, `dest` may contain partially written data.
    ///
    /// # Errors
    ///
    /// Returns `Error::UnexpectedEOF` if `src` is drained while expecting more data.
    ///
    /// Returns `Error::BadPadding` if padding in `src` is incorrect.
    ///
    /// When there is an error, `dest` may contain partially written data.
    pub fn try_decode_first(src: &[u8], dest: &mut [u8]) -> Result<(usize, usize)> {
        Self::try_decode_first_internal(
            src.as_ptr(),
            src.len(),
            dest.as_mut_ptr(),
            dest.len(),
            AscendingMemComparableCodecHelper,
        )
    }

    /// Decodes bytes in descending memory-comparable format in the `src` into `dest`.
    ///
    /// If there are multiple encoded byte slices in `src`, only the first one will be decoded.
    ///
    /// Returns `(read_bytes, written_bytes)` where `read_bytes` is the number of bytes read in
    /// `src` and `written_bytes` is the number of bytes written in `dest`.
    ///
    /// Note that actual written data may be larger than `written_bytes`. Bytes more than
    /// `written_bytes` are junk and should be ignored.
    ///
    /// If `src == dest`, please use `try_decode_first_in_place_desc`.
    ///
    /// # Panics
    ///
    /// Panics if `dest.len() < src.len()`, although actual written data may be less.
    ///
    /// When there is a panic, `dest` may contain partially written data.
    ///
    /// # Errors
    ///
    /// Returns `Error::UnexpectedEOF` if `src` is drained while expecting more data.
    ///
    /// Returns `Error::BadPadding` if padding in `src` is incorrect.
    ///
    /// When there is an error, `dest` may contain partially written data.
    pub fn try_decode_first_desc(src: &[u8], dest: &mut [u8]) -> Result<(usize, usize)> {
        let (read_bytes, written_bytes) = Self::try_decode_first_internal(
            src.as_ptr(),
            src.len(),
            dest.as_mut_ptr(),
            dest.len(),
            DescendingMemComparableCodecHelper,
        )?;
        Self::flip_bytes_in_place(dest, written_bytes);
        Ok((read_bytes, written_bytes))
    }

    /// Decodes bytes in ascending memory-comparable format in place, i.e. decoded data will
    /// overwrite the encoded data.
    ///
    /// If there are multiple encoded byte slices in `buffer`, only the first one will be decoded.
    ///
    /// Returns `(read_bytes, written_bytes)` where `read_bytes` is the number of bytes read
    /// and `written_bytes` is the number of bytes written.
    ///
    /// Note that actual written data may be larger than `written_bytes`. Bytes more than
    /// `written_bytes` are junk and should be ignored.
    ///
    /// # Errors
    ///
    /// Returns `Error::UnexpectedEOF` if `buffer` is drained while expecting more data.
    ///
    /// Returns `Error::BadPadding` if padding in `buffer` is incorrect.
    ///
    /// When there is an error, `buffer` may contain partially written data.
    pub fn try_decode_first_in_place(buffer: &mut [u8]) -> Result<(usize, usize)> {
        Self::try_decode_first_internal(
            buffer.as_ptr(),
            buffer.len(),
            buffer.as_mut_ptr(),
            buffer.len(),
            AscendingMemComparableCodecHelper,
        )
    }

    /// Decodes bytes in descending memory-comparable format in place, i.e. decoded data will
    /// overwrite the encoded data.
    ///
    /// If there are multiple encoded byte slices in `buffer`, only the first one will be decoded.
    ///
    /// Returns `(read_bytes, written_bytes)` where `read_bytes` is the number of bytes read
    /// and `written_bytes` is the number of bytes written.
    ///
    /// Note that actual written data may be larger than `written_bytes`. Bytes more than
    /// `written_bytes` are junk and should be ignored.
    ///
    /// # Errors
    ///
    /// Returns `Error::UnexpectedEOF` if `buffer` is drained while expecting more data.
    ///
    /// Returns `Error::BadPadding` if padding in `buffer` is incorrect.
    ///
    /// When there is an error, `buffer` may contain partially written data.
    pub fn try_decode_first_in_place_desc(buffer: &mut [u8]) -> Result<(usize, usize)> {
        let (read_bytes, written_bytes) = Self::try_decode_first_internal(
            buffer.as_ptr(),
            buffer.len(),
            buffer.as_mut_ptr(),
            buffer.len(),
            DescendingMemComparableCodecHelper,
        )?;
        Self::flip_bytes_in_place(buffer, written_bytes);
        Ok((read_bytes, written_bytes))
    }

    /// The internal implementation for:
    /// - `try_decode_first`
    /// - `try_decode_first_desc`
    /// - `try_decode_first_in_place`
    /// - `try_decode_first_in_place_desc`
    ///
    /// This function uses pointers to support the scenario that `src == dest`.
    ///
    /// This function also uses generics to specialize different code paths for ascending and
    /// descending decoding, which performs better than inlining a flag.
    ///
    /// Please refer to `try_decode_first` for the meaning of return values, panics and errors.
    #[inline]
    fn try_decode_first_internal<T: MemComparableCodecHelper>(
        mut src_ptr: *const u8,
        src_len: usize,
        mut dest_ptr: *mut u8,
        dest_len: usize,
        _helper: T,
    ) -> Result<(usize, usize)> {
        assert!(dest_len >= src_len);
        // Make copies of the original pointers for calculating read / written bytes.
        let src_ptr_untouched = src_ptr;
        let dest_ptr_untouched = dest_ptr;

        unsafe {
            let src_ptr_end = src_ptr.add(src_len);

            loop {
                let src_ptr_next = src_ptr.add(MEMCMP_GROUP_SIZE + 1);
                if std::intrinsics::unlikely(src_ptr_next > src_ptr_end) {
                    return Err(Error::UnexpectedEOF);
                }

                // Copy `MEMCMP_GROUP_SIZE` bytes anyway. However, we will truncate the
                // returned length according to the padding size if it is the last block.
                std::ptr::copy(src_ptr, dest_ptr, MEMCMP_GROUP_SIZE);

                let padding_size = T::parse_padding_size(*src_ptr.add(MEMCMP_GROUP_SIZE));
                src_ptr = src_ptr_next;
                dest_ptr = dest_ptr.add(MEMCMP_GROUP_SIZE);

                // If there is a padding, check whether or not it is correct.
                if std::intrinsics::unlikely(padding_size > 0) {
                    // First check padding size.
                    if std::intrinsics::unlikely(padding_size > MEMCMP_GROUP_SIZE) {
                        return Err(Error::BadPadding);
                    }

                    // Then check padding content. Using `libc::memcmp` to compare two memory
                    // blocks is faster than checking pad bytes one by one, since it compares
                    // multiple bytes at once.
                    let base_padding_ptr = dest_ptr.sub(padding_size);
                    let expected_padding_ptr = T::get_raw_padding_ptr();
                    let cmp_result = libc::memcmp(
                        base_padding_ptr as *const libc::c_void,
                        expected_padding_ptr as *const libc::c_void,
                        padding_size,
                    );
                    if std::intrinsics::unlikely(cmp_result != 0) {
                        return Err(Error::BadPadding);
                    }

                    let read_bytes = src_ptr.offset_from(src_ptr_untouched) as usize;
                    let written_bytes =
                        dest_ptr.offset_from(dest_ptr_untouched) as usize - padding_size;
                    return Ok((read_bytes, written_bytes));
                }
            }
        }
    }
}

trait MemComparableCodecHelper {
    const PADDING: [u8; MEMCMP_GROUP_SIZE];

    /// Given a raw padding size byte, interprets the padding size according to the correct order.
    fn parse_padding_size(raw_marker: u8) -> usize;

    /// Returns a pointer to the raw padding bytes in 8 bytes for the current ordering.
fn get_raw_padding_ptr() -> *const u8; } struct AscendingMemComparableCodecHelper; struct DescendingMemComparableCodecHelper; impl MemComparableCodecHelper for AscendingMemComparableCodecHelper { const PADDING: [u8; MEMCMP_GROUP_SIZE] = [MEMCMP_PAD_BYTE; MEMCMP_GROUP_SIZE]; #[inline] fn parse_padding_size(raw_marker: u8) -> usize { (!raw_marker) as usize } #[inline] fn get_raw_padding_ptr() -> *const u8 { Self::PADDING.as_ptr() } } impl MemComparableCodecHelper for DescendingMemComparableCodecHelper { const PADDING: [u8; MEMCMP_GROUP_SIZE] = [!MEMCMP_PAD_BYTE; MEMCMP_GROUP_SIZE]; #[inline] fn parse_padding_size(raw_marker: u8) -> usize { raw_marker as usize } #[inline] fn get_raw_padding_ptr() -> *const u8 { Self::PADDING.as_ptr() } } #[cfg(test)] mod tests { use rand; use super::MemComparableByteCodec; #[test] fn test_memcmp_flip_bytes() { for container_len in 0..50 { for payload_begin in 0..container_len { for payload_end in payload_begin..container_len { let mut base_container: Vec<u8> = Vec::with_capacity(container_len); for _ in 0..container_len { base_container.push(rand::random()); } let mut container = base_container.clone(); MemComparableByteCodec::flip_bytes_in_place( &mut container.as_mut_slice()[payload_begin..], payload_end - payload_begin, ); // bytes before payload_begin should not flip for i in 0..payload_begin { assert_eq!(container[i], base_container[i]); } // bytes between payload_begin and payload_end should flip for i in payload_begin..payload_end { assert_eq!(container[i], !base_container[i]); } // bytes after payload_end should not flip for i in payload_end..container_len { assert_eq!(container[i], base_container[i]); } } } } } #[test] fn test_memcmp_encoded_len() { use super::MEMCMP_GROUP_SIZE as N; let cases = vec![ (0, N + 1), (N / 2, N + 1), (N - 1, N + 1), (N, 2 * (N + 1)), (N + 1, 2 * (N + 1)), (2 * N, 3 * (N + 1)), (2 * N + 1, 3 * (N + 1)), ]; for (src_len, encoded_len) in cases { assert_eq!(MemComparableByteCodec::encoded_len(src_len), encoded_len); } } #[test] fn test_memcmp_encode_all() { // Checks whether encoded result matches expectation. 
let cases = vec![ ( vec![], vec![0, 0, 0, 0, 0, 0, 0, 0, 247], vec![255, 255, 255, 255, 255, 255, 255, 255, 8], ), ( vec![0], vec![0, 0, 0, 0, 0, 0, 0, 0, 248], vec![255, 255, 255, 255, 255, 255, 255, 255, 7], ), ( vec![1, 2, 3], vec![1, 2, 3, 0, 0, 0, 0, 0, 250], vec![254, 253, 252, 255, 255, 255, 255, 255, 5], ), ( vec![1, 2, 3, 0], vec![1, 2, 3, 0, 0, 0, 0, 0, 251], vec![254, 253, 252, 255, 255, 255, 255, 255, 4], ), ( vec![1, 2, 3, 4, 5, 6, 7], vec![1, 2, 3, 4, 5, 6, 7, 0, 254], vec![254, 253, 252, 251, 250, 249, 248, 255, 1], ), ( vec![0, 0, 0, 0, 0, 0, 0, 0], vec![0, 0, 0, 0, 0, 0, 0, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 247], vec![ 255, 255, 255, 255, 255, 255, 255, 255, 0, 255, 255, 255, 255, 255, 255, 255, 255, 8, ], ), ( vec![1, 2, 3, 4, 5, 6, 7, 8], vec![1, 2, 3, 4, 5, 6, 7, 8, 255, 0, 0, 0, 0, 0, 0, 0, 0, 247], vec![ 254, 253, 252, 251, 250, 249, 248, 247, 0, 255, 255, 255, 255, 255, 255, 255, 255, 8, ], ), ( vec![1, 2, 3, 4, 5, 6, 7, 8, 9], vec![1, 2, 3, 4, 5, 6, 7, 8, 255, 9, 0, 0, 0, 0, 0, 0, 0, 248], vec![ 254, 253, 252, 251, 250, 249, 248, 247, 0, 246, 255, 255, 255, 255, 255, 255, 255, 7, ], ), ]; for (src, expect_encoded_asc, expect_encoded_desc) in cases { let encoded_len = MemComparableByteCodec::encoded_len(src.len()); let buffer_len = encoded_len + 50; let mut base_buffer: Vec<u8> = Vec::with_capacity(buffer_len); for _ in 0..buffer_len { base_buffer.push(rand::random()); } for output_offset in 0..buffer_len { for output_slice_len in encoded_len..buffer_len - output_offset { // Test encode ascending let mut output_buffer = base_buffer.clone(); let output_len = MemComparableByteCodec::encode_all( src.as_slice(), &mut output_buffer.as_mut_slice() [output_offset..output_offset + output_slice_len], ); assert_eq!(output_len, encoded_len); assert_eq!(output_len, expect_encoded_asc.len()); // output buffer before output_offset should remain unchanged assert_eq!( &output_buffer[0..output_offset], &base_buffer[0..output_offset] ); // output buffer between output_offset and ..+encoded_len should be encoded assert_eq!( &output_buffer[output_offset..output_offset + encoded_len], expect_encoded_asc.as_slice() ); // output buffer after output_offset+encoded_len should remain unchanged assert_eq!( &output_buffer[output_offset + encoded_len..], &base_buffer[output_offset + encoded_len..] ); // Test encode descending let mut output_buffer = base_buffer.clone(); let output_len = MemComparableByteCodec::encode_all_desc( src.as_slice(), &mut output_buffer.as_mut_slice() [output_offset..output_offset + output_slice_len], ); assert_eq!(output_len, encoded_len); assert_eq!(output_len, expect_encoded_desc.len()); assert_eq!( &output_buffer[0..output_offset], &base_buffer[0..output_offset] ); assert_eq!( &output_buffer[output_offset..output_offset + encoded_len], expect_encoded_desc.as_slice() ); assert_eq!( &output_buffer[output_offset + encoded_len..], &base_buffer[output_offset + encoded_len..] ); } } } } #[test] fn test_memcmp_encode_all_panic() { let cases = vec![(0, 0), (0, 7), (0, 8), (7, 8), (8, 9), (8, 17)]; for (src_len, dest_len) in cases { let src = vec![0; src_len]; let mut dest = vec![0; dest_len]; let result = panic_hook::recover_safe(move || { let _ = MemComparableByteCodec::encode_all(src.as_slice(), dest.as_mut_slice()); }); assert!(result.is_err()); } } #[test] fn test_memcmp_try_decode_first() { use super::MEMCMP_GROUP_SIZE as N; // We have ensured correctness in `test_memcmp_encode_all`, so we use `encode_all` to // generate fixtures in different length, used for decoding. 
fn do_test( is_desc: bool, payload_len: usize, prefix_len: usize, suffix_len: usize, encoded_prefix_len: usize, encoded_suffix_len: usize, ) { let mut payload_raw: Vec<u8> = Vec::with_capacity(payload_len); for _ in 0..payload_len { payload_raw.push(rand::random()); } let encoded_len = MemComparableByteCodec::encoded_len(payload_len); let mut payload_encoded: Vec<u8> = vec![0; encoded_prefix_len + encoded_len + encoded_suffix_len]; #[allow(clippy::needless_range_loop)] for i in 0..encoded_prefix_len { payload_encoded[i] = rand::random(); } { let src = payload_raw.as_slice(); let dest = &mut payload_encoded.as_mut_slice()[encoded_prefix_len..]; if is_desc { MemComparableByteCodec::encode_all_desc(src, dest); } else { MemComparableByteCodec::encode_all(src, dest); } } #[allow(clippy::needless_range_loop)] for i in encoded_prefix_len + encoded_len..encoded_suffix_len { payload_encoded[i] = rand::random(); } let mut base_buffer: Vec<u8> = Vec::with_capacity(prefix_len + encoded_len + suffix_len); for _ in 0..prefix_len + encoded_len + encoded_suffix_len + suffix_len { base_buffer.push(rand::random()); } // Test `dest` doesn't overlap `src` let mut output_buffer = base_buffer.clone(); let output_len = { let src = &payload_encoded.as_slice()[encoded_prefix_len..]; let dest = &mut output_buffer.as_mut_slice()[prefix_len..]; if is_desc { MemComparableByteCodec::try_decode_first_desc(src, dest).unwrap() } else { MemComparableByteCodec::try_decode_first(src, dest).unwrap() } }; assert_eq!(output_len.0, encoded_len); assert_eq!(output_len.1, payload_len); assert_eq!(&output_buffer[0..prefix_len], &base_buffer[0..prefix_len]); assert_eq!( &output_buffer[prefix_len..prefix_len + payload_len], payload_raw.as_slice() ); // Although required space for output is encoded_len + encoded_suffix_len, // only first `encoded_len` bytes may be changed, so we only skip `encoded_len`. assert_eq!( &output_buffer[prefix_len + encoded_len..], &base_buffer[prefix_len + encoded_len..] ); // Test `dest` overlaps `src` let mut buffer = payload_encoded.clone(); let output_len = unsafe { let src_ptr = buffer.as_mut_ptr().add(encoded_prefix_len); let slice_len = buffer.len() - encoded_prefix_len; let src = std::slice::from_raw_parts(src_ptr, slice_len); let dest = std::slice::from_raw_parts_mut(src_ptr, slice_len); if is_desc { MemComparableByteCodec::try_decode_first_desc(src, dest).unwrap() } else { MemComparableByteCodec::try_decode_first(src, dest).unwrap() } }; assert_eq!(output_len.0, encoded_len); assert_eq!(output_len.1, payload_len); assert_eq!( &buffer[0..encoded_prefix_len], &payload_encoded[0..encoded_prefix_len] ); assert_eq!( &buffer[encoded_prefix_len..encoded_prefix_len + payload_len], payload_raw.as_slice() ); assert_eq!( &buffer[encoded_prefix_len + encoded_len..], &payload_encoded[encoded_prefix_len + encoded_len..] ); } // Whether it is descending order for is_desc in &[false, true] { // How long is the raw value for payload_len in &[ 0, 1, N - 1, N, N + 1, N * 2 - 1, N * 2, N * 2 + 1, N * 3 - 1, N * 3, N * 3 + 1, ] { // How long is the prefix prepended before the output slice for prefix_len in &[0, 1, N - 1, N, N + 1] { // How long is the suffix appended after the output slice for suffix_len in &[0, 1, N - 1, N, N + 1] { // How long is the prefix prepended before the encoded slice. // Used in overlap tests. for encoded_prefix_len in &[0, 1, N - 1, N, N + 1] { // How long is the suffix appended after the encoded slice to simulate // extra data. Decoding should ignore these extra data in src. 
for encoded_suffix_len in &[0, 1, N - 1, N, N + 1] { do_test( *is_desc, *payload_len, *prefix_len, *suffix_len, *encoded_prefix_len, *encoded_suffix_len, ); } } } } } } } #[test] fn test_memcmp_try_decode_first_error() { let cases = vec![ vec![1, 2, 3, 4], vec![0, 0, 0, 0, 0, 0, 0, 247], vec![0, 0, 0, 0, 0, 0, 0, 0, 246], vec![0, 0, 0, 0, 0, 0, 0, 1, 247], vec![1, 2, 3, 4, 5, 6, 7, 8, 0], vec![1, 2, 3, 4, 5, 6, 7, 8, 255, 1], vec![1, 2, 3, 4, 5, 6, 7, 8, 255, 1, 2, 3, 4, 5, 6, 7, 8], vec![1, 2, 3, 4, 5, 6, 7, 8, 255, 1, 2, 3, 4, 5, 6, 7, 8, 255], vec![1, 2, 3, 4, 5, 6, 7, 8, 255, 1, 2, 3, 4, 5, 6, 7, 8, 0], vec![1, 2, 3, 4, 5, 6, 7, 8, 255, 1, 0, 0, 0, 0, 0, 0, 0, 247], ]; for invalid_src in cases { let mut dest = vec![0; invalid_src.len()]; let result = MemComparableByteCodec::try_decode_first( invalid_src.as_slice(), dest.as_mut_slice(), ); assert!(result.is_err()); } } #[test] fn test_memcmp_try_decode_first_panic() { let cases = vec![ vec![0, 0, 0, 0, 0, 0, 0, 0, 247], vec![1, 2, 3, 4, 5, 6, 7, 0, 254], vec![0, 0, 0, 0, 0, 0, 0, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 247], ]; for src in cases { { let src = src.clone(); let mut dest = vec![0; src.len() - 1]; let result = panic_hook::recover_safe(move || { let _ = MemComparableByteCodec::try_decode_first( src.as_slice(), dest.as_mut_slice(), ); }); assert!(result.is_err()); } { let mut dest = vec![0; src.len()]; MemComparableByteCodec::try_decode_first(src.as_slice(), dest.as_mut_slice()) .unwrap(); } } } #[test] fn test_memcmp_compare() { use std::cmp::Ordering; let cases: Vec<(&[u8], &[u8], _)> = vec![ (b"", b"\x00", Ordering::Less), (b"\x00", b"\x00", Ordering::Equal), (b"\xFF", b"\x00", Ordering::Greater), (b"\xFF", b"\xFF\x00", Ordering::Less), (b"a", b"b", Ordering::Less), (b"a", b"\x00", Ordering::Greater), (b"\x00", b"\x01", Ordering::Less), (b"\x00\x01", b"\x00\x00", Ordering::Greater), (b"\x00\x00\x00", b"\x00\x00", Ordering::Greater), (b"\x00\x00\x00", b"\x00\x00", Ordering::Greater), ( b"\x00\x00\x00\x00\x00\x00\x00\x00", b"\x00\x00\x00\x00\x00\x00\x00\x00\x00", Ordering::Less, ), (b"\x01\x02\x03\x00", b"\x01\x02\x03", Ordering::Greater), (b"\x01\x03\x03\x04", b"\x01\x03\x03\x05", Ordering::Less), ( b"\x01\x02\x03\x04\x05\x06\x07", b"\x01\x02\x03\x04\x05\x06\x07\x08", Ordering::Less, ), ( b"\x01\x02\x03\x04\x05\x06\x07\x08\x09", b"\x01\x02\x03\x04\x05\x06\x07\x08", Ordering::Greater, ), ( b"\x01\x02\x03\x04\x05\x06\x07\x08\x00", b"\x01\x02\x03\x04\x05\x06\x07\x08", Ordering::Greater, ), ]; fn encode_asc(src: &[u8]) -> Vec<u8> { let mut buf = vec![0; MemComparableByteCodec::encoded_len(src.len())]; let encoded = MemComparableByteCodec::encode_all(src, buf.as_mut_slice()); assert_eq!(encoded, buf.len()); buf } fn encode_desc(src: &[u8]) -> Vec<u8> { let mut buf = vec![0; MemComparableByteCodec::encoded_len(src.len())]; let encoded = MemComparableByteCodec::encode_all_desc(src, buf.as_mut_slice()); assert_eq!(encoded, buf.len()); buf } for (x, y, ord) in cases { assert_eq!(x.cmp(y), ord); assert_eq!(encode_asc(x).cmp(&encode_asc(y)), ord); assert_eq!(encode_desc(x).cmp(&encode_desc(y)), ord.reverse()); } } } #[cfg(test)] mod benches { use crate::test; /// A naive implementation of encoding in mem-comparable format. /// It does not process non zero-padding groups separately. 
    fn mem_comparable_encode_all_naive(src: &[u8], dest: &mut [u8]) -> usize {
        unsafe {
            let mut src_ptr = src.as_ptr();
            let mut dest_ptr = dest.as_mut_ptr();
            let src_ptr_end = src_ptr.add(src.len());
            let dest_ptr_end = dest_ptr.add(dest.len());
            while src_ptr <= src_ptr_end {
                // We need to write GROUP_SIZE + 1 bytes below, so assert the bound first.
                assert!(
                    dest_ptr.add(super::MEMCMP_GROUP_SIZE + 1) <= dest_ptr_end,
                    "dest out of bound"
                );
                let remaining_size = src_ptr_end.offset_from(src_ptr) as usize;
                let padding_size;
                if remaining_size > super::MEMCMP_GROUP_SIZE {
                    padding_size = 0;
                    std::ptr::copy_nonoverlapping(src_ptr, dest_ptr, super::MEMCMP_GROUP_SIZE);
                } else {
                    padding_size = super::MEMCMP_GROUP_SIZE - remaining_size;
                    std::ptr::copy_nonoverlapping(src_ptr, dest_ptr, remaining_size);
                    std::ptr::write_bytes(
                        dest_ptr.add(remaining_size),
                        super::MEMCMP_PAD_BYTE,
                        padding_size,
                    );
                }
                src_ptr = src_ptr.add(super::MEMCMP_GROUP_SIZE);
                dest_ptr = dest_ptr.add(super::MEMCMP_GROUP_SIZE);
                let padding_marker = !(padding_size as u8);
                dest_ptr.write(padding_marker);
                dest_ptr = dest_ptr.add(1);
            }
            dest_ptr.offset_from(dest.as_mut_ptr()) as usize
        }
    }

    const ENC_GROUP_SIZE: usize = 8;
    const ENC_MARKER: u8 = b'\xff';
    const ENC_ASC_PADDING: [u8; ENC_GROUP_SIZE] = [0; ENC_GROUP_SIZE];
    const ENC_DESC_PADDING: [u8; ENC_GROUP_SIZE] = [!0; ENC_GROUP_SIZE];

    /// The original implementation of `encode_bytes` in TiKV.
    trait OldBytesEncoder: std::io::Write {
        fn encode_bytes(&mut self, key: &[u8], desc: bool) -> super::Result<()> {
            let len = key.len();
            let mut index = 0;
            let mut buf = [0; ENC_GROUP_SIZE];
            while index <= len {
                let remain = len - index;
                let mut pad: usize = 0;
                if remain > ENC_GROUP_SIZE {
                    self.write_all(adjust_bytes_order(
                        &key[index..index + ENC_GROUP_SIZE],
                        desc,
                        &mut buf,
                    ))
                    .map_err(|_| super::Error::BufferTooSmall)?;
                } else {
                    pad = ENC_GROUP_SIZE - remain;
                    self.write_all(adjust_bytes_order(&key[index..], desc, &mut buf))
                        .map_err(|_| super::Error::BufferTooSmall)?;
                    if desc {
                        self.write_all(&ENC_DESC_PADDING[..pad])
                            .map_err(|_| super::Error::BufferTooSmall)?;
                    } else {
                        self.write_all(&ENC_ASC_PADDING[..pad])
                            .map_err(|_| super::Error::BufferTooSmall)?;
                    }
                }
                self.write_all(adjust_bytes_order(
                    &[ENC_MARKER - (pad as u8)],
                    desc,
                    &mut buf,
                ))
                .map_err(|_| super::Error::BufferTooSmall)?;
                index += ENC_GROUP_SIZE;
            }
            Ok(())
        }
    }

    fn adjust_bytes_order<'a>(bs: &'a [u8], desc: bool, buf: &'a mut [u8]) -> &'a [u8] {
        if desc {
            let mut buf_idx = 0;
            for &b in bs {
                buf[buf_idx] = !b;
                buf_idx += 1;
            }
            &buf[..buf_idx]
        } else {
            bs
        }
    }

    impl<T: std::io::Write> OldBytesEncoder for T {}

    /// The original implementation of `decode_bytes` in TiKV.
    fn original_decode_bytes(data: &mut &[u8], desc: bool) -> super::Result<Vec<u8>> {
        use std::io::Write;
        let mut key = Vec::with_capacity(data.len() / (ENC_GROUP_SIZE + 1) * ENC_GROUP_SIZE);
        let mut offset = 0;
        let chunk_len = ENC_GROUP_SIZE + 1;
        loop {
            // Every time, treat ENC_GROUP_SIZE + 1 elements as a decode unit.
            let next_offset = offset + chunk_len;
            let chunk = if next_offset <= data.len() {
                &data[offset..next_offset]
            } else {
                return Err(super::Error::UnexpectedEOF);
            };
            offset = next_offset;
            // The last byte in the decode unit is the marker, which indicates the pad size.
            let (&marker, bytes) = chunk.split_last().unwrap();
            let pad_size = if desc {
                marker as usize
            } else {
                (ENC_MARKER - marker) as usize
            };
            // No padding, just push 8 bytes.
            if pad_size == 0 {
                key.write_all(bytes).unwrap();
                continue;
            }
            if pad_size > ENC_GROUP_SIZE {
                return Err(super::Error::BadPadding);
            }
            // If there is padding, split off the padding pattern and push the remaining bytes.
            let (bytes, padding) = bytes.split_at(ENC_GROUP_SIZE - pad_size);
            key.write_all(bytes).unwrap();
            let pad_byte = if desc { !0 } else { 0 };
            // Check whether the padding pattern is valid.
            if padding.iter().any(|x| *x != pad_byte) {
                return Err(super::Error::BadPadding);
            }
            if desc {
                for k in &mut key {
                    *k = !*k;
                }
            }
            // `data` will point to the following unencoded bytes, e.g. a timestamp.
            *data = &data[offset..];
            return Ok(key);
        }
    }

    /// The original implementation of `decode_bytes_in_place` in TiKV.
    fn original_decode_bytes_in_place(data: &mut Vec<u8>, desc: bool) -> super::Result<()> {
        let mut write_offset = 0;
        let mut read_offset = 0;
        loop {
            let marker_offset = read_offset + ENC_GROUP_SIZE;
            if marker_offset >= data.len() {
                return Err(super::Error::UnexpectedEOF);
            }
            unsafe {
                // This is semantically equivalent to C's memmove(): src and dest may
                // overlap, and if src == dest it does nothing.
                std::ptr::copy(
                    data.as_ptr().add(read_offset),
                    data.as_mut_ptr().add(write_offset),
                    ENC_GROUP_SIZE,
                );
            }
            write_offset += ENC_GROUP_SIZE;
            // Every time, treat ENC_GROUP_SIZE + 1 elements as a decode unit.
            read_offset += ENC_GROUP_SIZE + 1;
            // The last byte in the decode unit is the marker, which indicates the pad size.
            let marker = data[marker_offset];
            let pad_size = if desc {
                marker as usize
            } else {
                (ENC_MARKER - marker) as usize
            };
            if pad_size > 0 {
                if pad_size > ENC_GROUP_SIZE {
                    return Err(super::Error::BadPadding);
                }
                // Check whether the padding pattern is valid.
                let padding_slice = if desc {
                    &ENC_DESC_PADDING[..pad_size]
                } else {
                    &ENC_ASC_PADDING[..pad_size]
                };
                if &data[write_offset - pad_size..write_offset] != padding_slice {
                    return Err(super::Error::BadPadding);
                }
                unsafe {
                    data.set_len(write_offset - pad_size);
                }
                if desc {
                    for k in data {
                        *k = !*k;
                    }
                }
                return Ok(());
            }
        }
    }

    #[bench]
    fn bench_memcmp_encode_all_asc_small(b: &mut test::Bencher) {
        let src = [b'x'; 100];
        let mut dest = [0; 200];
        b.iter(|| {
            let encoded = super::MemComparableByteCodec::encode_all(
                test::black_box(&src),
                test::black_box(&mut dest),
            );
            test::black_box(encoded);
            test::black_box(&dest);
        });
    }

    #[bench]
    fn bench_memcmp_encode_all_desc_small(b: &mut test::Bencher) {
        let src = [b'x'; 100];
        let mut dest = [0; 200];
        b.iter(|| {
            let encoded = super::MemComparableByteCodec::encode_all_desc(
                test::black_box(&src),
                test::black_box(&mut dest),
            );
            test::black_box(encoded);
            test::black_box(&dest);
        });
    }

    #[bench]
    fn bench_memcmp_encode_all_asc_small_naive(b: &mut test::Bencher) {
        let src = [b'x'; 100];
        let mut dest = [0; 200];
        b.iter(|| {
            let encoded =
                mem_comparable_encode_all_naive(test::black_box(&src), test::black_box(&mut dest));
test::black_box(encoded); test::black_box(&dest); }); } #[bench] fn bench_memcmp_encode_all_asc_large(b: &mut test::Bencher) { let src = [b'x'; 1000]; let mut dest = [0; 2000]; b.iter(|| { let encoded = super::MemComparableByteCodec::encode_all( test::black_box(&src), test::black_box(&mut dest), ); test::black_box(encoded); test::black_box(&dest); }); } #[bench] fn bench_memcmp_encode_all_asc_large_naive(b: &mut test::Bencher) { let src = [b'x'; 1000]; let mut dest = [0; 2000]; b.iter(|| { let encoded = mem_comparable_encode_all_naive(test::black_box(&src), test::black_box(&mut dest)); test::black_box(encoded); test::black_box(&dest); }); } #[bench] fn bench_memcmp_encode_all_desc_large(b: &mut test::Bencher) { let src = [b'x'; 1000]; let mut dest = [0; 2000]; b.iter(|| { let encoded = super::MemComparableByteCodec::encode_all_desc( test::black_box(&src), test::black_box(&mut dest), ); test::black_box(encoded); test::black_box(&dest); }); } #[bench] fn bench_memcmp_encode_all_asc_large_original(b: &mut test::Bencher) { let src = [b'x'; 1000]; let mut dest: Vec<u8> = Vec::with_capacity(2000); b.iter(|| { let dest = test::black_box(&mut dest); dest.encode_bytes(test::black_box(&src), test::black_box(false)) .unwrap(); test::black_box(&dest); unsafe { dest.set_len(0) }; }); } #[bench] fn bench_memcmp_encode_all_desc_large_original(b: &mut test::Bencher) { let src = [b'x'; 1000]; let mut dest: Vec<u8> = Vec::with_capacity(2000); b.iter(|| { let dest = test::black_box(&mut dest); dest.encode_bytes(test::black_box(&src), test::black_box(true)) .unwrap(); test::black_box(&dest); unsafe { dest.set_len(0) }; }); } #[bench] fn bench_memcmp_decode_first_asc_large(b: &mut test::Bencher) { let raw = [b'x'; 1000]; let mut encoded = vec![0; super::MemComparableByteCodec::encoded_len(1000)]; super::MemComparableByteCodec::encode_all(&raw, encoded.as_mut_slice()); let mut decoded = vec![0; encoded.len()]; b.iter(|| { let src = test::black_box(&encoded).as_slice(); let dest = test::black_box(&mut decoded).as_mut_slice(); super::MemComparableByteCodec::try_decode_first(src, dest).unwrap(); test::black_box(&dest); }); } #[bench] fn bench_memcmp_decode_first_desc_large(b: &mut test::Bencher) { let raw = [b'x'; 1000]; let mut encoded = vec![0; super::MemComparableByteCodec::encoded_len(1000)]; super::MemComparableByteCodec::encode_all_desc(&raw, encoded.as_mut_slice()); let mut decoded = vec![0; encoded.len()]; b.iter(|| { let src = test::black_box(&encoded).as_slice(); let dest = test::black_box(&mut decoded).as_mut_slice(); super::MemComparableByteCodec::try_decode_first_desc(src, dest).unwrap(); test::black_box(&dest); }); } #[bench] fn bench_memcmp_decode_first_asc_large_original(b: &mut test::Bencher) { let raw = [b'x'; 1000]; let mut encoded = vec![0; super::MemComparableByteCodec::encoded_len(1000)]; super::MemComparableByteCodec::encode_all(&raw, encoded.as_mut_slice()); b.iter(|| { let mut data = test::black_box(&encoded).as_slice(); let decoded = original_decode_bytes(&mut data, test::black_box(false)).unwrap(); test::black_box(&data); test::black_box(decoded); }); } #[bench] fn bench_memcmp_decode_first_desc_large_original(b: &mut test::Bencher) { let raw = [b'x'; 1000]; let mut encoded = vec![0; super::MemComparableByteCodec::encoded_len(1000)]; super::MemComparableByteCodec::encode_all_desc(&raw, encoded.as_mut_slice()); b.iter(|| { let mut data = test::black_box(&encoded).as_slice(); let decoded = original_decode_bytes(&mut data, test::black_box(true)).unwrap(); test::black_box(&data); 
test::black_box(decoded); }); } #[bench] fn bench_memcmp_decode_first_in_place_asc_large(b: &mut test::Bencher) { let raw = [b'x'; 1000]; let mut encoded = vec![0; super::MemComparableByteCodec::encoded_len(1000)]; super::MemComparableByteCodec::encode_all(&raw, encoded.as_mut_slice()); b.iter(|| { let mut encoded = test::black_box(encoded.clone()); let src = encoded.as_mut_slice(); super::MemComparableByteCodec::try_decode_first_in_place(src).unwrap(); test::black_box(&src); }); } #[bench] fn bench_memcmp_decode_first_in_place_desc_large(b: &mut test::Bencher) { let raw = [b'x'; 1000]; let mut encoded = vec![0; super::MemComparableByteCodec::encoded_len(1000)]; super::MemComparableByteCodec::encode_all_desc(&raw, encoded.as_mut_slice()); b.iter(|| { let mut encoded = test::black_box(encoded.clone()); let src = encoded.as_mut_slice(); super::MemComparableByteCodec::try_decode_first_in_place_desc(src).unwrap(); test::black_box(&src); }); } #[bench] fn bench_memcmp_decode_first_in_place_asc_large_original(b: &mut test::Bencher) { let raw = [b'x'; 1000]; let mut encoded = vec![0; super::MemComparableByteCodec::encoded_len(1000)]; super::MemComparableByteCodec::encode_all(&raw, encoded.as_mut_slice()); b.iter(|| { let mut encoded = test::black_box(encoded.clone()); original_decode_bytes_in_place(&mut encoded, test::black_box(false)).unwrap(); test::black_box(&encoded); }); } #[bench] fn bench_memcmp_decode_first_in_place_desc_large_original(b: &mut test::Bencher) { let raw = [b'x'; 1000]; let mut encoded = vec![0; super::MemComparableByteCodec::encoded_len(1000)]; super::MemComparableByteCodec::encode_all_desc(&raw, encoded.as_mut_slice()); b.iter(|| { let mut encoded = test::black_box(encoded.clone()); original_decode_bytes_in_place(&mut encoded, test::black_box(true)).unwrap(); test::black_box(&encoded); }); } }
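// --- Editor's worked example (comments only, derived from the fixtures in
// test_memcmp_encode_all above) ---
// Ascending encoding pads each 8-byte group and appends a marker byte of
// !padding_size, so:
//
//     [1, 2, 3]                  -> [1, 2, 3, 0, 0, 0, 0, 0, 250]
//                                   // 5 pad bytes, marker = !5 = 250
//     [1, 2, 3, 4, 5, 6, 7, 8]   -> [1, ..., 8, 255,  0 x 8,  247]
//                                   // full group gets marker !0 = 255, then a
//                                   // fully padded group with marker !8 = 247
//
// Descending encoding is simply the bitwise NOT of the ascending form, which
// is why flip_bytes_in_place reverses the comparison order.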
39.01405
103
0.518631
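The benchmarks above call a `mem_comparable_encode_all_naive` helper that is defined earlier in the file. For readers without that context, here is a minimal sketch of what such a naive group-based encoder could look like, assuming the TiKV/TiDB memcomparable layout (8-byte groups, zero padding, marker byte = 0xFF minus the pad count); this is an illustration under that assumption, not the crate's actual helper.

// Hypothetical sketch of a naive memcomparable encoder, assuming the
// TiKV/TiDB layout: the input is split into 8-byte groups, each group is
// zero-padded to 8 bytes and followed by a marker byte of 0xFF - pad_count.
// Returns the number of bytes written into `dest`.
fn mem_comparable_encode_all_naive(src: &[u8], dest: &mut [u8]) -> usize {
    const GROUP: usize = 8;
    const MARKER: u8 = 0xFF;
    const PAD: u8 = 0x00;
    let mut written = 0;
    for chunk in src.chunks(GROUP) {
        // copy the real bytes, then zero-pad the group up to 8 bytes
        dest[written..written + chunk.len()].copy_from_slice(chunk);
        dest[written + chunk.len()..written + GROUP].fill(PAD);
        written += GROUP;
        // the marker records how many pad bytes this group carries
        dest[written] = MARKER - (GROUP - chunk.len()) as u8;
        written += 1;
    }
    if src.len() % GROUP == 0 {
        // an exact multiple of 8 still needs a trailing, all-padding group
        dest[written..written + GROUP].fill(PAD);
        written += GROUP;
        dest[written] = MARKER - GROUP as u8;
        written += 1;
    }
    written
}

Under this layout the encoded length of an n-byte input is (n / 8 + 1) * 9 bytes, which is consistent with the `encoded_len(1000)` buffer sizing used by the decode benchmarks above.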
f50f0ab4e521453e68cdb22dd9d185f52a1dfda5
39
pub mod day;
pub mod gbbq;
pub mod lc;
9.75
13
0.692308
dd868d0010fadae6445bfc1f313349ef316a620a
7,066
#[macro_use]
extern crate criterion;

// TODO: Should be using black_box for constant inputs
//use criterion::black_box;
use criterion::Criterion;

use ndarray::Array2;
use rand::prelude::*;
use rand::Rng;

#[cfg(target_arch = "x86_64")]
use fast_linear_estimator::matrix_avx::MatrixF32;

#[cfg(target_arch = "aarch64")]
use fast_linear_estimator::matrix_arm::MatrixF32;

// sizes
const NUM_INPUT: usize = 20;
const NUM_OUTPUT: usize = 20;
const NUM_INPUT_SETS: usize = 250;

fn bench_logistic(crit: &mut Criterion) {
    // build random input sets
    let mut rnd = rand::thread_rng();
    let input_sets: Vec<Vec<f32>> = std::iter::repeat_with(|| {
        std::iter::repeat_with(|| rnd.gen_range(-5.0 * 0.5, 5.0 * 0.5))
            .take(NUM_INPUT)
            .collect()
    })
    .take(NUM_INPUT_SETS)
    .collect();

    // coefficients
    let coeff_min: f32 = -0.05;
    let coeff_max: f32 = 0.05;
    let intercept_min: f32 = -0.01;
    let intercept_max: f32 = 0.01;

    // build random coefficients -- row major order;
    // inputs are rows sequential; outputs are columns stride
    let mut coeff = [[0f32; NUM_INPUT]; NUM_OUTPUT];
    let mut coeff_transpose = [[0f32; NUM_OUTPUT]; NUM_INPUT];
    let mut coeff_nd = Array2::<f32>::zeros((NUM_OUTPUT, NUM_INPUT));
    let mut coeff_nd_transpose = Array2::<f32>::zeros((NUM_INPUT, NUM_OUTPUT));
    for ip in 0..NUM_INPUT {
        for op in 0..NUM_OUTPUT {
            let v = rnd.gen_range(coeff_min, coeff_max);
            // normal arrays
            coeff[op][ip] = v;
            coeff_transpose[ip][op] = v;
            // ndarray
            coeff_nd[[op, ip]] = v;
            coeff_nd_transpose[[ip, op]] = v;
        }
    }

    let mut intercepts = [0f32; NUM_OUTPUT];
    for op in 0..NUM_OUTPUT {
        let v = rnd.gen_range(intercept_min, intercept_max);
        intercepts[op] = v;
    }

    crit.bench_function("choose-input", |b| {
        b.iter(|| {
            let input = input_sets.iter().choose(&mut rnd).unwrap();
            input[0]
        })
    });

    // MatrixF32 benchmark
    {
        let vec_coeff: Vec<Vec<f32>> = coeff_transpose.iter().map(|r| r.to_vec()).collect();
        let mat = MatrixF32::create_from_rows(&vec_coeff, &intercepts).unwrap();

        // copied to f32 output directly
        let mut output_f32 = vec![0f32; mat.num_columns];
        crit.bench_function("matrix-product", |b| {
            b.iter(|| {
                let input = input_sets.iter().choose(&mut rnd).unwrap();
                let some = mat.product(&input, &mut output_f32);
                assert!(some.is_some());
                output_f32[0]
            })
        });

        // copied to f32 output directly
        let mut output_f32 = vec![0f32; mat.num_columns];
        crit.bench_function("matrix-softmax", |b| {
            b.iter(|| {
                let input = input_sets.iter().choose(&mut rnd).unwrap();
                let some = mat.product_softmax_cumulative_approx(&input, &mut output_f32);
                assert!(some.is_some());
                output_f32[0]
            })
        });
    }

    // directly implemented with iterators
    {
        // note: this is misleadingly fast; it relies on the dimensions being
        // compile-time constants, which the compiler takes advantage of.
        // performance is a lot lower with variable input sizes :)
        //
        // the MatrixAvxF32 implementation accepts arbitrary dimensions; this
        // one, essentially, does not.
        crit.bench_function("matrix-direct-product", |b| {
            b.iter(|| {
                let a = input_sets.iter().choose(&mut rnd).unwrap();
                let mut r = [0.0; NUM_OUTPUT];
                // matrix mult
                for j in 0..NUM_OUTPUT {
                    r[j] = a
                        .iter()
                        .zip(coeff[j].iter())
                        .map(|(u, v)| u * v)
                        .sum::<f32>();
                }
                r[0]
            })
        });

        crit.bench_function("matrix-direct-softmax", |b| {
            b.iter(|| {
                let input_index = rnd.gen_range(0, NUM_INPUT_SETS);
                let a = &(input_sets[input_index]);
                let mut r = [0.0; NUM_OUTPUT];
                // matrix mult
                for j in 0..NUM_OUTPUT {
                    r[j] = a
                        .iter()
                        .zip(coeff[j].iter())
                        .map(|(u, v)| u * v)
                        .sum::<f32>();
                }
                // softmax with normalise
                r.iter_mut().for_each(|v| *v = f32::exp(*v));
                let mut cumulative = 0f32;
                for v in r.iter_mut() {
                    cumulative += *v;
                    *v = cumulative;
                }
                r[0]
            })
        });
    }

    // ndarray benchmark
    {
        use ndarray::*;

        // without allocation
        crit.bench_function("ndarray-setup-input-view", |b| {
            b.iter(|| {
                let input = input_sets.iter().choose(&mut rnd).unwrap();
                let a = ArrayView1::from(input);
                a[0]
            })
        });

        // you're still paying for allocation from `dot`
        crit.bench_function("ndarray-product", |b| {
            b.iter(|| {
                let input = input_sets.iter().choose(&mut rnd).unwrap();
                let a = ArrayView1::from(input);
                let res = coeff_nd.dot(&a);
                res[0]
            })
        });

        // you're still paying for allocation from `dot`
        crit.bench_function("ndarray-product-transposed", |b| {
            b.iter(|| {
                let input = input_sets.iter().choose(&mut rnd).unwrap();
                let a = ArrayView1::from(input);
                let res = &a.dot(&coeff_nd_transpose);
                res[0]
            })
        });

        // using an iterator
        crit.bench_function("ndarray-product-transposed-iterators", |b| {
            // pre-allocated and reusable result
            let mut res = ndarray::Array1::<f32>::zeros((NUM_OUTPUT,));
            b.iter(|| {
                let input = input_sets.iter().choose(&mut rnd).unwrap();
                let a = ArrayView1::from(input);
                res.fill(0.0);
                Zip::from(coeff_nd_transpose.genrows())
                    .and(&a)
                    .apply(|cf, inp| {
                        Zip::from(cf).and(&mut res).apply(|cc, rr| {
                            // accumulate each input element's contribution into
                            // the reusable result (assignment alone would keep
                            // only the last row's term)
                            *rr += cc * inp;
                        });
                    });
                res[0]
            })
        });
    }
}

// long form, with samples specified
criterion_group! {
    name = benches;
    config = Criterion::default().sample_size(10);
    targets =
        //example_benchmark,
        bench_logistic
}
criterion_main!(benches);
31.686099
92
0.491792
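The ndarray benchmarks above note that `dot` pays for an allocation on every call. A minimal sketch of an allocation-free alternative, assuming ndarray's `linalg::general_mat_vec_mul` (which computes y = alpha * A * x + beta * y into a caller-provided vector); `product_into` is a hypothetical helper name, not part of the benchmarked crate.

use ndarray::linalg::general_mat_vec_mul;
use ndarray::{Array1, Array2, ArrayView1};

/// Writes coeff · input into `out` without allocating a fresh array.
fn product_into(coeff: &Array2<f32>, input: &ArrayView1<f32>, out: &mut Array1<f32>) {
    // y = 1.0 * A * x + 0.0 * y -- `out` is reused across calls
    general_mat_vec_mul(1.0, coeff, input, 0.0, out);
}

In a benchmark loop, `out` would be allocated once outside `b.iter`, mirroring the pre-allocated `res` used by the iterator-based variant above.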