Dataset columns (name, dtype, min-max):

  hexsha              string   (length 40-40)
  size                int64    (4-1.05M)
  content             string   (length 4-1.05M)
  avg_line_length     float64  (1.33-100)
  max_line_length     int64    (1-1k)
  alphanum_fraction   float64  (0.25-1)
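
The columns above describe each record that follows: a content hash, the file size in bytes, the file text itself, and three derived statistics. As a rough illustration only (the dataset's real extraction code is not shown here), a Rust sketch of how such per-file statistics could be computed, assuming avg_line_length is the mean line length in bytes, max_line_length the longest line, and alphanum_fraction the share of ASCII-alphanumeric bytes:

/// Hypothetical helper: compute the statistics columns for one file's text.
/// The exact definitions are assumptions, not the dataset's actual pipeline.
fn file_stats(content: &str) -> (usize, f64, usize, f64) {
    // `size`: total length of the file in bytes
    let size = content.len();

    // Collect per-line lengths once, then derive the two line statistics
    let line_lengths: Vec<usize> = content.lines().map(|l| l.len()).collect();
    let line_count = line_lengths.len().max(1);
    let avg_line_length = line_lengths.iter().sum::<usize>() as f64 / line_count as f64;
    let max_line_length = line_lengths.iter().copied().max().unwrap_or(0);

    // `alphanum_fraction`: share of bytes that are ASCII alphanumeric
    let alphanum = content.bytes().filter(u8::is_ascii_alphanumeric).count();
    let alphanum_fraction = alphanum as f64 / size.max(1) as f64;

    (size, avg_line_length, max_line_length, alphanum_fraction)
}

fn main() {
    let (size, avg, max, frac) = file_stats("fn main() {}\n");
    println!(
        "size={} avg_line_length={:.2} max_line_length={} alphanum_fraction={:.6}",
        size, avg, max, frac
    );
}
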
hexsha: d6c405572102718d0cb82b01bdfa1080bc97cfac
size: 504
use std::env;
use std::process;

fn main() {
    let args: Vec<String> = env::args().collect();

    if args.len() != 2 {
        eprintln!("Usage: {} <hex u32>", args[0]);
        process::exit(1);
    }

    let op = u32::from_str_radix(args[1].trim_start_matches("0x"), 16)
        .expect(&format!("Could not parse {} as hex u32", args[1]));

    let decoded = bad64::decode(op, 0x1000).expect(&format!("Could not decode {:#x}", op));

    println!("{:#x?}", decoded);
    println!("{}", decoded);
}
avg_line_length: 25.2
max_line_length: 91
alphanum_fraction: 0.551587

hexsha: 23d0b177c78ab46b16a3665cdb11971aa3db7c37
size: 2,989
//! A utility for cross compiling binaries using Cross

use std::path::{Path, PathBuf};
use std::process::{self, Command};
use std::{env, fs};

use clap::Parser;

#[derive(Parser, Debug)]
struct Options {
    /// The path to an artifacts directory expecting to contain directories
    /// named after platform triples with binaries inside.
    #[clap(long)]
    pub output: PathBuf,

    /// The target triple to build for
    #[clap(long)]
    pub target: String,
}

/// This function is required until an upstream PR lands
/// https://github.com/rust-embedded/cross/pull/597
fn prepare_workspace(workspace_root: &Path) {
    let src = PathBuf::from(env!("CROSS_CONFIG"));
    let dest = workspace_root.join("Cross.toml");
    println!("{:?} -> {:?}", src, dest);
    fs::copy(src, dest).unwrap();

    // Unfortunately, cross runs into issues when cross compiling incrementally.
    // To avoid this, the workspace must be cleaned
    let cargo = env::current_dir().unwrap().join(env!("CARGO"));
    Command::new(cargo)
        .current_dir(workspace_root)
        .arg("clean")
        .status()
        .unwrap();
}

/// Execute a build for the provided platform
fn execute_cross(working_dir: &Path, target_triple: &str) {
    let cross = env::current_dir().unwrap().join(env!("CROSS_BIN"));
    let status = Command::new(cross)
        .current_dir(working_dir)
        .arg("build")
        .arg("--release")
        .arg("--locked")
        .arg("--bin")
        .arg("cargo-bazel")
        .arg(format!("--target={}", target_triple))
        .status()
        .unwrap();

    if !status.success() {
        process::exit(status.code().unwrap_or(1));
    }
}

/// Install results to the output directory
fn install_outputs(working_dir: &Path, triple: &str, output_dir: &Path) {
    let is_windows_target = triple.contains("windows");
    let binary_name = if is_windows_target {
        "cargo-bazel.exe"
    } else {
        "cargo-bazel"
    };

    // Since we always build from the workspace root, and the output
    // is always expected to be `./target/{triple}`, we build a path
    // to the expected output and write it.
    let artifact = working_dir
        .join("target")
        .join(triple)
        .join("release")
        .join(binary_name);

    let dest = output_dir.join(triple).join(binary_name);
    fs::create_dir_all(dest.parent().unwrap()).unwrap();
    fs::rename(artifact, &dest).unwrap();
    println!("Installed: {}", dest.display());
}

fn main() {
    let opt = Options::parse();

    // Locate the workspace root
    let workspace_root = PathBuf::from(
        env::var("BUILD_WORKSPACE_DIRECTORY")
            .expect("cross_installer is designed to run under Bazel"),
    )
    .join("crate_universe");

    // Do some setup
    prepare_workspace(&workspace_root);

    // Build the binary
    execute_cross(&workspace_root, &opt.target);

    // Install the results
    install_outputs(&workspace_root, &opt.target, &opt.output);
}
avg_line_length: 29.594059
max_line_length: 80
alphanum_fraction: 0.630646

hexsha: 1a73590c07cc0307105e30ddfc332330e4be2eeb
size: 3,206
#[doc = "Reader of register EVENTS_ERRORECB"] pub type R = crate::R<u32, super::EVENTS_ERRORECB>; #[doc = "Writer for register EVENTS_ERRORECB"] pub type W = crate::W<u32, super::EVENTS_ERRORECB>; #[doc = "Register EVENTS_ERRORECB `reset()`'s with value 0"] impl crate::ResetValue for super::EVENTS_ERRORECB { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "ECB block encrypt aborted because of a STOPECB task or due to an error\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum EVENTS_ERRORECB_A { #[doc = "0: Event not generated"] NOTGENERATED = 0, #[doc = "1: Event generated"] GENERATED = 1, } impl From<EVENTS_ERRORECB_A> for bool { #[inline(always)] fn from(variant: EVENTS_ERRORECB_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `EVENTS_ERRORECB`"] pub type EVENTS_ERRORECB_R = crate::R<bool, EVENTS_ERRORECB_A>; impl EVENTS_ERRORECB_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> EVENTS_ERRORECB_A { match self.bits { false => EVENTS_ERRORECB_A::NOTGENERATED, true => EVENTS_ERRORECB_A::GENERATED, } } #[doc = "Checks if the value of the field is `NOTGENERATED`"] #[inline(always)] pub fn is_not_generated(&self) -> bool { *self == EVENTS_ERRORECB_A::NOTGENERATED } #[doc = "Checks if the value of the field is `GENERATED`"] #[inline(always)] pub fn is_generated(&self) -> bool { *self == EVENTS_ERRORECB_A::GENERATED } } #[doc = "Write proxy for field `EVENTS_ERRORECB`"] pub struct EVENTS_ERRORECB_W<'a> { w: &'a mut W, } impl<'a> EVENTS_ERRORECB_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: EVENTS_ERRORECB_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Event not generated"] #[inline(always)] pub fn not_generated(self) -> &'a mut W { self.variant(EVENTS_ERRORECB_A::NOTGENERATED) } #[doc = "Event generated"] #[inline(always)] pub fn generated(self) -> &'a mut W { self.variant(EVENTS_ERRORECB_A::GENERATED) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01); self.w } } impl R { #[doc = "Bit 0 - ECB block encrypt aborted because of a STOPECB task or due to an error"] #[inline(always)] pub fn events_errorecb(&self) -> EVENTS_ERRORECB_R { EVENTS_ERRORECB_R::new((self.bits & 0x01) != 0) } } impl W { #[doc = "Bit 0 - ECB block encrypt aborted because of a STOPECB task or due to an error"] #[inline(always)] pub fn events_errorecb(&mut self) -> EVENTS_ERRORECB_W { EVENTS_ERRORECB_W { w: self } } }
avg_line_length: 31.431373
max_line_length: 102
alphanum_fraction: 0.608858

hexsha: ff9fc9f0d23bdcdc9565608ca3b1653063898bd7
size: 2,173
use std::collections::HashMap;

use serde::{Deserialize, Serialize};
use url::{form_urlencoded, Url};

use crate::error::Result;

const DEFAULT_PAGE_SIZE: u32 = 15;

pub struct PageOptions {
    params: HashMap<&'static str, String>,
}

impl PageOptions {
    fn new() -> Self {
        PageOptions {
            params: Default::default(),
        }
    }

    pub fn builder() -> PageOptionsBuilder {
        PageOptionsBuilder::new()
    }

    pub fn serialize(&self) -> Option<String> {
        if self.params.is_empty() {
            None
        } else {
            let encoded: String = form_urlencoded::Serializer::new(String::new())
                .extend_pairs(&self.params)
                .finish();
            Some(encoded)
        }
    }
}

impl Default for PageOptions {
    fn default() -> Self {
        Self::builder().page_size(DEFAULT_PAGE_SIZE).build()
    }
}

/// A mutable page builder
pub struct PageOptionsBuilder(PageOptions);

impl PageOptionsBuilder {
    fn new() -> Self {
        PageOptionsBuilder(PageOptions::new())
    }

    pub fn page_size(&mut self, n: u32) -> &mut Self {
        self.0
            .params
            .insert("linked_partitioning", "true".to_string());
        self.0.params.insert("page_size", n.to_string());
        self
    }

    pub fn build(&self) -> PageOptions {
        PageOptions {
            params: self.0.params.clone(),
        }
    }
}

/// Paginated response
#[derive(Serialize, Deserialize, Debug)]
pub struct Page<T> {
    /// The collection
    pub collection: Vec<T>,
    /// The url to the next page of results
    pub next_href: Option<String>,
}

impl<T> Page<T> {
    pub fn next_query(&self) -> Result<Option<HashMap<String, String>>> {
        if self.next_href.is_none() {
            return Ok(None);
        }

        let url = Url::parse(self.next_href.as_ref().unwrap())?;
        let next_query: HashMap<String, String> = url.query_pairs().into_owned().collect();

        match next_query.is_empty() {
            true => Ok(None),
            false => Ok(Some(next_query)),
        }
    }

    pub fn is_empty(&self) -> bool {
        self.collection.is_empty()
    }
}
avg_line_length: 23.365591
max_line_length: 91
alphanum_fraction: 0.57156

hexsha: 798845dd5568d74695db0b660ecafc7c25b63068
size: 16,874
// Copyright 2019 TiKV Project Authors. Licensed under Apache-2.0. use std::cmp::Ordering; use std::convert::TryFrom; use tidb_query_codegen::AggrFunction; use tidb_query_datatype::{Collation, EvalType, FieldTypeAccessor}; use tipb::{Expr, ExprType, FieldType}; use crate::codec::collation::*; use crate::codec::data_type::*; use crate::expr::EvalContext; use crate::rpn_expr::{RpnExpression, RpnExpressionBuilder}; use crate::Result; /// A trait for MAX/MIN aggregation functions pub trait Extremum: Clone + std::fmt::Debug + Send + Sync + 'static { const TP: ExprType; const ORD: Ordering; } #[derive(Debug, Clone, Copy)] pub struct Max; #[derive(Debug, Clone, Copy)] pub struct Min; impl Extremum for Max { const TP: ExprType = ExprType::Max; const ORD: Ordering = Ordering::Less; } impl Extremum for Min { const TP: ExprType = ExprType::Min; const ORD: Ordering = Ordering::Greater; } /// The parser for `MAX/MIN` aggregate functions. pub struct AggrFnDefinitionParserExtremum<T: Extremum>(std::marker::PhantomData<T>); impl<T: Extremum> AggrFnDefinitionParserExtremum<T> { pub fn new() -> Self { Self(std::marker::PhantomData) } } impl<T: Extremum> super::AggrDefinitionParser for AggrFnDefinitionParserExtremum<T> { fn check_supported(&self, aggr_def: &Expr) -> Result<()> { assert_eq!(aggr_def.get_tp(), T::TP); super::util::check_aggr_exp_supported_one_child(aggr_def) } fn parse( &self, mut aggr_def: Expr, ctx: &mut EvalContext, // We use the same structure for all data types, so this parameter is not needed. src_schema: &[FieldType], out_schema: &mut Vec<FieldType>, out_exp: &mut Vec<RpnExpression>, ) -> Result<Box<dyn super::AggrFunction>> { assert_eq!(aggr_def.get_tp(), T::TP); let child = aggr_def.take_children().into_iter().next().unwrap(); let eval_type = EvalType::try_from(child.get_field_type().as_accessor().tp()).unwrap(); let out_ft = aggr_def.take_field_type(); let out_et = box_try!(EvalType::try_from(out_ft.as_accessor().tp())); let out_coll = box_try!(out_ft.as_accessor().collation()); if out_et != eval_type { return Err(other_err!( "Unexpected return field type {}", out_ft.as_accessor().tp() )); } // `MAX/MIN` outputs one column which has the same type with its child out_schema.push(out_ft); out_exp.push(RpnExpressionBuilder::build_from_expr_tree( child, ctx, src_schema.len(), )?); if out_et == EvalType::Bytes { return match_template_collator! { C, match out_coll { Collation::C => Ok(Box::new(AggFnExtremumForBytes::<C, T>::new())) } }; } match_template_evaluable! 
{ TT, match eval_type { EvalType::TT => Ok(Box::new(AggFnExtremum::<TT, T>::new())) } } } } #[derive(Debug, AggrFunction)] #[aggr_function(state = AggFnStateExtremum4Bytes::<C, E>::new())] pub struct AggFnExtremumForBytes<C, E> where C: Collator, E: Extremum, VectorValue: VectorValueExt<Bytes>, { _phantom: std::marker::PhantomData<(C, E)>, } impl<C, E> AggFnExtremumForBytes<C, E> where C: Collator, E: Extremum, VectorValue: VectorValueExt<Bytes>, { fn new() -> Self { Self { _phantom: std::marker::PhantomData, } } } #[derive(Debug)] pub struct AggFnStateExtremum4Bytes<C, E> where VectorValue: VectorValueExt<Bytes>, C: Collator, E: Extremum, { extremum: Option<Bytes>, _phantom: std::marker::PhantomData<(C, E)>, } impl<C, E> AggFnStateExtremum4Bytes<C, E> where VectorValue: VectorValueExt<Bytes>, C: Collator, E: Extremum, { pub fn new() -> Self { Self { extremum: None, _phantom: std::marker::PhantomData, } } } impl<C, E> super::ConcreteAggrFunctionState for AggFnStateExtremum4Bytes<C, E> where VectorValue: VectorValueExt<Bytes>, C: Collator, E: Extremum, { type ParameterType = Bytes; #[inline] fn update_concrete( &mut self, _ctx: &mut EvalContext, value: &Option<Self::ParameterType>, ) -> Result<()> { if value.is_none() { return Ok(()); } if self.extremum.is_none() { self.extremum = value.clone(); return Ok(()); } if C::sort_compare(&self.extremum.as_ref().unwrap(), &value.as_ref().unwrap())? == E::ORD { self.extremum = value.clone(); } Ok(()) } #[inline] fn push_result(&self, _ctx: &mut EvalContext, target: &mut [VectorValue]) -> Result<()> { target[0].push(self.extremum.clone()); Ok(()) } } /// The MAX/MIN aggregate functions. #[derive(Debug, AggrFunction)] #[aggr_function(state = AggFnStateExtremum::<T, E>::new())] pub struct AggFnExtremum<T, E> where T: Evaluable + Ord, E: Extremum, VectorValue: VectorValueExt<T>, { _phantom: std::marker::PhantomData<(T, E)>, } impl<T, E> AggFnExtremum<T, E> where T: Evaluable + Ord, E: Extremum, VectorValue: VectorValueExt<T>, { fn new() -> Self { Self { _phantom: std::marker::PhantomData, } } } /// The state of the MAX/MIN aggregate function. 
#[derive(Debug)] pub struct AggFnStateExtremum<T, E> where T: Evaluable + Ord, E: Extremum, VectorValue: VectorValueExt<T>, { extremum_value: Option<T>, _phantom: std::marker::PhantomData<E>, } impl<T, E> AggFnStateExtremum<T, E> where T: Evaluable + Ord, E: Extremum, VectorValue: VectorValueExt<T>, { pub fn new() -> Self { Self { extremum_value: None, _phantom: std::marker::PhantomData, } } } impl<T, E> super::ConcreteAggrFunctionState for AggFnStateExtremum<T, E> where T: Evaluable + Ord, E: Extremum, VectorValue: VectorValueExt<T>, { type ParameterType = T; #[inline] fn update_concrete( &mut self, _ctx: &mut EvalContext, value: &Option<Self::ParameterType>, ) -> Result<()> { if value.is_some() && (self.extremum_value.is_none() || self.extremum_value.cmp(value) == E::ORD) { self.extremum_value = value.clone(); } Ok(()) } #[inline] fn push_result(&self, _ctx: &mut EvalContext, target: &mut [VectorValue]) -> Result<()> { target[0].push(self.extremum_value.clone()); Ok(()) } } #[cfg(test)] mod tests { use tidb_query_datatype::EvalType; use tipb_helper::ExprDefBuilder; use super::*; use crate::aggr_fn::parser::AggrDefinitionParser; use crate::aggr_fn::AggrFunction; use crate::codec::batch::{LazyBatchColumn, LazyBatchColumnVec}; use tidb_query_datatype::{FieldTypeAccessor, FieldTypeTp}; #[test] fn test_max() { let mut ctx = EvalContext::default(); let function = AggFnExtremum::<Int, Max>::new(); let mut state = function.create_state(); let mut result = [VectorValue::with_capacity(0, EvalType::Int)]; state.push_result(&mut ctx, &mut result).unwrap(); assert_eq!(result[0].as_int_slice(), &[None]); state.update(&mut ctx, &Option::<Int>::None).unwrap(); result[0].clear(); state.push_result(&mut ctx, &mut result).unwrap(); assert_eq!(result[0].as_int_slice(), &[None]); state.update(&mut ctx, &Some(7i64)).unwrap(); result[0].clear(); state.push_result(&mut ctx, &mut result).unwrap(); assert_eq!(result[0].as_int_slice(), &[Some(7)]); state.update(&mut ctx, &Some(4i64)).unwrap(); result[0].clear(); state.push_result(&mut ctx, &mut result).unwrap(); assert_eq!(result[0].as_int_slice(), &[Some(7)]); state.update_repeat(&mut ctx, &Some(20), 10).unwrap(); state .update_repeat(&mut ctx, &Option::<Int>::None, 7) .unwrap(); result[0].clear(); state.push_result(&mut ctx, &mut result).unwrap(); assert_eq!(result[0].as_int_slice(), &[Some(20)]); // update vector state.update(&mut ctx, &Some(7i64)).unwrap(); state .update_vector(&mut ctx, &[Some(21i64), None, Some(22i64)], &[0, 1, 2]) .unwrap(); result[0].clear(); state.push_result(&mut ctx, &mut result).unwrap(); assert_eq!(result[0].as_int_slice(), &[Some(22)]); state.update(&mut ctx, &Some(40i64)).unwrap(); result[0].clear(); state.push_result(&mut ctx, &mut result).unwrap(); assert_eq!(result[0].as_int_slice(), &[Some(40)]); } #[test] fn test_min() { let mut ctx = EvalContext::default(); let function = AggFnExtremum::<Int, Min>::new(); let mut state = function.create_state(); let mut result = [VectorValue::with_capacity(0, EvalType::Int)]; state.push_result(&mut ctx, &mut result).unwrap(); assert_eq!(result[0].as_int_slice(), &[None]); state.update(&mut ctx, &Option::<Int>::None).unwrap(); result[0].clear(); state.push_result(&mut ctx, &mut result).unwrap(); assert_eq!(result[0].as_int_slice(), &[None]); state.update(&mut ctx, &Some(100i64)).unwrap(); result[0].clear(); state.push_result(&mut ctx, &mut result).unwrap(); assert_eq!(result[0].as_int_slice(), &[Some(100)]); state.update(&mut ctx, &Some(90i64)).unwrap(); result[0].clear(); 
state.push_result(&mut ctx, &mut result).unwrap(); assert_eq!(result[0].as_int_slice(), &[Some(90)]); state.update_repeat(&mut ctx, &Some(80), 10).unwrap(); state .update_repeat(&mut ctx, &Option::<Int>::None, 10) .unwrap(); result[0].clear(); state.push_result(&mut ctx, &mut result).unwrap(); assert_eq!(result[0].as_int_slice(), &[Some(80)]); // update vector state.update(&mut ctx, &Some(70i64)).unwrap(); state .update_vector(&mut ctx, &[Some(69i64), None, Some(68i64)], &[0, 1, 2]) .unwrap(); result[0].clear(); state.push_result(&mut ctx, &mut result).unwrap(); assert_eq!(result[0].as_int_slice(), &[Some(68)]); state.update(&mut ctx, &Some(2i64)).unwrap(); result[0].clear(); state.push_result(&mut ctx, &mut result).unwrap(); assert_eq!(result[0].as_int_slice(), &[Some(2)]); state.update(&mut ctx, &Some(-1i64)).unwrap(); result[0].clear(); state.push_result(&mut ctx, &mut result).unwrap(); assert_eq!(result[0].as_int_slice(), &[Some(-1i64)]); } #[test] fn test_collation() { let mut ctx = EvalContext::default(); let cases = vec![ (Collation::Binary, true, vec!["B", "a"], "a"), (Collation::Utf8Mb4Bin, true, vec!["B", "a"], "a"), (Collation::Utf8Mb4GeneralCi, true, vec!["B", "a"], "B"), (Collation::Utf8Mb4BinNoPadding, true, vec!["B", "a"], "a"), (Collation::Binary, false, vec!["B", "a"], "B"), (Collation::Utf8Mb4Bin, false, vec!["B", "a"], "B"), (Collation::Utf8Mb4GeneralCi, false, vec!["B", "a"], "a"), (Collation::Utf8Mb4BinNoPadding, false, vec!["B", "a"], "B"), ]; for (coll, is_max, args, expected) in cases { let function = match_template_collator! { TT, match coll { Collation::TT => { if is_max { Box::new(AggFnExtremumForBytes::<TT, Max>::new()) as Box<dyn AggrFunction> } else { Box::new(AggFnExtremumForBytes::<TT, Min>::new()) as Box<dyn AggrFunction> } } } }; let mut state = function.create_state(); let mut result = [VectorValue::with_capacity(0, EvalType::Bytes)]; state.push_result(&mut ctx, &mut result).unwrap(); assert_eq!(result[0].as_bytes_slice(), &[None]); for arg in args { state .update(&mut ctx, &Some(String::from(arg).into_bytes())) .unwrap(); } result[0].clear(); state.push_result(&mut ctx, &mut result).unwrap(); assert_eq!( result[0].as_bytes_slice(), [Some(String::from(expected).into_bytes())] ); } } #[test] fn test_integration() { let max_parser = AggrFnDefinitionParserExtremum::<Max>::new(); let min_parser = AggrFnDefinitionParserExtremum::<Min>::new(); let max = ExprDefBuilder::aggr_func(ExprType::Max, FieldTypeTp::LongLong) .push_child(ExprDefBuilder::column_ref(0, FieldTypeTp::LongLong)) .build(); max_parser.check_supported(&max).unwrap(); let min = ExprDefBuilder::aggr_func(ExprType::Min, FieldTypeTp::LongLong) .push_child(ExprDefBuilder::column_ref(0, FieldTypeTp::LongLong)) .build(); min_parser.check_supported(&min).unwrap(); let src_schema = [FieldTypeTp::LongLong.into()]; let mut columns = LazyBatchColumnVec::from(vec![{ let mut col = LazyBatchColumn::decoded_with_capacity_and_tp(0, EvalType::Int); col.mut_decoded().push_int(Some(10000)); col.mut_decoded().push_int(Some(1)); col.mut_decoded().push_int(Some(23)); col.mut_decoded().push_int(Some(42)); col.mut_decoded().push_int(Some(-10000)); col.mut_decoded().push_int(None); col.mut_decoded().push_int(Some(99)); col.mut_decoded().push_int(Some(-1)); col }]); let logical_rows = vec![3, 2, 6, 5, 1, 7]; let mut schema = vec![]; let mut exp = vec![]; let mut ctx = EvalContext::default(); let max_fn = max_parser .parse(max, &mut ctx, &src_schema, &mut schema, &mut exp) .unwrap(); assert_eq!(schema.len(), 1); 
assert_eq!(schema[0].as_accessor().tp(), FieldTypeTp::LongLong); assert_eq!(exp.len(), 1); let min_fn = min_parser .parse(min, &mut ctx, &src_schema, &mut schema, &mut exp) .unwrap(); assert_eq!(schema.len(), 2); assert_eq!(schema[1].as_accessor().tp(), FieldTypeTp::LongLong); assert_eq!(exp.len(), 2); let mut ctx = EvalContext::default(); let mut max_state = max_fn.create_state(); let mut min_state = min_fn.create_state(); let mut aggr_result = [VectorValue::with_capacity(0, EvalType::Int)]; // max { let max_result = exp[0] .eval(&mut ctx, &src_schema, &mut columns, &logical_rows, 6) .unwrap(); let max_result = max_result.vector_value().unwrap(); let max_slice: &[Option<Int>] = max_result.as_ref().as_ref(); max_state .update_vector(&mut ctx, max_slice, max_result.logical_rows()) .unwrap(); max_state.push_result(&mut ctx, &mut aggr_result).unwrap(); } // min { let min_result = exp[0] .eval(&mut ctx, &src_schema, &mut columns, &logical_rows, 6) .unwrap(); let min_result = min_result.vector_value().unwrap(); let min_slice: &[Option<Int>] = min_result.as_ref().as_ref(); min_state .update_vector(&mut ctx, min_slice, min_result.logical_rows()) .unwrap(); min_state.push_result(&mut ctx, &mut aggr_result).unwrap(); } assert_eq!(aggr_result[0].as_int_slice(), &[Some(99), Some(-1i64),]); } #[test] fn test_illegal_request() { let expr = ExprDefBuilder::aggr_func(ExprType::Max, FieldTypeTp::Double) // Expect LongLong but give Real .push_child(ExprDefBuilder::column_ref(0, FieldTypeTp::LongLong)) .build(); AggrFnDefinitionParserExtremum::<Max>::new() .check_supported(&expr) .unwrap(); let src_schema = [FieldTypeTp::LongLong.into()]; let mut schema = vec![]; let mut exp = vec![]; let mut ctx = EvalContext::default(); AggrFnDefinitionParserExtremum::<Max>::new() .parse(expr, &mut ctx, &src_schema, &mut schema, &mut exp) .unwrap_err(); } }
avg_line_length: 32.018975
max_line_length: 113
alphanum_fraction: 0.568686

hexsha: 0a02754d9dbf793b4b65177fdb2f54ddf1b25540
size: 10,665
use crate::{
    config::{DataType, SinkConfig, SinkContext, SinkDescription},
    sinks::http::{HttpMethod, HttpSinkConfig},
    sinks::util::{
        encoding::{EncodingConfigWithDefault, EncodingConfiguration},
        service2::{InFlightLimit, TowerRequestConfig},
        BatchConfig, Compression,
    },
};
use http::Uri;
use indexmap::IndexMap;
use serde::{Deserialize, Serialize};
use snafu::Snafu;

#[derive(Debug, Snafu)]
enum BuildError {
    #[snafu(display(
        "Missing authentication key, must provide either 'license_key' or 'insert_key'"
    ))]
    MissingAuthParam,
}

#[derive(Deserialize, Serialize, Debug, Eq, PartialEq, Clone, Derivative)]
#[serde(rename_all = "snake_case")]
#[derivative(Default)]
pub enum NewRelicLogsRegion {
    #[derivative(Default)]
    Us,
    Eu,
}

#[derive(Deserialize, Serialize, Debug, Derivative, Clone)]
#[derivative(Default)]
pub struct NewRelicLogsConfig {
    pub license_key: Option<String>,
    pub insert_key: Option<String>,
    pub region: Option<NewRelicLogsRegion>,
    #[serde(skip_serializing_if = "skip_serializing_if_default", default)]
    pub encoding: EncodingConfigWithDefault<Encoding>,
    #[serde(default)]
    pub compression: Compression,
    #[serde(default)]
    pub batch: BatchConfig,
    #[serde(default)]
    pub request: TowerRequestConfig,
}

inventory::submit! {
    SinkDescription::new::<NewRelicLogsConfig>("new_relic_logs")
}

#[derive(Deserialize, Serialize, Debug, Eq, PartialEq, Clone, Derivative)]
#[serde(rename_all = "snake_case")]
#[derivative(Default)]
pub enum Encoding {
    #[derivative(Default)]
    Json,
}

impl From<Encoding> for crate::sinks::http::Encoding {
    fn from(v: Encoding) -> crate::sinks::http::Encoding {
        match v {
            Encoding::Json => crate::sinks::http::Encoding::Json,
        }
    }
}

// There is another one of these in `util::encoding`, but this one is specialized for New Relic.
/// For encodings, answers "Is it possible to skip serializing this value, because it's the
/// default?"
pub(crate) fn skip_serializing_if_default(e: &EncodingConfigWithDefault<Encoding>) -> bool {
    e.codec() == &Encoding::default()
}

#[typetag::serde(name = "new_relic_logs")]
impl SinkConfig for NewRelicLogsConfig {
    fn build(&self, cx: SinkContext) -> crate::Result<(super::RouterSink, super::Healthcheck)> {
        let http_conf = self.create_config()?;
        http_conf.build(cx)
    }

    fn input_type(&self) -> DataType {
        DataType::Log
    }

    fn sink_type(&self) -> &'static str {
        "new_relic_logs"
    }
}

impl NewRelicLogsConfig {
    fn create_config(&self) -> crate::Result<HttpSinkConfig> {
        let mut headers: IndexMap<String, String> = IndexMap::new();

        if let Some(license_key) = &self.license_key {
            headers.insert("X-License-Key".to_owned(), license_key.clone());
        } else if let Some(insert_key) = &self.insert_key {
            headers.insert("X-Insert-Key".to_owned(), insert_key.clone());
        } else {
            return Err(Box::new(BuildError::MissingAuthParam));
        }

        let uri = match self.region.as_ref().unwrap_or(&NewRelicLogsRegion::Us) {
            NewRelicLogsRegion::Us => Uri::from_static("https://log-api.newrelic.com/log/v1"),
            NewRelicLogsRegion::Eu => Uri::from_static("https://log-api.eu.newrelic.com/log/v1"),
        };

        let batch = self.batch.use_size_as_bytes()?;
        let batch = BatchConfig {
            // The max request size is 10MiB, so in order to be comfortably
            // within this we batch up to 5MiB.
            max_bytes: Some(batch.max_bytes.unwrap_or(bytesize::mib(5u64) as usize)),
            max_events: None,
            ..batch
        };

        let request = TowerRequestConfig {
            // The default throughput ceiling defaults are relatively
            // conservative so we crank them up for New Relic.
            in_flight_limit: (self.request.in_flight_limit).if_none(InFlightLimit::Fixed(100)),
            rate_limit_num: Some(self.request.rate_limit_num.unwrap_or(100)),
            ..self.request
        };

        Ok(HttpSinkConfig {
            uri: uri.into(),
            method: Some(HttpMethod::Post),
            healthcheck_uri: None,
            auth: None,
            headers: Some(headers),
            compression: self.compression,
            encoding: self.encoding.clone().without_default(),
            batch,
            request,
            tls: None,
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        config::SinkConfig,
        event::Event,
        sinks::util::{service2::InFlightLimit, test::build_test_server},
        test_util::next_addr,
    };
    use bytes::buf::BufExt;
    use futures::compat::Future01CompatExt;
    use futures01::{stream, Sink, Stream};
    use hyper::Method;
    use serde_json::Value;
    use std::io::BufRead;

    #[test]
    fn new_relic_logs_check_config_no_auth() {
        assert_eq!(
            format!(
                "{}",
                NewRelicLogsConfig::default().create_config().unwrap_err()
            ),
            "Missing authentication key, must provide either 'license_key' or 'insert_key'"
                .to_owned(),
        );
    }

    #[test]
    fn new_relic_logs_check_config_defaults() {
        let mut nr_config = NewRelicLogsConfig::default();
        nr_config.license_key = Some("foo".to_owned());
        let http_config = nr_config.create_config().unwrap();

        assert_eq!(
            format!("{}", http_config.uri),
            "https://log-api.newrelic.com/log/v1".to_string()
        );
        assert_eq!(http_config.method, Some(HttpMethod::Post));
        assert_eq!(http_config.encoding.codec(), &Encoding::Json.into());
        assert_eq!(
            http_config.batch.max_bytes,
            Some(bytesize::mib(5u64) as usize)
        );
        assert_eq!(
            http_config.request.in_flight_limit,
            InFlightLimit::Fixed(100)
        );
        assert_eq!(http_config.request.rate_limit_num, Some(100));
        assert_eq!(
            http_config.headers.unwrap()["X-License-Key"],
            "foo".to_owned()
        );
        assert!(http_config.tls.is_none());
        assert!(http_config.auth.is_none());
    }

    #[test]
    fn new_relic_logs_check_config_custom() {
        let mut nr_config = NewRelicLogsConfig::default();
        nr_config.insert_key = Some("foo".to_owned());
        nr_config.region = Some(NewRelicLogsRegion::Eu);
        nr_config.batch.max_size = Some(bytesize::mib(8u64) as usize);
        nr_config.request.in_flight_limit = InFlightLimit::Fixed(12);
        nr_config.request.rate_limit_num = Some(24);

        let http_config = nr_config.create_config().unwrap();

        assert_eq!(
            format!("{}", http_config.uri),
            "https://log-api.eu.newrelic.com/log/v1".to_string()
        );
        assert_eq!(http_config.method, Some(HttpMethod::Post));
        assert_eq!(http_config.encoding.codec(), &Encoding::Json.into());
        assert_eq!(
            http_config.batch.max_bytes,
            Some(bytesize::mib(8u64) as usize)
        );
        assert_eq!(
            http_config.request.in_flight_limit,
            InFlightLimit::Fixed(12)
        );
        assert_eq!(http_config.request.rate_limit_num, Some(24));
        assert_eq!(
            http_config.headers.unwrap()["X-Insert-Key"],
            "foo".to_owned()
        );
        assert!(http_config.tls.is_none());
        assert!(http_config.auth.is_none());
    }

    #[test]
    fn new_relic_logs_check_config_custom_from_toml() {
        let config = r#"
        insert_key = "foo"
        region = "eu"

        [batch]
        max_size = 8388608

        [request]
        in_flight_limit = 12
        rate_limit_num = 24
        "#;
        let nr_config: NewRelicLogsConfig = toml::from_str(&config).unwrap();

        let http_config = nr_config.create_config().unwrap();

        assert_eq!(
            format!("{}", http_config.uri),
            "https://log-api.eu.newrelic.com/log/v1".to_string()
        );
        assert_eq!(http_config.method, Some(HttpMethod::Post));
        assert_eq!(http_config.encoding.codec(), &Encoding::Json.into());
        assert_eq!(
            http_config.batch.max_bytes,
            Some(bytesize::mib(8u64) as usize)
        );
        assert_eq!(
            http_config.request.in_flight_limit,
            InFlightLimit::Fixed(12)
        );
        assert_eq!(http_config.request.rate_limit_num, Some(24));
        assert_eq!(
            http_config.headers.unwrap()["X-Insert-Key"],
            "foo".to_owned()
        );
        assert!(http_config.tls.is_none());
        assert!(http_config.auth.is_none());
    }

    #[tokio::test(core_threads = 2)]
    async fn new_relic_logs_happy_path() {
        let in_addr = next_addr();

        let mut nr_config = NewRelicLogsConfig::default();
        nr_config.license_key = Some("foo".to_owned());
        let mut http_config = nr_config.create_config().unwrap();
        http_config.uri = format!("http://{}/fake_nr", in_addr)
            .parse::<http::Uri>()
            .unwrap()
            .into();

        let (sink, _healthcheck) = http_config.build(SinkContext::new_test()).unwrap();
        let (rx, trigger, server) = build_test_server(in_addr);

        let input_lines = (0..100).map(|i| format!("msg {}", i)).collect::<Vec<_>>();
        let events = stream::iter_ok(input_lines.clone().into_iter().map(Event::from));
        let pump = sink.send_all(events);

        tokio::spawn(server);

        let _ = pump.compat().await.unwrap();
        drop(trigger);

        let output_lines = rx
            .wait()
            .map(Result::unwrap)
            .map(|(parts, body)| {
                assert_eq!(Method::POST, parts.method);
                assert_eq!("/fake_nr", parts.uri.path());
                assert_eq!(
                    parts
                        .headers
                        .get("X-License-Key")
                        .and_then(|v| v.to_str().ok()),
                    Some("foo")
                );
                body.reader()
            })
            .flat_map(BufRead::lines)
            .map(Result::unwrap)
            .flat_map(|s| -> Vec<String> {
                let vals: Vec<Value> = serde_json::from_str(&s).unwrap();
                vals.iter()
                    .map(|v| v.get("message").unwrap().as_str().unwrap().to_owned())
                    .collect()
            })
            .collect::<Vec<_>>();

        assert_eq!(input_lines, output_lines);
    }
}
avg_line_length: 32.614679
max_line_length: 97
alphanum_fraction: 0.586498

hexsha: 7a2c5ca8e243ee93f1619f9548b5fa02c1c140ac
size: 346
// rustfmt-merge_imports: true

use a::{c,d,b};
use a::{d, e, b, a, f};
use a::{f, g, c};

#[doc(hidden)]
use a::b;
use a::c;
use a::d;

use a::{c, d, e};
#[doc(hidden)]
use a::b;
use a::d;

pub use foo::bar;
use foo::{a, b, c};
pub use foo::foobar;

use a::{b::{c::*}};
use a::{b::{c::{}}};
use a::{b::{c::d}};
use a::{b::{c::{xxx, yyy, zzz}}};
avg_line_length: 13.84
max_line_length: 33
alphanum_fraction: 0.482659

hexsha: fb59c7e068c2fec14b61c3d3ef67c4ba9a6a14af
size: 80,135
use crate::{ packet::{ Mode, Route, RouteControlRequest, RouteUpdateRequest, CCP_CONTROL_DESTINATION, CCP_RESPONSE, CCP_UPDATE_DESTINATION, }, routing_table::RoutingTable, CcpRoutingAccount, CcpRoutingStore, RoutingRelation, }; use async_trait::async_trait; use futures::future::join_all; use interledger_errors::CcpRoutingStoreError; use interledger_packet::{hex::HexString, Address, ErrorCode, RejectBuilder}; use interledger_service::{ Account, AddressStore, IlpResult, IncomingRequest, IncomingService, OutgoingRequest, OutgoingService, }; use parking_lot::{Mutex, RwLock}; use ring::digest::{digest, SHA256}; use std::cmp::Ordering as StdOrdering; use std::collections::HashMap; use std::{ cmp::min, convert::TryFrom, str, sync::{ atomic::{AtomicU32, Ordering}, Arc, }, time::Duration, }; use tracing::{debug, error, trace, warn}; use uuid::Uuid; #[cfg(test)] use crate::packet::PEER_PROTOCOL_CONDITION; #[cfg(test)] use futures::TryFutureExt; #[cfg(test)] use once_cell::sync::Lazy; // TODO should the route expiry be longer? we use 30 seconds now // because the expiry shortener will lower the expiry to 30 seconds // otherwise. we could make it longer and make sure the BTP server // comes after the expiry shortener const DEFAULT_ROUTE_EXPIRY_TIME: u32 = 30000; const DEFAULT_BROADCAST_INTERVAL: u64 = 30000; const DUMMY_ROUTING_TABLE_ID: [u8; 16] = [0; 16]; fn hash(preimage: &[u8; 32]) -> [u8; 32] { let mut out = [0; 32]; out.copy_from_slice(digest(&SHA256, preimage).as_ref()); out } type NewAndWithdrawnRoutes = (Vec<Route>, Vec<String>); /// Builder for [CcpRouteManager](./CcpRouteManager.html) /// See documentation on fields for more details. pub struct CcpRouteManagerBuilder<I, O, S> { /// The next request handler that will be used both to pass on requests that are not CCP messages. next_incoming: I, /// The outgoing request handler that will be used to send outgoing CCP messages. /// Note that this service bypasses the Router because the Route Manager needs to be able to /// send messages directly to specific peers. outgoing: O, /// This represents the routing table we will forward to our peers. /// It is the same as the local_table with our own address added to the path of each route. 
store: S, ilp_address: Address, broadcast_interval: u64, } impl<I, O, S, A> CcpRouteManagerBuilder<I, O, S> where I: IncomingService<A> + Clone + Send + Sync + 'static, O: OutgoingService<A> + Clone + Send + Sync + 'static, S: AddressStore + CcpRoutingStore<Account = A> + Clone + Send + Sync + 'static, A: CcpRoutingAccount + Send + Sync + 'static, { pub fn new(ilp_address: Address, store: S, outgoing: O, next_incoming: I) -> Self { CcpRouteManagerBuilder { ilp_address, next_incoming, outgoing, store, broadcast_interval: DEFAULT_BROADCAST_INTERVAL, } } pub fn ilp_address(&mut self, ilp_address: Address) -> &mut Self { self.ilp_address = ilp_address; self } /// Set the broadcast interval (in milliseconds) pub fn broadcast_interval(&mut self, ms: u64) -> &mut Self { self.broadcast_interval = ms; self } pub fn to_service(&self) -> CcpRouteManager<I, O, S, A> { #[allow(clippy::let_and_return)] let service = CcpRouteManager { ilp_address: Arc::new(RwLock::new(self.ilp_address.clone())), next_incoming: self.next_incoming.clone(), outgoing: self.outgoing.clone(), store: self.store.clone(), forwarding_table: Arc::new(RwLock::new(RoutingTable::default())), forwarding_table_updates: Arc::new(RwLock::new(Vec::new())), last_epoch_updates_sent_for: Arc::new(AtomicU32::new(0)), local_table: Arc::new(RwLock::new(RoutingTable::default())), incoming_tables: Arc::new(RwLock::new(HashMap::new())), unavailable_accounts: Arc::new(Mutex::new(HashMap::new())), }; #[cfg(not(test))] { let broadcast_interval = self.broadcast_interval; let service_clone = service.clone(); tokio::spawn(async move { service_clone .start_broadcast_interval(broadcast_interval) .await }); } service } } #[derive(Debug)] struct BackoffParams { /// The total number of route broadcast intervals we should wait before trying again /// This is incremented for each broadcast failure max: u8, /// How many more intervals we should wait before trying to send again /// (0 means we should try again on the next loop) skip_intervals: u8, } /// The Routing Manager Service. /// /// This implements the Connector-to-Connector Protocol (CCP) /// for exchanging route updates with peers. This service handles incoming CCP messages /// and sends updates to peers. It manages the routing table in the Store and updates it /// with the best routes determined by per-account configuration and the broadcasts we have /// received from peers. #[derive(Clone)] pub struct CcpRouteManager<I, O, S, A: Account> { ilp_address: Arc<RwLock<Address>>, /// The next request handler that will be used both to pass on requests that are not CCP messages. next_incoming: I, /// The outgoing request handler that will be used to send outgoing CCP messages. /// Note that this service bypasses the Router because the Route Manager needs to be able to /// send messages directly to specific peers. outgoing: O, /// This represents the routing table we will forward to our peers. /// It is the same as the local_table with our own address added to the path of each route. forwarding_table: Arc<RwLock<RoutingTable<A>>>, last_epoch_updates_sent_for: Arc<AtomicU32>, /// These updates are stored such that index 0 is the transition from epoch 0 to epoch 1 forwarding_table_updates: Arc<RwLock<Vec<NewAndWithdrawnRoutes>>>, /// This is the routing table we have compile from configuration and /// broadcasts we have received from our peers. It is saved to the Store so that /// the Router services forwards packets according to what it says. 
local_table: Arc<RwLock<RoutingTable<A>>>, /// We store a routing table for each peer we receive Route Update Requests from. /// When the peer sends us an update, we apply that update to this view of their table. /// Updates from peers are applied to our local_table if they are better than the /// existing best route and if they do not attempt to overwrite configured routes. incoming_tables: Arc<RwLock<HashMap<Uuid, RoutingTable<A>>>>, store: S, /// If we get final errors while sending to specific accounts, we'll /// wait before trying to broadcast to them /// This maps the account ID to the number of route brodcast intervals /// we should wait before trying again unavailable_accounts: Arc<Mutex<HashMap<Uuid, BackoffParams>>>, } impl<I, O, S, A> CcpRouteManager<I, O, S, A> where I: IncomingService<A> + Clone + Send + Sync + 'static, O: OutgoingService<A> + Clone + Send + Sync + 'static, S: AddressStore + CcpRoutingStore<Account = A> + Clone + Send + Sync + 'static, A: CcpRoutingAccount + Send + Sync + 'static, { /// Returns a future that will trigger this service to update its routes and broadcast /// updates to peers on the given interval. `interval` is in milliseconds pub async fn start_broadcast_interval(&self, interval: u64) { self.request_all_routes().await; let mut interval = tokio::time::interval(Duration::from_millis(interval)); loop { interval.tick().await; // ensure we have the latest ILP Address from the store self.update_ilp_address(); // Do not consume the result if an error since we want to keep the loop going let _ = self.broadcast_routes().await; } } fn update_ilp_address(&self) { let current_ilp_address = self.ilp_address.read(); let ilp_address = self.store.get_ilp_address(); if ilp_address != *current_ilp_address { debug!( "Changing ILP address from {} to {}", *current_ilp_address, ilp_address ); // release the read lock drop(current_ilp_address); *self.ilp_address.write() = ilp_address; } } pub async fn broadcast_routes(&self) -> Result<(), CcpRoutingStoreError> { self.update_best_routes(None).await?; self.send_route_updates().await } /// Request routes from all the peers we are willing to receive routes from. /// This is mostly intended for when the CCP server starts up and doesn't have any routes from peers. async fn request_all_routes(&self) { let result = self.store.get_accounts_to_receive_routes_from().await; let accounts = result.unwrap_or_else(|_| Vec::new()); join_all( accounts .into_iter() .map(|account| self.send_route_control_request(account, DUMMY_ROUTING_TABLE_ID, 0)), ) .await; } /// Handle a CCP Route Control Request. If this is from an account that we broadcast routes to, /// we'll send an outgoing Route Update Request to them. 
async fn handle_route_control_request(&self, request: IncomingRequest<A>) -> IlpResult { if !request.from.should_send_routes() { return Err(RejectBuilder { code: ErrorCode::F00_BAD_REQUEST, message: b"We are not configured to send routes to you, sorry", triggered_by: Some(&self.ilp_address.read()), data: &[], } .build()); } let control = RouteControlRequest::try_from(&request.prepare); if control.is_err() { return Err(RejectBuilder { code: ErrorCode::F00_BAD_REQUEST, message: b"Invalid route control request", triggered_by: Some(&self.ilp_address.read()), data: &[], } .build()); } let control = control.unwrap(); debug!( "Got route control request from account {} (id: {}): {:?}", request.from.username(), request.from.id(), control ); // TODO stop sending updates if they are in Idle mode if control.mode == Mode::Sync { // Don't skip them in the route update broadcasts anymore since this // tells us that they are online // TODO what happens if they can send to us but we can't send to them? { trace!("Checking whether account was previously listed as unavailable"); let mut unavailable_accounts = self.unavailable_accounts.lock(); if unavailable_accounts.remove(&request.from.id()).is_some() { debug!("Account {} (id: {}) is no longer unavailable, will resume broadcasting routes to it", request.from.username(), request.from.id()); } } let (from_epoch_index, to_epoch_index) = { let forwarding_table = self.forwarding_table.read(); let to_epoch_index = forwarding_table.epoch(); let from_epoch_index = if control.last_known_routing_table_id != forwarding_table.id() { 0 } else { min(control.last_known_epoch, to_epoch_index) }; (from_epoch_index, to_epoch_index) }; #[cfg(test)] self.send_route_update(request.from.clone(), from_epoch_index, to_epoch_index) .await; #[cfg(not(test))] { tokio::spawn({ let self_clone = self.clone(); async move { self_clone .send_route_update( request.from.clone(), from_epoch_index, to_epoch_index, ) .await } }); } } Ok(CCP_RESPONSE.clone()) } /// Remove invalid routes before processing the Route Update Request #[allow(clippy::cognitive_complexity)] fn filter_routes(&self, mut update: RouteUpdateRequest) -> RouteUpdateRequest { update.new_routes = update .new_routes .into_iter() .filter(|route| { let ilp_address = self.ilp_address.read(); let address_scheme = (*ilp_address).scheme(); if !route.prefix.starts_with(address_scheme) { warn!("Got route for a different global prefix: {:?}", route); false } else if route.prefix.len() <= address_scheme.len() + 1 { // note the + 1 is due to address_scheme not including a trailing "." warn!("Got route broadcast for the global prefix: {:?}", route); false } else if route.prefix.starts_with(&ilp_address as &str) { trace!("Ignoring route broadcast for a prefix that starts with our own address: {:?}", route); false } else if route.path.iter().any(|p| p == &ilp_address as &str) { trace!( "Ignoring route broadcast for a route that includes us: {:?}", route ); false } else { true } }) .collect(); update } /// Check if this Route Update Request is valid and, if so, apply any updates it contains. /// If updates are applied to the Incoming Routing Table for this peer, we will /// then check whether those routes are better than the current best ones we have in the /// Local Routing Table. 
async fn handle_route_update_request(&self, request: IncomingRequest<A>) -> IlpResult { // Ignore the request if we don't accept routes from them if !request.from.should_receive_routes() { return Err(RejectBuilder { code: ErrorCode::F00_BAD_REQUEST, message: b"Your route broadcasts are not accepted here", triggered_by: Some(&self.ilp_address.read()), data: &[], } .build()); } let update = RouteUpdateRequest::try_from(&request.prepare); if update.is_err() { return Err(RejectBuilder { code: ErrorCode::F00_BAD_REQUEST, message: b"Invalid route update request", triggered_by: Some(&self.ilp_address.read()), data: &[], } .build()); } let update = update.unwrap(); debug!( "Got route update request from account {}: {:?}", request.from.id(), update ); // Filter out routes that don't make sense or that we won't accept let update = self.filter_routes(update); // Ensure the mutex gets dropped before the async block let result = { let mut incoming_tables = self.incoming_tables.write(); if !&incoming_tables.contains_key(&request.from.id()) { incoming_tables.insert( request.from.id(), RoutingTable::new(update.routing_table_id), ); } incoming_tables .get_mut(&request.from.id()) .expect("Should have inserted a routing table for this account") .handle_update_request(request.from.clone(), update) }; // Update the routing table we maintain for the account we got this from. // Figure out whether we need to update our routes for any of the prefixes // that were included in this route update. match result { Ok(prefixes_updated) => { if prefixes_updated.is_empty() { trace!("Route update request did not contain any prefixes we need to update our routes for"); return Ok(CCP_RESPONSE.clone()); } debug!( "Recalculating best routes for prefixes: {}", prefixes_updated.join(", ") ); #[cfg(not(test))] { tokio::spawn({ let self_clone = self.clone(); async move { self_clone.update_best_routes(Some(prefixes_updated)).await } }); } #[cfg(test)] { let ilp_address = self.ilp_address.clone(); self.update_best_routes(Some(prefixes_updated)) .map_err(move |_| { RejectBuilder { code: ErrorCode::T00_INTERNAL_ERROR, message: b"Error processing route update", data: &[], triggered_by: Some(&ilp_address.read()), } .build() }) .await?; } Ok(CCP_RESPONSE.clone()) } Err(message) => { warn!("Error handling incoming Route Update request, sending a Route Control request to get updated routing table info from peer. Error was: {}", &message); let reject = RejectBuilder { code: ErrorCode::F00_BAD_REQUEST, message: &message.as_bytes(), data: &[], triggered_by: Some(&self.ilp_address.read()), } .build(); let table = &self.incoming_tables.read().clone()[&request.from.id()]; #[cfg(not(test))] tokio::spawn({ let table = table.clone(); let self_clone = self.clone(); async move { self_clone .send_route_control_request( request.from.clone(), table.id(), table.epoch(), ) .await; } }); #[cfg(test)] self.send_route_control_request(request.from.clone(), table.id(), table.epoch()) .await; Err(reject) } } } /// Request a Route Update from the specified peer. This is sent when we get /// a Route Update Request from them with a gap in the epochs since the last one we saw. 
async fn send_route_control_request( &self, account: A, last_known_routing_table_id: [u8; 16], last_known_epoch: u32, ) { let account_id = account.id(); let control = RouteControlRequest { mode: Mode::Sync, last_known_routing_table_id, last_known_epoch, features: Vec::new(), }; debug!("Sending Route Control Request to account: {} (id: {}), last known table id: {:?}, last known epoch: {}", account.username(), account_id, HexString(&last_known_routing_table_id[..]), last_known_epoch); let prepare = control.to_prepare(); let result = self .clone() .outgoing .send_request(OutgoingRequest { // TODO If we start charging or paying for CCP broadcasts we'll need to // have a separate account that we send from, but for now it's fine to // set the peer's account as the from account as well as the to account from: account.clone(), to: account, original_amount: prepare.amount(), prepare, }) .await; if let Err(err) = result { warn!( "Error sending Route Control Request to account {}: {:?}", account_id, err ) } } /// Check whether the Local Routing Table currently has the best routes for the /// given prefixes. This is triggered when we get an incoming Route Update Request /// with some new or modified routes that might be better than our existing ones. /// /// If prefixes is None, this will check the best routes for all local and configured prefixes. async fn update_best_routes( &self, prefixes: Option<Vec<String>>, ) -> Result<(), CcpRoutingStoreError> { let local_table = self.local_table.clone(); let forwarding_table = self.forwarding_table.clone(); let forwarding_table_updates = self.forwarding_table_updates.clone(); let incoming_tables = self.incoming_tables.clone(); let ilp_address = self.ilp_address.read().clone(); let mut store = self.store.clone(); let (local_routes, configured_routes) = self.store.get_local_and_configured_routes().await?; // TODO: Should we extract this to a function and #[inline] it? 
let (better_routes, withdrawn_routes) = { // Note we only use a read lock here and later get a write lock if we need to update the table let local_table = local_table.read(); let incoming_tables = incoming_tables.read(); // Either check the given prefixes or check all of our local and configured routes let prefixes_to_check: Box<dyn Iterator<Item = &str>> = if let Some(ref prefixes) = prefixes { Box::new(prefixes.iter().map(|prefix| prefix.as_str())) } else { let routes = configured_routes.iter().chain(local_routes.iter()); Box::new(routes.map(|(prefix, _account)| prefix.as_str())) }; // Check all the prefixes to see which ones we have different routes for // and which ones we don't have routes for anymore let mut better_routes: Vec<(&str, A, Route)> = Vec::with_capacity(prefixes_to_check.size_hint().0); let mut withdrawn_routes: Vec<&str> = Vec::new(); for prefix in prefixes_to_check { // See which prefixes there is now a better route for if let Some((best_next_account, best_route)) = get_best_route_for_prefix( &local_routes, &configured_routes, &incoming_tables, prefix, ) { if let Some((ref next_account, ref _route)) = local_table.get_route(prefix) { if next_account.id() == best_next_account.id() { continue; } else { better_routes.push(( prefix, best_next_account.clone(), best_route.clone(), )); } } else { better_routes.push((prefix, best_next_account, best_route)); } } else { // No longer have a route to this prefix withdrawn_routes.push(prefix); } } (better_routes, withdrawn_routes) }; // Update the local and forwarding tables if !better_routes.is_empty() || !withdrawn_routes.is_empty() { let update_routes = { let mut local_table = local_table.write(); let mut forwarding_table = forwarding_table.write(); let mut forwarding_table_updates = forwarding_table_updates.write(); let mut new_routes: Vec<Route> = Vec::with_capacity(better_routes.len()); for (prefix, account, mut route) in better_routes { debug!( "Setting new route for prefix: {} -> Account: {} (id: {})", prefix, account.username(), account.id(), ); local_table.set_route(prefix.to_string(), account.clone(), route.clone()); // Update the forwarding table // Don't advertise routes that don't start with the global prefix // or that advertise the whole global prefix let address_scheme = ilp_address.scheme(); let correct_address_scheme = route.prefix.starts_with(address_scheme) && route.prefix != address_scheme; // We do want to advertise our address let is_our_address = route.prefix == &ilp_address as &str; // Don't advertise local routes because advertising only our address // will be enough to ensure the packet gets to us and we can route it // to the correct account on our node let is_local_route = route.prefix.starts_with(&ilp_address as &str) && route.path.is_empty(); let not_local_route = is_our_address || !is_local_route; // Don't include routes we're also withdrawing let not_withdrawn_route = !withdrawn_routes.contains(&prefix); if correct_address_scheme && not_local_route && not_withdrawn_route { let old_route = forwarding_table.get_route(prefix); if old_route.is_none() || old_route.unwrap().0.id() != account.id() { route.path.insert(0, ilp_address.to_string()); // Each hop hashes the auth before forwarding route.auth = hash(&route.auth); forwarding_table.set_route( prefix.to_string(), account.clone(), route.clone(), ); new_routes.push(route); } } } for prefix in withdrawn_routes.iter() { debug!("Removed route for prefix: {}", prefix); local_table.delete_route(prefix); forwarding_table.delete_route(prefix); } let epoch = 
forwarding_table.increment_epoch(); forwarding_table_updates.push(( new_routes, withdrawn_routes .into_iter() .map(|s| s.to_string()) .collect(), )); debug_assert_eq!(epoch as usize + 1, forwarding_table_updates.len()); store.set_routes(local_table.get_simplified_table()) }; update_routes.await } else { // The routing table hasn't changed Ok(()) } } /// Send RouteUpdateRequests to all peers that we send routing messages to async fn send_route_updates(&self) -> Result<(), CcpRoutingStoreError> { let self_clone = self.clone(); let unavailable_accounts = self.unavailable_accounts.clone(); // Check which accounts we should skip this iteration let accounts_to_skip: Vec<Uuid> = { trace!("Checking accounts to skip"); let mut unavailable_accounts = self.unavailable_accounts.lock(); let mut skip = Vec::new(); for (id, mut backoff) in unavailable_accounts.iter_mut() { if backoff.skip_intervals > 0 { skip.push(*id); } backoff.skip_intervals = backoff.skip_intervals.saturating_sub(1); } skip }; trace!("Skipping accounts: {:?}", accounts_to_skip); let mut accounts = self .store .get_accounts_to_send_routes_to(accounts_to_skip) .await?; let to_epoch_index = self_clone.forwarding_table.read().epoch(); let from_epoch_index = self_clone .last_epoch_updates_sent_for .swap(to_epoch_index, Ordering::SeqCst); let route_update_request = self_clone.create_route_update(from_epoch_index, to_epoch_index); let prepare = route_update_request.to_prepare(); accounts.sort_unstable_by_key(|a| a.id().to_string()); accounts.dedup_by_key(|a| a.id()); let broadcasting = !accounts.is_empty(); if broadcasting { trace!( "Sending route update for epochs {} - {} to accounts: {:?} {}", from_epoch_index, to_epoch_index, route_update_request, { let account_list: Vec<String> = accounts .iter() .map(|a| { format!( "{} (id: {}, ilp_address: {})", a.username(), a.id(), a.ilp_address() ) }) .collect(); account_list.join(", ") } ); // TODO: How can this be converted to a join_all expression? // futures 0.1 version worked by doing `join_all(accounts.into_iter().map(...)).and_then(...)` // It is odd that the same but with `.await` instead does not work. 
let mut outgoing = self_clone.outgoing.clone(); let mut results = Vec::new(); for account in accounts.into_iter() { let res = outgoing .send_request(OutgoingRequest { from: account.clone(), to: account.clone(), original_amount: prepare.amount(), prepare: prepare.clone(), }) .await; results.push((account, res)); } // Handle the results of the route broadcast attempts trace!("Updating unavailable accounts"); let mut unavailable_accounts = unavailable_accounts.lock(); for (account, result) in results.into_iter() { match (account.routing_relation(), result) { (RoutingRelation::Child, Err(err)) => { if let Some(backoff) = unavailable_accounts.get_mut(&account.id()) { // Increase the number of intervals we'll skip // (but don't overflow the value it's stored in) backoff.max = backoff.max.saturating_add(1); backoff.skip_intervals = backoff.max; } else { // Skip sending to this account next time unavailable_accounts.insert( account.id(), BackoffParams { max: 1, skip_intervals: 1, }, ); } trace!("Error sending route update to {:?} account {} (id: {}), increased backoff to {}: {:?}", account.routing_relation(), account.username(), account.id(), unavailable_accounts[&account.id()].max, err); } (_, Err(err)) => { warn!( "Error sending route update to {:?} account {} (id: {}): {:?}", account.routing_relation(), account.username(), account.id(), err ); } (_, Ok(_)) => { if unavailable_accounts.remove(&account.id()).is_some() { debug!("Account {} (id: {}) is no longer unavailable, resuming route broadcasts", account.username(), account.id()); } } } } Ok(()) } else { trace!("No accounts to broadcast routes to"); Ok(()) } } /// Create a RouteUpdateRequest representing the given range of Forwarding Routing Table epochs. /// If the epoch range is not specified, it will create an update for the last epoch only. fn create_route_update( &self, from_epoch_index: u32, to_epoch_index: u32, ) -> RouteUpdateRequest { let (start, end) = (from_epoch_index as usize, to_epoch_index as usize); let (routing_table_id, current_epoch_index) = { let table = self.forwarding_table.read(); (table.id(), table.epoch()) }; let forwarding_table_updates = self.forwarding_table_updates.read(); let epochs_to_take = end.saturating_sub(start); // Merge the new routes and withdrawn routes from all of the given epochs let mut new_routes: Vec<Route> = Vec::with_capacity(epochs_to_take); let mut withdrawn_routes: Vec<String> = Vec::new(); // Include our own prefix if its the first update // TODO this might not be the right place to send our prefix // (the reason we don't include our prefix in the forwarding table // or the updates is that there isn't necessarily an Account that // corresponds to this ILP address) if start == 0 { new_routes.push(Route { prefix: self.ilp_address.read().to_string(), path: Vec::new(), // TODO what should we include here? 
auth: [0; 32], props: Vec::new(), }); } // Iterate through each of the given epochs for (new, withdrawn) in forwarding_table_updates .iter() .skip(start) .take(epochs_to_take) { for new_route in new { new_routes.push(new_route.clone()); // If the route was previously withdrawn, ignore that now since it was added back if withdrawn_routes.contains(&new_route.prefix) { withdrawn_routes = withdrawn_routes .into_iter() .filter(|prefix| prefix != &new_route.prefix) .collect(); } } for withdrawn_route in withdrawn { withdrawn_routes.push(withdrawn_route.clone()); // If the route was previously added, ignore that since it was withdrawn later if new_routes .iter() .any(|route| route.prefix.as_str() == withdrawn_route.as_str()) { new_routes = new_routes .into_iter() .filter(|route| route.prefix.as_str() != withdrawn_route.as_str()) .collect(); } } } RouteUpdateRequest { routing_table_id, from_epoch_index, to_epoch_index, current_epoch_index, new_routes, withdrawn_routes, speaker: self.ilp_address.read().clone(), hold_down_time: DEFAULT_ROUTE_EXPIRY_TIME, } } /// Send a Route Update Request to a specific account for the given epoch range. /// This is used when the peer has fallen behind and has requested a specific range of updates. async fn send_route_update(&self, account: A, from_epoch_index: u32, to_epoch_index: u32) { let prepare = self .create_route_update(from_epoch_index, to_epoch_index) .to_prepare(); let account_id = account.id(); debug!( "Sending individual route update to account: {} for epochs from: {} to: {}", account_id, from_epoch_index, to_epoch_index ); let result = self .outgoing .clone() .send_request(OutgoingRequest { from: account.clone(), to: account, original_amount: prepare.amount(), prepare, }) .await; if let Err(err) = result { error!( "Error sending route update to account {}: {:?}", account_id, err ) } } } fn get_best_route_for_prefix<A: CcpRoutingAccount>( local_routes: &HashMap<String, A>, configured_routes: &HashMap<String, A>, incoming_tables: &HashMap<Uuid, RoutingTable<A>>, prefix: &str, ) -> Option<(A, Route)> { // Check if we have a configured route for that specific prefix // or any shorter prefix ("example.a.b.c" will match "example.a.b" and "example.a") // Note that this logic is duplicated from the Address type. We are not using // Addresses here because the prefixes may not be valid ILP addresses ("example." 
is // a valid prefix but not a valid address) let segments: Vec<&str> = prefix.split(|c| c == '.').collect(); for i in 0..segments.len() { let prefix = &segments[0..segments.len() - i].join("."); if let Some(account) = configured_routes.get(prefix) { return Some(( account.clone(), Route { prefix: account.ilp_address().to_string(), auth: [0; 32], path: Vec::new(), props: Vec::new(), }, )); } } if let Some(account) = local_routes.get(prefix) { return Some(( account.clone(), Route { prefix: account.ilp_address().to_string(), auth: [0; 32], path: Vec::new(), props: Vec::new(), }, )); } let mut candidate_routes = incoming_tables .values() .filter_map(|incoming_table| incoming_table.get_route(prefix)); if let Some((account, route)) = candidate_routes.next() { let (best_account, best_route) = candidate_routes.fold( (account, route), |(best_account, best_route), (account, route)| { // Prioritize child > peer > parent match best_account .routing_relation() .cmp(&account.routing_relation()) { StdOrdering::Greater => (best_account, best_route), StdOrdering::Less => (account, route), _ => { // Prioritize shortest path match best_route.path.len().cmp(&route.path.len()) { StdOrdering::Less => (best_account, best_route), StdOrdering::Greater => (account, route), _ => { // Finally base it on account ID if best_account.id().to_string() < account.id().to_string() { (best_account, best_route) } else { (account, route) } } } } } }, ); Some((best_account.clone(), best_route.clone())) } else { None } } #[async_trait] impl<I, O, S, A> IncomingService<A> for CcpRouteManager<I, O, S, A> where I: IncomingService<A> + Clone + Send + Sync + 'static, O: OutgoingService<A> + Clone + Send + Sync + 'static, S: AddressStore + CcpRoutingStore<Account = A> + Clone + Send + Sync + 'static, A: CcpRoutingAccount + Send + Sync + 'static, { /// Handle the IncomingRequest if it is a CCP protocol message or /// pass it on to the next handler if not async fn handle_request(&mut self, request: IncomingRequest<A>) -> IlpResult { let destination = request.prepare.destination(); if destination == *CCP_CONTROL_DESTINATION { self.handle_route_control_request(request).await } else if destination == *CCP_UPDATE_DESTINATION { self.handle_route_update_request(request).await } else { self.next_incoming.handle_request(request).await } } } #[cfg(test)] mod ranking_routes { use super::*; use crate::test_helpers::*; use crate::RoutingRelation; use std::iter::FromIterator; static LOCAL: Lazy<HashMap<String, TestAccount>> = Lazy::new(|| { HashMap::from_iter(vec![ ( "example.a".to_string(), TestAccount::new(Uuid::from_slice(&[1; 16]).unwrap(), "example.local.one"), ), ( "example.b".to_string(), TestAccount::new(Uuid::from_slice(&[2; 16]).unwrap(), "example.local.two"), ), ( "example.c".to_string(), TestAccount::new(Uuid::from_slice(&[3; 16]).unwrap(), "example.local.three"), ), ]) }); static CONFIGURED: Lazy<HashMap<String, TestAccount>> = Lazy::new(|| { HashMap::from_iter(vec![ ( "example.a".to_string(), TestAccount::new(Uuid::from_slice(&[4; 16]).unwrap(), "example.local.four"), ), ( "example.b".to_string(), TestAccount::new(Uuid::from_slice(&[5; 16]).unwrap(), "example.local.five"), ), ]) }); static INCOMING: Lazy<HashMap<Uuid, RoutingTable<TestAccount>>> = Lazy::new(|| { let mut child_table = RoutingTable::default(); let mut child = TestAccount::new(Uuid::from_slice(&[6; 16]).unwrap(), "example.child"); child.relation = RoutingRelation::Child; child_table.add_route( child, Route { prefix: "example.d".to_string(), path: 
vec!["example.one".to_string()], auth: [0; 32], props: Vec::new(), }, ); let mut peer_table_1 = RoutingTable::default(); let peer_1 = TestAccount::new(Uuid::from_slice(&[7; 16]).unwrap(), "example.peer1"); peer_table_1.add_route( peer_1.clone(), Route { prefix: "example.d".to_string(), path: Vec::new(), auth: [0; 32], props: Vec::new(), }, ); peer_table_1.add_route( peer_1.clone(), Route { prefix: "example.e".to_string(), path: vec!["example.one".to_string()], auth: [0; 32], props: Vec::new(), }, ); peer_table_1.add_route( peer_1, Route { // This route should be overridden by the configured "example.a" route prefix: "example.a.sub-prefix".to_string(), path: vec!["example.one".to_string()], auth: [0; 32], props: Vec::new(), }, ); let mut peer_table_2 = RoutingTable::default(); let peer_2 = TestAccount::new(Uuid::from_slice(&[8; 16]).unwrap(), "example.peer2"); peer_table_2.add_route( peer_2, Route { prefix: "example.e".to_string(), path: vec!["example.one".to_string(), "example.two".to_string()], auth: [0; 32], props: Vec::new(), }, ); HashMap::from_iter(vec![ (Uuid::from_slice(&[6; 16]).unwrap(), child_table), (Uuid::from_slice(&[7; 16]).unwrap(), peer_table_1), (Uuid::from_slice(&[8; 16]).unwrap(), peer_table_2), ]) }); #[test] fn prioritizes_configured_routes() { let best_route = get_best_route_for_prefix(&LOCAL, &CONFIGURED, &INCOMING, "example.a"); assert_eq!( best_route.unwrap().0.id(), Uuid::from_slice(&[4; 16]).unwrap() ); } #[test] fn prioritizes_shorter_configured_routes() { let best_route = get_best_route_for_prefix(&LOCAL, &CONFIGURED, &INCOMING, "example.a.sub-prefix"); assert_eq!( best_route.unwrap().0.id(), Uuid::from_slice(&[4; 16]).unwrap() ); } #[test] fn prioritizes_local_routes_over_broadcasted_ones() { let best_route = get_best_route_for_prefix(&LOCAL, &CONFIGURED, &INCOMING, "example.c"); assert_eq!( best_route.unwrap().0.id(), Uuid::from_slice(&[3; 16]).unwrap() ); } #[test] fn prioritizes_children_over_peers() { let best_route = get_best_route_for_prefix(&LOCAL, &CONFIGURED, &INCOMING, "example.d"); assert_eq!( best_route.unwrap().0.id(), Uuid::from_slice(&[6; 16]).unwrap() ); } #[test] fn prioritizes_shorter_paths() { let best_route = get_best_route_for_prefix(&LOCAL, &CONFIGURED, &INCOMING, "example.e"); assert_eq!( best_route.unwrap().0.id(), Uuid::from_slice(&[7; 16]).unwrap() ); } #[test] fn returns_none_for_no_route() { let best_route = get_best_route_for_prefix(&LOCAL, &CONFIGURED, &INCOMING, "example.z"); assert!(best_route.is_none()); } } #[cfg(test)] mod handle_route_control_request { use super::*; use crate::fixtures::*; use crate::test_helpers::*; use interledger_packet::PrepareBuilder; use std::time::{Duration, SystemTime}; #[tokio::test] async fn handles_valid_request() { test_service_with_routes() .0 .handle_request(IncomingRequest { prepare: CONTROL_REQUEST.to_prepare(), from: ROUTING_ACCOUNT.clone(), }) .await .unwrap(); } #[tokio::test] async fn rejects_from_non_sending_account() { let result = test_service() .handle_request(IncomingRequest { prepare: CONTROL_REQUEST.to_prepare(), from: NON_ROUTING_ACCOUNT.clone(), }) .await; assert!(result.is_err()); assert_eq!( str::from_utf8(result.unwrap_err().message()).unwrap(), "We are not configured to send routes to you, sorry" ); } #[tokio::test] async fn rejects_invalid_packet() { let result = test_service() .handle_request(IncomingRequest { prepare: PrepareBuilder { destination: CCP_CONTROL_DESTINATION.clone(), amount: 0, expires_at: SystemTime::now() + Duration::from_secs(30), data: &[], 
execution_condition: &PEER_PROTOCOL_CONDITION, } .build(), from: ROUTING_ACCOUNT.clone(), }) .await; assert!(result.is_err()); assert_eq!( str::from_utf8(result.unwrap_err().message()).unwrap(), "Invalid route control request" ); } #[tokio::test] async fn sends_update_in_response() { let (mut service, outgoing_requests) = test_service_with_routes(); (*service.forwarding_table.write()).set_id([0; 16]); service.update_best_routes(None).await.unwrap(); service .handle_request(IncomingRequest { from: ROUTING_ACCOUNT.clone(), prepare: RouteControlRequest { last_known_routing_table_id: [0; 16], mode: Mode::Sync, last_known_epoch: 0, features: Vec::new(), } .to_prepare(), }) .await .unwrap(); let request: &OutgoingRequest<TestAccount> = &outgoing_requests.lock()[0]; assert_eq!(request.to.id(), ROUTING_ACCOUNT.id()); let update = RouteUpdateRequest::try_from(&request.prepare).unwrap(); assert_eq!(update.routing_table_id, [0; 16]); assert_eq!(update.from_epoch_index, 0); assert_eq!(update.to_epoch_index, 1); assert_eq!(update.current_epoch_index, 1); assert_eq!(update.new_routes.len(), 3); } #[tokio::test] async fn sends_whole_table_if_id_is_different() { let (mut service, outgoing_requests) = test_service_with_routes(); service.update_best_routes(None).await.unwrap(); service .handle_request(IncomingRequest { from: ROUTING_ACCOUNT.clone(), prepare: RouteControlRequest { last_known_routing_table_id: [0; 16], mode: Mode::Sync, last_known_epoch: 32, features: Vec::new(), } .to_prepare(), }) .await .unwrap(); let routing_table_id = service.forwarding_table.read().id(); let request: &OutgoingRequest<TestAccount> = &outgoing_requests.lock()[0]; assert_eq!(request.to.id(), ROUTING_ACCOUNT.id()); let update = RouteUpdateRequest::try_from(&request.prepare).unwrap(); assert_eq!(update.routing_table_id, routing_table_id); assert_eq!(update.from_epoch_index, 0); assert_eq!(update.to_epoch_index, 1); assert_eq!(update.current_epoch_index, 1); assert_eq!(update.new_routes.len(), 3); } } #[cfg(test)] mod handle_route_update_request { use super::*; use crate::fixtures::*; use crate::test_helpers::*; use interledger_packet::PrepareBuilder; use std::{ iter::FromIterator, time::{Duration, SystemTime}, }; #[tokio::test] async fn handles_valid_request() { let mut service = test_service(); let mut update = UPDATE_REQUEST_SIMPLE.clone(); update.to_epoch_index = 1; update.from_epoch_index = 0; service .handle_request(IncomingRequest { prepare: update.to_prepare(), from: ROUTING_ACCOUNT.clone(), }) .await .unwrap(); } #[tokio::test] async fn rejects_from_child_account() { let result = test_service() .handle_request(IncomingRequest { prepare: UPDATE_REQUEST_SIMPLE.to_prepare(), from: CHILD_ACCOUNT.clone(), }) .await; assert!(result.is_err()); assert_eq!( str::from_utf8(result.unwrap_err().message()).unwrap(), "Your route broadcasts are not accepted here", ); } #[tokio::test] async fn rejects_from_non_routing_account() { let result = test_service() .handle_request(IncomingRequest { prepare: UPDATE_REQUEST_SIMPLE.to_prepare(), from: NON_ROUTING_ACCOUNT.clone(), }) .await; assert!(result.is_err()); assert_eq!( str::from_utf8(result.unwrap_err().message()).unwrap(), "Your route broadcasts are not accepted here", ); } #[tokio::test] async fn rejects_invalid_packet() { let result = test_service() .handle_request(IncomingRequest { prepare: PrepareBuilder { destination: CCP_UPDATE_DESTINATION.clone(), amount: 0, expires_at: SystemTime::now() + Duration::from_secs(30), data: &[], execution_condition: &PEER_PROTOCOL_CONDITION, } 
.build(), from: ROUTING_ACCOUNT.clone(), }) .await; assert!(result.is_err()); assert_eq!( str::from_utf8(result.unwrap_err().message()).unwrap(), "Invalid route update request" ); } #[tokio::test] async fn adds_table_on_first_request() { let mut service = test_service(); let mut update = UPDATE_REQUEST_SIMPLE.clone(); update.to_epoch_index = 1; update.from_epoch_index = 0; service .handle_request(IncomingRequest { prepare: update.to_prepare(), from: ROUTING_ACCOUNT.clone(), }) .await .unwrap(); assert_eq!(service.incoming_tables.read().len(), 1); } #[tokio::test] async fn filters_routes_with_other_address_scheme() { let service = test_service(); let mut request = UPDATE_REQUEST_SIMPLE.clone(); request.new_routes.push(Route { prefix: "example.valid".to_string(), path: Vec::new(), auth: [0; 32], props: Vec::new(), }); request.new_routes.push(Route { prefix: "other.prefix".to_string(), path: Vec::new(), auth: [0; 32], props: Vec::new(), }); let request = service.filter_routes(request); assert_eq!(request.new_routes.len(), 1); assert_eq!(request.new_routes[0].prefix, "example.valid".to_string()); } #[tokio::test] async fn filters_routes_for_address_scheme() { let service = test_service(); let mut request = UPDATE_REQUEST_SIMPLE.clone(); request.new_routes.push(Route { prefix: "example.valid".to_string(), path: Vec::new(), auth: [0; 32], props: Vec::new(), }); request.new_routes.push(Route { prefix: "example.".to_string(), path: Vec::new(), auth: [0; 32], props: Vec::new(), }); let request = service.filter_routes(request); assert_eq!(request.new_routes.len(), 1); assert_eq!(request.new_routes[0].prefix, "example.valid".to_string()); } #[tokio::test] async fn filters_routing_loops() { let service = test_service(); let mut request = UPDATE_REQUEST_SIMPLE.clone(); request.new_routes.push(Route { prefix: "example.valid".to_string(), path: vec![ "example.a".to_string(), service.ilp_address.read().to_string(), "example.b".to_string(), ], auth: [0; 32], props: Vec::new(), }); request.new_routes.push(Route { prefix: "example.valid".to_string(), path: Vec::new(), auth: [0; 32], props: Vec::new(), }); let request = service.filter_routes(request); assert_eq!(request.new_routes.len(), 1); assert_eq!(request.new_routes[0].prefix, "example.valid".to_string()); } #[tokio::test] async fn filters_own_prefix_routes() { let service = test_service(); let mut request = UPDATE_REQUEST_SIMPLE.clone(); request.new_routes.push(Route { prefix: "example.connector.invalid-route".to_string(), path: Vec::new(), auth: [0; 32], props: Vec::new(), }); request.new_routes.push(Route { prefix: "example.valid".to_string(), path: Vec::new(), auth: [0; 32], props: Vec::new(), }); let request = service.filter_routes(request); assert_eq!(request.new_routes.len(), 1); assert_eq!(request.new_routes[0].prefix, "example.valid".to_string()); } #[tokio::test] async fn updates_local_routing_table() { let mut service = test_service(); let mut request = UPDATE_REQUEST_COMPLEX.clone(); request.to_epoch_index = 1; request.from_epoch_index = 0; service .handle_request(IncomingRequest { from: ROUTING_ACCOUNT.clone(), prepare: request.to_prepare(), }) .await .unwrap(); assert_eq!( (*service.local_table.read()) .get_route("example.prefix1") .unwrap() .0 .id(), ROUTING_ACCOUNT.id() ); assert_eq!( (*service.local_table.read()) .get_route("example.prefix2") .unwrap() .0 .id(), ROUTING_ACCOUNT.id() ); } #[tokio::test] async fn writes_local_routing_table_to_store() { let mut service = test_service(); let mut request = UPDATE_REQUEST_COMPLEX.clone(); 
request.to_epoch_index = 1; request.from_epoch_index = 0; service .handle_request(IncomingRequest { from: ROUTING_ACCOUNT.clone(), prepare: request.to_prepare(), }) .await .unwrap(); assert_eq!( service .store .routes .lock() .get("example.prefix1") .unwrap() .id(), ROUTING_ACCOUNT.id() ); assert_eq!( service .store .routes .lock() .get("example.prefix2") .unwrap() .id(), ROUTING_ACCOUNT.id() ); } #[tokio::test] async fn doesnt_overwrite_configured_or_local_routes() { let mut service = test_service(); let id1 = Uuid::from_slice(&[1; 16]).unwrap(); let id2 = Uuid::from_slice(&[2; 16]).unwrap(); let store = TestStore::with_routes( HashMap::from_iter(vec![( "example.prefix1".to_string(), TestAccount::new(id1, "example.account9"), )]), HashMap::from_iter(vec![( "example.prefix2".to_string(), TestAccount::new(id2, "example.account10"), )]), ); service.store = store; let mut request = UPDATE_REQUEST_COMPLEX.clone(); request.to_epoch_index = 1; request.from_epoch_index = 0; service .handle_request(IncomingRequest { from: ROUTING_ACCOUNT.clone(), prepare: request.to_prepare(), }) .await .unwrap(); assert_eq!( (*service.local_table.read()) .get_route("example.prefix1") .unwrap() .0 .id(), id1 ); assert_eq!( (*service.local_table.read()) .get_route("example.prefix2") .unwrap() .0 .id(), id2 ); } #[tokio::test] async fn removes_withdrawn_routes() { let mut service = test_service(); let mut request = UPDATE_REQUEST_COMPLEX.clone(); request.to_epoch_index = 1; request.from_epoch_index = 0; service .handle_request(IncomingRequest { from: ROUTING_ACCOUNT.clone(), prepare: request.to_prepare(), }) .await .unwrap(); service .handle_request(IncomingRequest { from: ROUTING_ACCOUNT.clone(), prepare: RouteUpdateRequest { routing_table_id: UPDATE_REQUEST_COMPLEX.routing_table_id, from_epoch_index: 1, to_epoch_index: 3, current_epoch_index: 3, hold_down_time: 45000, speaker: UPDATE_REQUEST_COMPLEX.speaker.clone(), new_routes: Vec::new(), withdrawn_routes: vec!["example.prefix2".to_string()], } .to_prepare(), }) .await .unwrap(); assert_eq!( (*service.local_table.read()) .get_route("example.prefix1") .unwrap() .0 .id(), ROUTING_ACCOUNT.id() ); assert!((*service.local_table.read()) .get_route("example.prefix2") .is_none()); } #[tokio::test] async fn sends_control_request_if_routing_table_id_changed() { let (mut service, outgoing_requests) = test_service_with_routes(); // First request is valid let mut request1 = UPDATE_REQUEST_COMPLEX.clone(); request1.to_epoch_index = 3; request1.from_epoch_index = 0; service .handle_request(IncomingRequest { from: ROUTING_ACCOUNT.clone(), prepare: request1.to_prepare(), }) .await .unwrap(); // Second has a gap in epochs let mut request2 = UPDATE_REQUEST_COMPLEX.clone(); request2.to_epoch_index = 8; request2.from_epoch_index = 7; request2.routing_table_id = [9; 16]; let err = service .handle_request(IncomingRequest { from: ROUTING_ACCOUNT.clone(), prepare: request2.to_prepare(), }) .await .unwrap_err(); assert_eq!(err.code(), ErrorCode::F00_BAD_REQUEST); let request = &outgoing_requests.lock()[0]; let control = RouteControlRequest::try_from(&request.prepare).unwrap(); assert_eq!(control.last_known_epoch, 0); assert_eq!( control.last_known_routing_table_id, request2.routing_table_id ); } #[tokio::test] async fn sends_control_request_if_missing_epochs() { let (mut service, outgoing_requests) = test_service_with_routes(); // First request is valid let mut request = UPDATE_REQUEST_COMPLEX.clone(); request.to_epoch_index = 1; request.from_epoch_index = 0; service 
.handle_request(IncomingRequest { from: ROUTING_ACCOUNT.clone(), prepare: request.to_prepare(), }) .await .unwrap(); // Second has a gap in epochs let mut request = UPDATE_REQUEST_COMPLEX.clone(); request.to_epoch_index = 8; request.from_epoch_index = 7; let err = service .handle_request(IncomingRequest { from: ROUTING_ACCOUNT.clone(), prepare: request.to_prepare(), }) .await .unwrap_err(); assert_eq!(err.code(), ErrorCode::F00_BAD_REQUEST); let request = &outgoing_requests.lock()[0]; let control = RouteControlRequest::try_from(&request.prepare).unwrap(); assert_eq!(control.last_known_epoch, 1); } } #[cfg(test)] mod create_route_update { use super::*; use crate::test_helpers::*; #[tokio::test] async fn heartbeat_message_for_empty_table() { let service = test_service(); let update = service.create_route_update(0, 0); assert_eq!(update.from_epoch_index, 0); assert_eq!(update.to_epoch_index, 0); assert_eq!(update.current_epoch_index, 0); // Connector's own route is always included in the 0 epoch assert_eq!(update.new_routes.len(), 1); assert_eq!(update.new_routes[0].prefix, "example.connector"); assert!(update.withdrawn_routes.is_empty()); } #[tokio::test] async fn includes_the_given_range_of_epochs() { let service = test_service(); (*service.forwarding_table.write()).set_epoch(4); *service.forwarding_table_updates.write() = vec![ ( vec![Route { prefix: "example.a".to_string(), path: vec!["example.x".to_string()], auth: [1; 32], props: Vec::new(), }], Vec::new(), ), ( vec![Route { prefix: "example.b".to_string(), path: vec!["example.x".to_string()], auth: [2; 32], props: Vec::new(), }], Vec::new(), ), ( vec![Route { prefix: "example.c".to_string(), path: vec!["example.x".to_string(), "example.y".to_string()], auth: [3; 32], props: Vec::new(), }], vec!["example.m".to_string()], ), ( vec![Route { prefix: "example.d".to_string(), path: vec!["example.x".to_string(), "example.y".to_string()], auth: [4; 32], props: Vec::new(), }], vec!["example.n".to_string()], ), ]; let update = service.create_route_update(1, 3); assert_eq!(update.from_epoch_index, 1); assert_eq!(update.to_epoch_index, 3); assert_eq!(update.current_epoch_index, 4); assert_eq!(update.new_routes.len(), 2); assert_eq!(update.withdrawn_routes.len(), 1); let new_routes: Vec<&str> = update .new_routes .iter() .map(|r| str::from_utf8(r.prefix.as_ref()).unwrap()) .collect(); assert!(new_routes.contains(&"example.b")); assert!(new_routes.contains(&"example.c")); assert!(!new_routes.contains(&"example.m")); assert_eq!(update.withdrawn_routes[0], "example.m"); } } #[cfg(test)] mod send_route_updates { use super::*; use crate::fixtures::*; use crate::test_helpers::*; use interledger_service::*; use std::{collections::HashSet, iter::FromIterator, str::FromStr}; #[tokio::test] async fn broadcasts_to_all_accounts_we_send_updates_to() { let (service, outgoing_requests) = test_service_with_routes(); service.send_route_updates().await.unwrap(); let accounts: HashSet<Uuid> = outgoing_requests .lock() .iter() .map(|request| request.to.id()) .collect(); let expected: HashSet<Uuid> = [ Uuid::from_slice(&[1; 16]).unwrap(), Uuid::from_slice(&[2; 16]).unwrap(), ] .iter() .cloned() .collect(); assert_eq!(accounts, expected); } #[tokio::test] async fn broadcasts_configured_and_local_routes() { let (service, outgoing_requests) = test_service_with_routes(); // This is normally spawned as a task when the service is created service.update_best_routes(None).await.unwrap(); service.send_route_updates().await.unwrap(); let update = 
            RouteUpdateRequest::try_from(&outgoing_requests.lock()[0].prepare).unwrap();
        assert_eq!(update.new_routes.len(), 3);

        let prefixes: Vec<&str> = update
            .new_routes
            .iter()
            .map(|route| str::from_utf8(route.prefix.as_ref()).unwrap())
            .collect();
        assert!(prefixes.contains(&"example.local.1"));
        assert!(prefixes.contains(&"example.configured.1"));
    }

    #[tokio::test]
    async fn broadcasts_received_routes() {
        let (service, outgoing_requests) = test_service_with_routes();

        // This is normally spawned as a task when the service is created
        service.update_best_routes(None).await.unwrap();

        service
            .handle_route_update_request(IncomingRequest {
                from: TestAccount::new(Uuid::new_v4(), "example.peer"),
                prepare: RouteUpdateRequest {
                    routing_table_id: [0; 16],
                    current_epoch_index: 1,
                    from_epoch_index: 0,
                    to_epoch_index: 1,
                    hold_down_time: 30000,
                    speaker: Address::from_str("example.remote").unwrap(),
                    new_routes: vec![Route {
                        prefix: "example.remote".to_string(),
                        path: vec!["example.peer".to_string()],
                        auth: [0; 32],
                        props: Vec::new(),
                    }],
                    withdrawn_routes: Vec::new(),
                }
                .to_prepare(),
            })
            .await
            .unwrap();

        service.send_route_updates().await.unwrap();
        let update = RouteUpdateRequest::try_from(&outgoing_requests.lock()[0].prepare).unwrap();
        assert_eq!(update.new_routes.len(), 4);

        let prefixes: Vec<&str> = update
            .new_routes
            .iter()
            .map(|route| str::from_utf8(route.prefix.as_ref()).unwrap())
            .collect();
        assert!(prefixes.contains(&"example.local.1"));
        assert!(prefixes.contains(&"example.configured.1"));
        assert!(prefixes.contains(&"example.remote"));
    }

    #[tokio::test]
    async fn broadcasts_withdrawn_routes() {
        let id10 = Uuid::from_slice(&[10; 16]).unwrap();

        let (service, outgoing_requests) = test_service_with_routes();

        // This is normally spawned as a task when the service is created
        service.update_best_routes(None).await.unwrap();

        service
            .handle_route_update_request(IncomingRequest {
                from: TestAccount::new(id10, "example.peer"),
                prepare: RouteUpdateRequest {
                    routing_table_id: [0; 16],
                    current_epoch_index: 1,
                    from_epoch_index: 0,
                    to_epoch_index: 1,
                    hold_down_time: 30000,
                    speaker: Address::from_str("example.remote").unwrap(),
                    new_routes: vec![Route {
                        prefix: "example.remote".to_string(),
                        path: vec!["example.peer".to_string()],
                        auth: [0; 32],
                        props: Vec::new(),
                    }],
                    withdrawn_routes: Vec::new(),
                }
                .to_prepare(),
            })
            .await
            .unwrap();
        service
            .handle_route_update_request(IncomingRequest {
                from: TestAccount::new(id10, "example.peer"),
                prepare: RouteUpdateRequest {
                    routing_table_id: [0; 16],
                    current_epoch_index: 4,
                    from_epoch_index: 1,
                    to_epoch_index: 4,
                    hold_down_time: 30000,
                    speaker: Address::from_str("example.remote").unwrap(),
                    new_routes: Vec::new(),
                    withdrawn_routes: vec!["example.remote".to_string()],
                }
                .to_prepare(),
            })
            .await
            .unwrap();

        service.send_route_updates().await.unwrap();
        let update = RouteUpdateRequest::try_from(&outgoing_requests.lock()[0].prepare).unwrap();
        assert_eq!(update.new_routes.len(), 3);

        let prefixes: Vec<&str> = update
            .new_routes
            .iter()
            .map(|route| str::from_utf8(route.prefix.as_ref()).unwrap())
            .collect();
        assert!(prefixes.contains(&"example.local.1"));
        assert!(prefixes.contains(&"example.configured.1"));
        assert!(!prefixes.contains(&"example.remote"));
        assert_eq!(update.withdrawn_routes.len(), 1);
        assert_eq!(update.withdrawn_routes[0], "example.remote");
    }

    #[tokio::test]
    async fn backs_off_sending_to_unavailable_child_accounts() {
        let id1 = Uuid::from_slice(&[1; 16]).unwrap();
        let id2 = Uuid::from_slice(&[2; 16]).unwrap();
        let local_routes = HashMap::from_iter(vec![
            (
                "example.local.1".to_string(),
                TestAccount::new(id1, "example.local.1"),
            ),
            (
                "example.connector.other-local".to_string(),
                TestAccount {
                    id: id2,
                    ilp_address: Address::from_str("example.connector.other-local").unwrap(),
                    relation: RoutingRelation::Child,
                },
            ),
        ]);
        let store = TestStore::with_routes(local_routes, HashMap::new());
        let outgoing_requests: Arc<Mutex<Vec<OutgoingRequest<TestAccount>>>> =
            Arc::new(Mutex::new(Vec::new()));
        let outgoing_requests_clone = outgoing_requests.clone();
        let outgoing = outgoing_service_fn(move |request: OutgoingRequest<TestAccount>| {
            let res = if request.to.routing_relation() == RoutingRelation::Child {
                Err(RejectBuilder {
                    code: ErrorCode::F00_BAD_REQUEST,
                    message: &[],
                    data: &[],
                    triggered_by: Some(request.to.ilp_address()),
                }
                .build())
            } else {
                Ok(CCP_RESPONSE.clone())
            };
            (*outgoing_requests_clone.lock()).push(request);
            res
        });
        let service = CcpRouteManagerBuilder::new(
            Address::from_str("example.connector").unwrap(),
            store,
            outgoing,
            incoming_service_fn(|_request| {
                Err(RejectBuilder {
                    code: ErrorCode::F02_UNREACHABLE,
                    message: b"No other incoming handler!",
                    data: &[],
                    triggered_by: Some(&EXAMPLE_CONNECTOR),
                }
                .build())
            }),
        )
        .ilp_address(Address::from_str("example.connector").unwrap())
        .to_service();

        service.send_route_updates().await.unwrap();
        // The first time, the child request is rejected
        assert_eq!(outgoing_requests.lock().len(), 2);
        {
            let lock = service.unavailable_accounts.lock();
            let backoff = lock
                .get(&id2)
                .expect("Should have added child to unavailable accounts");
            assert_eq!(backoff.max, 1);
            assert_eq!(backoff.skip_intervals, 1);
        }

        *outgoing_requests.lock() = Vec::new();
        service.send_route_updates().await.unwrap();
        // When we send again, we skip the child
        assert_eq!(outgoing_requests.lock().len(), 1);
        {
            let lock = service.unavailable_accounts.lock();
            let backoff = lock
                .get(&id2)
                .expect("Should have added child to unavailable accounts");
            assert_eq!(backoff.max, 1);
            assert_eq!(backoff.skip_intervals, 0);
        }

        *outgoing_requests.lock() = Vec::new();
        service.send_route_updates().await.unwrap();
        // When we send again, we try the child but it still won't work
        assert_eq!(outgoing_requests.lock().len(), 2);
        {
            let lock = service.unavailable_accounts.lock();
            let backoff = lock
                .get(&id2)
                .expect("Should have added child to unavailable accounts");
            assert_eq!(backoff.max, 2);
            assert_eq!(backoff.skip_intervals, 2);
        }
    }

    #[tokio::test]
    async fn resets_backoff_on_route_control_request() {
        let id1 = Uuid::from_slice(&[1; 16]).unwrap();
        let id2 = Uuid::from_slice(&[2; 16]).unwrap();
        let child_account = TestAccount {
            id: id2,
            ilp_address: Address::from_str("example.connector.other-local").unwrap(),
            relation: RoutingRelation::Child,
        };
        let local_routes = HashMap::from_iter(vec![
            (
                "example.local.1".to_string(),
                TestAccount::new(id1, "example.local.1"),
            ),
            (
                "example.connector.other-local".to_string(),
                child_account.clone(),
            ),
        ]);
        let store = TestStore::with_routes(local_routes, HashMap::new());
        let outgoing_requests: Arc<Mutex<Vec<OutgoingRequest<TestAccount>>>> =
            Arc::new(Mutex::new(Vec::new()));
        let outgoing_requests_clone = outgoing_requests.clone();
        let outgoing = outgoing_service_fn(move |request: OutgoingRequest<TestAccount>| {
            let res = if request.to.routing_relation() == RoutingRelation::Child {
                Err(RejectBuilder {
                    code: ErrorCode::F00_BAD_REQUEST,
                    message: &[],
                    data: &[],
                    triggered_by: Some(request.to.ilp_address()),
                }
                .build())
            } else {
                Ok(CCP_RESPONSE.clone())
            };
            (*outgoing_requests_clone.lock()).push(request);
            res
        });
        let mut service = CcpRouteManagerBuilder::new(
            Address::from_str("example.connector").unwrap(),
            store,
            outgoing,
            incoming_service_fn(|_request| {
                Err(RejectBuilder {
                    code: ErrorCode::F02_UNREACHABLE,
                    message: b"No other incoming handler!",
                    data: &[],
                    triggered_by: Some(&EXAMPLE_CONNECTOR),
                }
                .build())
            }),
        )
        .ilp_address(Address::from_str("example.connector").unwrap())
        .to_service();

        service.send_route_updates().await.unwrap();
        // The first time, the child request is rejected
        assert_eq!(outgoing_requests.lock().len(), 2);
        {
            let lock = service.unavailable_accounts.lock();
            let backoff = lock
                .get(&id2)
                .expect("Should have added child to unavailable accounts");
            assert_eq!(backoff.max, 1);
            assert_eq!(backoff.skip_intervals, 1);
        }

        service
            .handle_request(IncomingRequest {
                prepare: CONTROL_REQUEST.to_prepare(),
                from: child_account,
            })
            .await
            .unwrap();
        {
            let lock = service.unavailable_accounts.lock();
            assert!(lock.get(&id2).is_none());
        }

        *outgoing_requests.lock() = Vec::new();
        service.send_route_updates().await.unwrap();
        // When we send again, we don't skip the child because we got a request from them
        assert_eq!(outgoing_requests.lock().len(), 2);
    }
}
38.563523
172
0.527472
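// A minimal, self-contained sketch (not part of the crate above) of the
// longest-prefix lookup that get_best_route_for_prefix performs: try the full
// prefix first, then progressively shorter ancestors, so "example.a.b.c" can be
// served by a route configured for "example.a". All names below are illustrative.
use std::collections::HashMap;

fn longest_prefix_match<'a>(
    routes: &'a HashMap<String, u32>,
    prefix: &str,
) -> Option<(&'a str, u32)> {
    let segments: Vec<&str> = prefix.split('.').collect();
    for i in 0..segments.len() {
        // Drop one trailing segment per iteration: "a.b.c", then "a.b", then "a"
        let candidate = segments[0..segments.len() - i].join(".");
        if let Some((key, value)) = routes.get_key_value(candidate.as_str()) {
            return Some((key.as_str(), *value));
        }
    }
    None
}

fn main() {
    let mut routes = HashMap::new();
    routes.insert("example.a".to_string(), 1);
    routes.insert("example.a.b".to_string(), 2);
    // The most specific configured prefix wins, mirroring the loop above.
    assert_eq!(
        longest_prefix_match(&routes, "example.a.b.c"),
        Some(("example.a.b", 2))
    );
    assert_eq!(longest_prefix_match(&routes, "example.a.z"), Some(("example.a", 1)));
    assert_eq!(longest_prefix_match(&routes, "other.prefix"), None);
}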
ac7f1b304374e7c90d2a287c1bd0f3b2a8f3dbd2
114
use dade::model; #[model] enum TestModel { Value( #[field(gt = 2.0)] u16 ), } fn main() {}
11.4
25
0.464912
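// The file above reads like a compile-fail "UI test" input for dade's derive
// macro (a float bound `gt = 2.0` applied to a u16 field); that reading is an
// inference, not confirmed by the source. Such cases are commonly driven with
// the trybuild crate; the glob path below is a placeholder assumption.
#[test]
fn ui() {
    let t = trybuild::TestCases::new();
    // Every file matched by the glob must fail to compile, with diagnostics
    // matching the committed .stderr snapshot beside each case.
    t.compile_fail("tests/ui/*.rs");
}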
edf1320ad309145332c695dc03db35e27fc3a99a
7,944
use solana_bpf_loader_program::{
    create_vm,
    serialization::{deserialize_parameters, serialize_parameters},
};
use solana_rbpf::InstructionMeter;
use solana_sdk::{
    account::{Account as SolanaAccount, KeyedAccount},
    bpf_loader,
    entrypoint::SUCCESS,
    entrypoint_native::{ComputeBudget, ComputeMeter, InvokeContext, Logger, ProcessInstruction},
    instruction::{CompiledInstruction, InstructionError},
    message::Message,
    pubkey::Pubkey,
    sysvar::rent::{self, Rent},
};
use spl_token::{
    instruction::TokenInstruction,
    option::COption,
    pack::Pack,
    state::{Account, Mint},
};
use std::{cell::RefCell, fs::File, io::Read, path::PathBuf, rc::Rc};

fn load_program(name: &str) -> Vec<u8> {
    let mut path = PathBuf::new();
    path.push("../../target/bpfel-unknown-unknown/release");
    path.push(name);
    path.set_extension("so");
    let mut file = File::open(path).unwrap();
    let mut program = Vec::new();
    file.read_to_end(&mut program).unwrap();
    program
}

fn run_program(
    program_id: &Pubkey,
    parameter_accounts: &[KeyedAccount],
    instruction_data: &[u8],
) -> Result<u64, InstructionError> {
    let mut program_account = SolanaAccount::default();
    program_account.data = load_program("spl_token");
    let loader_id = bpf_loader::id();
    let mut invoke_context = MockInvokeContext::default();

    let (mut vm, heap_region) = create_vm(
        &loader_id,
        &program_account.data,
        parameter_accounts,
        &mut invoke_context,
    )
    .unwrap();
    let mut parameter_bytes = serialize_parameters(
        &loader_id,
        program_id,
        parameter_accounts,
        &instruction_data,
    )
    .unwrap();
    assert_eq!(
        SUCCESS,
        vm.execute_program(parameter_bytes.as_mut_slice(), &[], &[heap_region])
            .unwrap()
    );
    deserialize_parameters(&loader_id, parameter_accounts, &parameter_bytes).unwrap();
    Ok(vm.get_total_instruction_count())
}

#[test]
fn assert_instruction_count() {
    let program_id = Pubkey::new_rand();
    let source_key = Pubkey::new_rand();
    let source_account = SolanaAccount::new_ref(u64::MAX, Account::get_packed_len(), &program_id);
    let destination_key = Pubkey::new_rand();
    let destination_account =
        SolanaAccount::new_ref(u64::MAX, Account::get_packed_len(), &program_id);
    let owner_key = Pubkey::new_rand();
    let owner_account = RefCell::new(SolanaAccount::default());
    let mint_key = Pubkey::new_rand();
    let mint_account = SolanaAccount::new_ref(0, Mint::get_packed_len(), &program_id);
    let rent_key = rent::id();
    let rent_account = RefCell::new(rent::create_account(42, &Rent::default()));

    // Create new mint
    let instruction_data = TokenInstruction::InitializeMint {
        decimals: 9,
        mint_authority: owner_key,
        freeze_authority: COption::None,
    }
    .pack();
    let parameter_accounts = vec![
        KeyedAccount::new(&mint_key, false, &mint_account),
        KeyedAccount::new(&source_key, false, &source_account),
    ];
    let initialize_mint_count =
        run_program(&program_id, &parameter_accounts[..], &instruction_data).unwrap();

    // Create source account
    let instruction_data = TokenInstruction::InitializeAccount.pack();
    let parameter_accounts = vec![
        KeyedAccount::new(&source_key, false, &source_account),
        KeyedAccount::new(&mint_key, false, &mint_account),
        KeyedAccount::new(&owner_key, false, &owner_account),
        KeyedAccount::new(&rent_key, false, &rent_account),
    ];
    let initialize_account_count =
        run_program(&program_id, &parameter_accounts[..], &instruction_data).unwrap();

    // Create destination account
    let instruction_data = TokenInstruction::InitializeAccount.pack();
    let parameter_accounts = vec![
        KeyedAccount::new(&destination_key, false, &destination_account),
        KeyedAccount::new(&mint_key, false, &mint_account),
        KeyedAccount::new(&owner_key, false, &owner_account),
        KeyedAccount::new(&rent_key, false, &rent_account),
    ];
    let _ = run_program(&program_id, &parameter_accounts[..], &instruction_data).unwrap();

    // MintTo source account
    let instruction_data = TokenInstruction::MintTo { amount: 100 }.pack();
    let parameter_accounts = vec![
        KeyedAccount::new(&mint_key, false, &mint_account),
        KeyedAccount::new(&source_key, false, &source_account),
        KeyedAccount::new(&owner_key, true, &owner_account),
    ];
    let mintto_count =
        run_program(&program_id, &parameter_accounts[..], &instruction_data).unwrap();

    // Transfer from source to destination
    let instruction = TokenInstruction::Transfer { amount: 100 };
    let instruction_data = instruction.pack();
    let parameter_accounts = vec![
        KeyedAccount::new(&source_key, false, &source_account),
        KeyedAccount::new(&destination_key, false, &destination_account),
        KeyedAccount::new(&owner_key, true, &owner_account),
    ];
    let transfer_count =
        run_program(&program_id, &parameter_accounts[..], &instruction_data).unwrap();

    const BASELINE_NEW_MINT_COUNT: u64 = 4000; // last known 3802
    const BASELINE_INITIALIZE_ACCOUNT_COUNT: u64 = 6500; // last known 6445
    const BASELINE_MINTTO_COUNT: u64 = 6500; // last known 6194
    const BASELINE_TRANSFER_COUNT: u64 = 8000; // last known 7609

    println!("BPF instructions executed");
    println!(
        "  InitializeMint   : {:?} ({:?})",
        initialize_mint_count, BASELINE_NEW_MINT_COUNT
    );
    println!(
        "  InitializeAccount: {:?} ({:?})",
        initialize_account_count, BASELINE_INITIALIZE_ACCOUNT_COUNT
    );
    println!(
        "  MintTo           : {:?} ({:?})",
        mintto_count, BASELINE_MINTTO_COUNT
    );
    println!(
        "  Transfer         : {:?} ({:?})",
        transfer_count, BASELINE_TRANSFER_COUNT,
    );

    assert!(initialize_account_count <= BASELINE_INITIALIZE_ACCOUNT_COUNT);
    assert!(initialize_mint_count <= BASELINE_NEW_MINT_COUNT);
    assert!(transfer_count <= BASELINE_TRANSFER_COUNT);
}

// Mock InvokeContext

#[derive(Debug, Default)]
struct MockInvokeContext {
    pub key: Pubkey,
    pub logger: MockLogger,
    pub compute_meter: MockComputeMeter,
}
impl InvokeContext for MockInvokeContext {
    fn push(&mut self, _key: &Pubkey) -> Result<(), InstructionError> {
        Ok(())
    }
    fn pop(&mut self) {}
    fn verify_and_update(
        &mut self,
        _message: &Message,
        _instruction: &CompiledInstruction,
        _accounts: &[Rc<RefCell<SolanaAccount>>],
    ) -> Result<(), InstructionError> {
        Ok(())
    }
    fn get_caller(&self) -> Result<&Pubkey, InstructionError> {
        Ok(&self.key)
    }
    fn get_programs(&self) -> &[(Pubkey, ProcessInstruction)] {
        &[]
    }
    fn get_logger(&self) -> Rc<RefCell<dyn Logger>> {
        Rc::new(RefCell::new(self.logger.clone()))
    }
    fn is_cross_program_supported(&self) -> bool {
        true
    }
    fn get_compute_budget(&self) -> ComputeBudget {
        ComputeBudget::default()
    }
    fn get_compute_meter(&self) -> Rc<RefCell<dyn ComputeMeter>> {
        Rc::new(RefCell::new(self.compute_meter.clone()))
    }
}

#[derive(Debug, Default, Clone)]
struct MockComputeMeter {}
impl ComputeMeter for MockComputeMeter {
    fn consume(&mut self, _amount: u64) -> Result<(), InstructionError> {
        Ok(())
    }
    fn get_remaining(&self) -> u64 {
        u64::MAX
    }
}
#[derive(Debug, Default, Clone)]
struct MockLogger {}
impl Logger for MockLogger {
    fn log_enabled(&self) -> bool {
        true
    }
    fn log(&mut self, message: &str) {
        println!("{}", message);
    }
}

struct TestInstructionMeter {}
impl InstructionMeter for TestInstructionMeter {
    fn consume(&mut self, _amount: u64) {}
    fn get_remaining(&self) -> u64 {
        u64::MAX
    }
}
33.661017
98
0.655589
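// A dependency-free sketch of the baseline-regression pattern used by
// assert_instruction_count above: measured costs may drift downward freely, but
// growing past a fixed ceiling fails the test. The numbers are the "last known"
// values quoted in the file's comments, used here purely as sample data.
fn check_baseline(name: &str, measured: u64, baseline: u64) {
    println!("  {:<17}: {} (baseline {})", name, measured, baseline);
    assert!(
        measured <= baseline,
        "{} regressed: {} > {}",
        name, measured, baseline
    );
}

fn main() {
    // In the real test these come from vm.get_total_instruction_count().
    check_baseline("InitializeMint", 3802, 4000);
    check_baseline("Transfer", 7609, 8000);
}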
1ef7d156a18b2affb1bcb78b48c5b14f3689dfea
1,118
#![feature(proc_macro_def_site, proc_macro_quote, proc_macro_hygiene, trait_alias)]

mod rule;
mod expr;

extern crate proc_macro;

use proc_macro::{TokenStream, TokenTree};

#[derive(Debug)]
enum Error {
    UnexpectedToken,
    ExpectedAtom,
    ExpectedPunct,
}

trait TokenStreamExt {
    fn and(self, other: TokenStream) -> TokenStream;
}

impl TokenStreamExt for TokenStream {
    fn and(mut self, other: TokenStream) -> TokenStream {
        self.extend(Some(other));
        self
    }
}

impl TokenStreamExt for TokenTree {
    fn and(self, other: TokenStream) -> TokenStream {
        let mut this = TokenStream::from(self);
        this.extend(Some(other));
        this
    }
}

trait TokenIter = Iterator<Item = TokenTree> + Clone;

/// Run `f` against a clone of the iterator and commit the consumed tokens to
/// `iter` only if parsing succeeds; on error the original cursor is untouched.
fn attempt<I, R, E, F>(iter: &mut I, f: F) -> Result<R, E>
where
    I: TokenIter,
    F: FnOnce(&mut I) -> Result<R, E>,
{
    let mut iter2 = iter.clone();
    let tok = f(&mut iter2)?;
    *iter = iter2;
    Ok(tok)
}

#[proc_macro]
pub fn rule(stream: TokenStream) -> TokenStream {
    rule::parse_rule(&mut stream.into_iter()).unwrap().into()
}
21.09434
83
0.636852
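// A std-only sketch of the clone-and-commit backtracking used by `attempt`
// above. proc_macro's TokenStream is only usable inside a proc-macro crate, so
// a char iterator stands in for it here; the shape is identical: run the parser
// on a clone, and overwrite the real cursor only if parsing succeeded.
fn attempt<I, R, E, F>(iter: &mut I, f: F) -> Result<R, E>
where
    I: Iterator<Item = char> + Clone,
    F: FnOnce(&mut I) -> Result<R, E>,
{
    let mut probe = iter.clone();
    let out = f(&mut probe)?; // on Err, `iter` is left untouched
    *iter = probe; // commit the consumed input only on success
    Ok(out)
}

fn expect(iter: &mut impl Iterator<Item = char>, want: char) -> Result<char, ()> {
    match iter.next() {
        Some(c) if c == want => Ok(c),
        _ => Err(()),
    }
}

fn main() {
    let mut input = "ab".chars();
    // A failing parse consumes nothing...
    assert!(attempt(&mut input, |i| expect(i, 'x')).is_err());
    // ...so the next parse still sees 'a'.
    assert_eq!(attempt(&mut input, |i| expect(i, 'a')), Ok('a'));
    assert_eq!(input.next(), Some('b'));
}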
4af338bd2e7a5701cc027ff6bbe001020e67bf8b
1,709
use futures::Stream; use std::pin::Pin; use tonic::{transport::Server, Request, Response, Status}; pub mod hello_world { tonic::include_proto!("helloworld"); } pub mod echo { tonic::include_proto!("grpc.examples.echo"); } use hello_world::{ greeter_server::{Greeter, GreeterServer}, HelloReply, HelloRequest, }; use echo::{ echo_server::{Echo, EchoServer}, EchoRequest, EchoResponse, }; type ResponseStream = Pin<Box<dyn Stream<Item = Result<EchoResponse, Status>> + Send + Sync>>; #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { let addr = "[::1]:50051".parse().unwrap(); let greeter = GreeterServer::new(MyGreeter::default()); let echo = EchoServer::new(MyEcho::default()); Server::builder() .add_service(greeter) .add_service(echo) .serve(addr) .await?; Ok(()) } #[derive(Default)] pub struct MyGreeter {} #[tonic::async_trait] impl Greeter for MyGreeter { async fn say_hello( &self, request: Request<HelloRequest>, ) -> Result<Response<HelloReply>, Status> { let reply = hello_world::HelloReply { message: format!("Hello {}!", request.into_inner().name), }; Ok(Response::new(reply)) } } #[derive(Default)] pub struct MyEcho; #[tonic::async_trait] impl Echo for MyEcho { async fn unary_echo( &self, request: Request<EchoRequest>, ) -> Result<Response<EchoResponse>, Status> { let message = request.into_inner().message; Ok(Response::new(EchoResponse { message })) } type ServerStreamingEchoStream = ResponseStream; type BidirectionalStreamingEchoStream = ResponseStream; }
23.410959
94
0.638385
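// A self-contained sketch of the type-erasure pattern behind the shared
// `ResponseStream` alias above: differently-typed streams are boxed into one
// `Pin<Box<dyn Stream + Send>>` so a single alias can serve several streaming
// RPC methods. Only the `futures` crate is used; all names are illustrative.
use futures::stream::{self, BoxStream, StreamExt};

fn fixed_replies() -> BoxStream<'static, Result<String, String>> {
    stream::iter(vec![Ok("a".to_string()), Ok("b".to_string())]).boxed()
}

fn counted_replies(n: usize) -> BoxStream<'static, Result<String, String>> {
    // A completely different underlying stream type erases to the same alias.
    stream::iter(0..n).map(|i| Ok(format!("reply {}", i))).boxed()
}

fn main() {
    futures::executor::block_on(async {
        // Both constructors are interchangeable wherever the alias is expected.
        for mut s in vec![fixed_replies(), counted_replies(2)] {
            while let Some(item) = s.next().await {
                println!("{:?}", item);
            }
        }
    });
}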
f8832807f95c08d56af11e3ab9e411f65b53c6d3
57,732
use byteorder::{LittleEndian, ReadBytesExt}; use futures; use futures::{future, Async, Future, Poll, Stream}; use std; use std::borrow::Cow; use std::cmp::max; use std::io::{Read, Result, Seek, SeekFrom}; use std::mem; use std::thread; use std::time::{Duration, Instant}; use crate::config::{Bitrate, PlayerConfig}; use librespot_core::session::Session; use librespot_core::spotify_id::SpotifyId; use librespot_core::util::SeqGenerator; use crate::audio::{AudioDecrypt, AudioFile, StreamLoaderController}; use crate::audio::{VorbisDecoder, VorbisPacket}; use crate::audio::{ READ_AHEAD_BEFORE_PLAYBACK_ROUNDTRIPS, READ_AHEAD_BEFORE_PLAYBACK_SECONDS, READ_AHEAD_DURING_PLAYBACK_ROUNDTRIPS, READ_AHEAD_DURING_PLAYBACK_SECONDS, }; use crate::audio_backend::Sink; use crate::metadata::{AudioItem, FileFormat}; use crate::mixer::AudioFilter; use crate::winconsole; const PRELOAD_NEXT_TRACK_BEFORE_END_DURATION_MS: u32 = 30000; pub struct Player { commands: Option<futures::sync::mpsc::UnboundedSender<PlayerCommand>>, thread_handle: Option<thread::JoinHandle<()>>, play_request_id_generator: SeqGenerator<u64>, } #[derive(PartialEq, Debug, Clone, Copy)] pub enum SinkStatus { Running, Closed, TemporarilyClosed, } pub type SinkEventCallback = Box<dyn Fn(SinkStatus) + Send>; struct PlayerInternal { session: Session, config: PlayerConfig, commands: futures::sync::mpsc::UnboundedReceiver<PlayerCommand>, state: PlayerState, preload: PlayerPreload, sink: Box<dyn Sink>, sink_status: SinkStatus, sink_event_callback: Option<SinkEventCallback>, audio_filter: Option<Box<dyn AudioFilter + Send>>, event_senders: Vec<futures::sync::mpsc::UnboundedSender<PlayerEvent>>, } enum PlayerCommand { Load { track_id: SpotifyId, play_request_id: u64, play: bool, position_ms: u32, }, Preload { track_id: SpotifyId, }, Play, Pause, Stop, Seek(u32), AddEventSender(futures::sync::mpsc::UnboundedSender<PlayerEvent>), SetSinkEventCallback(Option<SinkEventCallback>), EmitVolumeSetEvent(u16), } #[derive(Debug, Clone)] pub enum PlayerEvent { // Fired when the player is stopped (e.g. by issuing a "stop" command to the player). Stopped { play_request_id: u64, track_id: SpotifyId, }, // The player started working on playback of a track while it was in a stopped state. // This is always immediately followed up by a "Loading" or "Playing" event. Started { play_request_id: u64, track_id: SpotifyId, position_ms: u32, }, // Same as started but in the case that the player already had a track loaded. // The player was either playing the loaded track or it was paused. Changed { old_track_id: SpotifyId, new_track_id: SpotifyId, }, // The player is delayed by loading a track. Loading { play_request_id: u64, track_id: SpotifyId, position_ms: u32, }, // The player is preloading a track. Preloading { track_id: SpotifyId, }, // The player is playing a track. // This event is issued at the start of playback of whenever the position must be communicated // because it is out of sync. This includes: // start of a track // un-pausing // after a seek // after a buffer-underrun Playing { play_request_id: u64, track_id: SpotifyId, position_ms: u32, duration_ms: u32, }, // The player entered a paused state. Paused { play_request_id: u64, track_id: SpotifyId, position_ms: u32, duration_ms: u32, }, // The player thinks it's a good idea to issue a preload command for the next track now. // This event is intended for use within spirc. TimeToPreloadNextTrack { play_request_id: u64, track_id: SpotifyId, }, // The player reached the end of a track. 
// This event is intended for use within spirc. Spirc will respond by issuing another command // which will trigger another event (e.g. Changed or Stopped) EndOfTrack { play_request_id: u64, track_id: SpotifyId, }, // The player was unable to load the requested track. Unavailable { play_request_id: u64, track_id: SpotifyId, }, // The mixer volume was set to a new level. VolumeSet { volume: u16, }, } impl PlayerEvent { pub fn get_play_request_id(&self) -> Option<u64> { use PlayerEvent::*; match self { Loading { play_request_id, .. } | Unavailable { play_request_id, .. } | Started { play_request_id, .. } | Playing { play_request_id, .. } | TimeToPreloadNextTrack { play_request_id, .. } | EndOfTrack { play_request_id, .. } | Paused { play_request_id, .. } | Stopped { play_request_id, .. } => Some(*play_request_id), Changed { .. } | Preloading { .. } | VolumeSet { .. } => None, } } } pub type PlayerEventChannel = futures::sync::mpsc::UnboundedReceiver<PlayerEvent>; #[derive(Clone, Copy, Debug)] struct NormalisationData { track_gain_db: f32, track_peak: f32, album_gain_db: f32, album_peak: f32, } impl NormalisationData { fn parse_from_file<T: Read + Seek>(mut file: T) -> Result<NormalisationData> { const SPOTIFY_NORMALIZATION_HEADER_START_OFFSET: u64 = 144; file.seek(SeekFrom::Start(SPOTIFY_NORMALIZATION_HEADER_START_OFFSET)) .unwrap(); let track_gain_db = file.read_f32::<LittleEndian>().unwrap(); let track_peak = file.read_f32::<LittleEndian>().unwrap(); let album_gain_db = file.read_f32::<LittleEndian>().unwrap(); let album_peak = file.read_f32::<LittleEndian>().unwrap(); let r = NormalisationData { track_gain_db: track_gain_db, track_peak: track_peak, album_gain_db: album_gain_db, album_peak: album_peak, }; Ok(r) } fn get_factor(config: &PlayerConfig, data: NormalisationData) -> f32 { let mut normalisation_factor = f32::powf( 10.0, (data.track_gain_db + config.normalisation_pregain) / 20.0, ); if normalisation_factor * data.track_peak > 1.0 { warn!("Reducing normalisation factor to prevent clipping. Please add negative pregain to avoid."); normalisation_factor = 1.0 / data.track_peak; } debug!("Normalisation Data: {:?}", data); debug!("Applied normalisation factor: {}", normalisation_factor); normalisation_factor } } impl Player { pub fn new<F>( config: PlayerConfig, session: Session, audio_filter: Option<Box<dyn AudioFilter + Send>>, sink_builder: F, ) -> (Player, PlayerEventChannel) where F: FnOnce() -> Box<dyn Sink> + Send + 'static, { let (cmd_tx, cmd_rx) = futures::sync::mpsc::unbounded(); let (event_sender, event_receiver) = futures::sync::mpsc::unbounded(); let handle = thread::spawn(move || { debug!("new Player[{}]", session.session_id()); let internal = PlayerInternal { session: session, config: config, commands: cmd_rx, state: PlayerState::Stopped, preload: PlayerPreload::None, sink: sink_builder(), sink_status: SinkStatus::Closed, sink_event_callback: None, audio_filter: audio_filter, event_senders: [event_sender].to_vec(), }; // While PlayerInternal is written as a future, it still contains blocking code. // It must be run by using wait() in a dedicated thread. 
let _ = internal.wait(); debug!("PlayerInternal thread finished."); }); ( Player { commands: Some(cmd_tx), thread_handle: Some(handle), play_request_id_generator: SeqGenerator::new(0), }, event_receiver, ) } fn command(&self, cmd: PlayerCommand) { self.commands.as_ref().unwrap().unbounded_send(cmd).unwrap(); } pub fn load(&mut self, track_id: SpotifyId, start_playing: bool, position_ms: u32) -> u64 { let play_request_id = self.play_request_id_generator.get(); self.command(PlayerCommand::Load { track_id, play_request_id, play: start_playing, position_ms, }); play_request_id } pub fn preload(&self, track_id: SpotifyId) { self.command(PlayerCommand::Preload { track_id }); } pub fn play(&self) { self.command(PlayerCommand::Play) } pub fn pause(&self) { self.command(PlayerCommand::Pause) } pub fn stop(&self) { self.command(PlayerCommand::Stop) } pub fn seek(&self, position_ms: u32) { self.command(PlayerCommand::Seek(position_ms)); } pub fn get_player_event_channel(&self) -> PlayerEventChannel { let (event_sender, event_receiver) = futures::sync::mpsc::unbounded(); self.command(PlayerCommand::AddEventSender(event_sender)); event_receiver } pub fn get_end_of_track_future(&self) -> Box<dyn Future<Item = (), Error = ()>> { let result = self .get_player_event_channel() .filter(|event| match event { PlayerEvent::EndOfTrack { .. } | PlayerEvent::Stopped { .. } => true, _ => false, }) .into_future() .map_err(|_| ()) .map(|_| ()); Box::new(result) } pub fn set_sink_event_callback(&self, callback: Option<SinkEventCallback>) { self.command(PlayerCommand::SetSinkEventCallback(callback)); } pub fn emit_volume_set_event(&self, volume: u16) { self.command(PlayerCommand::EmitVolumeSetEvent(volume)); } } impl Drop for Player { fn drop(&mut self) { debug!("Shutting down player thread ..."); self.commands = None; if let Some(handle) = self.thread_handle.take() { match handle.join() { Ok(_) => (), Err(_) => error!("Player thread panicked!"), } } } } struct PlayerLoadedTrackData { decoder: Decoder, normalisation_factor: f32, stream_loader_controller: StreamLoaderController, bytes_per_second: usize, duration_ms: u32, stream_position_pcm: u64, } enum PlayerPreload { None, Loading { track_id: SpotifyId, loader: Box<dyn Future<Item = PlayerLoadedTrackData, Error = ()>>, }, Ready { track_id: SpotifyId, loaded_track: PlayerLoadedTrackData, }, } type Decoder = VorbisDecoder<Subfile<AudioDecrypt<AudioFile>>>; enum PlayerState { Stopped, Loading { track_id: SpotifyId, play_request_id: u64, start_playback: bool, loader: Box<dyn Future<Item = PlayerLoadedTrackData, Error = ()>>, }, Paused { track_id: SpotifyId, play_request_id: u64, decoder: Decoder, normalisation_factor: f32, stream_loader_controller: StreamLoaderController, bytes_per_second: usize, duration_ms: u32, stream_position_pcm: u64, suggested_to_preload_next_track: bool, }, Playing { track_id: SpotifyId, play_request_id: u64, decoder: Decoder, normalisation_factor: f32, stream_loader_controller: StreamLoaderController, bytes_per_second: usize, duration_ms: u32, stream_position_pcm: u64, reported_nominal_start_time: Option<Instant>, suggested_to_preload_next_track: bool, }, EndOfTrack { track_id: SpotifyId, play_request_id: u64, loaded_track: PlayerLoadedTrackData, }, Invalid, } impl PlayerState { fn is_playing(&self) -> bool { use self::PlayerState::*; match *self { Stopped | EndOfTrack { .. } | Paused { .. } | Loading { .. } => false, Playing { .. 
} => true, Invalid => panic!("invalid state"), } } #[allow(dead_code)] fn is_stopped(&self) -> bool { use self::PlayerState::*; match *self { Stopped => true, _ => false, } } fn is_loading(&self) -> bool { use self::PlayerState::*; match *self { Loading { .. } => true, _ => false, } } fn decoder(&mut self) -> Option<&mut Decoder> { use self::PlayerState::*; match *self { Stopped | EndOfTrack { .. } | Loading { .. } => None, Paused { ref mut decoder, .. } | Playing { ref mut decoder, .. } => Some(decoder), Invalid => panic!("invalid state"), } } fn stream_loader_controller(&mut self) -> Option<&mut StreamLoaderController> { use self::PlayerState::*; match *self { Stopped | EndOfTrack { .. } | Loading { .. } => None, Paused { ref mut stream_loader_controller, .. } | Playing { ref mut stream_loader_controller, .. } => Some(stream_loader_controller), Invalid => panic!("invalid state"), } } fn playing_to_end_of_track(&mut self) { use self::PlayerState::*; match mem::replace(self, Invalid) { Playing { track_id, play_request_id, decoder, duration_ms, bytes_per_second, normalisation_factor, stream_loader_controller, stream_position_pcm, .. } => { *self = EndOfTrack { track_id, play_request_id, loaded_track: PlayerLoadedTrackData { decoder, duration_ms, bytes_per_second, normalisation_factor, stream_loader_controller, stream_position_pcm, }, }; } _ => panic!("Called playing_to_end_of_track in non-playing state."), } } fn paused_to_playing(&mut self) { use self::PlayerState::*; match ::std::mem::replace(self, Invalid) { Paused { track_id, play_request_id, decoder, normalisation_factor, stream_loader_controller, duration_ms, bytes_per_second, stream_position_pcm, suggested_to_preload_next_track, } => { *self = Playing { track_id, play_request_id, decoder, normalisation_factor, stream_loader_controller, duration_ms, bytes_per_second, stream_position_pcm, reported_nominal_start_time: None, suggested_to_preload_next_track, }; } _ => panic!("invalid state"), } } fn playing_to_paused(&mut self) { use self::PlayerState::*; match ::std::mem::replace(self, Invalid) { Playing { track_id, play_request_id, decoder, normalisation_factor, stream_loader_controller, duration_ms, bytes_per_second, stream_position_pcm, reported_nominal_start_time: _, suggested_to_preload_next_track, } => { *self = Paused { track_id, play_request_id, decoder, normalisation_factor, stream_loader_controller, duration_ms, bytes_per_second, stream_position_pcm, suggested_to_preload_next_track, }; } _ => panic!("invalid state"), } } } struct PlayerTrackLoader { session: Session, config: PlayerConfig, } impl PlayerTrackLoader { fn find_available_alternative<'a>(&self, audio: &'a AudioItem) -> Option<Cow<'a, AudioItem>> { if audio.available { Some(Cow::Borrowed(audio)) } else { if let Some(alternatives) = &audio.alternatives { let alternatives = alternatives .iter() .map(|alt_id| AudioItem::get_audio_item(&self.session, *alt_id)); let alternatives = future::join_all(alternatives).wait().unwrap(); alternatives .into_iter() .find(|alt| alt.available) .map(Cow::Owned) } else { None } } } fn stream_data_rate(&self, format: FileFormat) -> usize { match format { FileFormat::OGG_VORBIS_96 => 12 * 1024, FileFormat::OGG_VORBIS_160 => 20 * 1024, FileFormat::OGG_VORBIS_320 => 40 * 1024, FileFormat::MP3_256 => 32 * 1024, FileFormat::MP3_320 => 40 * 1024, FileFormat::MP3_160 => 20 * 1024, FileFormat::MP3_96 => 12 * 1024, FileFormat::MP3_160_ENC => 20 * 1024, FileFormat::MP4_128_DUAL => 16 * 1024, FileFormat::OTHER3 => 40 * 1024, // better some high guess 
than nothing FileFormat::AAC_160 => 20 * 1024, FileFormat::AAC_320 => 40 * 1024, FileFormat::MP4_128 => 16 * 1024, FileFormat::OTHER5 => 40 * 1024, // better some high guess than nothing } } fn load_track(&self, spotify_id: SpotifyId, position_ms: u32) -> Option<PlayerLoadedTrackData> { let audio = match AudioItem::get_audio_item(&self.session, spotify_id).wait() { Ok(audio) => audio, Err(_) => { error!("Unable to load audio item."); return None; } }; info!("Loading <{}> with Spotify URI <{}>", audio.name, audio.uri); let audio = match self.find_available_alternative(&audio) { Some(audio) => audio, None => { warn!("<{}> is not available", audio.uri); return None; } }; assert!(audio.duration >= 0); let duration_ms = audio.duration as u32; // (Most) podcasts seem to support only 96 bit Vorbis, so fall back to it let formats = match self.config.bitrate { Bitrate::Bitrate96 => [ FileFormat::OGG_VORBIS_96, FileFormat::OGG_VORBIS_160, FileFormat::OGG_VORBIS_320, ], Bitrate::Bitrate160 => [ FileFormat::OGG_VORBIS_160, FileFormat::OGG_VORBIS_96, FileFormat::OGG_VORBIS_320, ], Bitrate::Bitrate320 => [ FileFormat::OGG_VORBIS_320, FileFormat::OGG_VORBIS_160, FileFormat::OGG_VORBIS_96, ], }; let format = formats .iter() .find(|format| audio.files.contains_key(format)) .unwrap(); let file_id = match audio.files.get(&format) { Some(&file_id) => file_id, None => { warn!("<{}> in not available in format {:?}", audio.name, format); return None; } }; let bytes_per_second = self.stream_data_rate(*format); let play_from_beginning = position_ms == 0; let key = self.session.audio_key().request(spotify_id, file_id); let encrypted_file = AudioFile::open( &self.session, file_id, bytes_per_second, play_from_beginning, ); let encrypted_file = match encrypted_file.wait() { Ok(encrypted_file) => encrypted_file, Err(_) => { error!("Unable to load encrypted file."); return None; } }; let mut stream_loader_controller = encrypted_file.get_stream_loader_controller(); if play_from_beginning { // No need to seek -> we stream from the beginning stream_loader_controller.set_stream_mode(); } else { // we need to seek -> we set stream mode after the initial seek. stream_loader_controller.set_random_access_mode(); } let key = match key.wait() { Ok(key) => key, Err(_) => { error!("Unable to load decryption key"); return None; } }; let mut decrypted_file = AudioDecrypt::new(key, encrypted_file); let normalisation_factor = match NormalisationData::parse_from_file(&mut decrypted_file) { Ok(normalisation_data) => { NormalisationData::get_factor(&self.config, normalisation_data) } Err(_) => { warn!("Unable to extract normalisation data, using default value."); 1.0 as f32 } }; let audio_file = Subfile::new(decrypted_file, 0xa7); let mut decoder = VorbisDecoder::new(audio_file).unwrap(); if position_ms != 0 { match decoder.seek(position_ms as i64) { Ok(_) => (), Err(err) => error!("Vorbis error: {:?}", err), } stream_loader_controller.set_stream_mode(); } let stream_position_pcm = PlayerInternal::position_ms_to_pcm(position_ms); winconsole::console::set_title(&audio.name).unwrap(); info!("<{}> ({} ms) loaded", audio.name, audio.duration); Some(PlayerLoadedTrackData { decoder, normalisation_factor, stream_loader_controller, bytes_per_second, duration_ms, stream_position_pcm, }) } } impl Future for PlayerInternal { type Item = (); type Error = (); fn poll(&mut self) -> Poll<(), ()> { // While this is written as a future, it still contains blocking code. // It must be run on its own thread. 
loop { let mut all_futures_completed_or_not_ready = true; // process commands that were sent to us let cmd = match self.commands.poll() { Ok(Async::Ready(None)) => return Ok(Async::Ready(())), // client has disconnected - shut down. Ok(Async::Ready(Some(cmd))) => { all_futures_completed_or_not_ready = false; Some(cmd) } Ok(Async::NotReady) => None, Err(_) => None, }; if let Some(cmd) = cmd { self.handle_command(cmd); } // Handle loading of a new track to play if let PlayerState::Loading { ref mut loader, track_id, start_playback, play_request_id, } = self.state { match loader.poll() { Ok(Async::Ready(loaded_track)) => { self.start_playback( track_id, play_request_id, loaded_track, start_playback, ); if let PlayerState::Loading { .. } = self.state { panic!("The state wasn't changed by start_playback()"); } } Ok(Async::NotReady) => (), Err(_) => { warn!("Unable to load <{:?}>\nSkipping to next track", track_id); assert!(self.state.is_loading()); self.send_event(PlayerEvent::EndOfTrack { track_id, play_request_id, }) } } } // handle pending preload requests. if let PlayerPreload::Loading { ref mut loader, track_id, } = self.preload { match loader.poll() { Ok(Async::Ready(loaded_track)) => { self.send_event(PlayerEvent::Preloading { track_id }); self.preload = PlayerPreload::Ready { track_id, loaded_track, }; } Ok(Async::NotReady) => (), Err(_) => { debug!("Unable to preload {:?}", track_id); self.preload = PlayerPreload::None; // Let Spirc know that the track was unavailable. if let PlayerState::Playing { play_request_id, .. } | PlayerState::Paused { play_request_id, .. } = self.state { self.send_event(PlayerEvent::Unavailable { track_id, play_request_id, }); } } } } if self.state.is_playing() { self.ensure_sink_running(); if let PlayerState::Playing { track_id, play_request_id, ref mut decoder, normalisation_factor, ref mut stream_position_pcm, ref mut reported_nominal_start_time, duration_ms, .. } = self.state { let packet = decoder.next_packet().expect("Vorbis error"); if let Some(ref packet) = packet { *stream_position_pcm = *stream_position_pcm + (packet.data().len() / 2) as u64; let stream_position_millis = Self::position_pcm_to_ms(*stream_position_pcm); let notify_about_position = match *reported_nominal_start_time { None => true, Some(reported_nominal_start_time) => { // only notify if we're behind. If we're ahead it's probably due to a buffer of the backend and we;re actually in time. let lag = (Instant::now() - reported_nominal_start_time).as_millis() as i64 - stream_position_millis as i64; if lag > 1000 { true } else { false } } }; if notify_about_position { *reported_nominal_start_time = Some( Instant::now() - Duration::from_millis(stream_position_millis as u64), ); self.send_event(PlayerEvent::Playing { track_id, play_request_id, position_ms: stream_position_millis as u32, duration_ms, }); } } self.handle_packet(packet, normalisation_factor); } else { unreachable!(); }; } if let PlayerState::Playing { track_id, play_request_id, duration_ms, stream_position_pcm, ref mut stream_loader_controller, ref mut suggested_to_preload_next_track, .. } | PlayerState::Paused { track_id, play_request_id, duration_ms, stream_position_pcm, ref mut stream_loader_controller, ref mut suggested_to_preload_next_track, .. 
} = self.state { if (!*suggested_to_preload_next_track) && ((duration_ms as i64 - Self::position_pcm_to_ms(stream_position_pcm) as i64) < PRELOAD_NEXT_TRACK_BEFORE_END_DURATION_MS as i64) && stream_loader_controller.range_to_end_available() { *suggested_to_preload_next_track = true; self.send_event(PlayerEvent::TimeToPreloadNextTrack { track_id, play_request_id, }); } } if self.session.is_invalid() { return Ok(Async::Ready(())); } if (!self.state.is_playing()) && all_futures_completed_or_not_ready { return Ok(Async::NotReady); } } } } impl PlayerInternal { fn position_pcm_to_ms(position_pcm: u64) -> u32 { (position_pcm * 10 / 441) as u32 } fn position_ms_to_pcm(position_ms: u32) -> u64 { position_ms as u64 * 441 / 10 } fn ensure_sink_running(&mut self) { if self.sink_status != SinkStatus::Running { trace!("== Starting sink =="); if let Some(callback) = &mut self.sink_event_callback { callback(SinkStatus::Running); } match self.sink.start() { Ok(()) => self.sink_status = SinkStatus::Running, Err(err) => error!("Could not start audio: {}", err), } } } fn ensure_sink_stopped(&mut self, temporarily: bool) { match self.sink_status { SinkStatus::Running => { trace!("== Stopping sink =="); self.sink.stop().unwrap(); self.sink_status = if temporarily { SinkStatus::TemporarilyClosed } else { SinkStatus::Closed }; if let Some(callback) = &mut self.sink_event_callback { callback(self.sink_status); } } SinkStatus::TemporarilyClosed => { if !temporarily { self.sink_status = SinkStatus::Closed; if let Some(callback) = &mut self.sink_event_callback { callback(SinkStatus::Closed); } } } SinkStatus::Closed => (), } } fn handle_player_stop(&mut self) { match self.state { PlayerState::Playing { track_id, play_request_id, .. } | PlayerState::Paused { track_id, play_request_id, .. } | PlayerState::EndOfTrack { track_id, play_request_id, .. } | PlayerState::Loading { track_id, play_request_id, .. } => { self.ensure_sink_stopped(false); self.send_event(PlayerEvent::Stopped { track_id, play_request_id, }); self.state = PlayerState::Stopped; } PlayerState::Stopped => (), PlayerState::Invalid => panic!("invalid state"), } } fn handle_play(&mut self) { if let PlayerState::Paused { track_id, play_request_id, stream_position_pcm, duration_ms, .. } = self.state { self.state.paused_to_playing(); let position_ms = Self::position_pcm_to_ms(stream_position_pcm); self.send_event(PlayerEvent::Playing { track_id, play_request_id, position_ms, duration_ms, }); self.ensure_sink_running(); } else { warn!("Player::play called from invalid state"); } } fn handle_pause(&mut self) { if let PlayerState::Playing { track_id, play_request_id, stream_position_pcm, duration_ms, .. 
} = self.state { self.state.playing_to_paused(); self.ensure_sink_stopped(false); let position_ms = Self::position_pcm_to_ms(stream_position_pcm); self.send_event(PlayerEvent::Paused { track_id, play_request_id, position_ms, duration_ms, }); } else { warn!("Player::pause called from invalid state"); } } fn handle_packet(&mut self, packet: Option<VorbisPacket>, normalisation_factor: f32) { match packet { Some(mut packet) => { if packet.data().len() > 0 { if let Some(ref editor) = self.audio_filter { editor.modify_stream(&mut packet.data_mut()) }; if self.config.normalisation && normalisation_factor != 1.0 { for x in packet.data_mut().iter_mut() { *x = (*x as f32 * normalisation_factor) as i16; } } if let Err(err) = self.sink.write(&packet.data()) { error!("Could not write audio: {}", err); self.ensure_sink_stopped(false); } } } None => { self.state.playing_to_end_of_track(); if let PlayerState::EndOfTrack { track_id, play_request_id, .. } = self.state { self.send_event(PlayerEvent::EndOfTrack { track_id, play_request_id, }) } else { unreachable!(); } } } } fn start_playback( &mut self, track_id: SpotifyId, play_request_id: u64, loaded_track: PlayerLoadedTrackData, start_playback: bool, ) { let position_ms = Self::position_pcm_to_ms(loaded_track.stream_position_pcm); if start_playback { self.ensure_sink_running(); self.send_event(PlayerEvent::Playing { track_id, play_request_id, position_ms, duration_ms: loaded_track.duration_ms, }); self.state = PlayerState::Playing { track_id: track_id, play_request_id: play_request_id, decoder: loaded_track.decoder, normalisation_factor: loaded_track.normalisation_factor, stream_loader_controller: loaded_track.stream_loader_controller, duration_ms: loaded_track.duration_ms, bytes_per_second: loaded_track.bytes_per_second, stream_position_pcm: loaded_track.stream_position_pcm, reported_nominal_start_time: Some( Instant::now() - Duration::from_millis(position_ms as u64), ), suggested_to_preload_next_track: false, }; } else { self.ensure_sink_stopped(false); self.state = PlayerState::Paused { track_id: track_id, play_request_id: play_request_id, decoder: loaded_track.decoder, normalisation_factor: loaded_track.normalisation_factor, stream_loader_controller: loaded_track.stream_loader_controller, duration_ms: loaded_track.duration_ms, bytes_per_second: loaded_track.bytes_per_second, stream_position_pcm: loaded_track.stream_position_pcm, suggested_to_preload_next_track: false, }; self.send_event(PlayerEvent::Paused { track_id, play_request_id, position_ms, duration_ms: loaded_track.duration_ms, }); } } fn handle_command_load( &mut self, track_id: SpotifyId, play_request_id: u64, play: bool, position_ms: u32, ) { if !self.config.gapless { self.ensure_sink_stopped(play); } // emit the correct player event match self.state { PlayerState::Playing { track_id: old_track_id, .. } | PlayerState::Paused { track_id: old_track_id, .. } | PlayerState::EndOfTrack { track_id: old_track_id, .. } | PlayerState::Loading { track_id: old_track_id, .. } => self.send_event(PlayerEvent::Changed { old_track_id: old_track_id, new_track_id: track_id, }), PlayerState::Stopped => self.send_event(PlayerEvent::Started { track_id, play_request_id, position_ms, }), PlayerState::Invalid { .. } => panic!("Player is in an invalid state."), } // Now we check at different positions whether we already have a pre-loaded version // of this track somewhere. If so, use it and return. // Check if there's a matching loaded track in the EndOfTrack player state. 
// This is the case if we're repeating the same track again. if let PlayerState::EndOfTrack { track_id: previous_track_id, .. } = self.state { if previous_track_id == track_id { let mut loaded_track = match mem::replace(&mut self.state, PlayerState::Invalid) { PlayerState::EndOfTrack { loaded_track, .. } => loaded_track, _ => unreachable!(), }; if Self::position_ms_to_pcm(position_ms) != loaded_track.stream_position_pcm { loaded_track .stream_loader_controller .set_random_access_mode(); let _ = loaded_track.decoder.seek(position_ms as i64); // This may be blocking. // But most likely the track is fully // loaded already because we played // to the end of it. loaded_track.stream_loader_controller.set_stream_mode(); loaded_track.stream_position_pcm = Self::position_ms_to_pcm(position_ms); } self.preload = PlayerPreload::None; self.start_playback(track_id, play_request_id, loaded_track, play); if let PlayerState::Invalid = self.state { panic!("start_playback() hasn't set a valid player state."); } return; } } // Check if we are already playing the track. If so, just do a seek and update our info. if let PlayerState::Playing { track_id: current_track_id, ref mut stream_position_pcm, ref mut decoder, ref mut stream_loader_controller, .. } | PlayerState::Paused { track_id: current_track_id, ref mut stream_position_pcm, ref mut decoder, ref mut stream_loader_controller, .. } = self.state { if current_track_id == track_id { // we can use the current decoder. Ensure it's at the correct position. if Self::position_ms_to_pcm(position_ms) != *stream_position_pcm { stream_loader_controller.set_random_access_mode(); let _ = decoder.seek(position_ms as i64); // This may be blocking. stream_loader_controller.set_stream_mode(); *stream_position_pcm = Self::position_ms_to_pcm(position_ms); } // Move the info from the current state into a PlayerLoadedTrackData so we can use // the usual code path to start playback. let old_state = mem::replace(&mut self.state, PlayerState::Invalid); if let PlayerState::Playing { stream_position_pcm, decoder, stream_loader_controller, bytes_per_second, duration_ms, normalisation_factor, .. } | PlayerState::Paused { stream_position_pcm, decoder, stream_loader_controller, bytes_per_second, duration_ms, normalisation_factor, .. } = old_state { let loaded_track = PlayerLoadedTrackData { decoder, normalisation_factor, stream_loader_controller, bytes_per_second, duration_ms, stream_position_pcm, }; self.preload = PlayerPreload::None; self.start_playback(track_id, play_request_id, loaded_track, play); if let PlayerState::Invalid = self.state { panic!("start_playback() hasn't set a valid player state."); } return; } else { unreachable!(); } } } // Check if the requested track has been preloaded already. If so use the preloaded data. if let PlayerPreload::Ready { track_id: loaded_track_id, .. } = self.preload { if track_id == loaded_track_id { let preload = std::mem::replace(&mut self.preload, PlayerPreload::None); if let PlayerPreload::Ready { track_id, mut loaded_track, } = preload { if Self::position_ms_to_pcm(position_ms) != loaded_track.stream_position_pcm { loaded_track .stream_loader_controller .set_random_access_mode(); let _ = loaded_track.decoder.seek(position_ms as i64); // This may be blocking loaded_track.stream_loader_controller.set_stream_mode(); } self.start_playback(track_id, play_request_id, loaded_track, play); return; } else { unreachable!(); } } } // We need to load the track - either from scratch or by completing a preload. 
        // In any case we go into a Loading state to load the track.
        self.ensure_sink_stopped(play);

        self.send_event(PlayerEvent::Loading {
            track_id,
            play_request_id,
            position_ms,
        });

        // Try to extract a pending loader from the preloading mechanism
        let loader = if let PlayerPreload::Loading {
            track_id: loaded_track_id,
            ..
        } = self.preload
        {
            if (track_id == loaded_track_id) && (position_ms == 0) {
                let mut preload = PlayerPreload::None;
                std::mem::swap(&mut preload, &mut self.preload);
                if let PlayerPreload::Loading { loader, .. } = preload {
                    Some(loader)
                } else {
                    None
                }
            } else {
                None
            }
        } else {
            None
        };

        self.preload = PlayerPreload::None;

        // If we don't have a loader yet, create one from scratch.
        let loader = loader
            .or_else(|| Some(self.load_track(track_id, position_ms)))
            .unwrap();

        // Set ourselves to a loading state.
        self.state = PlayerState::Loading {
            track_id,
            play_request_id,
            start_playback: play,
            loader,
        };
    }

    fn handle_command_preload(&mut self, track_id: SpotifyId) {
        debug!("Preloading track");
        let mut preload_track = true;

        // check whether the track is already loaded somewhere or being loaded.
        if let PlayerPreload::Loading {
            track_id: currently_loading,
            ..
        }
        | PlayerPreload::Ready {
            track_id: currently_loading,
            ..
        } = self.preload
        {
            if currently_loading == track_id {
                // we're already preloading the requested track.
                preload_track = false;
            } else {
                // we're preloading something else - cancel it.
                self.preload = PlayerPreload::None;
            }
        }

        if let PlayerState::Playing {
            track_id: current_track_id,
            ..
        }
        | PlayerState::Paused {
            track_id: current_track_id,
            ..
        }
        | PlayerState::EndOfTrack {
            track_id: current_track_id,
            ..
        } = self.state
        {
            if current_track_id == track_id {
                // we already have the requested track loaded.
                preload_track = false;
            }
        }

        // schedule the preload of the current track if desired.
        if preload_track {
            let loader = self.load_track(track_id, 0);
            self.preload = PlayerPreload::Loading { track_id, loader }
        }
    }

    fn handle_command_seek(&mut self, position_ms: u32) {
        if let Some(stream_loader_controller) = self.state.stream_loader_controller() {
            stream_loader_controller.set_random_access_mode();
        }
        if let Some(decoder) = self.state.decoder() {
            match decoder.seek(position_ms as i64) {
                Ok(_) => {
                    if let PlayerState::Playing {
                        ref mut stream_position_pcm,
                        ..
                    }
                    | PlayerState::Paused {
                        ref mut stream_position_pcm,
                        ..
                    } = self.state
                    {
                        *stream_position_pcm = Self::position_ms_to_pcm(position_ms);
                    }
                }
                Err(err) => error!("Vorbis error: {:?}", err),
            }
        } else {
            warn!("Player::seek called from invalid state");
        }

        // If we're playing, ensure that we have enough data loaded to avoid a buffer underrun.
        if let Some(stream_loader_controller) = self.state.stream_loader_controller() {
            stream_loader_controller.set_stream_mode();
        }

        // ensure we have a bit of a buffer of downloaded data
        self.preload_data_before_playback();

        if let PlayerState::Playing {
            track_id,
            play_request_id,
            ref mut reported_nominal_start_time,
            duration_ms,
            ..
        } = self.state
        {
            *reported_nominal_start_time =
                Some(Instant::now() - Duration::from_millis(position_ms as u64));
            self.send_event(PlayerEvent::Playing {
                track_id,
                play_request_id,
                position_ms,
                duration_ms,
            });
        }
        if let PlayerState::Paused {
            track_id,
            play_request_id,
            duration_ms,
            ..
        } = self.state
        {
            self.send_event(PlayerEvent::Paused {
                track_id,
                play_request_id,
                position_ms,
                duration_ms,
            });
        }
    }

    fn handle_command(&mut self, cmd: PlayerCommand) {
        debug!("command={:?}", cmd);
        match cmd {
            PlayerCommand::Load {
                track_id,
                play_request_id,
                play,
                position_ms,
            } => self.handle_command_load(track_id, play_request_id, play, position_ms),

            PlayerCommand::Preload { track_id } => self.handle_command_preload(track_id),

            PlayerCommand::Seek(position_ms) => self.handle_command_seek(position_ms),

            PlayerCommand::Play => self.handle_play(),

            PlayerCommand::Pause => self.handle_pause(),

            PlayerCommand::Stop => self.handle_player_stop(),

            PlayerCommand::AddEventSender(sender) => self.event_senders.push(sender),

            PlayerCommand::SetSinkEventCallback(callback) => self.sink_event_callback = callback,

            PlayerCommand::EmitVolumeSetEvent(volume) => {
                self.send_event(PlayerEvent::VolumeSet { volume })
            }
        }
    }

    fn send_event(&mut self, event: PlayerEvent) {
        let mut index = 0;
        while index < self.event_senders.len() {
            match self.event_senders[index].unbounded_send(event.clone()) {
                Ok(_) => index += 1,
                Err(_) => {
                    self.event_senders.remove(index);
                }
            }
        }
    }

    fn load_track(
        &self,
        spotify_id: SpotifyId,
        position_ms: u32,
    ) -> Box<dyn Future<Item = PlayerLoadedTrackData, Error = ()>> {
        // This method creates a future that returns the loaded stream and associated info.
        // Ideally all work should be done using asynchronous code. However, seek() on the
        // audio stream is implemented in a blocking fashion. Thus, we can't turn it into a
        // future easily. Instead we spawn a thread to do the work and return a one-shot
        // channel as the future to work with.

        let loader = PlayerTrackLoader {
            session: self.session.clone(),
            config: self.config.clone(),
        };

        let (result_tx, result_rx) = futures::sync::oneshot::channel();

        std::thread::spawn(move || {
            loader
                .load_track(spotify_id, position_ms)
                .and_then(move |data| {
                    let _ = result_tx.send(data);
                    Some(())
                });
        });

        Box::new(result_rx.map_err(|_| ()))
    }

    fn preload_data_before_playback(&mut self) {
        if let PlayerState::Playing {
            bytes_per_second,
            ref mut stream_loader_controller,
            ..
        } = self.state
        {
            // Request our read ahead range
            let request_data_length = max(
                (READ_AHEAD_DURING_PLAYBACK_ROUNDTRIPS
                    * (0.001 * stream_loader_controller.ping_time_ms() as f64)
                    * bytes_per_second as f64) as usize,
                (READ_AHEAD_DURING_PLAYBACK_SECONDS * bytes_per_second as f64) as usize,
            );
            stream_loader_controller.fetch_next(request_data_length);

            // Request the part we want to wait for blocking. This effectively means we wait
            // for the previous request to partially complete.
            let wait_for_data_length = max(
                (READ_AHEAD_BEFORE_PLAYBACK_ROUNDTRIPS
                    * (0.001 * stream_loader_controller.ping_time_ms() as f64)
                    * bytes_per_second as f64) as usize,
                (READ_AHEAD_BEFORE_PLAYBACK_SECONDS * bytes_per_second as f64) as usize,
            );
            stream_loader_controller.fetch_next_blocking(wait_for_data_length);
        }
    }
}

impl Drop for PlayerInternal {
    fn drop(&mut self) {
        debug!("drop PlayerInternal[{}]", self.session.session_id());
    }
}

impl ::std::fmt::Debug for PlayerCommand {
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        match *self {
            PlayerCommand::Load {
                track_id,
                play,
                position_ms,
                ..
} => f .debug_tuple("Load") .field(&track_id) .field(&play) .field(&position_ms) .finish(), PlayerCommand::Preload { track_id } => { f.debug_tuple("Preload").field(&track_id).finish() } PlayerCommand::Play => f.debug_tuple("Play").finish(), PlayerCommand::Pause => f.debug_tuple("Pause").finish(), PlayerCommand::Stop => f.debug_tuple("Stop").finish(), PlayerCommand::Seek(position) => f.debug_tuple("Seek").field(&position).finish(), PlayerCommand::AddEventSender(_) => f.debug_tuple("AddEventSender").finish(), PlayerCommand::SetSinkEventCallback(_) => { f.debug_tuple("SetSinkEventCallback").finish() } PlayerCommand::EmitVolumeSetEvent(volume) => { f.debug_tuple("VolumeSet").field(&volume).finish() } } } } impl ::std::fmt::Debug for PlayerState { fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { use PlayerState::*; match *self { Stopped => f.debug_struct("Stopped").finish(), Loading { track_id, play_request_id, .. } => f .debug_struct("Loading") .field("track_id", &track_id) .field("play_request_id", &play_request_id) .finish(), Paused { track_id, play_request_id, .. } => f .debug_struct("Paused") .field("track_id", &track_id) .field("play_request_id", &play_request_id) .finish(), Playing { track_id, play_request_id, .. } => f .debug_struct("Playing") .field("track_id", &track_id) .field("play_request_id", &play_request_id) .finish(), EndOfTrack { track_id, play_request_id, .. } => f .debug_struct("EndOfTrack") .field("track_id", &track_id) .field("play_request_id", &play_request_id) .finish(), Invalid => f.debug_struct("Invalid").finish(), } } } struct Subfile<T: Read + Seek> { stream: T, offset: u64, } impl<T: Read + Seek> Subfile<T> { pub fn new(mut stream: T, offset: u64) -> Subfile<T> { stream.seek(SeekFrom::Start(offset)).unwrap(); Subfile { stream: stream, offset: offset, } } } impl<T: Read + Seek> Read for Subfile<T> { fn read(&mut self, buf: &mut [u8]) -> Result<usize> { self.stream.read(buf) } } impl<T: Read + Seek> Seek for Subfile<T> { fn seek(&mut self, mut pos: SeekFrom) -> Result<u64> { pos = match pos { SeekFrom::Start(offset) => SeekFrom::Start(offset + self.offset), x => x, }; let newpos = self.stream.seek(pos)?; if newpos > self.offset { Ok(newpos - self.offset) } else { Ok(0) } } }
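// A minimal sketch (added, not part of the original file): the two position
// conversion helpers above assume a fixed 44100 Hz sample rate, since
// ms -> pcm multiplies by 44100 / 1000 == 441 / 10. A quick round-trip check
// of that arithmetic:
#[cfg(test)]
mod position_conversion_tests {
    #[test]
    fn ms_pcm_round_trip_at_44100_hz() {
        let position_ms: u32 = 120_000; // two minutes
        let pcm = position_ms as u64 * 441 / 10; // mirrors position_ms_to_pcm
        let ms = (pcm * 10 / 441) as u32; // mirrors position_pcm_to_ms
        assert_eq!(pcm, 5_292_000); // 120 s * 44100 samples/s
        assert_eq!(ms, position_ms);
    }
}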
33.662974
151
0.504036
abbab5c23dbb760cc07f77d0fe7c91655adc3eec
385
// Test that specializing on a `rustc_specialization_trait` trait is allowed. // check-pass #![feature(min_specialization)] #![feature(rustc_attrs)] #[rustc_specialization_trait] trait SpecTrait { fn g(&self); } trait X { fn f(&self); } impl<T> X for T { default fn f(&self) {} } impl<T: SpecTrait> X for T { fn f(&self) { self.g(); } } fn main() {}
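// Note (added commentary, not part of the upstream test): under
// `min_specialization`, the second `impl<T: SpecTrait> X for T` is accepted
// only because `SpecTrait` is marked `#[rustc_specialization_trait]`;
// bounding a specializing impl on an ordinary trait would be rejected.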
13.75
77
0.612987
9cc204a767134b1320bd09a353a875b3eb8768d1
417
use crate::*; use super::Wrap; /// Format agnostic PE view. pub type PeView<'a> = Wrap<pe32::PeView<'a>, pe64::PeView<'a>>; impl<'a> PeView<'a> { pub fn from_bytes<T: AsRef<[u8]> + ?Sized>(image: &'a T) -> Result<PeView<'a>> { match pe64::PeView::from_bytes(image) { Ok(file) => Ok(Wrap::T64(file)), Err(Error::PeMagic) => Ok(Wrap::T32(pe32::PeView::from_bytes(image)?)), Err(err) => Err(err), } } }
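// A minimal usage sketch (illustrative, not part of the original file):
// `from_bytes` tries the PE32+ parser first and falls back to PE32 when the
// optional-header magic does not match, so callers can dispatch on the
// returned `Wrap` variant. The `describe` helper below is invented here.
//
// fn describe(image: &[u8]) {
//     match PeView::from_bytes(image) {
//         Ok(Wrap::T32(_)) => println!("parsed as PE32"),
//         Ok(Wrap::T64(_)) => println!("parsed as PE32+ (64-bit)"),
//         Err(err) => println!("not a PE image: {:?}", err),
//     }
// }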
26.0625
81
0.59952
5df442faa5066985c5d385d6e05ab8668467ed3c
44,484
use std::{ default::Default, fmt::{self, Display, Write}, ops::Add, }; use crate::model::{ guild::Emoji, id::{ChannelId, RoleId, UserId}, misc::Mentionable, }; /// The Message Builder is an ergonomic utility to easily build a message, /// by adding text and mentioning mentionable structs. /// /// The finalized value can be accessed via [`Self::build`] or the inner value. /// /// # Examples /// /// Build a message, mentioning a [`Self::user`] and an [`Self::emoji`], and retrieving the /// value: /// /// ```rust,no_run /// # use serde_json::json; /// # use serenity::model::prelude::*; /// # /// # async fn run() { /// # let user = UserId(1); /// # let emoji = serde_json::from_value::<Emoji>(json!({ /// # "animated": false, /// # "id": EmojiId(2), /// # "name": "test", /// # "managed": false, /// # "require_colons": true, /// # "roles": Vec::<Role>::new(), /// # })).unwrap(); /// # /// use serenity::utils::MessageBuilder; /// /// // assuming an `emoji` and `user` have already been bound /// /// let content = MessageBuilder::new() /// .push("You sent a message, ") /// .mention(&user) /// .push("! ") /// .emoji(&emoji) /// .build(); /// # } /// ``` #[derive(Clone, Debug, Default)] pub struct MessageBuilder(pub String); impl MessageBuilder { /// Creates a new, empty builder. /// /// # Examples /// /// Create a new [`MessageBuilder`]: /// /// ```rust /// use serenity::utils::MessageBuilder; /// /// let message = MessageBuilder::new(); /// /// // alternatively: /// let message = MessageBuilder::default(); /// ``` pub fn new() -> MessageBuilder { MessageBuilder::default() } /// Pulls the inner value out of the builder. /// /// # Examples /// /// Create a string mentioning a channel by Id, and then suffixing `"!"`, /// and finally building it to retrieve the inner String: /// /// ```rust /// use serenity::model::id::ChannelId; /// use serenity::utils::MessageBuilder; /// /// let channel_id = ChannelId(81384788765712384); /// /// let content = MessageBuilder::new().channel(channel_id).push("!").build(); /// /// assert_eq!(content, "<#81384788765712384>!"); /// ``` /// /// This is equivalent to simply retrieving the tuple struct's first value: /// /// ```rust /// use serenity::utils::MessageBuilder; /// /// let mut content = MessageBuilder::new(); /// content.push("test"); /// /// assert_eq!(content.build(), "test"); /// ``` pub fn build(&mut self) -> String { self.clone().0 } /// Mentions the [`GuildChannel`] in the built message. /// /// This accepts anything that converts _into_ a [`ChannelId`]. Refer to /// [`ChannelId`]'s documentation for more information. /// /// Refer to [`ChannelId`]'s [Display implementation] for more information on /// how this is formatted. 
    ///
    /// # Examples
    ///
    /// Mentioning a [`Channel`] by Id:
    ///
    /// ```rust
    /// use serenity::model::id::ChannelId;
    /// use serenity::utils::MessageBuilder;
    ///
    /// let channel_id = ChannelId(81384788765712384);
    ///
    /// let content = MessageBuilder::new().push("The channel is: ").channel(channel_id).build();
    ///
    /// assert_eq!(content, "The channel is: <#81384788765712384>");
    /// ```
    ///
    /// [`Channel`]: crate::model::channel::Channel
    /// [`GuildChannel`]: crate::model::channel::GuildChannel
    /// [Display implementation]: ChannelId#impl-Display
    #[inline]
    pub fn channel<C: Into<ChannelId>>(&mut self, channel: C) -> &mut Self {
        self._channel(channel.into())
    }

    fn _channel(&mut self, channel: ChannelId) -> &mut Self {
        #[allow(clippy::let_underscore_must_use)]
        let _ = write!(self.0, "{}", channel.mention()); // should not error, ignoring

        self
    }

    /// Displays the given emoji in the built message.
    ///
    /// Refer to [`Emoji`]'s [Display implementation] for more information on how
    /// this is formatted.
    ///
    /// # Examples
    ///
    /// Mention an emoji in a message's content:
    ///
    /// ```rust
    /// # use serde_json::json;
    /// # use serenity::model::guild::Role;
    /// #
    /// # {
    /// #
    /// use serenity::model::guild::Emoji;
    /// use serenity::model::id::EmojiId;
    /// use serenity::utils::MessageBuilder;
    ///
    /// # let emoji = serde_json::from_value::<Emoji>(json!({
    /// #     "animated": false,
    /// #     "id": EmojiId(302516740095606785),
    /// #     "managed": true,
    /// #     "name": "smugAnimeFace".to_string(),
    /// #     "require_colons": true,
    /// #     "roles": Vec::<Role>::new(),
    /// # })).unwrap();
    ///
    /// let message = MessageBuilder::new().push("foo ").emoji(&emoji).push(".").build();
    ///
    /// assert_eq!(message, "foo <:smugAnimeFace:302516740095606785>.");
    /// # }
    /// ```
    ///
    /// [Display implementation]: crate::model::guild::Emoji#impl-Display
    pub fn emoji(&mut self, emoji: &Emoji) -> &mut Self {
        #[allow(clippy::let_underscore_must_use)]
        let _ = write!(self.0, "{}", emoji); // should not error, ignoring

        self
    }

    /// Mentions something that implements the [`Mentionable`] trait.
    pub fn mention<M: Mentionable>(&mut self, item: &M) -> &mut Self {
        #[allow(clippy::let_underscore_must_use)]
        let _ = write!(self.0, "{}", item.mention()); // should not error, ignoring

        self
    }

    /// Pushes a string to the internal message content.
    ///
    /// Note that this does not mutate either the given data or the internal
    /// message content in any way prior to appending the given content to the
    /// internal message.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use serenity::utils::MessageBuilder;
    ///
    /// let mut message = MessageBuilder::new();
    /// message.push("test");
    ///
    /// assert_eq!(
    ///     {
    ///         message.push("ing");
    ///         message.build()
    ///     },
    ///     "testing"
    /// );
    /// ```
    #[inline]
    pub fn push<D: I>(&mut self, content: D) -> &mut Self {
        self._push(&content.into().to_string())
    }

    fn _push(&mut self, content: &str) -> &mut Self {
        self.0.push_str(content);

        self
    }

    /// Pushes a codeblock to the content, with optional syntax highlighting.
/// /// # Examples /// /// Pushing a Rust codeblock: /// /// ```rust,ignore /// use serenity::utils::MessageBuilder; /// /// let code = r#" /// fn main() { /// println!("Hello, world!"); /// } /// "#; /// /// let content = MessageBuilder::new() /// .push_codeblock(code, Some("rust")) /// .build(); /// /// let expected = r#"```rust /// fn main() { /// println!("Hello, world!"); /// } /// ```"#; /// /// assert_eq!(content, expected); /// ``` /// /// Pushing a codeblock without a language: /// ```rust /// use serenity::utils::MessageBuilder; /// /// let content = MessageBuilder::new() /// .push_codeblock("hello", None) /// .build(); /// /// assert_eq!(content, "```\nhello\n```"); /// ``` pub fn push_codeblock<D: I>(&mut self, content: D, language: Option<&str>) -> &mut Self { self.0.push_str("```"); if let Some(language) = language { self.0.push_str(language); } self.0.push('\n'); self.0.push_str(&content.into().to_string()); self.0.push_str("\n```"); self } /// Pushes inlined monospaced text to the content. /// /// # Examples /// /// Display a server configuration value to the user: /// /// ```rust /// use serenity::utils::MessageBuilder; /// /// let key = "prefix"; /// let value = "&"; /// /// let content = MessageBuilder::new() /// .push("The setting ") /// .push_mono(key) /// .push(" for this server is ") /// .push_mono(value) /// .push(".") /// .build(); /// /// let expected = format!("The setting `{}` for this server is `{}`.", key, value); /// /// assert_eq!(content, expected); /// ``` pub fn push_mono<D: I>(&mut self, content: D) -> &mut Self { self.0.push('`'); self.0.push_str(&content.into().to_string()); self.0.push('`'); self } /// Pushes inlined italicized text to the content. /// /// # Examples /// /// Emphasize information to the user: /// /// ```rust /// use serenity::utils::MessageBuilder; /// /// let content = MessageBuilder::new() /// .push("You don't ") /// .push_italic("always need") /// .push(" to italicize ") /// .push_italic("everything") /// .push(".") /// .build(); /// /// let expected = "You don't _always need_ to italicize _everything_."; /// /// assert_eq!(content, expected); /// ``` pub fn push_italic<D: I>(&mut self, content: D) -> &mut Self { self.0.push('_'); self.0.push_str(&content.into().to_string()); self.0.push('_'); self } /// Pushes an inline bold text to the content. pub fn push_bold<D: I>(&mut self, content: D) -> &mut Self { self.0.push_str("**"); self.0.push_str(&content.into().to_string()); self.0.push_str("**"); self } /// Pushes an underlined inline text to the content. pub fn push_underline<D: I>(&mut self, content: D) -> &mut Self { self.0.push_str("__"); self.0.push_str(&content.into().to_string()); self.0.push_str("__"); self } /// Pushes a strikethrough inline text to the content. pub fn push_strike<D: I>(&mut self, content: D) -> &mut Self { self.0.push_str("~~"); self.0.push_str(&content.into().to_string()); self.0.push_str("~~"); self } /// Pushes a spoiler'd inline text to the content. pub fn push_spoiler<D: I>(&mut self, content: D) -> &mut Self { self.0.push_str("||"); self.0.push_str(&content.into().to_string()); self.0.push_str("||"); self } /// Pushes a quoted inline text to the content pub fn push_quote<D: I>(&mut self, content: D) -> &mut Self { self.0.push_str("> "); self.0.push_str(&content.into().to_string()); self } /// Pushes the given text with a newline appended to the content. 
/// /// # Examples /// /// Push content and then append a newline: /// /// ```rust /// use serenity::utils::MessageBuilder; /// /// let content = MessageBuilder::new().push_line("hello").push("world").build(); /// /// assert_eq!(content, "hello\nworld"); /// ``` pub fn push_line<D: I>(&mut self, content: D) -> &mut Self { self.push(content); self.0.push('\n'); self } /// Pushes inlined monospace text with an added newline to the content. /// /// # Examples /// /// Push content and then append a newline: /// /// ```rust /// use serenity::utils::MessageBuilder; /// /// let content = MessageBuilder::new().push_mono_line("hello").push("world").build(); /// /// assert_eq!(content, "`hello`\nworld"); /// ``` pub fn push_mono_line<D: I>(&mut self, content: D) -> &mut Self { self.push_mono(content); self.0.push('\n'); self } /// Pushes an inlined italicized text with an added newline to the content. /// /// # Examples /// /// Push content and then append a newline: /// /// ```rust /// use serenity::utils::MessageBuilder; /// /// let content = MessageBuilder::new().push_italic_line("hello").push("world").build(); /// /// assert_eq!(content, "_hello_\nworld"); /// ``` pub fn push_italic_line<D: I>(&mut self, content: D) -> &mut Self { self.push_italic(content); self.0.push('\n'); self } /// Pushes an inline bold text with an added newline to the content. /// /// # Examples /// /// Push content and then append a newline: /// /// ```rust /// use serenity::utils::MessageBuilder; /// /// let content = MessageBuilder::new().push_bold_line("hello").push("world").build(); /// /// assert_eq!(content, "**hello**\nworld"); /// ``` pub fn push_bold_line<D: I>(&mut self, content: D) -> &mut Self { self.push_bold(content); self.0.push('\n'); self } /// Pushes an underlined inline text with an added newline to the content. /// /// # Examples /// /// Push content and then append a newline: /// /// ```rust /// use serenity::utils::MessageBuilder; /// /// let content = MessageBuilder::new().push_underline_line("hello").push("world").build(); /// /// assert_eq!(content, "__hello__\nworld"); /// ``` pub fn push_underline_line<D: I>(&mut self, content: D) -> &mut Self { self.push_underline(content); self.0.push('\n'); self } /// Pushes a strikethrough inline text with a newline added to the content. /// /// # Examples /// /// Push content and then append a newline: /// /// ```rust /// use serenity::utils::MessageBuilder; /// /// let content = MessageBuilder::new().push_strike_line("hello").push("world").build(); /// /// assert_eq!(content, "~~hello~~\nworld"); /// ``` pub fn push_strike_line<D: I>(&mut self, content: D) -> &mut Self { self.push_strike(content); self.0.push('\n'); self } /// Pushes a spoiler'd inline text with a newline added to the content. 
/// /// # Examples /// /// Push content and then append a newline: /// /// ```rust /// use serenity::utils::MessageBuilder; /// /// let content = MessageBuilder::new().push_spoiler_line("hello").push("world").build(); /// /// assert_eq!(content, "||hello||\nworld"); /// ``` pub fn push_spoiler_line<D: I>(&mut self, content: D) -> &mut Self { self.push_spoiler(content); self.0.push('\n'); self } /// Pushes a quoted inline text to the content /// /// # Examples /// /// Push content and then append a newline: /// /// ```rust /// use serenity::utils::MessageBuilder; /// /// let content = MessageBuilder::new().push_quote_line("hello").push("world").build(); /// /// assert_eq!(content, "> hello\nworld"); /// ``` pub fn push_quote_line<D: I>(&mut self, content: D) -> &mut Self { self.push_quote(content); self.0.push('\n'); self } /// Pushes text to your message, but normalizing content - that means /// ensuring that there's no unwanted formatting, mention spam etc. pub fn push_safe<C: I>(&mut self, content: C) -> &mut Self { { let mut c = content.into(); c.inner = normalize(&c.inner).replace('*', "\\*").replace('`', "\\`").replace('_', "\\_"); self.0.push_str(&c.to_string()); } self } /// Pushes a code-block to your message normalizing content. pub fn push_codeblock_safe<D: I>(&mut self, content: D, language: Option<&str>) -> &mut Self { self.0.push_str("```"); if let Some(language) = language { self.0.push_str(language); } self.0.push('\n'); { let mut c = content.into(); c.inner = normalize(&c.inner).replace("```", " "); self.0.push_str(&c.to_string()); } self.0.push_str("\n```"); self } /// Pushes an inline monospaced text to the content normalizing content. pub fn push_mono_safe<D: I>(&mut self, content: D) -> &mut Self { self.0.push('`'); { let mut c = content.into(); c.inner = normalize(&c.inner).replace('`', "'"); self.0.push_str(&c.to_string()); } self.0.push('`'); self } /// Pushes an inline italicized text to the content normalizing content. pub fn push_italic_safe<D: I>(&mut self, content: D) -> &mut Self { self.0.push('_'); { let mut c = content.into(); c.inner = normalize(&c.inner).replace('_', " "); self.0.push_str(&c.to_string()); } self.0.push('_'); self } /// Pushes an inline bold text to the content normalizing content. pub fn push_bold_safe<D: I>(&mut self, content: D) -> &mut Self { self.0.push_str("**"); { let mut c = content.into(); c.inner = normalize(&c.inner).replace("**", " "); self.0.push_str(&c.to_string()); } self.0.push_str("**"); self } /// Pushes an underlined inline text to the content normalizing content. pub fn push_underline_safe<D: I>(&mut self, content: D) -> &mut Self { self.0.push_str("__"); { let mut c = content.into(); c.inner = normalize(&c.inner).replace("__", " "); self.0.push_str(&c.to_string()); } self.0.push_str("__"); self } /// Pushes a strikethrough inline text to the content normalizing content. pub fn push_strike_safe<D: I>(&mut self, content: D) -> &mut Self { self.0.push_str("~~"); { let mut c = content.into(); c.inner = normalize(&c.inner).replace("~~", " "); self.0.push_str(&c.to_string()); } self.0.push_str("~~"); self } /// Pushes a spoiler'd inline text to the content normalizing content. pub fn push_spoiler_safe<D: I>(&mut self, content: D) -> &mut Self { self.0.push_str("||"); { let mut c = content.into(); c.inner = normalize(&c.inner).replace("||", " "); self.0.push_str(&c.to_string()); } self.0.push_str("||"); self } /// Pushes a quoted inline text to the content normalizing content. 
pub fn push_quote_safe<D: I>(&mut self, content: D) -> &mut Self { self.0.push_str("> "); { let mut c = content.into(); c.inner = normalize(&c.inner).replace("> ", " "); self.0.push_str(&c.to_string()); } self } /// Pushes text with a newline appended to the content normalizing content. /// /// # Examples /// /// Push content and then append a newline: /// /// ```rust /// use serenity::utils::MessageBuilder; /// /// let content = /// MessageBuilder::new().push_line_safe("Hello @everyone").push("How are you?").build(); /// /// assert_eq!(content, "Hello @\u{200B}everyone\nHow are you?"); /// ``` pub fn push_line_safe<D: I>(&mut self, content: D) -> &mut Self { self.push_safe(content); self.0.push('\n'); self } /// Pushes an inline monospaced text with added newline to the content normalizing content. /// /// # Examples /// /// Push content and then append a newline: /// /// ```rust /// use serenity::utils::MessageBuilder; /// /// let content = /// MessageBuilder::new().push_mono_line_safe("`hello @everyone`").push("world").build(); /// /// assert_eq!(content, "`'hello @\u{200B}everyone'`\nworld"); /// ``` pub fn push_mono_line_safe<D: I>(&mut self, content: D) -> &mut Self { self.push_mono_safe(content); self.0.push('\n'); self } /// Pushes an inline italicized text with added newline to the content normalizing content. /// /// # Examples /// /// Push content and then append a newline: /// /// ```rust /// use serenity::utils::MessageBuilder; /// /// let content = /// MessageBuilder::new().push_italic_line_safe("@everyone").push("Isn't a mention.").build(); /// /// assert_eq!(content, "_@\u{200B}everyone_\nIsn't a mention."); /// ``` pub fn push_italic_line_safe<D: I>(&mut self, content: D) -> &mut Self { self.push_italic_safe(content); self.0.push('\n'); self } /// Pushes an inline bold text with added newline to the content normalizing content. /// /// # Examples /// /// Push content and then append a newline: /// /// ```rust /// use serenity::utils::MessageBuilder; /// /// let content = /// MessageBuilder::new().push_bold_line_safe("@everyone").push("Isn't a mention.").build(); /// /// assert_eq!(content, "**@\u{200B}everyone**\nIsn't a mention."); /// ``` pub fn push_bold_line_safe<D: I>(&mut self, content: D) -> &mut Self { self.push_bold_safe(content); self.0.push('\n'); self } /// Pushes an underlined inline text with added newline to the content normalizing content. /// /// # Examples /// /// Push content and then append a newline: /// /// ```rust /// use serenity::utils::MessageBuilder; /// /// let content = MessageBuilder::new() /// .push_underline_line_safe("@everyone") /// .push("Isn't a mention.") /// .build(); /// /// assert_eq!(content, "__@\u{200B}everyone__\nIsn't a mention."); /// ``` pub fn push_underline_line_safe<D: I>(&mut self, content: D) -> &mut Self { self.push_underline_safe(content); self.0.push('\n'); self } /// Pushes a strikethrough inline text with added newline to the content normalizing /// content. /// /// # Examples /// /// Push content and then append a newline: /// /// ```rust /// use serenity::utils::MessageBuilder; /// /// let content = /// MessageBuilder::new().push_strike_line_safe("@everyone").push("Isn't a mention.").build(); /// /// assert_eq!(content, "~~@\u{200B}everyone~~\nIsn't a mention."); /// ``` pub fn push_strike_line_safe<D: I>(&mut self, content: D) -> &mut Self { self.push_strike_safe(content); self.0.push('\n'); self } /// Pushes a spoiler'd inline text with added newline to the content normalizing /// content. 
    ///
    /// # Examples
    ///
    /// Push content and then append a newline:
    ///
    /// ```rust
    /// use serenity::utils::MessageBuilder;
    ///
    /// let content =
    ///     MessageBuilder::new().push_spoiler_line_safe("@everyone").push("Isn't a mention.").build();
    ///
    /// assert_eq!(content, "||@\u{200B}everyone||\nIsn't a mention.");
    /// ```
    pub fn push_spoiler_line_safe<D: I>(&mut self, content: D) -> &mut Self {
        self.push_spoiler_safe(content);
        self.0.push('\n');

        self
    }

    /// Pushes a quoted inline text with added newline to the content normalizing
    /// content.
    ///
    /// # Examples
    ///
    /// Push content and then append a newline:
    ///
    /// ```rust
    /// use serenity::utils::MessageBuilder;
    ///
    /// let content =
    ///     MessageBuilder::new().push_quote_line_safe("@everyone").push("Isn't a mention.").build();
    ///
    /// assert_eq!(content, "> @\u{200B}everyone\nIsn't a mention.");
    /// ```
    pub fn push_quote_line_safe<D: I>(&mut self, content: D) -> &mut Self {
        self.push_quote_safe(content);
        self.0.push('\n');

        self
    }

    /// Starts a multi-line quote; every push after this one will be quoted
    pub fn quote_rest(&mut self) -> &mut Self {
        self.0.push_str("\n>>> ");

        self
    }

    /// Mentions the [`Role`] in the built message.
    ///
    /// This accepts anything that converts _into_ a [`RoleId`]. Refer to
    /// [`RoleId`]'s documentation for more information.
    ///
    /// Refer to [`RoleId`]'s [Display implementation] for more information on how
    /// this is formatted.
    ///
    /// [`Role`]: crate::model::guild::Role
    /// [Display implementation]: RoleId#impl-Display
    pub fn role<R: Into<RoleId>>(&mut self, role: R) -> &mut Self {
        #[allow(clippy::let_underscore_must_use)]
        let _ = write!(self.0, "{}", role.into().mention()); // should not error, ignoring

        self
    }

    /// Mentions the [`User`] in the built message.
    ///
    /// This accepts anything that converts _into_ a [`UserId`]. Refer to
    /// [`UserId`]'s documentation for more information.
    ///
    /// Refer to [`UserId`]'s [Display implementation] for more information on how
    /// this is formatted.
    ///
    /// [`User`]: crate::model::user::User
    /// [Display implementation]: UserId#impl-Display
    pub fn user<U: Into<UserId>>(&mut self, user: U) -> &mut Self {
        #[allow(clippy::let_underscore_must_use)]
        let _ = write!(self.0, "{}", user.into().mention()); // should not error, ignoring

        self
    }
}

impl Display for MessageBuilder {
    /// Formats the message builder into a string.
    ///
    /// This is done by simply taking the internal value of the tuple-struct and
    /// writing it into the formatter.
    ///
    /// # Examples
    ///
    /// Create a message builder, and format it into a string via the `format!`
    /// macro:
    ///
    /// ```rust
    /// use serenity::utils::MessageBuilder;
    ///
    /// let mut content = MessageBuilder::new();
    /// content.push("test");
    ///
    /// assert_eq!(format!("{}", content), "test");
    /// ```
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&self.0, f)
    }
}

/// A trait with additional functionality over the [`MessageBuilder`] for
/// creating content with additional functionality available only in embeds.
///
/// Namely, this allows you to create named links via the non-escaping
/// [`Self::push_named_link`] method and the escaping [`Self::push_named_link_safe`] method.
/// /// # Examples /// /// Make a named link to Rust's GitHub organization: /// /// ```rust /// #[cfg(feature = "utils")] /// { /// use serenity::utils::{EmbedMessageBuilding, MessageBuilder}; /// /// let msg = MessageBuilder::new() /// .push_named_link("Rust's GitHub", "https://github.com/rust-lang") /// .build(); /// /// assert_eq!(msg, "[Rust's GitHub](https://github.com/rust-lang)"); /// } /// /// #[cfg(not(feature = "utils"))] /// {} /// ``` pub trait EmbedMessageBuilding { /// Pushes a named link to a message, intended for use in embeds. /// /// # Examples /// /// Make a simple link to Rust's homepage for use in an embed: /// /// ```rust /// #[cfg(feature = "utils")] /// { /// use serenity::utils::{EmbedMessageBuilding, MessageBuilder}; /// /// let mut msg = MessageBuilder::new(); /// msg.push("Rust's website: "); /// msg.push_named_link("Homepage", "https://rust-lang.org"); /// let content = msg.build(); /// /// assert_eq!(content, "Rust's website: [Homepage](https://rust-lang.org)"); /// } /// /// #[cfg(not(feature = "utils"))] /// {} /// ``` fn push_named_link<T: I, U: I>(&mut self, name: T, url: U) -> &mut Self; /// Pushes a named link intended for use in an embed, but with a normalized /// name to avoid escaping issues. /// /// Refer to [`Self::push_named_link`] for more information. /// /// # Examples /// /// ```rust /// #[cfg(feature = "utils")] /// { /// use serenity::utils::{EmbedMessageBuilding, MessageBuilder}; /// /// let mut msg = MessageBuilder::new(); /// msg.push("A weird website name: "); /// msg.push_named_link_safe("Try to ] break links (](", "https://rust-lang.org"); /// let content = msg.build(); /// /// assert_eq!( /// content, /// "A weird website name: [Try to break links ( (](https://rust-lang.org)" /// ); /// } /// /// #[cfg(not(feature = "utils"))] /// {} /// ``` fn push_named_link_safe<T: I, U: I>(&mut self, name: T, url: U) -> &mut Self; } impl EmbedMessageBuilding for MessageBuilder { fn push_named_link<T: I, U: I>(&mut self, name: T, url: U) -> &mut Self { let name = name.into().to_string(); let url = url.into().to_string(); #[allow(clippy::let_underscore_must_use)] let _ = write!(self.0, "[{}]({})", name, url); // error cannot be returned, ignoring instead self } fn push_named_link_safe<T: I, U: I>(&mut self, name: T, url: U) -> &mut Self { self.0.push('['); { let mut c = name.into(); c.inner = normalize(&c.inner).replace("]", " "); self.0.push_str(&c.to_string()); } self.0.push_str("]("); { let mut c = url.into(); c.inner = normalize(&c.inner).replace(")", " "); self.0.push_str(&c.to_string()); } self.0.push(')'); self } } /// Formatting modifiers for MessageBuilder content pushes /// /// Provides an enum of formatting modifiers for a string, for combination with /// string types and Content types. 
/// /// # Examples /// /// Create a new Content type which describes a bold-italic "text": /// /// ```rust,no_run /// use serenity::utils::Content; /// use serenity::utils::ContentModifier::{Bold, Italic}; /// let content: Content = Bold + Italic + "text"; /// ``` #[non_exhaustive] pub enum ContentModifier { Italic, Bold, Strikethrough, Code, Underline, Spoiler, } /// Describes formatting on string content #[derive(Debug, Default, Clone)] pub struct Content { pub italic: bool, pub bold: bool, pub strikethrough: bool, pub inner: String, pub code: bool, pub underline: bool, pub spoiler: bool, } impl<T: ToString> Add<T> for Content { type Output = Content; fn add(mut self, rhs: T) -> Content { self.inner = self.inner + &rhs.to_string(); self } } impl<T: ToString> Add<T> for ContentModifier { type Output = Content; fn add(self, rhs: T) -> Content { let mut nc = self.to_content(); nc.inner = nc.inner + &rhs.to_string(); nc } } impl Add<ContentModifier> for Content { type Output = Content; fn add(mut self, rhs: ContentModifier) -> Content { self.apply(&rhs); self } } impl Add<ContentModifier> for ContentModifier { type Output = Content; fn add(self, rhs: ContentModifier) -> Content { let mut nc = self.to_content(); nc.apply(&rhs); nc } } impl ContentModifier { fn to_content(&self) -> Content { let mut nc = Content::default(); nc.apply(self); nc } } impl Content { pub fn apply(&mut self, modifier: &ContentModifier) { match *modifier { ContentModifier::Italic => { self.italic = true; }, ContentModifier::Bold => { self.bold = true; }, ContentModifier::Strikethrough => { self.strikethrough = true; }, ContentModifier::Code => { self.code = true; }, ContentModifier::Underline => { self.underline = true; }, ContentModifier::Spoiler => { self.spoiler = true; }, } } #[allow(clippy::inherent_to_string)] pub fn to_string(&self) -> String { trait UnwrapWith { fn unwrap_with(&self, n: usize) -> usize; } impl UnwrapWith for bool { fn unwrap_with(&self, n: usize) -> usize { if *self { n } else { 0 } } } let capacity = self.inner.len() + self.spoiler.unwrap_with(4) + self.bold.unwrap_with(4) + self.italic.unwrap_with(2) + self.strikethrough.unwrap_with(4) + self.underline.unwrap_with(4) + self.code.unwrap_with(2); let mut new_str = String::with_capacity(capacity); if self.spoiler { new_str.push_str("||"); } if self.bold { new_str.push_str("**"); } if self.italic { new_str.push('*'); } if self.strikethrough { new_str.push_str("~~"); } if self.underline { new_str.push_str("__"); } if self.code { new_str.push('`'); } new_str.push_str(&self.inner); if self.code { new_str.push('`'); } if self.underline { new_str.push_str("__"); } if self.strikethrough { new_str.push_str("~~"); } if self.italic { new_str.push('*'); } if self.bold { new_str.push_str("**"); } if self.spoiler { new_str.push_str("||"); } new_str } } impl From<ContentModifier> for Content { fn from(cm: ContentModifier) -> Content { cm.to_content() } } mod private { use std::fmt; use super::{Content, ContentModifier}; pub trait A {} impl A for ContentModifier {} impl A for Content {} impl<T: fmt::Display> A for T {} } /// This trait exists for the purpose of bypassing the "conflicting implementations" error from the compiler. 
pub trait I: self::private::A { fn into(self) -> Content; } impl<T: fmt::Display> I for T { fn into(self) -> Content { Content { italic: false, bold: false, strikethrough: false, inner: self.to_string(), code: false, underline: false, spoiler: false, } } } impl I for ContentModifier { fn into(self) -> Content { self.to_content() } } impl I for Content { fn into(self) -> Content { self } } fn normalize(text: &str) -> String { // Remove invite links and popular scam websites, mostly to prevent the // current user from triggering various ad detectors and prevent embeds. text.replace("discord.gg", "discord\u{2024}gg") .replace("discord.me", "discord\u{2024}me") .replace("discordlist.net", "discordlist\u{2024}net") .replace("discordservers.com", "discordservers\u{2024}com") .replace("discord.com/invite", "discord\u{2024}com/invite") .replace("discordapp.com/invite", "discordapp\u{2024}com/invite") // Remove right-to-left override and other similar annoying symbols .replace('\u{202E}', " ") // RTL Override .replace('\u{200F}', " ") // RTL Mark .replace('\u{202B}', " ") // RTL Embedding .replace('\u{200B}', " ") // Zero-width space .replace('\u{200D}', " ") // Zero-width joiner .replace('\u{200C}', " ") // Zero-width non-joiner // Remove everyone and here mentions. Has to be put after ZWS replacement // because it utilises it itself. .replace("@everyone", "@\u{200B}everyone") .replace("@here", "@\u{200B}here") } #[cfg(test)] mod test { use super::{ ContentModifier::{Bold, Code, Italic, Spoiler}, MessageBuilder, }; use crate::model::prelude::*; macro_rules! gen { ($($fn:ident => [$($text:expr => $expected:expr),+]),+) => ({ $( $( assert_eq!(MessageBuilder::new().$fn($text).0, $expected); )+ )+ }); } #[test] fn code_blocks() { let content = MessageBuilder::new().push_codeblock("test", Some("rb")).build(); assert_eq!(content, "```rb\ntest\n```"); } #[test] fn safe_content() { let content = MessageBuilder::new().push_safe("@everyone discord.gg/discord-api").build(); assert_ne!(content, "@everyone discord.gg/discord-api"); } #[test] fn no_free_formatting() { let content = MessageBuilder::new().push_bold_safe("test**test").build(); assert_ne!(content, "**test**test**"); } #[test] fn mentions() { let content_emoji = MessageBuilder::new() .emoji(&Emoji { animated: false, available: true, id: EmojiId(32), name: "Rohrkatze".to_string(), managed: false, require_colons: true, roles: vec![], user: None, }) .build(); let content_mentions = MessageBuilder::new().channel(1).mention(&UserId(2)).role(3).user(4).build(); assert_eq!(content_mentions, "<#1><@2><@&3><@4>"); assert_eq!(content_emoji, "<:Rohrkatze:32>"); } #[test] fn content() { let content = Bold + Italic + Code + "Fun!"; assert_eq!(content.to_string(), "***`Fun!`***"); let content = Spoiler + Bold + "Divert your eyes elsewhere"; assert_eq!(content.to_string(), "||**Divert your eyes elsewhere**||"); } #[test] fn init() { assert_eq!(MessageBuilder::new().0, ""); assert_eq!(MessageBuilder::default().0, ""); } #[test] fn message_content() { let message_content = MessageBuilder::new().push(Bold + Italic + Code + "Fun!").build(); assert_eq!(message_content, "***`Fun!`***"); } #[test] fn message_content_safe() { let message_content = MessageBuilder::new().push_safe(Bold + Italic + "test**test").build(); assert_eq!(message_content, "***test\\*\\*test***"); } #[test] fn push() { assert_eq!(MessageBuilder::new().push('a').0, "a"); assert!(MessageBuilder::new().push("").0.is_empty()); } #[test] fn push_codeblock() { let content = 
&MessageBuilder::new().push_codeblock("foo", None).0.clone(); assert_eq!(content, "```\nfoo\n```"); let content = &MessageBuilder::new().push_codeblock("fn main() { }", Some("rs")).0.clone(); assert_eq!(content, "```rs\nfn main() { }\n```"); } #[test] fn push_codeblock_safe() { assert_eq!( MessageBuilder::new().push_codeblock_safe("foo", Some("rs")).0, "```rs\nfoo\n```", ); assert_eq!(MessageBuilder::new().push_codeblock_safe("", None).0, "```\n\n```",); assert_eq!(MessageBuilder::new().push_codeblock_safe("1 * 2", None).0, "```\n1 * 2\n```",); assert_eq!( MessageBuilder::new().push_codeblock_safe("`1 * 3`", None).0, "```\n`1 * 3`\n```", ); assert_eq!(MessageBuilder::new().push_codeblock_safe("```.```", None).0, "```\n . \n```",); } #[test] fn push_safe() { gen! { push_safe => [ "" => "", "foo" => "foo", "1 * 2" => "1 \\* 2" ], push_bold_safe => [ "" => "****", "foo" => "**foo**", "*foo*" => "***foo***", "f*o**o" => "**f*o o**" ], push_italic_safe => [ "" => "__", "foo" => "_foo_", "f_o_o" => "_f o o_" ], push_mono_safe => [ "" => "``", "foo" => "`foo`", "asterisk *" => "`asterisk *`", "`ticks`" => "`'ticks'`" ], push_strike_safe => [ "" => "~~~~", "foo" => "~~foo~~", "foo ~" => "~~foo ~~~", "~~foo" => "~~ foo~~", "~~fo~~o~~" => "~~ fo o ~~" ], push_underline_safe => [ "" => "____", "foo" => "__foo__", "foo _" => "__foo ___", "__foo__ bar" => "__ foo bar__" ], push_spoiler_safe => [ "" => "||||", "foo" => "||foo||", "foo |" => "||foo |||", "||foo|| bar" =>"|| foo bar||" ], push_line_safe => [ "" => "\n", "foo" => "foo\n", "1 * 2" => "1 \\* 2\n" ], push_mono_line_safe => [ "" => "``\n", "a ` b `" => "`a ' b '`\n" ], push_italic_line_safe => [ "" => "__\n", "a * c" => "_a * c_\n" ], push_bold_line_safe => [ "" => "****\n", "a ** d" => "**a d**\n" ], push_underline_line_safe => [ "" => "____\n", "a __ e" => "__a e__\n" ], push_strike_line_safe => [ "" => "~~~~\n", "a ~~ f" => "~~a f~~\n" ], push_spoiler_line_safe => [ "" => "||||\n", "a || f" => "||a f||\n" ] }; } #[test] fn push_unsafe() { gen! { push_bold => [ "a" => "**a**", "" => "****", '*' => "*****", "**" => "******" ], push_bold_line => [ "" => "****\n", "foo" => "**foo**\n" ], push_italic => [ "a" => "_a_", "" => "__", "_" => "___", "__" => "____" ], push_italic_line => [ "" => "__\n", "foo" => "_foo_\n", "_?" 
=> "__?_\n" ], push_line => [ "" => "\n", "foo" => "foo\n", "\n\n" => "\n\n\n", "\nfoo\n" => "\nfoo\n\n" ], push_mono => [ "a" => "`a`", "" => "``", "`" => "```", "``" => "````" ], push_mono_line => [ "" => "``\n", "foo" => "`foo`\n", "\n" => "`\n`\n", "`\n`\n" => "``\n`\n`\n" ], push_strike => [ "a" => "~~a~~", "" => "~~~~", "~" => "~~~~~", "~~" => "~~~~~~" ], push_strike_line => [ "" => "~~~~\n", "foo" => "~~foo~~\n" ], push_underline => [ "a" => "__a__", "" => "____", "_" => "_____", "__" => "______" ], push_underline_line => [ "" => "____\n", "foo" => "__foo__\n" ], push_spoiler => [ "a" => "||a||", "" => "||||", "|" => "|||||", "||" => "||||||" ], push_spoiler_line => [ "" => "||||\n", "foo" => "||foo||\n" ] }; } #[test] fn normalize() { assert_eq!(super::normalize("@everyone"), "@\u{200B}everyone"); assert_eq!(super::normalize("@here"), "@\u{200B}here"); assert_eq!(super::normalize("discord.gg"), "discord\u{2024}gg"); assert_eq!(super::normalize("discord.me"), "discord\u{2024}me"); assert_eq!(super::normalize("discordlist.net"), "discordlist\u{2024}net"); assert_eq!(super::normalize("discordservers.com"), "discordservers\u{2024}com"); assert_eq!(super::normalize("discord.com/invite"), "discord\u{2024}com/invite"); assert_eq!(super::normalize("\u{202E}"), " "); assert_eq!(super::normalize("\u{200F}"), " "); assert_eq!(super::normalize("\u{202B}"), " "); assert_eq!(super::normalize("\u{200B}"), " "); assert_eq!(super::normalize("\u{200D}"), " "); assert_eq!(super::normalize("\u{200C}"), " "); } }
28.515385
109
0.499303
76b691aae242e6c821f8bdf4c4a22c5dcb2cde65
10,453
pub use tomo_serenity_ext::*;

use crate::types::Ref;
use crate::utils::now;
use chrono::{TimeZone, Utc};
use core::num::NonZeroUsize;
use magic::dark_magic::report_bytes;
use magic::traits::MagicIter as _;
use magic::traits::MagicStr as _;
use std::fmt::{Display, Write};

const NHENTAI_ICON: &str =
    "https://cdn.discordapp.com/attachments/513304527064006656/766670086928859146/nhen.png";

impl Embedable for Ref<requester::saucenao::SauceNao> {
    fn append(&self, embed: &mut CreateEmbed) {
        let mut info = String::new();

        match self.characters.len() {
            0 => {}
            1 => {
                let content = self.characters.iter().next().unwrap();
                writeln!(&mut info, "**Character**: {}", content).unwrap();
            }
            _ => {
                let content = self.characters.iter().join("\n");
                embed.field("Characters", content, false);
            }
        }

        match self.parody.len() {
            0 => {}
            1 => {
                let content = self.parody.iter().next().unwrap();
                writeln!(&mut info, "**Parody**: {}", content).unwrap();
            }
            _ => {
                let content = self.parody.iter().join("\n");
                embed.field("Parody", content, false);
            }
        }

        if let Some(creator) = &self.creator {
            writeln!(&mut info, "**Creator**: {}", creator).unwrap();
        }

        match self.author.len() {
            0 => {}
            1 => {
                let content = self
                    .author
                    .iter()
                    .next()
                    .map(|(k, v)| format!("[{} ({})]({})", k, v.name, v.url))
                    .unwrap();
                writeln!(&mut info, "**Author**: {}", content).unwrap();
            }
            _ => {
                let content = self
                    .author
                    .iter()
                    .map(|(k, v)| format!("[{} ({})]({})", k, v.name, v.url))
                    .join("\n");
                embed.field("Author", content, false);
            }
        }

        match self.sources.len() {
            0 => {}
            1 => {
                let content = self
                    .sources
                    .iter()
                    .next()
                    .map(|(k, v)| format!("[{}]({})", k, v))
                    .unwrap();
                writeln!(&mut info, "**Source**: {}", content).unwrap();
            }
            _ => {
                let content = self
                    .sources
                    .iter()
                    .map(|(k, v)| format!("[{}]({})", k, v))
                    .join("\n");
                embed.field("Sources", content, false);
            }
        }

        // The field name `altenative_links` is kept as defined on the struct;
        // only the user-facing labels below are spelled correctly.
        match self.altenative_links.len() {
            0 => {}
            1 => {
                let content = self
                    .altenative_links
                    .iter()
                    .next()
                    .map(|(k, v)| format!("[{}]({})", k, v))
                    .unwrap();
                writeln!(&mut info, "**Alternative link**: {}", content).unwrap();
            }
            _ => {
                let content = self
                    .altenative_links
                    .iter()
                    .map(|(k, v)| format!("[{}]({})", k, v))
                    .join("\n");
                embed.field("Alternative links", content, false);
            }
        }

        if let Some(n) = &self.note {
            writeln!(&mut info, "**Note**: {}", n).unwrap();
        }

        if let Some(title) = &self.title {
            embed.title(title);
        }

        embed
            .description(info)
            .url(self.url())
            .thumbnail(self.img_url())
            .timestamp(now())
            .footer(|f| f.text("Powered by SauceNao"));
    }
}

impl Embedable for Ref<requester::ehentai::Gmetadata> {
    fn append(&self, embed: &mut CreateEmbed) {
        let tags = self.parse_tags();
        let mut info = String::new();

        match (&self.title, &self.title_jpn) {
            (Some(ref title), None) | (None, Some(ref title)) => {
                embed.title(title);
            }
            (Some(ref title), Some(ref title_jpn)) => {
                embed.title(title);
                writeln!(&mut info, "**Title Jpn:** {}", title_jpn).unwrap();
            }
            _ => {}
        }

        fn write_info<D: Display>(mut info: &mut String, key: &str, data: Option<Vec<D>>) {
            if let Some(value) = data {
                write!(&mut info, "**{}:** ", key).unwrap();
                for i in value {
                    write!(&mut info, "`{}` | ", i).unwrap();
                }
                info.truncate(info.len() - 3);
                info.push('\n');
            }
        }

        fn write_info_normal<D: Display>(mut info: &mut String, key: &str, data: Option<Vec<D>>) {
            if let Some(value) = data {
                write!(&mut info, "**{}:** ", key).unwrap();
                for i in value {
                    write!(&mut info, "{} | ", i).unwrap();
                }
                info.truncate(info.len() - 3);
                info.push('\n');
            }
        }

        write_info_normal(&mut info, "Language", tags.language);
        write_info(&mut info, "Parody", tags.parody);
write_info(&mut info, "Characters", tags.characters); write_info(&mut info, "Artist", tags.artist); write_info_normal(&mut info, "Circle", tags.group); writeln!(&mut info, "**Gallery type**: {}", &self.category).unwrap(); writeln!( &mut info, "**Total files**: {} ({})", &self.filecount, report_bytes(self.filesize) ) .unwrap(); write!(&mut info, "**Rating**: {} / 5", &self.rating).unwrap(); if self.expunged { info.push_str("\n>>>>> ***EXPUNGED*** <<<<<"); } if !self.tags.is_empty() { info.push_str("\n\n***TAGs***"); } embed.description(info); [ ("Male", tags.male), ("Female", tags.female), ("Misc", tags.misc), ] .iter() .filter_map(|(k, v)| v.as_ref().map(|v| (k, v))) .map(|(key, v)| { let mut content = String::with_capacity(45 * v.len()); for tag in v { write!(&mut content, "[{}]({}) | ", tag, tag.wiki_url()).unwrap(); } content.truncate(content.len() - 3); (key, content) }) .for_each(|(k, v)| { let mut splited = v.split_at_limit(1024, "|"); if let Some(data) = splited.next() { embed.field(k, data, false); for later in splited { embed.field('\u{200B}', later, false); } } }); let time = self .posted .parse::<i64>() .map(|v| Utc.timestamp(v, 0)) .unwrap_or_else(|_| Utc::now()) .to_rfc3339(); embed.timestamp(time); embed.thumbnail(&self.thumb); embed.color(self.category.color()); let url = self.url(); embed.url(&url); embed.footer(|f| { f.icon_url("https://cdn.discordapp.com/emojis/676135471566290985.png") .text(&url[21..]) }); } } impl Embedable for Ref<requester::nhentai::NhentaiGallery> { fn append(&self, embed: &mut CreateEmbed) { let metadata = self.metadata(); let mut description = format!( "**Category**: {}\n**Language**: {}\n**Total Pages**: {}\n", metadata.categories.join(", "), metadata.languages.join(", "), (&**self).total_pages(), ); if !self.scanlator.is_empty() { let data = format!("**Scanlator**: {}\n", &self.scanlator); description.push_str(&data); } if let Some(parodies) = metadata.parodies { let data = format!("**Parody**: {}\n", parodies.join(", ")); description.push_str(&data); } if let Some(characters) = metadata.characters { let data = format!("**Character**: {}\n", characters.join(", ")); description.push_str(&data); } if let Some(groups) = metadata.groups { let data = format!("**Group**: {}\n", groups.join(", ")); description.push_str(&data); } if let Some(artists) = metadata.artists { let data = format!("**Artist**: {}\n", artists.join(", ")); description.push_str(&data); } let color = { let num_length = (self.id as f32 + 1.0).log10().ceil() as u64; self.media_id * num_length + self.id }; embed.title(&self.title.pretty); embed.url(self.url()); embed.thumbnail(self.thumbnail()); embed.description(description); embed.color(color & 0xffffff); embed.timestamp(Utc.timestamp(self.upload_date as _, 0).to_rfc3339()); embed.footer(|f| f.text(format!("ID: {}", self.id)).icon_url(NHENTAI_ICON)); if let Some(tags) = metadata.tags { embed.field("Tags", tags.join(", "), false); } } } impl Paginator for Ref<requester::nhentai::NhentaiGallery> { fn append_page(&self, page: NonZeroUsize, embed: &mut CreateEmbed) { let total_pages = (&**self).total_pages(); let page = page.get(); let color = { let num_length = (self.id as f32 + 1.0).log10().ceil() as u64; self.media_id * num_length + self.id }; embed.title(&self.title.pretty); embed.url(self.url()); embed.color(color); embed.footer(|f| { f.text(format!( "ID: {} | Page: {} / {}", self.id, page, total_pages )) .icon_url(NHENTAI_ICON) }); match self.page(page) { Some(p) => embed.image(p), None => embed.field( "Error", format!("Out of 
page, this gallery has only {} pages", total_pages), false, ), }; } fn total_pages(&self) -> Option<usize> { Some((&**self).total_pages()) } }
30.926036
98
0.438343
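The embed builders in the record above repeat one pattern three times over: a single value is written inline into the description, while several values become a separate embed field. A minimal sketch of that decision factored into one helper, assuming a simplified stand-in type: `Embed` and `append_values` are illustrative names, not part of the bot's crate (the real builder is serenity's `CreateEmbed`).

use std::fmt::Write;

struct Embed {
    description: String,
    fields: Vec<(String, String)>,
}

/// Writes a single value inline into the description, or emits a separate
/// embed field when there are several values.
fn append_values(embed: &mut Embed, singular: &str, plural: &str, values: &[String]) {
    match values {
        [] => {}
        [one] => writeln!(embed.description, "**{}**: {}", singular, one).unwrap(),
        many => embed.fields.push((plural.to_string(), many.join("\n"))),
    }
}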
fb884d10a10ea9c8029f7e3ce02f3ed2bff51ca7
2,611
//! Library used by tidy and other tools.
//!
//! This library contains the tidy lints and exposes it
//! to be used by tools.

use std::fs::File;
use std::io::Read;
use walkdir::{DirEntry, WalkDir};

use std::path::Path;

macro_rules! t {
    ($e:expr, $p:expr) => {
        match $e {
            Ok(e) => e,
            Err(e) => panic!("{} failed on {} with {}", stringify!($e), ($p).display(), e),
        }
    };
    ($e:expr) => {
        match $e {
            Ok(e) => e,
            Err(e) => panic!("{} failed with {}", stringify!($e), e),
        }
    };
}

macro_rules! tidy_error {
    ($bad:expr, $fmt:expr) => ({
        *$bad = true;
        eprintln!("tidy error: {}", $fmt);
    });
    ($bad:expr, $fmt:expr, $($arg:tt)*) => ({
        *$bad = true;
        eprint!("tidy error: ");
        eprintln!($fmt, $($arg)*);
    });
}

pub mod bins;
pub mod debug_artifacts;
pub mod deps;
pub mod edition;
pub mod error_codes_check;
pub mod errors;
pub mod extdeps;
pub mod features;
pub mod pal;
pub mod style;
pub mod ui_tests;
pub mod unit_tests;
pub mod unstable_book;

fn filter_dirs(path: &Path) -> bool {
    let skip = [
        "tidy-test-file",
        "compiler/rustc_codegen_cranelift",
        "src/llvm-project",
        "library/backtrace",
        "library/boehm_shim/target",
        "library/stdarch",
        "src/tools/cargo",
        "src/tools/clippy",
        "src/tools/miri",
        "src/tools/rls",
        "src/tools/rust-analyzer",
        "src/tools/rust-installer",
        "src/tools/rustfmt",
        "src/doc/book",
        // Filter RLS output directories
        "target/rls",
    ];
    skip.iter().any(|p| path.ends_with(p))
}

fn walk_many(
    paths: &[&Path],
    skip: &mut dyn FnMut(&Path) -> bool,
    f: &mut dyn FnMut(&DirEntry, &str),
) {
    for path in paths {
        walk(path, skip, f);
    }
}

fn walk(path: &Path, skip: &mut dyn FnMut(&Path) -> bool, f: &mut dyn FnMut(&DirEntry, &str)) {
    let mut contents = String::new();
    walk_no_read(path, skip, &mut |entry| {
        contents.clear();
        if t!(File::open(entry.path()), entry.path()).read_to_string(&mut contents).is_err() {
            contents.clear();
        }
        f(&entry, &contents);
    });
}

fn walk_no_read(path: &Path, skip: &mut dyn FnMut(&Path) -> bool, f: &mut dyn FnMut(&DirEntry)) {
    let walker = WalkDir::new(path).into_iter().filter_entry(|e| !skip(e.path()));
    for entry in walker {
        if let Ok(entry) = entry {
            if entry.file_type().is_dir() {
                continue;
            }
            f(&entry);
        }
    }
}
24.175926
97
0.528916
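The traversal in the record above leans on walkdir's `filter_entry`, which prunes whole subtrees: the iterator never descends into a directory the closure rejects, which is what makes `filter_dirs` cheap on large checkouts. A standalone sketch of the same shape; `count_rs_files` and its closure parameter are assumptions for illustration, not part of tidy.

use std::path::Path;
use walkdir::WalkDir;

/// Counts `.rs` files under `root`, skipping any subtree whose path
/// matches the `skip` predicate (children of a skipped directory are
/// never visited).
fn count_rs_files(root: &Path, skip: &dyn Fn(&Path) -> bool) -> usize {
    WalkDir::new(root)
        .into_iter()
        .filter_entry(|e| !skip(e.path()))
        .filter_map(Result::ok)
        .filter(|e| e.path().extension().map_or(false, |x| x == "rs"))
        .count()
}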
bbd818735f6e48f86af245ecf38cf8e339076fe5
15,280
/// Link Simulator /// /// Copyright (C) 2019 PTScientists GmbH /// /// 17.04.2019 Eric Reinthal extern crate ctrlc; extern crate link; extern crate term; use link::Link; use link::LinkStatistic; use mio::net; use rand::Rng; use std::env; use std::fs; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; use std::thread; use std::time; const USAGE: &str = "USAGE: link_simulator [options] <file> options: -i Print link info on startup -s Print link statistics to stdout (only works on terminals with 60+ chars per line) (does not work on Windows) -gLINK_NAME Send link statistics to Grafana for specified link (repeat this option if needed) -G Send statistics of all links to Grafana -p Discard packet if bit is flipped <file> must have the following format: link_name address_in address_out bit_error_rate delay_ms max_bandwidth ... ... (lines starting with '#' are ignored) line format: link_name: String (without spaces) address_{in/out}: IPv4:Port bit_error_rate: float (0.0 ... 1.0( delay_ms: integer [ms] max_bandwidth: float [bps] example: Test_route localhost:2000 localhost:2001 1e-5 2000 1000000"; const INFLUXDB_ADDRESS: &str = "127.0.0.1:8100"; // config arguments per link definition, see USAGE above const ARGS_PER_LINK: usize = 6; fn main() { // SIGINT capture setup let running = Arc::new(AtomicBool::new(true)); let r = running.clone(); ctrlc::set_handler(move || { r.store(false, Ordering::SeqCst); }) .expect("Error setting Ctrl-C handler"); // read configuration file let config = read_input(); if config.is_none() { println!("{}", USAGE); std::process::exit(1); } let config = config.unwrap(); let active_links = config.measurable_links; let num_links = active_links.len(); if config.print_info { for l in active_links.iter() { println!( "Link {}: {}\n\t(BER: {}, delay: {}ms, max. 
bandwidth: {}bps)", l.link.name(), l.link.route(), l.link.bit_error_rate(), l.link.delay_ms(), link::bytes_to_readable(l.link.max_bandwidth() as u64) ); if l.grafana_output { println!("\tSent to Grafana"); } } } // grafana setup // must match input address / port of Influx DB let grafana_output = INFLUXDB_ADDRESS.parse().unwrap(); // udp socket for sending messages to grafana-dump let grafana_socket = net::UdpSocket::bind(&"0.0.0.0:0".parse().unwrap()).unwrap(); let mut grafana_stat = Vec::new(); // generate unique session id let grafana_session: u16 = rand::thread_rng().gen(); if config.send_to_grafana { // send initial link info for l in active_links.iter().filter(|x| x.grafana_output) { let influx = format!(r#"link_config,name={},session_tag={} name="{}",session={},inbound="{}",outbound="{}",ber={},delay={}i,bandwidth={}"#, l.link.name(), grafana_session, l.link.name(), grafana_session, l.link.route().inbound, l.link.route().outbound, l.link.bit_error_rate(), l.link.delay_ms(), l.link.max_bandwidth()); grafana_socket .send_to(&influx.as_bytes()[..influx.len()], &grafana_output) .unwrap(); grafana_stat.push(Statistic::new( l.link.name().to_string(), l.statistic.clone(), true, )); } } // start forwarding threads let mut link_threads = Vec::with_capacity(num_links); let mut statistics = Vec::with_capacity(num_links); for active_link in active_links { let link_stat = active_link.statistic; statistics.push(Statistic::new( active_link.link.name().to_string(), link_stat, active_link.grafana_output, )); let mut link = active_link.link; let r = running.clone(); let r_stop = running.clone(); link_threads.push(thread::spawn(move || { match link.start(r) { Ok(_) => (), Err(e) => { println!("Link '{}' crashed: {}", link.name(), e); r_stop.store(false, Ordering::SeqCst); } }; })); } // print statistics to stdout let mut thread_stat_printer: Option<thread::JoinHandle<()>> = None; if config.print_statistic { let running_stat = running.clone(); thread_stat_printer = Some(thread::spawn(move || { // continuous route statistics output let mut terminal = term::stdout().unwrap(); for _ in 0..num_links { println!(); // prepare output lines } while running_stat.load(Ordering::SeqCst) { for _ in 0..num_links { terminal.cursor_up().unwrap(); } for statistic in &mut statistics { // used to acquire stat lock as short as possible // i.e. 
not during println let link_statistic: LinkStatistic; { link_statistic = statistic.link_statistic.lock().unwrap().clone(); } terminal.delete_line().unwrap(); println!("[{:<14}] {}", statistic.name, link_statistic); } thread::sleep(time::Duration::from_millis(500)); } })); } // send statistics to grafana let mut thread_send_grafana: Option<thread::JoinHandle<()>> = None; if config.send_to_grafana { let running_grafana = running.clone(); thread_send_grafana = Some(thread::spawn(move || { // run forever while running_grafana.load(Ordering::SeqCst) { // for all grafana-active links for stat in &grafana_stat { // copy in order to lock as short as possible let s: LinkStatistic; { s = stat.link_statistic.lock().unwrap().clone(); } // influx DB string let influx = format!(r#"link_stat,session={},name={} recv_b={}i,recv_p={}i,drop_b={}i,drop_p={}i,forw_b={}i,forw_p={}i,rate={},buffer={},flips={}i,discards={}i"#, grafana_session, stat.name, s.received_bytes, s.received_packets, s.dropped_bytes, s.dropped_packets, s.forwarded_bytes, s.forwarded_packets, s.forwarding_rate*8, s.buffer_occupancy, s.bit_flips, s.discarded_packets); grafana_socket .send_to(&influx.as_bytes()[..influx.len()], &grafana_output) .unwrap(); } thread::sleep(time::Duration::from_millis(500)); } })); } // gracefully terminate if let Some(t) = thread_stat_printer { t.join().unwrap(); } if let Some(t) = thread_send_grafana { t.join().unwrap(); } for t in link_threads { t.join().unwrap(); } } /// associates a link with a LinkStatistic and a name, used for terminal output /// and possibly sending status data (network metadata) at a later point struct MeasurableLink { pub link: Link, pub statistic: Arc<Mutex<LinkStatistic>>, pub grafana_output: bool, } impl MeasurableLink { fn new( link: Link, statistic: Arc<Mutex<LinkStatistic>>, grafana_output: bool, ) -> MeasurableLink { MeasurableLink { link, statistic, grafana_output, } } } /// associates a LinkStatistic with a name and a field for calculating the rate struct Statistic { pub name: String, pub link_statistic: Arc<Mutex<LinkStatistic>>, pub grafana_output: bool, } impl Statistic { fn new( name: String, link_statistic: Arc<Mutex<LinkStatistic>>, grafana_output: bool, ) -> Statistic { Statistic { name, link_statistic, grafana_output, } } } /// configuration from command line parameters struct Configuration { pub measurable_links: Vec<MeasurableLink>, pub print_info: bool, pub print_statistic: bool, pub send_to_grafana: bool, pub discard_bitflip_packet: bool, } impl Configuration { fn new( measurable_links: Vec<MeasurableLink>, print_info: bool, print_statistic: bool, send_to_grafana: bool, discard_bitflip_packet: bool, ) -> Configuration { Configuration { measurable_links, print_info, print_statistic, send_to_grafana, discard_bitflip_packet, } } } /// parse input file and create link structs fn read_input() -> Option<Configuration> { // see USAGE definition above // parse options, then file name let mut print_link_info = false; let mut print_link_statistic = false; let mut grafana_send_all = false; let mut grafana_output = Vec::new(); let mut discard_bitflip_packet = false; let mut file_name = String::from(""); // arguments iterator, skip first (program name) let mut args = env::args(); if args.len() < 2 { return None; } args.next(); // parse arguments while file_name.as_str() == "" { match args.next() { None => break, Some(arg) => { let mut chars = arg.chars(); if chars.next().unwrap() == '-' { // parse options if arg == "-i" { // print to info print_link_info = true; } else if arg 
== "-s" { // print link statistic print_link_statistic = true; } else if arg == "-G" { grafana_send_all = true; } else if arg == "-p" { discard_bitflip_packet = true; } else { match chars.next() { None => { // invalid argument println!("Invalid argument: {}", arg); return None; } // send to grafana Some(c) => { if c == 'g' { // just save name of link, skip '-g' grafana_output.push(arg[2..].to_string()); } else { // invalid argument println!("Invalid argument: {}", arg); return None; } } } } } else { // parse config filename file_name = arg.to_string(); } } } } if file_name.as_str() == "" { println!("No configuration file provided"); return None; } let grafana_count = grafana_output.len(); if grafana_send_all && grafana_count > 0 { println!("Either '-G' or some '-g' arguments may be provided, not both"); return None; } // result vector let mut links = Vec::new(); let contents = match fs::read_to_string(&file_name) { Ok(x) => x, Err(e) => { println!("Could not open file '{}': {}", file_name, e); std::process::exit(1); } }; for line in contents .lines() .filter(|x| x.len() > 0) // empty lines .filter(|x| x.chars().next().unwrap() != '#') // comment lines { let components: Vec<&str> = line.split_whitespace().collect(); if components.len() != ARGS_PER_LINK { println!("'{}'\nLine invalid, aborting", line); return None; } let ber = match components[3].parse::<f64>() { Ok(x) => x, Err(_) => { println!( "'{}'\n4th argument (bit error rate) must be numeric, aborting", line ); return None; } }; let delay = match components[4].parse::<u32>() { Ok(x) => x, Err(_) => { println!("'{}'\n5th argument (delay) must be numeric, aborting", line); return None; } }; let max_bw = match components[5].parse::<f64>() { Ok(x) => x, Err(_) => { println!( "'{}'\n6th argument (max. bandwidth) must be numeric, aborting", line ); return None; } }; let stat_local = Arc::new(Mutex::new(Default::default())); let stat_link = stat_local.clone(); let link = match Link::new( components[0].to_string(), &components[1], &components[2], ber, delay, max_bw, stat_link, discard_bitflip_packet, ) { Ok(x) => x, Err(e) => { println!("{}", e); return None; } }; // check if this link will be sent to grafana let mut grafana_flag = false; if let Some(_) = grafana_output .iter() .find(|&x| x == &components[0].to_string()) { grafana_flag = true; grafana_output.retain(move |x| x != &components[0].to_string()); } // push a link with its accompanying statistics reference links.push(MeasurableLink::new( link, stat_local, grafana_send_all || grafana_flag, )); } if links.len() == 0 { println!("No links defined in configuration file"); return None; } if grafana_output.len() != 0 { println!("Invalid links specified for Grafana output:"); for l in grafana_output { println!(" - {}", l); } return None; } Some(Configuration::new( links, print_link_info, print_link_statistic, grafana_send_all || grafana_count > 0, discard_bitflip_packet, )) }
32.860215
258
0.495288
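The simulator above coordinates its shutdown through one shared `AtomicBool`: the Ctrl-C handler clears it, every worker loop polls it, and `main` joins all threads for a clean exit. A minimal sketch of that pattern in isolation; the worker body is a placeholder, not the simulator's real forwarding loop.

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::{thread, time};

fn main() {
    let running = Arc::new(AtomicBool::new(true));
    let r = running.clone();
    // SIGINT flips the shared flag instead of killing the process.
    ctrlc::set_handler(move || r.store(false, Ordering::SeqCst)).unwrap();

    let worker = thread::spawn({
        let running = running.clone();
        move || {
            while running.load(Ordering::SeqCst) {
                // forward packets / update statistics here
                thread::sleep(time::Duration::from_millis(100));
            }
        }
    });
    worker.join().unwrap(); // clean exit once the flag is cleared
}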
23d3a4c808f3abd2cbd2106ec0a29e8ccd9fec81
710
mod fs;
mod global;
mod stdio;

use self::global::io_module;
use crate::{StdError, StdResult};
use fs::fs_module;
use laythe_core::{hooks::GcHooks, managed::Gc, module::Package, utils::IdEmitter};
use stdio::stdio_module;

pub const IO_MODULE_PATH: &str = "std/io";

pub fn add_io_package(
  hooks: &GcHooks,
  std: Gc<Package>,
  emitter: &mut IdEmitter,
) -> StdResult<()> {
  let mut root = std.root_module();
  let mut io_module = io_module(hooks, std, emitter)?;
  root.insert_module(hooks, io_module)?;

  let stdio = stdio_module(hooks, std, emitter)?;
  let fs = fs_module(hooks, std, emitter)?;

  io_module.insert_module(hooks, stdio)?;
  io_module.insert_module(hooks, fs).map_err(StdError::from)
}
25.357143
82
0.709859
18d4d249e3f315b2d8694288978f60f4a7217445
17,723
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use admission_control_proto::proto::{ admission_control::{ SubmitTransactionRequest, SubmitTransactionResponse as ProtoSubmitTransactionResponse, }, admission_control_grpc::AdmissionControlClient, }; use client::{AccountData, AccountStatus}; use crypto::signing::KeyPair; use generate_keypair::load_key_from_file; use lazy_static::lazy_static; use logger::prelude::*; use metrics::OpMetrics; use rand::Rng; use std::{collections::HashMap, convert::TryInto, sync::Arc, thread, time}; use types::{account_address::AccountAddress, account_config::association_address}; pub mod grpc_helpers; pub mod ruben_opt; pub mod txn_generator; use grpc_helpers::{ divide_items, get_account_states, submit_and_wait_txn_requests, sync_account_sequence_number, }; use txn_generator::gen_mint_txn_requests; lazy_static! { pub static ref OP_COUNTER: OpMetrics = OpMetrics::new_and_registered("benchmark"); } /// Benchmark library for Libra Blockchain. /// /// Benchmarker aims to automate the process of submitting TXNs to admission control /// in a configurable mechanism, and wait for accepted TXNs comitted or timeout. /// Current main usage is measuring TXN throughput. /// How to run a benchmarker (see RuBen in bin/ruben.rs): /// 1. Create a benchmarker with AdmissionControlClient(s) and NodeDebugClient, /// 2. Generate accounts (using txn_generator module) and mint them: mint_accounts. /// 3. Play transactions offline-generated by txn_generator module: /// submit_and_wait_txn_committed, measure_txn_throughput. /// Metrics reported include: /// * Counters related to: /// * TXN generation: requested_txns, created_txns, sign_failed_txns; /// * Submission to AC and AC response: submit_txns.{ac_status_code}, /// submit_txns.{mempool_status_code}, submit_txns.{vm_status}, submit_txns.{grpc_error}; /// * Final status within epoch: committed_txns, timedout_txns; /// * Gauges: request_duration_ms, running_duration_ms, request_throughput, txns_throughput. pub struct Benchmarker { /// Using multiple clients can help improve the request speed. clients: Vec<Arc<AdmissionControlClient>>, /// Upper bound duration to stagger the clients before submitting TXNs. stagger_range_ms: u16, /// Persisted sequence numbers for generated accounts and faucet account /// BEFORE playing new round of TXNs. prev_sequence_numbers: HashMap<AccountAddress, u64>, } impl Benchmarker { /// Construct Benchmarker with a vector of AC clients and a NodeDebugClient. pub fn new(clients: Vec<AdmissionControlClient>, stagger_range_ms: u16) -> Self { if clients.is_empty() { panic!("failed to create benchmarker without any AdmissionControlClient"); } let arc_clients = clients.into_iter().map(Arc::new).collect(); let prev_sequence_numbers = HashMap::new(); Benchmarker { clients: arc_clients, stagger_range_ms, prev_sequence_numbers, } } /// -------------------------------------------------------------------- /// /// Benchmark setup: Load faucet account and minting APIs and helpers. /// /// -------------------------------------------------------------------- /// /// Load keypair from given faucet_account_path, /// then try to sync with a validator to get up-to-date faucet account's sequence number. /// Why restore faucet account: Benchmarker as a client can be stopped/restarted repeatedly /// while the libra swarm as a server keeping running. 
fn load_faucet_account(&self, faucet_account_path: &str) -> AccountData { let faucet_account_keypair: KeyPair = load_key_from_file(faucet_account_path).expect("invalid faucet keypair file"); let address = association_address(); // Request and wait for account's (sequence_number, account_status) from a validator. // Assume Benchmarker is the ONLY active client in the libra network. let client = self .clients .get(0) .expect("no available AdmissionControlClient"); let states = get_account_states(client, &[address]); let (sequence_number, status) = states .get(&address) .expect("failed to get faucet account from validator"); assert_eq!(status, &AccountStatus::Persisted); AccountData { address, key_pair: Some(faucet_account_keypair), sequence_number: *sequence_number, status: status.clone(), } } /// Minting given accounts using self's AC client(s). /// Mint TXNs must be 100% successful in order to continue benchmark. /// Therefore mint_accounts() will panic when any mint TXN is not accepted or fails. /// Known issue: Minting opereations from two different Benchmarker instances /// will fail because they are sharing the same faucet account. pub fn mint_accounts(&mut self, mint_key_file_path: &str, accounts: &[AccountData]) { let mut faucet_account = self.load_faucet_account(mint_key_file_path); self.prev_sequence_numbers .insert(faucet_account.address, faucet_account.sequence_number); for account in accounts.iter() { self.prev_sequence_numbers .insert(account.address, account.sequence_number); } let mint_requests = gen_mint_txn_requests(&mut faucet_account, accounts); // Disable client staggering for mint operations. let stagger_range_ms = self.stagger_range_ms; self.stagger_range_ms = 1; let (num_accepted, num_committed, _, _) = self.submit_and_wait_txn_committed(&mint_requests, &mut [faucet_account]); self.stagger_range_ms = stagger_range_ms; // We stop immediately if any minting fails. if num_accepted != mint_requests.len() || num_accepted - num_committed > 0 { panic!( "{} of {} mint transaction(s) accepted, and {} failed", num_accepted, mint_requests.len(), num_accepted - num_committed, ) } } /// ----------------------------------------------------------------- /// /// Transaction submission and waiting for commit APIs and helpers. /// /// ----------------------------------------------------------------- /// /// Put client to sleep for a random duration before submitting TXN requests. /// Return how long the client is scheduled to be delayed. fn stagger_client(stagger_range_ms: u16) -> u16 { let mut rng = rand::thread_rng(); // Double check the upper bound value to be no less than 1. let duration = rng.gen_range(0, std::cmp::max(1, stagger_range_ms)); thread::sleep(time::Duration::from_millis(u64::from(duration))); duration } /// Send requests to AC async, wait for responses from AC. /// Return #accepted TXNs and submission duration. pub fn submit_txns(&mut self, txn_reqs: &[SubmitTransactionRequest]) -> (usize, u128) { let txn_req_chunks = divide_items(txn_reqs, self.clients.len()); let now = time::Instant::now(); // Zip txn_req_chunks with clients: when first iter returns none, // zip will short-circuit and next will not be called on the second iter. let children: Vec<thread::JoinHandle<_>> = txn_req_chunks .zip(self.clients.iter().cycle()) .map(|(chunk, client)| { let local_chunk = Vec::from(chunk); let local_client = Arc::clone(client); let stagger_range_ms = self.stagger_range_ms; // Spawn threads with corresponding client. 
thread::spawn( // Dispatch TXN requests to client and submit, return the list of responses // that are accepted by AC, and how long the client is delayed. move || -> (Vec<ProtoSubmitTransactionResponse>, u16) { let delay_duration_ms = Self::stagger_client(stagger_range_ms); info!( "Dispatch a chunk of {} requests to client and start to submit after staggered {} ms.", local_chunk.len(), delay_duration_ms, ); (submit_and_wait_txn_requests(&local_client, &local_chunk), delay_duration_ms) }, ) }) .collect(); // Wait for threads and gather reponses. // TODO: Group response by error type and report staticstics. let mut txn_resps: Vec<ProtoSubmitTransactionResponse> = vec![]; let mut delay_duration_ms = self.stagger_range_ms; for child in children { let resp_tuple = child.join().expect("failed to join a request thread"); txn_resps.extend(resp_tuple.0.into_iter()); // Start counting time as soon as the first client starts to submit TXNs. delay_duration_ms = std::cmp::min(delay_duration_ms, resp_tuple.1); } let mut request_duration_ms = now.elapsed().as_millis(); // Calling stagger_client() should ensure delay duration strictly < self.stagger_range_ms. if delay_duration_ms < self.stagger_range_ms { request_duration_ms -= u128::from(delay_duration_ms); } info!( "Submitted and accepted {} TXNs within {} ms.", txn_resps.len(), request_duration_ms, ); (txn_resps.len(), request_duration_ms) } /// Wait for accepted TXNs to commit or time out: for any account, if its sequence number /// (bumpped during TXN generation) equals the one synchronized from validator, /// denoted as sync sequence number, then all its TXNs are committed. /// Return senders' most up-to-date sync sequence numbers and how long we have waited. pub fn wait_txns(&self, senders: &[AccountData]) -> (HashMap<AccountAddress, u64>, u128) { let account_chunks = divide_items(senders, self.clients.len()); let now = time::Instant::now(); let children: Vec<thread::JoinHandle<HashMap<_, _>>> = account_chunks .zip(self.clients.iter().cycle()) .map(|(chunk, client)| { let local_chunk = Vec::from(chunk); let local_client = Arc::clone(client); info!( "Dispatch a chunk of {} accounts to client.", local_chunk.len() ); thread::spawn(move || -> HashMap<AccountAddress, u64> { sync_account_sequence_number(&local_client, &local_chunk) }) }) .collect(); let mut sequence_numbers: HashMap<AccountAddress, u64> = HashMap::new(); for child in children { let sequence_number_chunk = child.join().expect("failed to join a wait thread"); sequence_numbers.extend(sequence_number_chunk); } let wait_duration_ms = now.elapsed().as_millis(); info!("Waited for TXNs for {} ms", wait_duration_ms); (sequence_numbers, wait_duration_ms) } /// -------------------------------------------------- /// /// Transaction playing, throughput measureing APIs. /// /// -------------------------------------------------- /// /// With the previous stored sequence number (e.g. self.prev_sequence_numbers) /// and the synchronized sequence number from validator, calculate how many TXNs are committed. /// Update both senders sequence numbers and self.prev_sequence_numbers to the just-queried /// synchrnized sequence numbers. Return (#committed, #uncommitted) TXNs. /// Reason to backtrace sender's sequence number: /// If some of sender's TXNs are not committed because they are rejected by AC, /// we should use the synchronized sequence number in future TXN generation. 
/// On the other hand, if sender's TXNs are accepted but just waiting to be committed, /// part of the newly generated TXNs will be rejected by AC due to old sequence number, /// but eventually local account's sequence number will be new enough to get accepted. fn check_txn_results( &mut self, senders: &mut [AccountData], sync_sequence_numbers: &HashMap<AccountAddress, u64>, ) -> (usize, usize) { let mut committed_txns = 0; let mut uncommitted_txns = 0; // Invariant for any account X in Benchmarker: // 1) X's current persisted sequence number (X.sequence_number) >= // X's synchronized sequence number (sync_sequence_number[X]) // 2) X's current persisted sequence number (X.sequence_number) >= // X's previous persisted sequence number (self.prev_sequence_numbers[X]) for sender in senders.iter_mut() { let prev_sequence_number = self .prev_sequence_numbers .get_mut(&sender.address) .expect("Sender doesn't exist in Benchmark environment"); let sync_sequence_number = sync_sequence_numbers .get(&sender.address) .expect("Sender doesn't exist in validators"); assert!(sender.sequence_number >= *sync_sequence_number); assert!(*sync_sequence_number >= *prev_sequence_number); if sender.sequence_number > *sync_sequence_number { error!("Account {:?} has uncommitted TXNs", sender.address); } committed_txns += *sync_sequence_number - *prev_sequence_number; uncommitted_txns += sender.sequence_number - *sync_sequence_number; *prev_sequence_number = *sync_sequence_number; sender.sequence_number = *sync_sequence_number; } info!( "#committed TXNs = {}, #uncommitted TXNs = {}", committed_txns, uncommitted_txns ); let committed_txns_usize = committed_txns .try_into() .expect("Unable to convert u64 to usize"); let uncommitted_txns_usize = uncommitted_txns .try_into() .expect("Unable to convert u64 to usize"); OP_COUNTER.inc_by("committed_txns", committed_txns_usize); OP_COUNTER.inc_by("timedout_txns", uncommitted_txns_usize); (committed_txns_usize, uncommitted_txns_usize) } /// Implement the general way to submit TXNs to Libra and then /// wait for all accepted ones to become committed. /// Return (#accepted TXNs, #committed TXNs, submit duration, wait duration). pub fn submit_and_wait_txn_committed( &mut self, txn_reqs: &[SubmitTransactionRequest], senders: &mut [AccountData], ) -> (usize, usize, u128, u128) { let (num_txns_accepted, submit_duration_ms) = self.submit_txns(txn_reqs); let (sync_sequence_numbers, wait_duration_ms) = self.wait_txns(senders); let (num_committed, _) = self.check_txn_results(senders, &sync_sequence_numbers); ( num_txns_accepted, num_committed, submit_duration_ms, wait_duration_ms, ) } /// Calcuate average committed transactions per second. fn calculate_throughput(num_txns: usize, duration_ms: u128, prefix: &str) -> f64 { assert!(duration_ms > 0); let throughput = num_txns as f64 * 1000f64 / duration_ms as f64; info!( "{} throughput est = {} txns / {} ms = {:.2} rps.", prefix, num_txns, duration_ms, throughput, ); throughput } /// Similar to submit_and_wait_txn_committed but with timing. /// How given TXNs are played and how time durations (submission, commit and running) /// are defined are illustrated as follows: /// t_submit AC responds all requests /// |==============================================>| /// t_commit (unable to measure) Storage stores all committed TXNs /// |========================================================>| /// t_run 1 epoch of measuring finishes. 
/// |===========================================================>| /// Estimated TXN throughput from user perspective = #TXN / t_run. /// Estimated request throughput = #TXN / t_submit. /// Estimated TXN throughput internal to libra = #TXN / t_commit, not measured by this API. /// Return request througnhput and TXN throughput. pub fn measure_txn_throughput( &mut self, txn_reqs: &[SubmitTransactionRequest], senders: &mut [AccountData], ) -> (f64, f64) { let (_, num_committed, submit_duration_ms, wait_duration_ms) = self.submit_and_wait_txn_committed(txn_reqs, senders); let request_throughput = Self::calculate_throughput(txn_reqs.len(), submit_duration_ms, "REQ"); let running_duration_ms = submit_duration_ms + wait_duration_ms; let txn_throughput = Self::calculate_throughput(num_committed, running_duration_ms, "TXN"); OP_COUNTER.set("submit_duration_ms", submit_duration_ms as usize); OP_COUNTER.set("wait_duration_ms", wait_duration_ms as usize); OP_COUNTER.set("running_duration_ms", running_duration_ms as usize); OP_COUNTER.set("request_throughput", request_throughput as usize); OP_COUNTER.set("txn_throughput", txn_throughput as usize); (request_throughput, txn_throughput) } }
49.505587
115
0.627659
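The benchmarker's `calculate_throughput` above is plain unit conversion: transactions per second is `num_txns * 1000 / duration_ms` because the duration is measured in milliseconds. A worked sketch with a hypothetical free function, just to pin down the arithmetic:

fn throughput(num_txns: usize, duration_ms: u128) -> f64 {
    assert!(duration_ms > 0);
    num_txns as f64 * 1000f64 / duration_ms as f64
}

fn main() {
    // 500 committed TXNs over 2_000 ms -> 250.0 TXN/s.
    assert!((throughput(500, 2_000) - 250.0).abs() < f64::EPSILON);
}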
db80bcfbf301ad544fd5342ac0c5644c5e8cfd35
357
//use rocket::request::Form;
use rocket::data::TempFile;

//* thread = UID of thread being commented to
#[derive(FromForm)]
pub struct NewComment<'f> {
    pub thread: String,
    pub attachment: TempFile<'f>,
    pub content: String,
}

#[derive(FromForm)]
pub struct NewThread<'f> {
    pub title: String,
    pub attachment: TempFile<'f>,
    pub content: String,
}
19.833333
45
0.689076
d7b152c4b2415f463be733f6c13a84f03663e369
2,128
use std::net::TcpStream;

use imap::Session;
use native_tls::TlsStream;

use super::{Mail, mail::MailServer};

/// IMAP e-mail client able to list mailboxes and read mails
pub struct ImapClient {
    mail_server: MailServer,
    current_mail: Option<imap::types::Mailbox>,
    session: Option<Session<TlsStream<TcpStream>>>,
}

impl ImapClient {
    pub fn new() -> Self {
        Self {
            mail_server: MailServer::Unknown,
            session: None,
            current_mail: None,
        }
    }

    pub fn retrieve_mailboxes(&mut self) -> Vec<String> {
        let mailboxes = self.session.as_mut().unwrap()
            .list(Some("*"), Some("*")).unwrap();
        mailboxes.into_iter().map(|name| name.name().to_string()).collect()
    }

    /// Select a mailbox
    pub fn select_mailbox(&mut self, mail_name: &str) {
        let mailbox = self.session.as_mut().unwrap().select(mail_name).unwrap();
        self.current_mail = Some(mailbox);
    }

    /// Read the most recent mails in the currently selected mailbox
    pub fn read_mails(&mut self) -> Vec<Mail> {
        let last_message_id = self.current_mail.as_ref().unwrap().exists;
        // IMAP sequence numbers are 1-based; clamp the lower bound so small
        // mailboxes do not underflow `last_message_id - 20`.
        let first_message_id = last_message_id.saturating_sub(20).max(1);
        let sequence = format!("{}:{}", first_message_id, last_message_id);
        // RFC 822 dictates the format of the body of e-mails
        let messages = self.session.as_mut().unwrap().fetch(sequence, "RFC822").unwrap();
        let mails = messages.into_iter()
            .map(|message| message.body().expect("No body..."))
            .map(|body| Mail::parse(body))
            .collect();
        mails
    }

    pub fn connect(&mut self, email: String, password: String, hostname: String) {
        let tls = native_tls::TlsConnector::builder()
            .build()
            .unwrap();
        self.mail_server = MailServer::Imap(hostname.clone());
        let client = imap::connect(self.mail_server.address(), hostname, &tls)
            .unwrap();
        let imap_session = client.login(email, password)
            .unwrap();
        self.session = Some(imap_session);
    }

    pub fn disconnect(&mut self) {
        self.session.as_mut().unwrap().logout().unwrap();
    }
}
30.4
89
0.597744
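The fetch-window arithmetic in `read_mails` above is worth isolating: IMAP sequence numbers are 1-based, so the lower bound of the window has to be clamped when the mailbox holds fewer messages than the window size. A sketch with a hypothetical helper, `fetch_range`, that mirrors the clamped computation:

fn fetch_range(exists: u32, window: u32) -> String {
    // Clamp to 1 so "exists - window" cannot underflow or produce
    // the invalid sequence number 0.
    let first = exists.saturating_sub(window).max(1);
    format!("{}:{}", first, exists)
}

fn main() {
    assert_eq!(fetch_range(100, 20), "80:100");
    assert_eq!(fetch_range(5, 20), "1:5"); // small mailbox: no underflow
}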
d5637c0d99bde16f4dc76d7750b136be96c578c8
1,233
use std::collections::HashMap;

struct Solution {}

impl Solution {
    pub fn four_sum_count(nums1: Vec<i32>, nums2: Vec<i32>, nums3: Vec<i32>, nums4: Vec<i32>) -> i32 {
        let mut result = 0;
        let mut map = HashMap::new();
        for i in 0..nums1.len() {
            for j in 0..nums2.len() {
                let sum = nums1[i] + nums2[j];
                let count = map.entry(sum).or_insert(0);
                *count += 1
            }
        }
        for k in 0..nums3.len() {
            for l in 0..nums4.len() {
                let sum = nums3[k] + nums4[l];
                let count = map.get(&-sum);
                result += count.unwrap_or(&0)
            }
        }
        return result;
    }
}

pub fn test() {
    println!("four_sum_count");
    println!("{}", Solution::four_sum_count(
        vec![1, 2],
        vec![-2, -1],
        vec![-1, 2],
        vec![0, 2],
    ) == 2);
    println!("{}", Solution::four_sum_count(
        vec![0],
        vec![0],
        vec![0],
        vec![0],
    ) == 1);
    println!("{}", Solution::four_sum_count(
        vec![-1, 1, 1, 1, -1],
        vec![0, -1, -1, 0, 1],
        vec![-1, -1, 1, -1, -1],
        vec![0, 1, 0, -1, -1],
    ) == 132);
}
25.6875
102
0.420925
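The solution above is meet-in-the-middle: hash every pairwise sum of the first two lists (O(n^2) space), then look up the negation of each pairwise sum of the last two lists, giving O(n^2) time instead of the naive O(n^4). A sketch of the same idea over slices; `count_zero_quadruples` is an illustrative variant, not the LeetCode entry point:

use std::collections::HashMap;

fn count_zero_quadruples(a: &[i32], b: &[i32], c: &[i32], d: &[i32]) -> i32 {
    // Phase 1: count every sum a[i] + b[j].
    let mut sums = HashMap::new();
    for &x in a {
        for &y in b {
            *sums.entry(x + y).or_insert(0) += 1;
        }
    }
    // Phase 2: each c[k] + d[l] contributes the count of its negation.
    c.iter()
        .flat_map(|&x| d.iter().map(move |&y| x + y))
        .map(|s| sums.get(&-s).copied().unwrap_or(0))
        .sum()
}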
11f42219a729324b675d319da167d7eaecd5634a
2,085
#[macro_use]
extern crate criterion;

use std::iter;

use aw_ascii85::decode;
use criterion::Criterion;

const EXAMPLE_PLAIN: &[u8; 269] = b"Man is distinguished, not only by his reason, but by this singular passion from other animals, which is a lust of the mind, that by a perseverance of delight in the continued and indefatigable generation of knowledge, exceeds the short vehemence of any carnal pleasure.";

pub fn bench_decode(c: &mut Criterion) {
    c.bench_function("decode-empty", |b| b.iter(|| decode(b"<~~>")));

    let encoded = r#"<~9jqo^BlbD-BleB1DJ+*+F(f,q/0JhKF<GL>Cj@.4Gp$d7F!,L7@<6@)/0JDEF<G%<+EV:2F!,
O<DJ+*.@<*K0@<6L(Df-\0Ec5e;DffZ(EZee.Bl.9pF"AGXBPCsi+DGm>@3BB/F*&OCAfu2/AKY
i(DIb:@FD,*)+C]U=@3BN#EcYf8ATD3s@q?d$AftVqCh[NqF<G:8+EV:.+Cf>-FD5W8ARlolDIa
l(DId<j@<?3r@:F%a+D58'ATD4$Bl@l3De:,-DJs`8ARoFb/0JMK@qB4^F!,R<AKZ&-DfTqBG%G
>uD.RTpAKYo'+CT/5+Cei#DII?(E,9)oF*2M7/c~>"#
        .as_bytes();
    c.bench_function("decode-example", |b| b.iter(|| decode(encoded)));

    let decoded: Vec<u8> = (0..u8::MAX).into_iter().cycle().take(10 * 1024 * 1024).collect();
    let encoded = aw_ascii85::encode(&decoded[..]);
    c.bench_function("decode-10mb", |b| b.iter(|| decode(&encoded[..])));

    let decoded: Vec<u8> = iter::repeat(b'z').take(10 * 1024 * 1024).collect();
    let encoded = aw_ascii85::encode(&decoded[..]);
    c.bench_function("decode-z", |b| b.iter(|| decode(&encoded[..])));
}

pub fn bench_encode(c: &mut Criterion) {
    // These benchmarks must call `encode` to measure what their names claim;
    // they previously called `decode`.
    c.bench_function("encode-empty", |b| b.iter(|| aw_ascii85::encode(b"")));
    c.bench_function("encode-example", |b| b.iter(|| aw_ascii85::encode(&EXAMPLE_PLAIN[..])));

    let decoded: Vec<u8> = (0..u8::MAX).into_iter().cycle().take(10 * 1024 * 1024).collect();
    c.bench_function("encode-10mb", |b| b.iter(|| aw_ascii85::encode(&decoded[..])));

    let decoded: Vec<u8> = iter::repeat(0_u8).take(10 * 1024 * 1024).collect();
    c.bench_function("encode-z", |b| b.iter(|| aw_ascii85::encode(&decoded[..])));
}

criterion_group!(group_decode, bench_decode);
criterion_group!(group_encode, bench_encode);
criterion_main!(group_decode, group_encode);
44.361702
307
0.655156
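A possible refinement for the 10 MiB cases above, under the assumption that criterion's `BenchmarkGroup`/`Throughput` API is available in the version used: attaching a byte count makes the report show MB/s instead of raw iteration time. A sketch, not part of the crate's bench suite:

use criterion::{Criterion, Throughput};

pub fn bench_decode_throughput(c: &mut Criterion, encoded: &[u8], input_len: usize) {
    let mut group = c.benchmark_group("decode-throughput");
    // Report results as bytes processed per second.
    group.throughput(Throughput::Bytes(input_len as u64));
    group.bench_function("decode-10mb", |b| b.iter(|| aw_ascii85::decode(encoded)));
    group.finish();
}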
62d00dacb6f867ffc659eea96f09267a49d74e4d
169
extern crate bytes;
extern crate chrono;
extern crate digest;
extern crate scutiger_core;
extern crate sha2;
extern crate tempfile;

pub mod backend;
pub mod processor;
16.9
27
0.804734
569fe63813ec8d56639a0221a5d53676db4d63fb
1,392
use std::io;

// ========================================================================= //

#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum BlockType {
    Verbatim,
    AlignedOffset,
    Uncompressed,
}

impl BlockType {
    pub fn from_bits(bits: u32) -> io::Result<BlockType> {
        match bits {
            1 => Ok(BlockType::Verbatim),
            2 => Ok(BlockType::AlignedOffset),
            3 => Ok(BlockType::Uncompressed),
            _ => invalid_data!("Invalid LZX block type ({})", bits),
        }
    }

    pub fn to_bits(&self) -> u32 {
        match *self {
            BlockType::Verbatim => 1,
            BlockType::AlignedOffset => 2,
            BlockType::Uncompressed => 3,
        }
    }
}

// ========================================================================= //

#[cfg(test)]
mod tests {
    use super::BlockType;

    #[test]
    #[should_panic(expected = "Invalid LZX block type (7)")]
    fn invalid_block_type() {
        BlockType::from_bits(7).unwrap();
    }

    #[test]
    fn round_trip() {
        let btypes = &[
            BlockType::Verbatim,
            BlockType::AlignedOffset,
            BlockType::Uncompressed,
        ];
        for &btype in btypes {
            assert_eq!(BlockType::from_bits(btype.to_bits()).unwrap(), btype);
        }
    }
}

// ========================================================================= //
24.857143
79
0.442529
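One idiomatic alternative to the `from_bits` constructor above, sketched under the assumption that callers would prefer `BlockType::try_from(bits)?`: the same mapping via std's `TryFrom`, with an explicit `io::Error` in place of the crate-local `invalid_data!` macro. This is not the crate's actual API.

use std::convert::TryFrom;
use std::io;

impl TryFrom<u32> for BlockType {
    type Error = io::Error;

    fn try_from(bits: u32) -> io::Result<BlockType> {
        match bits {
            1 => Ok(BlockType::Verbatim),
            2 => Ok(BlockType::AlignedOffset),
            3 => Ok(BlockType::Uncompressed),
            _ => Err(io::Error::new(
                io::ErrorKind::InvalidData,
                format!("Invalid LZX block type ({})", bits),
            )),
        }
    }
}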
87efb02d8a4aebd63ba9ea06059771b724fa119d
72,513
use super::{ super::{machine::Machine, registers::SP, Error}, common, extract_opcode, instruction_length, utils::update_register, Instruction, Itype, R4type, Register, Rtype, Stype, Utype, }; use ckb_vm_definitions::instructions as insts; pub fn execute_instruction<Mac: Machine>( inst: Instruction, machine: &mut Mac, ) -> Result<Option<Mac::REG>, Error> { let op = extract_opcode(inst); let next_pc: Option<Mac::REG> = match op { insts::OP_SUB => { let i = Rtype(inst); common::sub(machine, i.rd(), i.rs1(), i.rs2()); None } insts::OP_SUBW => { let i = Rtype(inst); common::subw(machine, i.rd(), i.rs1(), i.rs2()); None } insts::OP_ADD => { let i = Rtype(inst); common::add(machine, i.rd(), i.rs1(), i.rs2()); None } insts::OP_ADDW => { let i = Rtype(inst); common::addw(machine, i.rd(), i.rs1(), i.rs2()); None } insts::OP_XOR => { let i = Rtype(inst); common::xor(machine, i.rd(), i.rs1(), i.rs2()); None } insts::OP_OR => { let i = Rtype(inst); common::or(machine, i.rd(), i.rs1(), i.rs2()); None } insts::OP_AND => { let i = Rtype(inst); common::and(machine, i.rd(), i.rs1(), i.rs2()); None } insts::OP_SLL => { let i = Rtype(inst); let shift_value = machine.registers()[i.rs2()].clone() & Mac::REG::from_u8(Mac::REG::SHIFT_MASK); let value = machine.registers()[i.rs1()].clone() << shift_value; update_register(machine, i.rd(), value); None } insts::OP_SLLW => { let i = Rtype(inst); let shift_value = machine.registers()[i.rs2()].clone() & Mac::REG::from_u8(0x1F); let value = machine.registers()[i.rs1()].clone() << shift_value; update_register(machine, i.rd(), value.sign_extend(&Mac::REG::from_u8(32))); None } insts::OP_SRL => { let i = Rtype(inst); let shift_value = machine.registers()[i.rs2()].clone() & Mac::REG::from_u8(Mac::REG::SHIFT_MASK); let value = machine.registers()[i.rs1()].clone() >> shift_value; update_register(machine, i.rd(), value); None } insts::OP_SRLW => { let i = Rtype(inst); let shift_value = machine.registers()[i.rs2()].clone() & Mac::REG::from_u8(0x1F); let value = machine.registers()[i.rs1()].zero_extend(&Mac::REG::from_u8(32)) >> shift_value; update_register(machine, i.rd(), value.sign_extend(&Mac::REG::from_u8(32))); None } insts::OP_SRA => { let i = Rtype(inst); let shift_value = machine.registers()[i.rs2()].clone() & Mac::REG::from_u8(Mac::REG::SHIFT_MASK); let value = machine.registers()[i.rs1()].signed_shr(&shift_value); update_register(machine, i.rd(), value); None } insts::OP_SRAW => { let i = Rtype(inst); let shift_value = machine.registers()[i.rs2()].clone() & Mac::REG::from_u8(0x1F); let value = machine.registers()[i.rs1()] .sign_extend(&Mac::REG::from_u8(32)) .signed_shr(&shift_value); update_register(machine, i.rd(), value.sign_extend(&Mac::REG::from_u8(32))); None } insts::OP_SLT => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = rs1_value.lt_s(&rs2_value); update_register(machine, i.rd(), value); None } insts::OP_SLTU => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = rs1_value.lt(&rs2_value); update_register(machine, i.rd(), value); None } insts::OP_LB | insts::OP_VERSION1_LB => { let i = Itype(inst); common::lb( machine, i.rd(), i.rs1(), i.immediate_s(), op == insts::OP_LB, )?; None } insts::OP_LH | insts::OP_VERSION1_LH => { let i = Itype(inst); common::lh( machine, i.rd(), i.rs1(), i.immediate_s(), op == insts::OP_LH, )?; None } insts::OP_LW | insts::OP_VERSION1_LW => { let i = Itype(inst); common::lw( 
machine, i.rd(), i.rs1(), i.immediate_s(), op == insts::OP_LW, )?; None } insts::OP_LD | insts::OP_VERSION1_LD => { let i = Itype(inst); common::ld( machine, i.rd(), i.rs1(), i.immediate_s(), op == insts::OP_LD, )?; None } insts::OP_LBU | insts::OP_VERSION1_LBU => { let i = Itype(inst); common::lbu( machine, i.rd(), i.rs1(), i.immediate_s(), op == insts::OP_LBU, )?; None } insts::OP_LHU | insts::OP_VERSION1_LHU => { let i = Itype(inst); common::lhu( machine, i.rd(), i.rs1(), i.immediate_s(), op == insts::OP_LHU, )?; None } insts::OP_LWU | insts::OP_VERSION1_LWU => { let i = Itype(inst); common::lwu( machine, i.rd(), i.rs1(), i.immediate_s(), op == insts::OP_LWU, )?; None } insts::OP_ADDI => { let i = Itype(inst); common::addi(machine, i.rd(), i.rs1(), i.immediate_s()); None } insts::OP_ADDIW => { let i = Itype(inst); common::addiw(machine, i.rd(), i.rs1(), i.immediate_s()); None } insts::OP_XORI => { let i = Itype(inst); common::xori(machine, i.rd(), i.rs1(), i.immediate_s()); None } insts::OP_ORI => { let i = Itype(inst); common::ori(machine, i.rd(), i.rs1(), i.immediate_s()); None } insts::OP_ANDI => { let i = Itype(inst); common::andi(machine, i.rd(), i.rs1(), i.immediate_s()); None } insts::OP_SLTI => { let i = Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let imm_value = Mac::REG::from_i32(i.immediate_s()); let value = rs1_value.lt_s(&imm_value); update_register(machine, i.rd(), value); None } insts::OP_SLTIU => { let i = Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let imm_value = Mac::REG::from_i32(i.immediate_s()); let value = rs1_value.lt(&imm_value); update_register(machine, i.rd(), value); None } insts::OP_JALR | insts::OP_VERSION1_JALR => { let i = Itype(inst); let link = machine.pc().overflowing_add(&Mac::REG::from_u8(4)); if op == insts::OP_JALR { update_register(machine, i.rd(), link.clone()); } let mut next_pc = machine.registers()[i.rs1()].overflowing_add(&Mac::REG::from_i32(i.immediate_s())); next_pc = next_pc & (!Mac::REG::one()); if op != insts::OP_JALR { update_register(machine, i.rd(), link); } Some(next_pc) } insts::OP_SLLI => { let i = Itype(inst); common::slli(machine, i.rd(), i.rs1(), i.immediate()); None } insts::OP_SRLI => { let i = Itype(inst); common::srli(machine, i.rd(), i.rs1(), i.immediate()); None } insts::OP_SRAI => { let i = Itype(inst); common::srai(machine, i.rd(), i.rs1(), i.immediate()); None } insts::OP_SLLIW => { let i = Itype(inst); common::slliw(machine, i.rd(), i.rs1(), i.immediate()); None } insts::OP_SRLIW => { let i = Itype(inst); common::srliw(machine, i.rd(), i.rs1(), i.immediate()); None } insts::OP_SRAIW => { let i = Itype(inst); common::sraiw(machine, i.rd(), i.rs1(), i.immediate()); None } insts::OP_SB => { let i = Stype(inst); common::sb(machine, i.rs1(), i.rs2(), i.immediate_s())?; None } insts::OP_SH => { let i = Stype(inst); common::sh(machine, i.rs1(), i.rs2(), i.immediate_s())?; None } insts::OP_SW => { let i = Stype(inst); common::sw(machine, i.rs1(), i.rs2(), i.immediate_s())?; None } insts::OP_SD => { let i = Stype(inst); common::sd(machine, i.rs1(), i.rs2(), i.immediate_s())?; None } insts::OP_BEQ => { let i = Stype(inst); let pc = machine.pc(); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let condition = rs1_value.eq(&rs2_value); let new_pc = condition.cond( &Mac::REG::from_i32(i.immediate_s()).overflowing_add(&pc), &Mac::REG::from_u8(4).overflowing_add(&pc), ); Some(new_pc) } insts::OP_BNE => { let i = Stype(inst); let pc = machine.pc(); let 
rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let condition = rs1_value.ne(&rs2_value); let new_pc = condition.cond( &Mac::REG::from_i32(i.immediate_s()).overflowing_add(&pc), &Mac::REG::from_u8(4).overflowing_add(&pc), ); Some(new_pc) } insts::OP_BLT => { let i = Stype(inst); let pc = machine.pc(); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let condition = rs1_value.lt_s(&rs2_value); let new_pc = condition.cond( &Mac::REG::from_i32(i.immediate_s()).overflowing_add(&pc), &Mac::REG::from_u8(4).overflowing_add(&pc), ); Some(new_pc) } insts::OP_BGE => { let i = Stype(inst); let pc = machine.pc(); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let condition = rs1_value.ge_s(&rs2_value); let new_pc = condition.cond( &Mac::REG::from_i32(i.immediate_s()).overflowing_add(&pc), &Mac::REG::from_u8(4).overflowing_add(&pc), ); Some(new_pc) } insts::OP_BLTU => { let i = Stype(inst); let pc = machine.pc(); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let condition = rs1_value.lt(&rs2_value); let new_pc = condition.cond( &Mac::REG::from_i32(i.immediate_s()).overflowing_add(&pc), &Mac::REG::from_u8(4).overflowing_add(&pc), ); Some(new_pc) } insts::OP_BGEU => { let i = Stype(inst); let pc = machine.pc(); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let condition = rs1_value.ge(&rs2_value); let new_pc = condition.cond( &Mac::REG::from_i32(i.immediate_s()).overflowing_add(&pc), &Mac::REG::from_u8(4).overflowing_add(&pc), ); Some(new_pc) } insts::OP_LUI => { let i = Utype(inst); update_register(machine, i.rd(), Mac::REG::from_i32(i.immediate_s())); None } insts::OP_AUIPC => { let i = Utype(inst); let value = machine .pc() .overflowing_add(&Mac::REG::from_i32(i.immediate_s())); update_register(machine, i.rd(), value); None } insts::OP_ECALL => { // The semantic of ECALL is determined by the hardware, which // is not part of the spec, hence here the implementation is // deferred to the machine. This way custom ECALLs might be // provided for different environments. 
machine.ecall()?; None } insts::OP_EBREAK => { machine.ebreak()?; None } insts::OP_FENCEI => None, insts::OP_FENCE => None, insts::OP_JAL => { let i = Utype(inst); common::jal(machine, i.rd(), i.immediate_s(), 4) } insts::OP_MUL => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = rs1_value.overflowing_mul(&rs2_value); update_register(machine, i.rd(), value); None } insts::OP_MULW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = rs1_value .zero_extend(&Mac::REG::from_u8(32)) .overflowing_mul(&rs2_value.zero_extend(&Mac::REG::from_u8(32))); update_register(machine, i.rd(), value.sign_extend(&Mac::REG::from_u8(32))); None } insts::OP_MULH => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = rs1_value.overflowing_mul_high_signed(&rs2_value); update_register(machine, i.rd(), value); None } insts::OP_MULHSU => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = rs1_value.overflowing_mul_high_signed_unsigned(&rs2_value); update_register(machine, i.rd(), value); None } insts::OP_MULHU => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = rs1_value.overflowing_mul_high_unsigned(&rs2_value); update_register(machine, i.rd(), value); None } insts::OP_DIV => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = rs1_value.overflowing_div_signed(&rs2_value); update_register(machine, i.rd(), value); None } insts::OP_DIVW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let rs1_value = rs1_value.sign_extend(&Mac::REG::from_u8(32)); let rs2_value = rs2_value.sign_extend(&Mac::REG::from_u8(32)); let value = rs1_value.overflowing_div_signed(&rs2_value); update_register(machine, i.rd(), value.sign_extend(&Mac::REG::from_u8(32))); None } insts::OP_DIVU => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = rs1_value.overflowing_div(&rs2_value); update_register(machine, i.rd(), value); None } insts::OP_DIVUW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let rs1_value = rs1_value.zero_extend(&Mac::REG::from_u8(32)); let rs2_value = rs2_value.zero_extend(&Mac::REG::from_u8(32)); let value = rs1_value.overflowing_div(&rs2_value); update_register(machine, i.rd(), value.sign_extend(&Mac::REG::from_u8(32))); None } insts::OP_REM => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = rs1_value.overflowing_rem_signed(&rs2_value); update_register(machine, i.rd(), value); None } insts::OP_REMW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let rs1_value = rs1_value.sign_extend(&Mac::REG::from_u8(32)); let rs2_value = rs2_value.sign_extend(&Mac::REG::from_u8(32)); let value = rs1_value.overflowing_rem_signed(&rs2_value); update_register(machine, i.rd(), value.sign_extend(&Mac::REG::from_u8(32))); None } insts::OP_REMU => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let 
rs2_value = &machine.registers()[i.rs2()]; let value = rs1_value.overflowing_rem(&rs2_value); update_register(machine, i.rd(), value); None } insts::OP_REMUW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let rs1_value = rs1_value.zero_extend(&Mac::REG::from_u8(32)); let rs2_value = rs2_value.zero_extend(&Mac::REG::from_u8(32)); let value = rs1_value.overflowing_rem(&rs2_value); update_register(machine, i.rd(), value.sign_extend(&Mac::REG::from_u8(32))); None } insts::OP_RVC_SUB => { let i = Rtype(inst); common::sub(machine, i.rd(), i.rs1(), i.rs2()); None } insts::OP_RVC_ADD => { let i = Rtype(inst); common::add(machine, i.rd(), i.rs1(), i.rs2()); None } insts::OP_RVC_XOR => { let i = Rtype(inst); common::xor(machine, i.rd(), i.rs1(), i.rs2()); None } insts::OP_RVC_OR => { let i = Rtype(inst); common::or(machine, i.rd(), i.rs1(), i.rs2()); None } insts::OP_RVC_AND => { let i = Rtype(inst); common::and(machine, i.rd(), i.rs1(), i.rs2()); None } // > C.SUBW (RV64/128; RV32 RES) insts::OP_RVC_SUBW => { let i = Rtype(inst); common::subw(machine, i.rd(), i.rs1(), i.rs2()); None } // > C.ADDW (RV64/128; RV32 RES) insts::OP_RVC_ADDW => { let i = Rtype(inst); common::addw(machine, i.rd(), i.rs1(), i.rs2()); None } insts::OP_RVC_ADDI => { let i = Itype(inst); common::addi(machine, i.rd(), i.rs1(), i.immediate_s()); None } insts::OP_RVC_ANDI => { let i = Itype(inst); common::andi(machine, i.rd(), i.rs1(), i.immediate_s()); None } insts::OP_RVC_ADDIW => { let i = Itype(inst); common::addiw(machine, i.rd(), i.rs1(), i.immediate_s()); None } insts::OP_RVC_SLLI => { let i = Itype(inst); common::slli(machine, i.rd(), i.rs1(), i.immediate()); None } insts::OP_RVC_SRLI => { let i = Itype(inst); common::srli(machine, i.rd(), i.rs1(), i.immediate()); None } insts::OP_RVC_SRAI => { let i = Itype(inst); common::srai(machine, i.rd(), i.rs1(), i.immediate()); None } insts::OP_RVC_LW | insts::OP_VERSION1_RVC_LW => { let i = Itype(inst); common::lw( machine, i.rd(), i.rs1(), i.immediate_s(), op == insts::OP_RVC_LW, )?; None } insts::OP_RVC_LD | insts::OP_VERSION1_RVC_LD => { let i = Itype(inst); common::ld( machine, i.rd(), i.rs1(), i.immediate_s(), op == insts::OP_RVC_LD, )?; None } insts::OP_RVC_SW => { let i = Stype(inst); common::sw(machine, i.rs1(), i.rs2(), i.immediate_s())?; None } insts::OP_RVC_SD => { let i = Stype(inst); common::sd(machine, i.rs1(), i.rs2(), i.immediate_s())?; None } insts::OP_RVC_LI => { let i = Utype(inst); update_register(machine, i.rd(), Mac::REG::from_i32(i.immediate_s())); None } insts::OP_RVC_LUI => { let i = Utype(inst); update_register(machine, i.rd(), Mac::REG::from_i32(i.immediate_s())); None } insts::OP_RVC_ADDI4SPN => { let i = Utype(inst); let value = machine.registers()[SP].overflowing_add(&Mac::REG::from_u32(i.immediate())); update_register(machine, i.rd(), value); None } insts::OP_RVC_LWSP | insts::OP_VERSION1_RVC_LWSP => { let i = Utype(inst); common::lw( machine, i.rd(), SP, i.immediate_s(), op == insts::OP_RVC_LWSP, )?; None } insts::OP_RVC_LDSP | insts::OP_VERSION1_RVC_LDSP => { let i = Utype(inst); common::ld( machine, i.rd(), SP, i.immediate_s(), op == insts::OP_RVC_LDSP, )?; None } insts::OP_RVC_SWSP => { let i = Stype(inst); common::sw(machine, SP, i.rs2(), i.immediate_s())?; None } insts::OP_RVC_SDSP => { let i = Stype(inst); common::sd(machine, SP, i.rs2(), i.immediate_s())?; None } insts::OP_RVC_BEQZ => { let i = Stype(inst); let pc = machine.pc(); let condition = 
machine.registers()[i.rs1()].eq(&Mac::REG::zero()); let new_pc = condition.cond( &Mac::REG::from_i32(i.immediate_s()).overflowing_add(&pc), &Mac::REG::from_u8(2).overflowing_add(&pc), ); Some(new_pc) } insts::OP_RVC_BNEZ => { let i = Stype(inst); let pc = machine.pc(); let condition = machine.registers()[i.rs1()] .eq(&Mac::REG::zero()) .logical_not(); let new_pc = condition.cond( &Mac::REG::from_i32(i.immediate_s()).overflowing_add(&pc), &Mac::REG::from_u8(2).overflowing_add(&pc), ); Some(new_pc) } insts::OP_RVC_MV => { let i = Rtype(inst); let value = machine.registers()[i.rs2()].clone(); update_register(machine, i.rd(), value); None } insts::OP_RVC_JAL => { let i = Utype(inst); common::jal(machine, 1, i.immediate_s(), 2) } insts::OP_RVC_J => { let i = Utype(inst); Some( machine .pc() .overflowing_add(&Mac::REG::from_i32(i.immediate_s())), ) } insts::OP_RVC_JR => { let i = Stype(inst); let mut next_pc = machine.registers()[i.rs1()].clone(); next_pc = next_pc & (!Mac::REG::one()); Some(next_pc) } insts::OP_RVC_JALR | insts::OP_VERSION1_RVC_JALR => { let i = Stype(inst); let link = machine.pc().overflowing_add(&Mac::REG::from_u8(2)); if op == insts::OP_RVC_JALR { update_register(machine, 1, link.clone()); } let mut next_pc = machine.registers()[i.rs1()].clone(); next_pc = next_pc & (!Mac::REG::one()); if op != insts::OP_RVC_JALR { update_register(machine, 1, link); } Some(next_pc) } insts::OP_RVC_ADDI16SP => { let i = Itype(inst); let value = machine.registers()[SP].overflowing_add(&Mac::REG::from_i32(i.immediate_s())); update_register(machine, SP, value); None } insts::OP_RVC_SRLI64 => None, insts::OP_RVC_SRAI64 => None, insts::OP_RVC_SLLI64 => None, insts::OP_RVC_NOP => None, insts::OP_RVC_EBREAK => { machine.ebreak()?; None } insts::OP_CLZ => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let value = rs1_value.clz(); update_register(machine, i.rd(), value); None } insts::OP_CLZW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let value = rs1_value .zero_extend(&Mac::REG::from_u8(32)) .clz() .overflowing_sub(&Mac::REG::from_u8(32)); update_register(machine, i.rd(), value); None } insts::OP_CTZ => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let value = rs1_value.ctz(); update_register(machine, i.rd(), value); None } insts::OP_CTZW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let value = (rs1_value.clone() | Mac::REG::from_u64(0xffff_ffff_0000_0000)).ctz(); update_register(machine, i.rd(), value); None } insts::OP_PCNT => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let value = rs1_value.pcnt(); update_register(machine, i.rd(), value); None } insts::OP_PCNTW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let value = rs1_value.zero_extend(&Mac::REG::from_u8(32)).pcnt(); update_register(machine, i.rd(), value); None } insts::OP_ANDN => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = rs1_value.clone() & !rs2_value.clone(); update_register(machine, i.rd(), value); None } insts::OP_ORN => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = rs1_value.clone() | !rs2_value.clone(); update_register(machine, i.rd(), value); None } insts::OP_XNOR => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = rs1_value.clone() ^ 
!rs2_value.clone(); update_register(machine, i.rd(), value); None } insts::OP_PACK => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let xlen_half = Mac::REG::from_u8(Mac::REG::BITS / 2); let upper = rs2_value.clone() << xlen_half.clone(); let lower = rs1_value.clone() << xlen_half.clone() >> xlen_half; let value = upper | lower; update_register(machine, i.rd(), value); None } insts::OP_PACKU => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let xlen_half = Mac::REG::from_u8(Mac::REG::BITS / 2); let upper = rs2_value.clone() >> xlen_half.clone() << xlen_half.clone(); let lower = rs1_value.clone() >> xlen_half; let value = upper | lower; update_register(machine, i.rd(), value); None } insts::OP_PACKH => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let upper = (rs2_value.clone() & Mac::REG::from_u8(0xff)) << Mac::REG::from_u8(8); let lower = rs1_value.clone() & Mac::REG::from_u8(0xff); let value = upper | lower; update_register(machine, i.rd(), value); None } insts::OP_PACKW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let upper = rs2_value.clone() << Mac::REG::from_u8(16); let lower = rs1_value.zero_extend(&Mac::REG::from_u8(16)); let value = (upper | lower).sign_extend(&Mac::REG::from_u8(32)); update_register(machine, i.rd(), value); None } insts::OP_PACKUW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let upper = rs2_value.sign_extend(&Mac::REG::from_u8(32)) & Mac::REG::from_u64(0xffff_ffff_ffff_0000); let lower = rs1_value.clone() << Mac::REG::from_u8(32) >> Mac::REG::from_u8(48); let value = upper | lower; update_register(machine, i.rd(), value); None } insts::OP_MIN => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = rs1_value.lt_s(&rs2_value).cond(&rs1_value, &rs2_value); update_register(machine, i.rd(), value); None } insts::OP_MINU => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = rs1_value.lt(&rs2_value).cond(&rs1_value, &rs2_value); update_register(machine, i.rd(), value); None } insts::OP_MAX => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = rs1_value.ge_s(&rs2_value).cond(&rs1_value, &rs2_value); update_register(machine, i.rd(), value); None } insts::OP_MAXU => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = rs1_value.ge(&rs2_value).cond(&rs1_value, &rs2_value); update_register(machine, i.rd(), value); None } insts::OP_SEXTB => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let shift = &Mac::REG::from_u8(Mac::REG::BITS - 8); let value = rs1_value.signed_shl(shift).signed_shr(shift); update_register(machine, i.rd(), value); None } insts::OP_SEXTH => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let shift = &Mac::REG::from_u8(Mac::REG::BITS - 16); let value = rs1_value.signed_shl(shift).signed_shr(shift); update_register(machine, i.rd(), value); None } insts::OP_SBSET => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = 
&machine.registers()[i.rs2()]; let shamt = rs2_value.clone() & Mac::REG::from_u8(Mac::REG::SHIFT_MASK); let value = rs1_value.clone() | (Mac::REG::one() << shamt); update_register(machine, i.rd(), value); None } insts::OP_SBSETI => { let i = Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &Mac::REG::from_u32(i.immediate()); let shamt = rs2_value.clone() & Mac::REG::from_u8(Mac::REG::SHIFT_MASK); let value = rs1_value.clone() | (Mac::REG::one() << shamt); update_register(machine, i.rd(), value); None } insts::OP_SBSETW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let shamt = rs2_value.clone() & Mac::REG::from_u8(31); let value = rs1_value.clone() | (Mac::REG::one() << shamt); update_register(machine, i.rd(), value.sign_extend(&Mac::REG::from_u8(32))); None } insts::OP_SBSETIW => { let i = Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &Mac::REG::from_u32(i.immediate()); let shamt = rs2_value.clone() & Mac::REG::from_u8(31); let value = rs1_value.clone() | (Mac::REG::one() << shamt); update_register(machine, i.rd(), value.sign_extend(&Mac::REG::from_u8(32))); None } insts::OP_SBCLR => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let shamt = rs2_value.clone() & Mac::REG::from_u8(Mac::REG::SHIFT_MASK); let value = rs1_value.clone() & !(Mac::REG::one() << shamt); update_register(machine, i.rd(), value); None } insts::OP_SBCLRI => { let i = Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &Mac::REG::from_u32(i.immediate()); let shamt = rs2_value.clone() & Mac::REG::from_u8(Mac::REG::SHIFT_MASK); let value = rs1_value.clone() & !(Mac::REG::one() << shamt); update_register(machine, i.rd(), value); None } insts::OP_SBCLRW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let shamt = rs2_value.clone() & Mac::REG::from_u8(31); let value = rs1_value.clone() & Mac::REG::from_u64(0xffff_fffe_ffff_fffe).rol(&shamt); update_register(machine, i.rd(), value.sign_extend(&Mac::REG::from_u8(32))); None } insts::OP_SBCLRIW => { let i = Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &Mac::REG::from_u32(i.immediate()); let shamt = rs2_value.clone() & Mac::REG::from_u8(31); let value = rs1_value.clone() & Mac::REG::from_u64(0xffff_fffe_ffff_fffe).rol(&shamt); update_register(machine, i.rd(), value.sign_extend(&Mac::REG::from_u8(32))); None } insts::OP_SBINV => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let shamt = rs2_value.clone() & Mac::REG::from_u8(Mac::REG::SHIFT_MASK); let value = rs1_value.clone() ^ (Mac::REG::one() << shamt); update_register(machine, i.rd(), value); None } insts::OP_SBINVI => { let i = Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &Mac::REG::from_u32(i.immediate()); let shamt = rs2_value.clone() & Mac::REG::from_u8(Mac::REG::SHIFT_MASK); let value = rs1_value.clone() ^ (Mac::REG::one() << shamt); update_register(machine, i.rd(), value); None } insts::OP_SBINVW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let shamt = rs2_value.clone() & Mac::REG::from_u8(31); let value = rs1_value.clone() ^ (Mac::REG::one() << shamt); update_register(machine, i.rd(), 
value.sign_extend(&Mac::REG::from_u8(32))); None } insts::OP_SBINVIW => { let i = Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &Mac::REG::from_u32(i.immediate()); let shamt = rs2_value.clone() & Mac::REG::from_u8(31); let value = rs1_value.clone() ^ (Mac::REG::one() << shamt); update_register(machine, i.rd(), value.sign_extend(&Mac::REG::from_u8(32))); None } insts::OP_SBEXT => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let shamt = rs2_value.clone() & Mac::REG::from_u8(Mac::REG::SHIFT_MASK); let value = Mac::REG::one() & (rs1_value.clone() >> shamt); update_register(machine, i.rd(), value); None } insts::OP_SBEXTI => { let i = Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &Mac::REG::from_u32(i.immediate()); let shamt = rs2_value.clone() & Mac::REG::from_u8(Mac::REG::SHIFT_MASK); let value = Mac::REG::one() & (rs1_value.clone() >> shamt); update_register(machine, i.rd(), value); None } insts::OP_SBEXTW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let shamt = rs2_value.clone() & Mac::REG::from_u8(31); let value = Mac::REG::one() & (rs1_value.clone() >> shamt); update_register(machine, i.rd(), value.sign_extend(&Mac::REG::from_u8(32))); None } insts::OP_SLO => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let shamt = rs2_value.clone() & Mac::REG::from_u8(Mac::REG::SHIFT_MASK); let value = rs1_value.slo(&shamt); update_register(machine, i.rd(), value); None } insts::OP_SLOI => { let i = Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &Mac::REG::from_u32(i.immediate()); let shamt = rs2_value.clone() & Mac::REG::from_u8(Mac::REG::SHIFT_MASK); let value = rs1_value.slo(&shamt); update_register(machine, i.rd(), value); None } insts::OP_SLOW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let shamt = rs2_value.clone() & Mac::REG::from_u8(31); let value = rs1_value.slo(&shamt).sign_extend(&Mac::REG::from_u8(32)); update_register(machine, i.rd(), value); None } insts::OP_SLOIW => { let i = Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &Mac::REG::from_u32(i.immediate()); let shamt = rs2_value.clone() & Mac::REG::from_u8(31); let value = rs1_value.slo(&shamt).sign_extend(&Mac::REG::from_u8(32)); update_register(machine, i.rd(), value); None } insts::OP_SRO => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let shamt = rs2_value.clone() & Mac::REG::from_u8(Mac::REG::SHIFT_MASK); let value = rs1_value.sro(&shamt); update_register(machine, i.rd(), value); None } insts::OP_SROI => { let i = Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &Mac::REG::from_u32(i.immediate()); let shamt = rs2_value.clone() & Mac::REG::from_u8(Mac::REG::SHIFT_MASK); let value = rs1_value.sro(&shamt); update_register(machine, i.rd(), value); None } insts::OP_SROW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let shamt = rs2_value.clone() & Mac::REG::from_u8(31); let value = (rs1_value.clone() | Mac::REG::from_u64(0xffff_ffff_0000_0000)) >> shamt; update_register(machine, i.rd(), value.sign_extend(&Mac::REG::from_u8(32))); None } insts::OP_SROIW => { let i = 
Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &Mac::REG::from_u32(i.immediate()); let shamt = rs2_value.clone() & Mac::REG::from_u8(31); let value = (rs1_value.clone() | Mac::REG::from_u64(0xffff_ffff_0000_0000)) >> shamt; update_register(machine, i.rd(), value.sign_extend(&Mac::REG::from_u8(32))); None } insts::OP_ROR => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let shamt = rs2_value.clone() & Mac::REG::from_u8(Mac::REG::SHIFT_MASK); let value = rs1_value.ror(&shamt); update_register(machine, i.rd(), value); None } insts::OP_RORI => { let i = Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &Mac::REG::from_u32(i.immediate()); let shamt = rs2_value.clone() & Mac::REG::from_u8(Mac::REG::SHIFT_MASK); let value = rs1_value.ror(&shamt); update_register(machine, i.rd(), value); None } insts::OP_RORW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let shamt = rs2_value.clone() & Mac::REG::from_u8(31); let twins = rs1_value .zero_extend(&Mac::REG::from_u8(32)) .overflowing_mul(&Mac::REG::from_u64(0x_0000_0001_0000_0001)); let value = twins.ror(&shamt).sign_extend(&Mac::REG::from_u8(32)); update_register(machine, i.rd(), value); None } insts::OP_RORIW => { let i = Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &Mac::REG::from_u32(i.immediate()); let shamt = rs2_value.clone() & Mac::REG::from_u8(31); let twins = rs1_value .zero_extend(&Mac::REG::from_u8(32)) .overflowing_mul(&Mac::REG::from_u64(0x_0000_0001_0000_0001)); let value = twins.ror(&shamt).sign_extend(&Mac::REG::from_u8(32)); update_register(machine, i.rd(), value); None } insts::OP_ROL => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let shamt = rs2_value.clone() & Mac::REG::from_u8(Mac::REG::SHIFT_MASK); let value = rs1_value.rol(&shamt); update_register(machine, i.rd(), value); None } insts::OP_ROLW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let shamt = rs2_value.clone() & Mac::REG::from_u8(31); let twins = rs1_value .zero_extend(&Mac::REG::from_u8(32)) .overflowing_mul(&Mac::REG::from_u64(0x_0000_0001_0000_0001)); let value = twins.rol(&shamt).sign_extend(&Mac::REG::from_u8(32)); update_register(machine, i.rd(), value); None } insts::OP_GREV => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = if Mac::REG::BITS == 32 { Mac::REG::from_u32(common::grev32(rs1_value.to_u32(), rs2_value.to_u32())) } else { Mac::REG::from_u64(common::grev64(rs1_value.to_u64(), rs2_value.to_u64())) }; update_register(machine, i.rd(), value); None } insts::OP_GREVI => { let i = Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = i.immediate(); let value = if Mac::REG::BITS == 32 { Mac::REG::from_u32(common::grev32(rs1_value.to_u32(), rs2_value.to_u32())) } else { Mac::REG::from_u64(common::grev64(rs1_value.to_u64(), rs2_value.to_u64())) }; update_register(machine, i.rd(), value); None } insts::OP_GREVW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = common::grev32(rs1_value.to_u32(), rs2_value.to_u32()); let r = Mac::REG::from_u32(value).sign_extend(&Mac::REG::from_u8(32)); update_register(machine, 
i.rd(), r); None } insts::OP_GREVIW => { let i = Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = i.immediate(); let value = common::grev32(rs1_value.to_u32(), rs2_value.to_u32()); let r = Mac::REG::from_u32(value).sign_extend(&Mac::REG::from_u8(32)); update_register(machine, i.rd(), r); None } insts::OP_SHFL => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = if Mac::REG::BITS == 32 { Mac::REG::from_u32(common::shfl32(rs1_value.to_u32(), rs2_value.to_u32())) } else { Mac::REG::from_u64(common::shfl64(rs1_value.to_u64(), rs2_value.to_u64())) }; update_register(machine, i.rd(), value); None } insts::OP_UNSHFL => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = if Mac::REG::BITS == 32 { Mac::REG::from_u32(common::unshfl32(rs1_value.to_u32(), rs2_value.to_u32())) } else { Mac::REG::from_u64(common::unshfl64(rs1_value.to_u64(), rs2_value.to_u64())) }; update_register(machine, i.rd(), value); None } insts::OP_SHFLI => { let i = Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = i.immediate(); let value = if Mac::REG::BITS == 32 { Mac::REG::from_u32(common::shfl32(rs1_value.to_u32(), rs2_value.to_u32())) } else { Mac::REG::from_u64(common::shfl64(rs1_value.to_u64(), rs2_value.to_u64())) }; update_register(machine, i.rd(), value); None } insts::OP_UNSHFLI => { let i = Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = i.immediate(); let value = if Mac::REG::BITS == 32 { Mac::REG::from_u32(common::unshfl32(rs1_value.to_u32(), rs2_value.to_u32())) } else { Mac::REG::from_u64(common::unshfl64(rs1_value.to_u64(), rs2_value.to_u64())) }; update_register(machine, i.rd(), value); None } insts::OP_SHFLW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = common::shfl32(rs1_value.to_u32(), rs2_value.to_u32()); let r = Mac::REG::from_u32(value).sign_extend(&Mac::REG::from_u8(32)); update_register(machine, i.rd(), r); None } insts::OP_UNSHFLW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = common::unshfl32(rs1_value.to_u32(), rs2_value.to_u32()); let r = Mac::REG::from_u32(value).sign_extend(&Mac::REG::from_u8(32)); update_register(machine, i.rd(), r); None } insts::OP_GORC => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = if Mac::REG::BITS == 32 { Mac::REG::from_u32(common::gorc32(rs1_value.to_u32(), rs2_value.to_u32())) } else { Mac::REG::from_u64(common::gorc64(rs1_value.to_u64(), rs2_value.to_u64())) }; update_register(machine, i.rd(), value); None } insts::OP_GORCI => { let i = Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = i.immediate(); let value = if Mac::REG::BITS == 32 { Mac::REG::from_u32(common::gorc32(rs1_value.to_u32(), rs2_value.to_u32())) } else { Mac::REG::from_u64(common::gorc64(rs1_value.to_u64(), rs2_value.to_u64())) }; update_register(machine, i.rd(), value); None } insts::OP_GORCW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = common::gorc32(rs1_value.to_u32(), rs2_value.to_u32()); let r = Mac::REG::from_u32(value).sign_extend(&Mac::REG::from_u8(32)); update_register(machine, i.rd(), r); None } 
insts::OP_GORCIW => { let i = Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = i.immediate(); let value = common::gorc32(rs1_value.to_u32(), rs2_value.to_u32()); let r = Mac::REG::from_u32(value).sign_extend(&Mac::REG::from_u8(32)); update_register(machine, i.rd(), r); None } insts::OP_BFP => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = if Mac::REG::BITS == 32 { Mac::REG::from_u32(common::bfp32(rs1_value.to_u32(), rs2_value.to_u32())) } else { Mac::REG::from_u64(common::bfp64(rs1_value.to_u64(), rs2_value.to_u64())) }; update_register(machine, i.rd(), value); None } insts::OP_BFPW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = common::bfp32(rs1_value.to_u32(), rs2_value.to_u32()); let r = Mac::REG::from_u32(value).sign_extend(&Mac::REG::from_u8(32)); update_register(machine, i.rd(), r); None } insts::OP_BEXT => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = if Mac::REG::BITS == 32 { Mac::REG::from_u32(common::bext32(rs1_value.to_u32(), rs2_value.to_u32())) } else { Mac::REG::from_u64(common::bext64(rs1_value.to_u64(), rs2_value.to_u64())) }; update_register(machine, i.rd(), value); None } insts::OP_BEXTW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = common::bext32(rs1_value.to_u32(), rs2_value.to_u32()); let r = Mac::REG::from_u32(value).sign_extend(&Mac::REG::from_u8(32)); update_register(machine, i.rd(), r); None } insts::OP_BDEP => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = if Mac::REG::BITS == 32 { Mac::REG::from_u32(common::bdep32(rs1_value.to_u32(), rs2_value.to_u32())) } else { Mac::REG::from_u64(common::bdep64(rs1_value.to_u64(), rs2_value.to_u64())) }; update_register(machine, i.rd(), value); None } insts::OP_BDEPW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = common::bdep32(rs1_value.to_u32(), rs2_value.to_u32()); let r = Mac::REG::from_u32(value).sign_extend(&Mac::REG::from_u8(32)); update_register(machine, i.rd(), r); None } insts::OP_CLMUL => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = if Mac::REG::BITS == 32 { Mac::REG::from_u32(common::clmul32(rs1_value.to_u32(), rs2_value.to_u32())) } else { Mac::REG::from_u64(common::clmul64(rs1_value.to_u64(), rs2_value.to_u64())) }; update_register(machine, i.rd(), value); None } insts::OP_CLMULW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = common::clmul32(rs1_value.to_u32(), rs2_value.to_u32()); let r = Mac::REG::from_u32(value).sign_extend(&Mac::REG::from_u8(32)); update_register(machine, i.rd(), r); None } insts::OP_CLMULH => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = if Mac::REG::BITS == 32 { Mac::REG::from_u32(common::clmulh32(rs1_value.to_u32(), rs2_value.to_u32())) } else { Mac::REG::from_u64(common::clmulh64(rs1_value.to_u64(), rs2_value.to_u64())) }; update_register(machine, i.rd(), value); None } insts::OP_CLMULHW => { let i = Rtype(inst); let 
rs1_value = machine.registers()[i.rs1()].to_u32();
            let rs2_value = machine.registers()[i.rs2()].to_u32();
            let value = common::clmulh32(rs1_value, rs2_value);
            let r = Mac::REG::from_u32(value).sign_extend(&Mac::REG::from_u8(32));
            update_register(machine, i.rd(), r);
            None
        }
        insts::OP_CLMULR => {
            let i = Rtype(inst);
            let rs1_value = &machine.registers()[i.rs1()];
            let rs2_value = &machine.registers()[i.rs2()];
            let value = if Mac::REG::BITS == 32 {
                Mac::REG::from_u32(common::clmulr32(rs1_value.to_u32(), rs2_value.to_u32()))
            } else {
                Mac::REG::from_u64(common::clmulr64(rs1_value.to_u64(), rs2_value.to_u64()))
            };
            update_register(machine, i.rd(), value);
            None
        }
        insts::OP_CLMULRW => {
            let i = Rtype(inst);
            let rs1_value = machine.registers()[i.rs1()].to_u32();
            let rs2_value = machine.registers()[i.rs2()].to_u32();
            let value = common::clmulr32(rs1_value, rs2_value);
            let r = Mac::REG::from_u32(value).sign_extend(&Mac::REG::from_u8(32));
            update_register(machine, i.rd(), r);
            None
        }
        insts::OP_CRC32B => {
            let i = Rtype(inst);
            let rs1_value = &machine.registers()[i.rs1()];
            let value = if Mac::REG::BITS == 32 {
                Mac::REG::from_u32(common::crc3232(rs1_value.to_u32(), 8))
            } else {
                Mac::REG::from_u64(common::crc3264(rs1_value.to_u64(), 8))
            };
            update_register(machine, i.rd(), value);
            None
        }
        insts::OP_CRC32H => {
            let i = Rtype(inst);
            let rs1_value = &machine.registers()[i.rs1()];
            let value = if Mac::REG::BITS == 32 {
                Mac::REG::from_u32(common::crc3232(rs1_value.to_u32(), 16))
            } else {
                Mac::REG::from_u64(common::crc3264(rs1_value.to_u64(), 16))
            };
            update_register(machine, i.rd(), value);
            None
        }
        insts::OP_CRC32W => {
            let i = Rtype(inst);
            let rs1_value = &machine.registers()[i.rs1()];
            let value = if Mac::REG::BITS == 32 {
                Mac::REG::from_u32(common::crc3232(rs1_value.to_u32(), 32))
            } else {
                Mac::REG::from_u64(common::crc3264(rs1_value.to_u64(), 32))
            };
            update_register(machine, i.rd(), value);
            None
        }
        insts::OP_CRC32CB => {
            let i = Rtype(inst);
            let rs1_value = &machine.registers()[i.rs1()];
            let value = if Mac::REG::BITS == 32 {
                Mac::REG::from_u32(common::crc32c32(rs1_value.to_u32(), 8))
            } else {
                Mac::REG::from_u64(common::crc32c64(rs1_value.to_u64(), 8))
            };
            update_register(machine, i.rd(), value);
            None
        }
        insts::OP_CRC32CH => {
            let i = Rtype(inst);
            let rs1_value = &machine.registers()[i.rs1()];
            let value = if Mac::REG::BITS == 32 {
                Mac::REG::from_u32(common::crc32c32(rs1_value.to_u32(), 16))
            } else {
                Mac::REG::from_u64(common::crc32c64(rs1_value.to_u64(), 16))
            };
            update_register(machine, i.rd(), value);
            None
        }
        insts::OP_CRC32CW => {
            let i = Rtype(inst);
            let rs1_value = &machine.registers()[i.rs1()];
            let value = if Mac::REG::BITS == 32 {
                Mac::REG::from_u32(common::crc32c32(rs1_value.to_u32(), 32))
            } else {
                Mac::REG::from_u64(common::crc32c64(rs1_value.to_u64(), 32))
            };
            update_register(machine, i.rd(), value);
            None
        }
        insts::OP_CRC32D => {
            let i = Rtype(inst);
            let rs1_value = &machine.registers()[i.rs1()];
            let value = if Mac::REG::BITS == 32 {
                Mac::REG::from_u32(common::crc3232(rs1_value.to_u32(), 64))
            } else {
                Mac::REG::from_u64(common::crc3264(rs1_value.to_u64(), 64))
            };
            update_register(machine, i.rd(), value);
            None
        }
        insts::OP_CRC32CD => {
            let i = Rtype(inst);
            let rs1_value = &machine.registers()[i.rs1()];
            let value = if Mac::REG::BITS == 32 {
                Mac::REG::from_u32(common::crc32c32(rs1_value.to_u32(), 64))
            } else {
                Mac::REG::from_u64(common::crc32c64(rs1_value.to_u64(), 64))
            };
            update_register(machine, i.rd(), value);
            None
        }
        insts::OP_BMATOR => {
            let i = Rtype(inst);
            let rs1_value =
&machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = common::bmator(rs1_value.to_u64(), rs2_value.to_u64()); update_register(machine, i.rd(), Mac::REG::from_u64(value)); None } insts::OP_BMATXOR => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = common::bmatxor(rs1_value.to_u64(), rs2_value.to_u64()); update_register(machine, i.rd(), Mac::REG::from_u64(value)); None } insts::OP_BMATFLIP => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let value = common::bmatflip(rs1_value.to_u64()); update_register(machine, i.rd(), Mac::REG::from_u64(value)); None } insts::OP_CMIX => { let i = R4type(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let rs3_value = &machine.registers()[i.rs3()]; let value = (rs1_value.clone() & rs2_value.clone()) | (rs3_value.clone() & !rs2_value.clone()); update_register(machine, i.rd(), value); None } insts::OP_CMOV => { let i = R4type(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let rs3_value = &machine.registers()[i.rs3()]; let value = rs2_value.eq(&Mac::REG::zero()).cond(rs3_value, rs1_value); update_register(machine, i.rd(), value); None } insts::OP_FSL => { let i = R4type(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let rs3_value = &machine.registers()[i.rs3()]; let value = rs1_value.fsl(rs3_value, rs2_value); update_register(machine, i.rd(), value); None } insts::OP_FSLW => { let i = R4type(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let rs3_value = &machine.registers()[i.rs3()]; let upper = rs1_value.clone() << Mac::REG::from_u8(32); let lower = rs3_value.clone().zero_extend(&Mac::REG::from_u8(32)); let value = upper | lower; let value = value.rol(rs2_value).signed_shr(&Mac::REG::from_u8(32)); update_register(machine, i.rd(), value); None } insts::OP_FSR => { let i = R4type(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let rs3_value = &machine.registers()[i.rs3()]; let value = rs1_value.fsr(rs3_value, rs2_value); update_register(machine, i.rd(), value); None } insts::OP_FSRW => { let i = R4type(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let rs3_value = &machine.registers()[i.rs3()]; let upper = rs3_value.clone() << Mac::REG::from_u8(32); let lower = rs1_value.clone().zero_extend(&Mac::REG::from_u8(32)); let value = upper | lower; let value = value.ror(rs2_value).sign_extend(&Mac::REG::from_u8(32)); update_register(machine, i.rd(), value); None } insts::OP_FSRI => { let i = Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let immediate = i.immediate(); let rs2_value = &Mac::REG::from_u32(immediate & 0x3f); let rs3_value = &machine.registers()[immediate as usize >> 7]; let value = rs1_value.fsr(rs3_value, rs2_value); update_register(machine, i.rd(), value); None } insts::OP_FSRIW => { let i = R4type(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &Mac::REG::from_u32(i.rs2() as u32); let rs3_value = &machine.registers()[i.rs3()]; let upper = rs3_value.clone() << Mac::REG::from_u8(32); let lower = rs1_value.clone().zero_extend(&Mac::REG::from_u8(32)); let value = upper | lower; let value = value.ror(rs2_value).sign_extend(&Mac::REG::from_u8(32)); 
update_register(machine, i.rd(), value); None } insts::OP_SH1ADD => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = (rs1_value.clone() << Mac::REG::from_u32(1)).overflowing_add(rs2_value); update_register(machine, i.rd(), value); None } insts::OP_SH2ADD => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = (rs1_value.clone() << Mac::REG::from_u32(2)).overflowing_add(rs2_value); update_register(machine, i.rd(), value); None } insts::OP_SH3ADD => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = (rs1_value.clone() << Mac::REG::from_u32(3)).overflowing_add(rs2_value); update_register(machine, i.rd(), value); None } insts::OP_SH1ADDUW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let rs1_z = rs1_value.clone().zero_extend(&Mac::REG::from_u8(32)); let value = (rs1_z << Mac::REG::from_u32(1)).overflowing_add(rs2_value); update_register(machine, i.rd(), value); None } insts::OP_SH2ADDUW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let rs1_z = rs1_value.clone().zero_extend(&Mac::REG::from_u8(32)); let value = (rs1_z << Mac::REG::from_u32(2)).overflowing_add(rs2_value); update_register(machine, i.rd(), value); None } insts::OP_SH3ADDUW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let rs1_z = rs1_value.clone().zero_extend(&Mac::REG::from_u8(32)); let value = (rs1_z << Mac::REG::from_u32(3)).overflowing_add(rs2_value); update_register(machine, i.rd(), value); None } insts::OP_ADDWU => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = rs1_value .overflowing_add(&rs2_value) .zero_extend(&Mac::REG::from_u8(32)); update_register(machine, i.rd(), value); None } insts::OP_SUBWU => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let value = rs1_value .overflowing_sub(&rs2_value) .zero_extend(&Mac::REG::from_u8(32)); update_register(machine, i.rd(), value); None } insts::OP_ADDIWU => { let i = Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &Mac::REG::from_i32(i.immediate_s()); let value = rs1_value .overflowing_add(rs2_value) .zero_extend(&Mac::REG::from_u8(32)); update_register(machine, i.rd(), value); None } insts::OP_ADDUW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let rs2_u = rs2_value.zero_extend(&Mac::REG::from_u8(32)); let value = rs1_value.overflowing_add(&rs2_u); update_register(machine, i.rd(), value); None } insts::OP_SUBUW => { let i = Rtype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = &machine.registers()[i.rs2()]; let rs2_u = rs2_value.zero_extend(&Mac::REG::from_u8(32)); let value = rs1_value.overflowing_sub(&rs2_u); update_register(machine, i.rd(), value); None } insts::OP_SLLIUW => { let i = Itype(inst); let rs1_value = &machine.registers()[i.rs1()]; let rs2_value = Mac::REG::from_u32(i.immediate()); let rs1_u = rs1_value.clone().zero_extend(&Mac::REG::from_u8(32)); let shamt = rs2_value & Mac::REG::from_u8(Mac::REG::SHIFT_MASK); let value = rs1_u << shamt; 
            update_register(machine, i.rd(), value);
            None
        }
        insts::OP_CUSTOM_LOAD_IMM => {
            let i = Utype(inst);
            let value = Mac::REG::from_i32(i.immediate_s());
            update_register(machine, i.rd(), value);
            None
        }
        insts::OP_UNLOADED | insts::OP_CUSTOM_TRACE_END..=255 => {
            return Err(Error::InvalidOp(op as u8))
        }
    };
    Ok(next_pc)
}

pub fn execute<Mac: Machine>(inst: Instruction, machine: &mut Mac) -> Result<(), Error> {
    let next_pc = execute_instruction(inst, machine)?;
    let default_instruction_size = instruction_length(inst);
    let default_next_pc = machine
        .pc()
        .overflowing_add(&Mac::REG::from_u8(default_instruction_size));
    machine.set_pc(next_pc.unwrap_or(default_next_pc));
    Ok(())
}
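// A minimal standalone sketch of the "twins" trick the OP_RORW/OP_ROLW arms
// above rely on, written against plain u64 instead of the Mac::REG
// abstraction; the function name is illustrative and not part of this file.
// Duplicating the low 32 bits into both halves lets a single 64-bit rotate
// compute a 32-bit rotate for any shift amount.
fn _rorw_sketch(rs1: u64, shamt: u32) -> u64 {
    // 0x0000_0001_0000_0001 copies the low half into the high half.
    let twins = (rs1 & 0xffff_ffff).wrapping_mul(0x0000_0001_0000_0001);
    let rotated = twins.rotate_right(shamt & 31);
    // Sign-extend the low 32 bits, mirroring the executor's final step.
    rotated as u32 as i32 as i64 as u64
}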
40.852394
100
0.49285
0e2f0b6666e67cc45e88d4bb10f1e6dbfab35f15
1,338
#[doc = "Register `SPINLOCK20` reader"] pub struct R(crate::R<SPINLOCK20_SPEC>); impl core::ops::Deref for R { type Target = crate::R<SPINLOCK20_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<SPINLOCK20_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<SPINLOCK20_SPEC>) -> Self { R(reader) } } #[doc = "Reading from a spinlock address will: - Return 0 if lock is already locked - Otherwise return nonzero, and simultaneously claim the lock Writing (any value) releases the lock. If core 0 and core 1 attempt to claim the same lock simultaneously, core 0 wins. The value returned on success is 0x1 << lock number. This register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api). For information about available fields see [spinlock20](index.html) module"] pub struct SPINLOCK20_SPEC; impl crate::RegisterSpec for SPINLOCK20_SPEC { type Ux = u32; } #[doc = "`read()` method returns [spinlock20::R](R) reader structure"] impl crate::Readable for SPINLOCK20_SPEC { type Reader = R; } #[doc = "`reset()` method sets SPINLOCK20 to value 0"] impl crate::Resettable for SPINLOCK20_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
31.857143
122
0.671151
e5d3a34b4231751d6680f6f9283a2a037456fcc8
3,692
/*
 * StrafesNET Public API
 *
 * ### How to use To request an api key open a support request in the [issue tracker](https://issues.strafes.net/client/index.php#/types/3/issues). ### Default API request rate limits are as follows * 100 requests per minute * 3000 requests per hour ### Game IDs ``` 1 Bhop 2 Surf ``` ### Style IDs ``` 1 Autohop 2 Scroll 3 Sideways 4 Half-Sideways 5 W-Only 6 A-Only 7 Backwards ```
 *
 * OpenAPI spec version: 1.0.0
 *
 * Generated by: https://github.com/swagger-api/swagger-codegen.git
 */

use std::rc::Rc;
use std::borrow::Borrow;
use std::borrow::Cow;
use std::collections::HashMap;

use hyper;
use serde_json;
use futures;
use futures::{Future, Stream};

use hyper::header::UserAgent;

use super::{Error, configuration};

pub struct UserApiClient<C: hyper::client::Connect> {
    configuration: Rc<configuration::Configuration<C>>,
}

impl<C: hyper::client::Connect> UserApiClient<C> {
    pub fn new(configuration: Rc<configuration::Configuration<C>>) -> UserApiClient<C> {
        UserApiClient {
            configuration: configuration,
        }
    }
}

pub trait UserApi {
    fn user_user_id_get(&self, user_id: i32) -> Box<Future<Item = ::models::User, Error = Error<serde_json::Value>>>;
}

impl<C: hyper::client::Connect>UserApi for UserApiClient<C> {
    fn user_user_id_get(&self, user_id: i32) -> Box<Future<Item = ::models::User, Error = Error<serde_json::Value>>> {
        let configuration: &configuration::Configuration<C> = self.configuration.borrow();

        let mut auth_headers = HashMap::<String, String>::new();
        let mut auth_query = HashMap::<String, String>::new();
        if let Some(ref apikey) = configuration.api_key {
            let key = apikey.key.clone();
            let val = match apikey.prefix {
                Some(ref prefix) => format!("{} {}", prefix, key),
                None => key,
            };
            auth_headers.insert("api-key".to_owned(), val);
        };
        let method = hyper::Method::Get;

        let query_string = {
            let mut query = ::url::form_urlencoded::Serializer::new(String::new());
            for (key, val) in &auth_query {
                query.append_pair(key, val);
            }
            query.finish()
        };
        let uri_str = format!("{}/user/{userId}?{}", configuration.base_path, query_string, userId=user_id);

        // TODO(farcaller): handle error
        // if let Err(e) = uri {
        //     return Box::new(futures::future::err(e));
        // }
        let mut uri: hyper::Uri = uri_str.parse().unwrap();

        let mut req = hyper::Request::new(method, uri);

        if let Some(ref user_agent) = configuration.user_agent {
            req.headers_mut().set(UserAgent::new(Cow::Owned(user_agent.clone())));
        }

        for (key, val) in auth_headers {
            req.headers_mut().set_raw(key, val);
        }

        // send request
        Box::new(
            configuration.client.request(req)
                .map_err(|e| Error::from(e))
                .and_then(|resp| {
                    let status = resp.status();
                    resp.body().concat2()
                        .and_then(move |body| Ok((status, body)))
                        .map_err(|e| Error::from(e))
                })
                .and_then(|(status, body)| {
                    if status.is_success() {
                        Ok(body)
                    } else {
                        Err(Error::from((status, &*body)))
                    }
                })
                .and_then(|body| {
                    let parsed: Result<::models::User, _> = serde_json::from_slice(&body);
                    parsed.map_err(|e| Error::from(e))
                })
        )
    }
}
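// A hedged sketch of driving the client above on hyper 0.11's event loop.
// `configuration::Configuration::new` is assumed from the usual shape of
// swagger-codegen's generated configuration module and may differ; the user
// id is a placeholder.
fn _fetch_user_sketch(core: &mut tokio_core::reactor::Core) {
    let client = hyper::Client::new(&core.handle());
    let config = Rc::new(configuration::Configuration::new(client));
    let api = UserApiClient::new(config);
    // Drive the boxed future to completion on the reactor.
    let _user = core.run(api.user_user_id_get(1));
}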
33.563636
385
0.562568
d9a4b57574aed3c6b8afb3f5b1b7e22fad354866
9,487
// Copyright 2018-2021 the Deno authors. All rights reserved. MIT license.

use deno_core::error::AnyError;
use deno_core::ResourceId;
use deno_core::{OpState, Resource};
use serde::Deserialize;
use std::borrow::Cow;
use std::convert::{TryFrom, TryInto};

use super::error::WebGpuResult;

pub(crate) struct WebGpuBindGroupLayout(
  pub(crate) wgpu_core::id::BindGroupLayoutId,
);
impl Resource for WebGpuBindGroupLayout {
  fn name(&self) -> Cow<str> {
    "webGPUBindGroupLayout".into()
  }
}

pub(crate) struct WebGpuBindGroup(pub(crate) wgpu_core::id::BindGroupId);
impl Resource for WebGpuBindGroup {
  fn name(&self) -> Cow<str> {
    "webGPUBindGroup".into()
  }
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct GpuBufferBindingLayout {
  r#type: GpuBufferBindingType,
  has_dynamic_offset: bool,
  min_binding_size: u64,
}

#[derive(Deserialize)]
#[serde(rename_all = "kebab-case")]
enum GpuBufferBindingType {
  Uniform,
  Storage,
  ReadOnlyStorage,
}

impl From<GpuBufferBindingType> for wgpu_types::BufferBindingType {
  fn from(binding_type: GpuBufferBindingType) -> Self {
    match binding_type {
      GpuBufferBindingType::Uniform => wgpu_types::BufferBindingType::Uniform,
      GpuBufferBindingType::Storage => {
        wgpu_types::BufferBindingType::Storage { read_only: false }
      }
      GpuBufferBindingType::ReadOnlyStorage => {
        wgpu_types::BufferBindingType::Storage { read_only: true }
      }
    }
  }
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct GpuSamplerBindingLayout {
  r#type: wgpu_types::SamplerBindingType,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct GpuTextureBindingLayout {
  sample_type: GpuTextureSampleType,
  view_dimension: wgpu_types::TextureViewDimension,
  multisampled: bool,
}

#[derive(Deserialize)]
#[serde(rename_all = "kebab-case")]
enum GpuTextureSampleType {
  Float,
  UnfilterableFloat,
  Depth,
  Sint,
  Uint,
}

impl From<GpuTextureSampleType> for wgpu_types::TextureSampleType {
  fn from(sample_type: GpuTextureSampleType) -> Self {
    match sample_type {
      GpuTextureSampleType::Float => {
        wgpu_types::TextureSampleType::Float { filterable: true }
      }
      GpuTextureSampleType::UnfilterableFloat => {
        wgpu_types::TextureSampleType::Float { filterable: false }
      }
      GpuTextureSampleType::Depth => wgpu_types::TextureSampleType::Depth,
      GpuTextureSampleType::Sint => wgpu_types::TextureSampleType::Sint,
      GpuTextureSampleType::Uint => wgpu_types::TextureSampleType::Uint,
    }
  }
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct GpuStorageTextureBindingLayout {
  access: GpuStorageTextureAccess,
  format: wgpu_types::TextureFormat,
  view_dimension: wgpu_types::TextureViewDimension,
}

#[derive(Deserialize)]
#[serde(rename_all = "kebab-case")]
enum GpuStorageTextureAccess {
  WriteOnly,
}

impl From<GpuStorageTextureAccess> for wgpu_types::StorageTextureAccess {
  fn from(access: GpuStorageTextureAccess) -> Self {
    match access {
      GpuStorageTextureAccess::WriteOnly => {
        wgpu_types::StorageTextureAccess::WriteOnly
      }
    }
  }
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct GpuBindGroupLayoutEntry {
  binding: u32,
  visibility: u32,
  #[serde(flatten)]
  binding_type: GpuBindingType,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
enum GpuBindingType {
  Buffer(GpuBufferBindingLayout),
  Sampler(GpuSamplerBindingLayout),
  Texture(GpuTextureBindingLayout),
  StorageTexture(GpuStorageTextureBindingLayout),
}

impl TryFrom<GpuBindingType> for wgpu_types::BindingType {
  type Error = AnyError;

  fn try_from(
    binding_type: GpuBindingType,
  ) -> Result<wgpu_types::BindingType, Self::Error> {
    let binding_type = match binding_type {
      GpuBindingType::Buffer(buffer) => wgpu_types::BindingType::Buffer {
        ty: buffer.r#type.into(),
        has_dynamic_offset: buffer.has_dynamic_offset,
        min_binding_size: std::num::NonZeroU64::new(buffer.min_binding_size),
      },
      GpuBindingType::Sampler(sampler) => {
        wgpu_types::BindingType::Sampler(sampler.r#type)
      }
      GpuBindingType::Texture(texture) => wgpu_types::BindingType::Texture {
        sample_type: texture.sample_type.into(),
        view_dimension: texture.view_dimension,
        multisampled: texture.multisampled,
      },
      GpuBindingType::StorageTexture(storage_texture) => {
        wgpu_types::BindingType::StorageTexture {
          access: storage_texture.access.into(),
          format: storage_texture.format,
          view_dimension: storage_texture.view_dimension,
        }
      }
    };
    Ok(binding_type)
  }
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CreateBindGroupLayoutArgs {
  device_rid: ResourceId,
  label: Option<String>,
  entries: Vec<GpuBindGroupLayoutEntry>,
}

pub fn op_webgpu_create_bind_group_layout(
  state: &mut OpState,
  args: CreateBindGroupLayoutArgs,
  _: (),
) -> Result<WebGpuResult, AnyError> {
  let instance = state.borrow::<super::Instance>();
  let device_resource = state
    .resource_table
    .get::<super::WebGpuDevice>(args.device_rid)?;
  let device = device_resource.0;

  let mut entries = vec![];
  for entry in args.entries {
    entries.push(wgpu_types::BindGroupLayoutEntry {
      binding: entry.binding,
      visibility: wgpu_types::ShaderStages::from_bits(entry.visibility)
        .unwrap(),
      ty: entry.binding_type.try_into()?,
      count: None, // native-only
    });
  }

  let descriptor = wgpu_core::binding_model::BindGroupLayoutDescriptor {
    label: args.label.map(Cow::from),
    entries: Cow::from(entries),
  };

  gfx_put!(device => instance.device_create_bind_group_layout(
    device,
    &descriptor,
    std::marker::PhantomData
  ) => state, WebGpuBindGroupLayout)
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CreatePipelineLayoutArgs {
  device_rid: ResourceId,
  label: Option<String>,
  bind_group_layouts: Vec<u32>,
}

pub fn op_webgpu_create_pipeline_layout(
  state: &mut OpState,
  args: CreatePipelineLayoutArgs,
  _: (),
) -> Result<WebGpuResult, AnyError> {
  let instance = state.borrow::<super::Instance>();
  let device_resource = state
    .resource_table
    .get::<super::WebGpuDevice>(args.device_rid)?;
  let device = device_resource.0;

  let mut bind_group_layouts = vec![];
  for rid in &args.bind_group_layouts {
    let bind_group_layout =
      state.resource_table.get::<WebGpuBindGroupLayout>(*rid)?;
    bind_group_layouts.push(bind_group_layout.0);
  }

  let descriptor = wgpu_core::binding_model::PipelineLayoutDescriptor {
    label: args.label.map(Cow::from),
    bind_group_layouts: Cow::from(bind_group_layouts),
    push_constant_ranges: Default::default(),
  };

  gfx_put!(device => instance.device_create_pipeline_layout(
    device,
    &descriptor,
    std::marker::PhantomData
  ) => state, super::pipeline::WebGpuPipelineLayout)
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
struct GpuBindGroupEntry {
  binding: u32,
  kind: String,
  resource: ResourceId,
  offset: Option<u64>,
  size: Option<u64>,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CreateBindGroupArgs {
  device_rid: ResourceId,
  label: Option<String>,
  layout: ResourceId,
  entries: Vec<GpuBindGroupEntry>,
}

pub fn op_webgpu_create_bind_group(
  state: &mut OpState,
  args: CreateBindGroupArgs,
  _: (),
) -> Result<WebGpuResult, AnyError> {
  let instance = state.borrow::<super::Instance>();
  let device_resource = state
    .resource_table
    .get::<super::WebGpuDevice>(args.device_rid)?;
  let device = device_resource.0;

  let mut entries = vec![];
  for entry in &args.entries {
    let e = wgpu_core::binding_model::BindGroupEntry {
      binding: entry.binding,
      resource: match entry.kind.as_str() {
        "GPUSampler" => {
          let sampler_resource = state
            .resource_table
            .get::<super::sampler::WebGpuSampler>(entry.resource)?;
          wgpu_core::binding_model::BindingResource::Sampler(sampler_resource.0)
        }
        "GPUTextureView" => {
          let texture_view_resource = state
            .resource_table
            .get::<super::texture::WebGpuTextureView>(entry.resource)?;
          wgpu_core::binding_model::BindingResource::TextureView(
            texture_view_resource.0,
          )
        }
        "GPUBufferBinding" => {
          let buffer_resource = state
            .resource_table
            .get::<super::buffer::WebGpuBuffer>(entry.resource)?;
          wgpu_core::binding_model::BindingResource::Buffer(
            wgpu_core::binding_model::BufferBinding {
              buffer_id: buffer_resource.0,
              offset: entry.offset.unwrap_or(0),
              size: std::num::NonZeroU64::new(entry.size.unwrap_or(0)),
            },
          )
        }
        _ => unreachable!(),
      },
    };
    entries.push(e);
  }

  let bind_group_layout =
    state.resource_table.get::<WebGpuBindGroupLayout>(args.layout)?;

  let descriptor = wgpu_core::binding_model::BindGroupDescriptor {
    label: args.label.map(Cow::from),
    layout: bind_group_layout.0,
    entries: Cow::from(entries),
  };

  gfx_put!(device => instance.device_create_bind_group(
    device,
    &descriptor,
    std::marker::PhantomData
  ) => state, WebGpuBindGroup)
}
27.739766
80
0.68947
fb74135182ad8c4d07b574138dca6644f93b0bf9
5,229
// Copyright Materialize, Inc. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.

//! SQL-dataflow translation.

#![deny(missing_debug_implementations)]

use ::expr::GlobalId;
use catalog::names::{DatabaseSpecifier, FullName};
use catalog::{Catalog, CatalogEntry};
use dataflow_types::{PeekWhen, RowSetFinishing, SinkConnector, SourceConnector};
use ore::future::MaybeFuture;
use repr::{RelationDesc, Row, ScalarType};
use sql_parser::parser::Parser as SqlParser;

pub use session::{InternalSession, PlanSession, PreparedStatement, Session, TransactionStatus};
pub use sql_parser::ast::{ObjectType, Statement};
pub use statement::StatementContext;

pub mod normalize;

mod expr;
mod query;
mod scope;
mod session;
mod statement;
mod transform;

// this is used by sqllogictest to turn sql values into `Datum`
pub use query::scalar_type_from_sql;

/// Instructions for executing a SQL query.
#[derive(Debug)]
pub enum Plan {
    CreateDatabase {
        name: String,
        if_not_exists: bool,
    },
    CreateSchema {
        database_name: DatabaseSpecifier,
        schema_name: String,
        if_not_exists: bool,
    },
    CreateSource {
        name: FullName,
        source: Source,
        if_not_exists: bool,
    },
    CreateSink {
        name: FullName,
        sink: Sink,
        if_not_exists: bool,
    },
    CreateTable {
        name: FullName,
        desc: RelationDesc,
        if_not_exists: bool,
    },
    CreateView {
        name: FullName,
        view: View,
        /// The ID of the object that this view is replacing, if any.
        replace: Option<GlobalId>,
        /// whether we should auto-materialize the view
        materialize: bool,
    },
    CreateIndex {
        name: FullName,
        index: Index,
        if_not_exists: bool,
    },
    DropDatabase {
        name: String,
    },
    DropSchema {
        database_name: DatabaseSpecifier,
        schema_name: String,
    },
    DropItems {
        items: Vec<GlobalId>,
        ty: ObjectType,
    },
    EmptyQuery,
    ShowAllVariables,
    ShowVariable(String),
    SetVariable {
        /// The name of the variable
        name: String,
        value: String,
    },
    /// Nothing needs to happen, but the frontend must be notified
    StartTransaction,
    /// Commit a transaction
    ///
    /// We don't do anything for transactions, so other than changing the session state
    /// this is a no-op
    Commit,
    /// Rollback a transaction
    ///
    /// We don't do anything for transactions, so other than changing the session state
    /// this is a no-op
    Rollback,
    Peek {
        source: ::expr::RelationExpr,
        when: PeekWhen,
        finishing: RowSetFinishing,
        materialize: bool,
    },
    Tail(CatalogEntry),
    SendRows(Vec<Row>),
    ExplainPlan(::expr::RelationExpr),
    SendDiffs {
        id: GlobalId,
        updates: Vec<(Row, isize)>,
        affected_rows: usize,
        kind: MutationKind,
    },
    ShowViews {
        ids: Vec<(String, GlobalId)>,
        full: bool,
        materialized: bool,
    },
}

#[derive(Clone, Debug)]
pub struct Source {
    pub create_sql: String,
    pub connector: SourceConnector,
    pub desc: RelationDesc,
}

#[derive(Clone, Debug)]
pub struct Sink {
    pub create_sql: String,
    pub from: GlobalId,
    pub connector: SinkConnector,
}

#[derive(Clone, Debug)]
pub struct View {
    pub create_sql: String,
    pub expr: ::expr::RelationExpr,
    pub desc: RelationDesc,
}

#[derive(Clone, Debug)]
pub struct Index {
    pub create_sql: String,
    pub on: GlobalId,
    pub keys: Vec<::expr::ScalarExpr>,
}

#[derive(Debug)]
pub enum MutationKind {
    Insert,
    Update,
    Delete,
}

/// A vector of values to which parameter references should be bound.
#[derive(Debug)]
pub struct Params {
    pub datums: Row,
    pub types: Vec<ScalarType>,
}

/// Parses a raw SQL string into a [`Statement`].
pub fn parse(sql: String) -> Result<Vec<Statement>, failure::Error> {
    Ok(SqlParser::parse_sql(sql)?)
}

/// Produces a [`Plan`] from a [`Statement`].
pub fn plan(
    catalog: &Catalog,
    session: &dyn PlanSession,
    stmt: Statement,
    params: &Params,
) -> MaybeFuture<'static, Result<Plan, failure::Error>> {
    statement::handle_statement(catalog, session, stmt, params)
}

/// Determines the type of the rows that will be returned by `stmt` and the type
/// of the parameters required by `stmt`. If the statement will not produce a
/// result set (e.g., most `CREATE` or `DROP` statements), no `RelationDesc`
/// will be returned. If the query uses no parameters, then the returned vector
/// of types will be empty.
pub fn describe(
    catalog: &Catalog,
    session: &Session,
    stmt: Statement,
) -> Result<(Option<RelationDesc>, Vec<pgrepr::Type>), failure::Error> {
    let (desc, types) = statement::describe_statement(catalog, session, stmt)?;
    let types = types.into_iter().map(pgrepr::Type::from).collect();
    Ok((desc, types))
}
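// A minimal sketch of the pipeline exposed above: `parse` lowers raw SQL to
// `Statement`s, which `plan` and `describe` then process against a catalog
// and session. The SQL text is illustrative.
fn _parse_sketch() -> Result<Vec<Statement>, failure::Error> {
    parse("CREATE DATABASE account; SELECT 1".to_owned())
}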
25.886139
95
0.647734
72039c64092cc1fdc462b0eca1bdc50ebdc02479
1,470
use actix::prelude::*;
use influx_db_client::{Client, Point, Value};

#[derive(Clone, Deserialize)]
pub struct InfluxSettings {
    pub uri: String,
    pub db: String,
    pub user: String,
    pub password: String,
}

pub fn get_client(settings: InfluxSettings) -> Client {
    Client::new(settings.uri, settings.db)
        .set_authentication(settings.user, settings.password)
}

pub fn send(client: Client) {
    let point = Point::new("test1")
        .add_tag("tags", Value::String("filter".to_string()))
        .add_field("count", Value::Integer(1))
        .to_owned();

    // if Precision is None, the default is second
    // Multiple write
    let _ = client.write_point(point, None, None).unwrap();
}

pub struct InfluxEmitter {
    client: Client,
}

impl InfluxEmitter {
    pub fn from_settings(settings: InfluxSettings) -> Self {
        let client = get_client(settings);
        Self { client }
    }
}

#[derive(Message, Debug)]
pub struct Metric(pub Point);

impl Actor for InfluxEmitter {
    type Context = Context<Self>;

    fn started(&mut self, _ctx: &mut Self::Context) {
        info!("Starting Influx Emitter");
    }

    fn stopped(&mut self, _ctx: &mut Self::Context) {
        info!("Stopping Influx Emitter");
    }
}

impl Handler<Metric> for InfluxEmitter {
    type Result = ();

    fn handle(&mut self, msg: Metric, _ctx: &mut Self::Context) {
        self.client.write_point(msg.0, None, None).unwrap();
    }
}
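// A hedged usage sketch: inside a running actix System, start the emitter
// and fire a point at it. The settings values are placeholders.
fn _emit_sketch() {
    let settings = InfluxSettings {
        uri: "http://localhost:8086".to_owned(),
        db: "metrics".to_owned(),
        user: "admin".to_owned(),
        password: "secret".to_owned(),
    };
    let addr = InfluxEmitter::from_settings(settings).start();
    let point = Point::new("test1")
        .add_field("count", Value::Integer(1))
        .to_owned();
    // do_send queues the Metric message without waiting for the result.
    addr.do_send(Metric(point));
}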
24.098361
65
0.633333
79d152812ae2b7e5df9f6cadd9d0fab95e1c2dc3
32,833
use crate::graphql_api::avatar::change_picture_display; use crate::graphql_api::avatar::save_picture; use crate::settings::Fossil; use chrono::DateTime; use chrono::Utc; use cis_profile::crypto::Signer; use cis_profile::schema::AccessInformationProviderSubObject; use cis_profile::schema::Display; use cis_profile::schema::IdentitiesAttributesValuesArray; use cis_profile::schema::KeyValue; use cis_profile::schema::Profile; use cis_profile::schema::PublisherAuthority; use cis_profile::schema::StandardAttributeString; use cis_profile::schema::StandardAttributeValues; use dino_park_trust::Trust; use failure::format_err; use failure::Error; use juniper::GraphQLInputObject; use std::collections::BTreeMap; const DISPLAY_ANY: &[Display; 6] = &[ Display::Private, Display::Staff, Display::Ndaed, Display::Vouched, Display::Authenticated, Display::Public, ]; const DISPLAY_NOT_PRIVATE: &[Display; 5] = &[ Display::Staff, Display::Ndaed, Display::Vouched, Display::Authenticated, Display::Public, ]; const DISPLAY_PRIVATE_STAFF: &[Display; 2] = &[Display::Private, Display::Staff]; fn create_usernames_key(typ: &str) -> String { format!("HACK#{}", typ) } fn update_access_information_display( d: &Option<Display>, p: &mut AccessInformationProviderSubObject, now: &DateTime<Utc>, store: &impl Signer, allowed: &[Display], ) -> Result<bool, Error> { if *d != p.metadata.display { if let Some(display) = &d { if !allowed.contains(display) { return Err(format_err!("invalid display level")); } // Initialize with empty values if there are now access groups. if p.values.is_none() { p.values = Some(KeyValue(BTreeMap::default())); } p.metadata.display = Some(display.clone()); p.metadata.last_modified = *now; p.signature.publisher.name = PublisherAuthority::Mozilliansorg; store.sign_attribute(p)?; return Ok(true); } } Ok(false) } async fn update_picture( s: &Option<StringWithDisplay>, p: &mut StandardAttributeString, uuid: &StandardAttributeString, now: &DateTime<Utc>, store: &impl Signer, fossil_settings: &Fossil, ) -> Result<bool, Error> { let mut changed = false; if let Some(new_picture) = s { if new_picture.display != p.metadata.display { if let Some(display) = &new_picture.display { if !DISPLAY_NOT_PRIVATE.contains(display) { return Err(format_err!("invalid display level")); } // if display changed but field is null change it to empty string if p.value.is_none() { p.value = Some(String::default()); } p.metadata.display = Some(display.clone()); changed = true; } } if new_picture.value != p.value && new_picture.value != Some(String::default()) { if let Some(display) = &p.metadata.display { if let Some(value) = &new_picture.value { let uuid = uuid .value .as_ref() .ok_or_else(|| failure::err_msg("no uuid in profile"))?; let url = save_picture( &value, uuid, &display, p.value.as_deref(), &fossil_settings.upload_endpoint, ) .await?; p.value = Some(url); changed = true; } } } else if changed && p.value != Some(String::default()) { // if only the display level changed we have to send a display update to fossil if let Some(display) = &p.metadata.display { let uuid = uuid .value .as_ref() .ok_or_else(|| failure::err_msg("no uuid in profile"))?; let url = change_picture_display( uuid, &display, p.value.as_deref(), &fossil_settings.upload_endpoint, ) .await?; p.value = Some(url); changed = true; } } else if new_picture.value != p.value && new_picture.value == Some(String::default()) { // TODO: delete picture p.value = new_picture.value.clone(); changed = true; } if changed { p.metadata.last_modified = *now; 
p.signature.publisher.name = PublisherAuthority::Mozilliansorg; store.sign_attribute(p)?; } } Ok(changed) } fn update_google_identity( google: &IdentityWithDisplay, p: &mut IdentitiesAttributesValuesArray, now: &DateTime<Utc>, store: &impl Signer, ) -> Result<bool, Error> { let mut changed_google = false; if google.remove.unwrap_or_default() { p.google_oauth2_id.metadata.display = Some(Display::Staff); p.google_primary_email.metadata.display = Some(Display::Staff); p.google_oauth2_id.value = Some(String::default()); p.google_primary_email.value = Some(String::default()); changed_google = true; } else if google.display != p.google_oauth2_id.metadata.display || google.display != p.google_primary_email.metadata.display { if let Some(display) = &google.display { if !DISPLAY_NOT_PRIVATE.contains(display) { return Err(format_err!("invalid display level")); } if p.google_oauth2_id.value.is_none() { p.google_oauth2_id.value = Some(String::default()) } if p.google_primary_email.value.is_none() { p.google_primary_email.value = Some(String::default()) } p.google_oauth2_id.metadata.display = Some(display.clone()); p.google_primary_email.metadata.display = Some(display.clone()); changed_google = true; } } if changed_google { p.google_oauth2_id.metadata.last_modified = *now; p.google_primary_email.metadata.last_modified = now.to_owned(); p.google_oauth2_id.signature.publisher.name = PublisherAuthority::Mozilliansorg; p.google_primary_email.signature.publisher.name = PublisherAuthority::Mozilliansorg; store.sign_attribute(&mut p.google_oauth2_id)?; store.sign_attribute(&mut p.google_primary_email)?; } Ok(changed_google) } fn update_bugzilla_identity( bugzilla: &IdentityWithDisplay, p: &mut IdentitiesAttributesValuesArray, u: &mut StandardAttributeValues, now: &DateTime<Utc>, store: &impl Signer, ) -> Result<bool, Error> { let mut changed_bugzilla = false; let mut changed_usernames = false; if bugzilla.remove.unwrap_or_default() { p.bugzilla_mozilla_org_id.metadata.display = Some(Display::Staff); p.bugzilla_mozilla_org_primary_email.metadata.display = Some(Display::Staff); if let Some(KeyValue(usernames)) = &mut u.values { if usernames.remove(&create_usernames_key("BMOMAIL")).is_some() { changed_usernames = true; } if usernames.remove(&create_usernames_key("BMONICK")).is_some() { changed_usernames = true; } } p.bugzilla_mozilla_org_id.value = Some(String::default()); p.bugzilla_mozilla_org_primary_email.value = Some(String::default()); changed_bugzilla = true; } else if bugzilla.display != p.bugzilla_mozilla_org_id.metadata.display || bugzilla.display != p.bugzilla_mozilla_org_primary_email.metadata.display { if let Some(display) = &bugzilla.display { if !DISPLAY_NOT_PRIVATE.contains(display) { return Err(format_err!("invalid display level")); } if p.bugzilla_mozilla_org_id.value.is_none() { p.bugzilla_mozilla_org_id.value = Some(String::default()) } if p.bugzilla_mozilla_org_primary_email.value.is_none() { p.bugzilla_mozilla_org_primary_email.value = Some(String::default()) } p.bugzilla_mozilla_org_id.metadata.display = Some(display.clone()); p.bugzilla_mozilla_org_primary_email.metadata.display = Some(display.clone()); changed_bugzilla = true; } } if changed_bugzilla { p.bugzilla_mozilla_org_id.metadata.last_modified = *now; p.bugzilla_mozilla_org_primary_email.metadata.last_modified = now.to_owned(); p.bugzilla_mozilla_org_id.signature.publisher.name = PublisherAuthority::Mozilliansorg; p.bugzilla_mozilla_org_primary_email .signature .publisher .name = PublisherAuthority::Mozilliansorg; 
store.sign_attribute(&mut p.bugzilla_mozilla_org_id)?; store.sign_attribute(&mut p.bugzilla_mozilla_org_primary_email)?; } if changed_usernames { u.metadata.last_modified = *now; u.signature.publisher.name = PublisherAuthority::Mozilliansorg; store.sign_attribute(u)?; } Ok(changed_bugzilla || changed_usernames) } fn update_github_identity( github: &IdentityWithDisplay, p: &mut IdentitiesAttributesValuesArray, u: &mut StandardAttributeValues, now: &DateTime<Utc>, store: &impl Signer, ) -> Result<bool, Error> { let mut changed_github = false; let mut changed_usernames = false; if github.remove.unwrap_or_default() { p.github_id_v3.metadata.display = Some(Display::Staff); p.github_id_v4.metadata.display = Some(Display::Staff); p.github_primary_email.metadata.display = Some(Display::Staff); if let Some(KeyValue(usernames)) = &mut u.values { if usernames.remove(&create_usernames_key("GITHUB")).is_some() { changed_usernames = true; } } p.github_id_v3.value = Some(String::default()); p.github_id_v4.value = Some(String::default()); p.github_primary_email.value = Some(String::default()); changed_github = true; } else if github.display != p.github_id_v3.metadata.display || github.display != p.github_id_v4.metadata.display || github.display != p.github_primary_email.metadata.display { if let Some(display) = &github.display { if !DISPLAY_NOT_PRIVATE.contains(display) { return Err(format_err!("invalid display level")); } if p.github_id_v3.value.is_none() { p.github_id_v3.value = Some(String::default()) } if p.github_id_v4.value.is_none() { p.github_id_v4.value = Some(String::default()) } if p.github_primary_email.value.is_none() { p.github_primary_email.value = Some(String::default()) } p.github_id_v3.metadata.display = Some(display.clone()); p.github_id_v4.metadata.display = Some(display.clone()); p.github_primary_email.metadata.display = Some(display.clone()); changed_github = true; } } if changed_github { p.github_id_v3.metadata.last_modified = *now; p.github_id_v4.metadata.last_modified = *now; p.github_primary_email.metadata.last_modified = now.to_owned(); p.github_id_v3.signature.publisher.name = PublisherAuthority::Mozilliansorg; p.github_id_v4.signature.publisher.name = PublisherAuthority::Mozilliansorg; p.github_primary_email.signature.publisher.name = PublisherAuthority::Mozilliansorg; store.sign_attribute(&mut p.github_id_v3)?; store.sign_attribute(&mut p.github_id_v4)?; store.sign_attribute(&mut p.github_primary_email)?; } if changed_usernames { u.metadata.last_modified = *now; u.signature.publisher.name = PublisherAuthority::Mozilliansorg; store.sign_attribute(u)?; } Ok(changed_github || changed_usernames) } fn update_identities( i: &Option<IdentitiesWithDisplay>, p: &mut IdentitiesAttributesValuesArray, u: &mut StandardAttributeValues, now: &DateTime<Utc>, store: &impl Signer, ) -> Result<bool, Error> { let mut changed = false; if let Some(identities) = i { if let Some(github) = &identities.github { changed |= update_github_identity(github, p, u, now, store)?; } if let Some(bugzilla) = &identities.bugzilla { changed |= update_bugzilla_identity(bugzilla, p, u, now, store)?; } if let Some(google) = &identities.google { changed |= update_google_identity(google, p, now, store)?; } } Ok(changed) } fn update_display_for_string( d: &Option<Display>, p: &mut StandardAttributeString, now: &DateTime<Utc>, store: &impl Signer, allowed: &[Display], ) -> Result<bool, Error> { let mut changed = false; if d != &p.metadata.display { if let Some(display) = &d { if !allowed.contains(display) { return 
Err(format_err!("invalid display level"));
            }
            // if display changed but field is null we cannot do anything
            if p.value.is_some() {
                p.metadata.display = Some(display.clone());
                changed = true;
            }
        }
    }
    if changed {
        p.metadata.last_modified = *now;
        p.signature.publisher.name = PublisherAuthority::Mozilliansorg;
        store.sign_attribute(p)?;
    }
    Ok(changed)
}

fn update_display_for_key_values(
    d: &Option<Display>,
    p: &mut StandardAttributeValues,
    now: &DateTime<Utc>,
    store: &impl Signer,
    allowed: &[Display],
) -> Result<bool, Error> {
    let mut changed = false;
    if d != &p.metadata.display {
        if let Some(display) = &d {
            if !allowed.contains(display) {
                return Err(format_err!("invalid display level"));
            }
            // if display changed but field is null we cannot do anything
            if p.values.is_some() {
                p.metadata.display = Some(display.clone());
                changed = true;
            }
        }
    }
    if changed {
        p.metadata.last_modified = *now;
        p.signature.publisher.name = PublisherAuthority::Mozilliansorg;
        store.sign_attribute(p)?;
    }
    Ok(changed)
}

fn update_string(
    s: &Option<StringWithDisplay>,
    p: &mut StandardAttributeString,
    now: &DateTime<Utc>,
    store: &impl Signer,
    allowed: &[Display],
) -> Result<bool, Error> {
    let mut changed = false;
    if let Some(x) = s {
        if x.value != p.value {
            if let Some(value) = &x.value {
                p.value = Some(value.clone());
                changed = true;
            }
        }
        if x.display != p.metadata.display {
            if let Some(display) = &x.display {
                if !allowed.contains(display) {
                    return Err(format_err!("invalid display level"));
                }
                // if display changed but field is null change it to empty string
                if p.value.is_none() {
                    p.value = Some(String::default());
                }
                p.metadata.display = Some(display.clone());
                changed = true;
            }
        }
        if changed {
            p.metadata.last_modified = *now;
            p.signature.publisher.name = PublisherAuthority::Mozilliansorg;
            store.sign_attribute(p)?;
        }
    }
    Ok(changed)
}

fn update_key_values(
    s: &Option<KeyValuesWithDisplay>,
    p: &mut StandardAttributeValues,
    now: &DateTime<Utc>,
    store: &impl Signer,
    filter_empty_values: bool,
    allowed: &[Display],
) -> Result<bool, Error> {
    let mut changed = false;
    if let Some(x) = s {
        if let Some(values) = &x.values {
            let values: BTreeMap<String, Option<String>> = if filter_empty_values {
                values
                    .iter()
                    .filter_map(|e| {
                        if e.v.as_ref().map(|s| s.is_empty()).unwrap_or_default() {
                            None
                        } else {
                            Some((e.k.clone(), e.v.clone()))
                        }
                    })
                    .collect()
            } else {
                values.iter().map(|e| (e.k.clone(), e.v.clone())).collect()
            };
            let kv = Some(KeyValue(values));
            if kv != p.values {
                p.values = kv;
                changed = true;
            }
        }
        if x.display != p.metadata.display {
            if let Some(display) = &x.display {
                if !allowed.contains(display) {
                    return Err(format_err!("invalid display level"));
                }
                // if display changed but field is null change it to empty dict
                if p.values.is_none() {
                    p.values = Some(KeyValue(BTreeMap::default()));
                }
                p.metadata.display = Some(display.clone());
                changed = true;
            }
        }
        if changed {
            p.metadata.last_modified = *now;
            p.signature.publisher.name = PublisherAuthority::Mozilliansorg;
            store.sign_attribute(p)?;
        }
    }
    Ok(changed)
}

#[derive(GraphQLInputObject, Default)]
pub struct BoolWithDisplay {
    pub display: Option<Display>,
    pub value: Option<bool>,
}

#[derive(GraphQLInputObject, Default)]
pub struct StringWithDisplay {
    pub display: Option<Display>,
    pub value: Option<String>,
}

#[derive(GraphQLInputObject, Default)]
pub struct KeyValueInput {
    pub k: String,
    pub v: Option<String>,
}

#[derive(GraphQLInputObject, Default)]
pub struct KeyValuesWithDisplay {
    pub display: Option<Display>,
    pub values: Option<Vec<KeyValueInput>>,
}

#[derive(GraphQLInputObject,
Default)] pub struct IdentityWithDisplay { pub remove: Option<bool>, pub display: Option<Display>, } #[derive(GraphQLInputObject, Default)] pub struct IdentitiesWithDisplay { pub github: Option<IdentityWithDisplay>, pub bugzilla: Option<IdentityWithDisplay>, pub google: Option<IdentityWithDisplay>, } #[derive(GraphQLInputObject, Default)] pub struct InputProfile { pub access_information_ldap_display: Option<Display>, // TODO: delete after upgrade pub access_information_mozilliansorg: Option<Display>, pub access_information_mozilliansorg_display: Option<Display>, pub active: Option<BoolWithDisplay>, pub alternative_name: Option<StringWithDisplay>, pub created: Option<StringWithDisplay>, pub custom_1_primary_email: Option<StringWithDisplay>, pub custom_2_primary_email: Option<StringWithDisplay>, pub description: Option<StringWithDisplay>, pub first_name: Option<StringWithDisplay>, pub fun_title: Option<StringWithDisplay>, pub identities: Option<IdentitiesWithDisplay>, pub languages: Option<KeyValuesWithDisplay>, pub last_modified: Option<StringWithDisplay>, pub last_name: Option<StringWithDisplay>, pub location: Option<StringWithDisplay>, pub login_method: Option<StringWithDisplay>, pub pgp_public_keys_display: Option<Display>, pub phone_numbers: Option<KeyValuesWithDisplay>, pub picture: Option<StringWithDisplay>, pub primary_email_display: Option<Display>, pub primary_username: Option<StringWithDisplay>, pub pronouns: Option<StringWithDisplay>, pub ssh_public_keys_display: Option<Display>, pub staff_information_title_display: Option<Display>, pub staff_information_office_location_display: Option<Display>, pub tags: Option<KeyValuesWithDisplay>, pub timezone: Option<StringWithDisplay>, pub uris: Option<KeyValuesWithDisplay>, pub user_id: Option<StringWithDisplay>, pub usernames: Option<KeyValuesWithDisplay>, } impl InputProfile { pub async fn update_profile( &self, p: &mut Profile, scope: &Trust, secret_store: &impl Signer, fossil_settings: &Fossil, ) -> Result<bool, Error> { let now = &Utc::now(); let mut changed = false; changed |= update_string( &self.alternative_name, &mut p.alternative_name, now, secret_store, DISPLAY_NOT_PRIVATE, )?; changed |= update_string( &self.created, &mut p.created, now, secret_store, DISPLAY_NOT_PRIVATE, )?; changed |= update_string( &self.custom_1_primary_email, &mut p.identities.custom_1_primary_email, now, secret_store, DISPLAY_ANY, )?; changed |= update_string( &self.custom_2_primary_email, &mut p.identities.custom_2_primary_email, now, secret_store, DISPLAY_ANY, )?; changed |= update_string( &self.description, &mut p.description, now, secret_store, DISPLAY_NOT_PRIVATE, )?; changed |= update_string( &self.first_name, &mut p.first_name, now, secret_store, DISPLAY_NOT_PRIVATE, )?; changed |= update_string( &self.fun_title, &mut p.fun_title, now, secret_store, DISPLAY_NOT_PRIVATE, )?; changed |= update_string( &self.last_modified, &mut p.last_modified, now, secret_store, DISPLAY_NOT_PRIVATE, )?; changed |= update_string( &self.last_name, &mut p.last_name, now, secret_store, DISPLAY_NOT_PRIVATE, )?; changed |= update_string( &self.location, &mut p.location, now, secret_store, DISPLAY_NOT_PRIVATE, )?; changed |= update_string( &self.login_method, &mut p.login_method, now, secret_store, DISPLAY_ANY, )?; changed |= update_picture( &self.picture, &mut p.picture, &p.uuid, now, secret_store, &fossil_settings, ) .await?; changed |= update_display_for_string( &self.primary_email_display, &mut p.primary_email, now, secret_store, if scope == &Trust::Staff { 
DISPLAY_NOT_PRIVATE } else { DISPLAY_ANY }, )?; changed |= update_string( &self.primary_username, &mut p.primary_username, now, secret_store, &[Display::Public], )?; changed |= update_string( &self.pronouns, &mut p.pronouns, now, secret_store, DISPLAY_NOT_PRIVATE, )?; changed |= update_string( &self.timezone, &mut p.timezone, now, secret_store, DISPLAY_NOT_PRIVATE, )?; changed |= update_string( &self.user_id, &mut p.user_id, now, secret_store, DISPLAY_NOT_PRIVATE, )?; changed |= update_key_values( &self.languages, &mut p.languages, now, secret_store, false, DISPLAY_NOT_PRIVATE, )?; changed |= update_key_values( &self.phone_numbers, &mut p.phone_numbers, now, secret_store, true, DISPLAY_ANY, )?; changed |= update_key_values( &self.tags, &mut p.tags, now, secret_store, false, DISPLAY_NOT_PRIVATE, )?; changed |= update_key_values( &self.usernames, &mut p.usernames, now, secret_store, true, DISPLAY_NOT_PRIVATE, )?; changed |= update_key_values( &self.uris, &mut p.uris, now, secret_store, true, DISPLAY_NOT_PRIVATE, )?; changed |= update_display_for_key_values( &self.pgp_public_keys_display, &mut p.pgp_public_keys, now, secret_store, DISPLAY_ANY, )?; changed |= update_display_for_key_values( &self.ssh_public_keys_display, &mut p.ssh_public_keys, now, secret_store, DISPLAY_ANY, )?; changed |= update_identities( &self.identities, &mut p.identities, &mut p.usernames, now, secret_store, )?; // TODO: delete after upgrade changed |= update_access_information_display( &self.access_information_mozilliansorg, &mut p.access_information.mozilliansorg, now, secret_store, DISPLAY_NOT_PRIVATE, )?; changed |= update_access_information_display( &self.access_information_mozilliansorg_display, &mut p.access_information.mozilliansorg, now, secret_store, DISPLAY_NOT_PRIVATE, )?; changed |= update_access_information_display( &self.access_information_ldap_display, &mut p.access_information.ldap, now, secret_store, DISPLAY_PRIVATE_STAFF, )?; changed |= update_display_for_string( &self.staff_information_title_display, &mut p.staff_information.title, now, secret_store, DISPLAY_NOT_PRIVATE, )?; changed |= update_display_for_string( &self.staff_information_office_location_display, &mut p.staff_information.office_location, now, secret_store, DISPLAY_NOT_PRIVATE, )?; Ok(changed) } } #[cfg(test)] mod test { use super::*; use cis_profile::crypto::SecretStore; use cis_profile::schema::Profile; fn get_fake_secret_store() -> SecretStore { let v = vec![( String::from("mozilliansorg"), String::from(include_str!("../../tests/data/fake_key.json")), )]; SecretStore::default() .with_sign_keys_from_inline_iter(v) .unwrap() } #[tokio::test] async fn test_simple_update() -> Result<(), Error> { let secret_store = get_fake_secret_store(); let fossil_settings = Fossil { upload_endpoint: String::default(), }; let mut p = Profile::default(); let mut update = InputProfile::default(); update.fun_title = Some(StringWithDisplay { value: Some(String::from("Pope")), display: None, }); assert_eq!(p.fun_title.value, None); update .update_profile(&mut p, &Trust::Staff, &secret_store, &fossil_settings) .await?; assert_eq!(p.fun_title.value, update.fun_title.unwrap().value); Ok(()) } #[tokio::test] async fn test_update_with_invalid_display_fails() -> Result<(), Error> { let secret_store = get_fake_secret_store(); let fossil_settings = Fossil { upload_endpoint: String::default(), }; let mut p = Profile::default(); let mut update = InputProfile::default(); update.fun_title = Some(StringWithDisplay { value: None, display: Some(Display::Private), }); 
assert_eq!(p.pronouns.value, None); assert_eq!(p.fun_title.value, None); assert_ne!(p.fun_title.metadata.display, Some(Display::Private)); assert!(update .update_profile(&mut p, &Trust::Staff, &secret_store, &fossil_settings) .await .is_err()); Ok(()) } #[tokio::test] async fn test_update_display_only_with_null_value_string() -> Result<(), Error> { let secret_store = get_fake_secret_store(); let fossil_settings = Fossil { upload_endpoint: String::default(), }; let mut p = Profile::default(); let mut update = InputProfile::default(); update.fun_title = Some(StringWithDisplay { value: None, display: Some(Display::Vouched), }); assert_eq!(p.pronouns.value, None); assert_eq!(p.fun_title.value, None); assert_ne!(p.fun_title.metadata.display, Some(Display::Vouched)); update .update_profile(&mut p, &Trust::Staff, &secret_store, &fossil_settings) .await?; assert_eq!(p.pronouns.value, None); assert_eq!(p.fun_title.value, Some(String::default())); assert_eq!(p.fun_title.metadata.display, Some(Display::Vouched)); Ok(()) } #[tokio::test] async fn test_update_display_only_with_null_value_kv() -> Result<(), Error> { let secret_store = get_fake_secret_store(); let fossil_settings = Fossil { upload_endpoint: String::default(), }; let mut p = Profile::default(); let mut update = InputProfile::default(); update.languages = Some(KeyValuesWithDisplay { values: None, display: Some(Display::Vouched), }); assert_eq!(p.tags.values, None); assert_eq!(p.languages.values, None); assert_ne!(p.languages.metadata.display, Some(Display::Vouched)); update .update_profile(&mut p, &Trust::Staff, &secret_store, &fossil_settings) .await?; assert_eq!(p.tags.values, None); assert_eq!(p.languages.values, Some(Default::default())); assert_eq!(p.languages.metadata.display, Some(Display::Vouched)); Ok(()) } #[tokio::test] async fn test_update_access_information_display_initializes_groups() -> Result<(), Error> { let secret_store = get_fake_secret_store(); let fossil_settings = Fossil { upload_endpoint: String::default(), }; let mut p = Profile::default(); let update = InputProfile { access_information_mozilliansorg_display: Some(Display::Ndaed), ..Default::default() }; assert_eq!(p.access_information.mozilliansorg.values, None); assert_ne!( p.access_information.mozilliansorg.metadata.display, Some(Display::Ndaed) ); update .update_profile(&mut p, &Trust::Staff, &secret_store, &fossil_settings) .await?; assert_eq!( p.access_information.mozilliansorg.values, Some(KeyValue(BTreeMap::default())) ); assert_eq!( p.access_information.mozilliansorg.metadata.display, Some(Display::Ndaed) ); Ok(()) } #[tokio::test] async fn test_update_access_information_display_keeps_groups() -> Result<(), Error> { let secret_store = get_fake_secret_store(); let fossil_settings = Fossil { upload_endpoint: String::default(), }; let mut groups = BTreeMap::new(); groups.insert(String::from("Something"), None); let mut p = Profile::default(); p.access_information.mozilliansorg.values = Some(KeyValue(groups.clone())); p.access_information.mozilliansorg.metadata.display = Some(Display::Private); let update = InputProfile { access_information_mozilliansorg_display: Some(Display::Ndaed), ..Default::default() }; update .update_profile(&mut p, &Trust::Staff, &secret_store, &fossil_settings) .await?; assert_eq!( p.access_information.mozilliansorg.values, Some(KeyValue(groups)) ); assert_eq!( p.access_information.mozilliansorg.metadata.display, Some(Display::Ndaed) ); Ok(()) } }
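
// A minimal extra sketch (an editor's addition, hedged): it exercises
// `update_key_values` directly with `filter_empty_values = true`, the mode
// `update_profile` uses for phone numbers, usernames, and URIs. The fake key
// fixture mirrors the tests above; everything else comes from this file.
#[cfg(test)]
mod test_key_value_filtering_sketch {
    use super::*;
    use cis_profile::crypto::SecretStore;

    #[test]
    fn empty_values_are_dropped() -> Result<(), Error> {
        let v = vec![(
            String::from("mozilliansorg"),
            String::from(include_str!("../../tests/data/fake_key.json")),
        )];
        let store = SecretStore::default()
            .with_sign_keys_from_inline_iter(v)
            .unwrap();
        let now = Utc::now();
        let mut p = Profile::default();
        let update = Some(KeyValuesWithDisplay {
            display: Some(Display::Staff),
            values: Some(vec![
                KeyValueInput {
                    k: String::from("kept"),
                    v: Some(String::from("1")),
                },
                KeyValueInput {
                    k: String::from("dropped"),
                    v: Some(String::new()),
                },
            ]),
        });
        // `true` requests filtering of empty values.
        let changed =
            update_key_values(&update, &mut p.tags, &now, &store, true, DISPLAY_NOT_PRIVATE)?;
        assert!(changed);
        let KeyValue(values) = p.tags.values.clone().unwrap();
        assert!(values.contains_key("kept"));
        assert!(!values.contains_key("dropped"));
        Ok(())
    }
}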
33.709446
96
0.575671
7935f3341ec39f73087639424e56ed7adc7eaa5d
1,693
// // Copyright 2016 Andrew Hunter // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // //! //! A library for working with deterministic and non-deterministic finite-state automata. //! #[macro_use] extern crate serde; #[macro_use] extern crate serde_derive; pub use self::countable::*; pub use self::symbol_range::*; pub use self::symbol_reader::*; pub use self::state_machine::*; pub use self::pattern_matcher::*; pub use self::ndfa::*; pub use self::regular_pattern::*; pub use self::regular_expression::*; pub use self::dfa_builder::*; pub use self::symbol_range_dfa::*; pub use self::dfa_compiler::*; pub use self::prepare::*; pub use self::matches::*; pub use self::tape::*; pub use self::split_reader::*; pub use self::tokenizer::*; pub use self::tagged_stream::*; pub mod countable; pub mod symbol_range; pub mod symbol_reader; pub mod state_machine; pub mod overlapping_symbols; pub mod pattern_matcher; pub mod ndfa; pub mod regular_pattern; pub mod regular_expression; pub mod dfa_builder; pub mod symbol_range_dfa; pub mod dfa_compiler; pub mod prepare; pub mod matches; pub mod tape; pub mod split_reader; pub mod tokenizer; pub mod tagged_stream;
28.216667
89
0.737153
16da728262ddb695fbd1b9571c1e59d85351b699
18,932
//! Abstractions for asynchronous programming.
//!
//! This crate provides a number of core abstractions for writing asynchronous
//! code:
//!
//! - [Futures](crate::future::Future) are single eventual values produced by
//!   asynchronous computations. Some programming languages (e.g. JavaScript)
//!   call this concept "promise".
//! - [Streams](crate::stream::Stream) represent a series of values
//!   produced asynchronously.
//! - [Sinks](crate::sink::Sink) provide support for asynchronous writing of
//!   data.
//! - [Executors](crate::executor) are responsible for running asynchronous
//!   tasks.
//!
//! The crate also contains abstractions for [asynchronous I/O](crate::io) and
//! [cross-task communication](crate::channel).
//!
//! Underlying all of this is the *task system*, which is a form of lightweight
//! threading. Large asynchronous computations are built up using futures,
//! streams and sinks, and then spawned as independent tasks that are run to
//! completion, but *do not block* the thread running them.

#![cfg_attr(feature = "cfg-target-has-atomic", feature(cfg_target_has_atomic))]
#![cfg_attr(not(feature = "std"), no_std)]
#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms, unreachable_pub)]
// It cannot be included in the published code because these lints have false positives in the minimum required version.
#![cfg_attr(test, warn(single_use_lifetimes))]
#![warn(clippy::all)]
#![doc(test(attr(deny(warnings), allow(dead_code, unused_assignments, unused_variables))))]
#![doc(html_root_url = "https://rust-lang-nursery.github.io/futures-api-docs/0.3.0-alpha.18/futures")]

#[cfg(all(feature = "cfg-target-has-atomic", not(feature = "nightly")))]
compile_error!("The `cfg-target-has-atomic` feature requires the `nightly` feature as an explicit opt-in to unstable features");

#[doc(hidden)]
pub use futures_core::core_reexport;

#[doc(hidden)]
pub use futures_core::future::Future;
#[doc(hidden)]
pub use futures_core::future::TryFuture;
#[doc(hidden)]
pub use futures_util::future::FutureExt;
#[doc(hidden)]
pub use futures_util::try_future::TryFutureExt;

#[doc(hidden)]
pub use futures_core::stream::Stream;
#[doc(hidden)]
pub use futures_core::stream::TryStream;
#[doc(hidden)]
pub use futures_util::stream::StreamExt;
#[doc(hidden)]
pub use futures_util::try_stream::TryStreamExt;

#[doc(hidden)]
pub use futures_sink::Sink;
#[doc(hidden)]
pub use futures_util::sink::SinkExt;

#[cfg(feature = "std")]
#[doc(hidden)]
pub use futures_io::{AsyncRead, AsyncWrite, AsyncSeek, AsyncBufRead};
#[cfg(feature = "std")]
#[doc(hidden)]
pub use futures_util::{AsyncReadExt, AsyncWriteExt, AsyncSeekExt, AsyncBufReadExt};

#[doc(hidden)]
pub use futures_core::task::Poll;
#[doc(hidden)]
pub use futures_core::never::Never;

// Macro reexports
pub use futures_core::ready; // Readiness propagation
pub use futures_util::pin_mut;
#[cfg(feature = "std")]
#[cfg(feature = "async-await")]
pub use futures_util::{pending, poll}; // Async-await

#[cfg_attr(
    feature = "cfg-target-has-atomic",
    cfg(all(target_has_atomic = "cas", target_has_atomic = "ptr"))
)]
#[cfg(feature = "alloc")]
pub mod channel {
    //! Cross-task communication.
    //!
    //! Like threads, concurrent tasks sometimes need to communicate with each
    //! other. This module contains two basic abstractions for doing so:
    //!
    //! - [oneshot](crate::channel::oneshot), a way of sending a single value
    //!   from one task to another.
    //! - [mpsc](crate::channel::mpsc), a multi-producer, single-consumer
    //!   channel for sending values between tasks, analogous to the
    //!   similarly-named structure in the standard library.
    //!
    //! This module is only available when the `std` or `alloc` feature of this
    //! library is activated, and it is activated by default.

    pub use futures_channel::oneshot;

    #[cfg(feature = "std")]
    pub use futures_channel::mpsc;
}

#[cfg(feature = "compat")]
pub mod compat {
    //! Interop between `futures` 0.1 and 0.3.
    //!
    //! This module is only available when the `compat` feature of this
    //! library is activated.

    pub use futures_util::compat::{
        Compat,
        CompatSink,
        Compat01As03,
        Compat01As03Sink,
        Executor01Future,
        Executor01As03,
        Executor01CompatExt,
        Future01CompatExt,
        Stream01CompatExt,
        Sink01CompatExt,
    };

    #[cfg(feature = "io-compat")]
    pub use futures_util::compat::{
        AsyncRead01CompatExt,
        AsyncWrite01CompatExt,
    };
}

#[cfg(feature = "std")]
pub mod executor {
    //! Task execution.
    //!
    //! All asynchronous computation occurs within an executor, which is
    //! capable of spawning futures as tasks. This module provides several
    //! built-in executors, as well as tools for building your own.
    //!
    //! This module is only available when the `std` feature of this
    //! library is activated, and it is activated by default.
    //!
    //! # Using a thread pool (M:N task scheduling)
    //!
    //! Most of the time tasks should be executed on a [thread
    //! pool](crate::executor::ThreadPool). A small set of worker threads can
    //! handle a very large set of spawned tasks (which are much lighter weight
    //! than threads).
    //!
    //! The simplest way to use a thread pool is to
    //! [`run`](crate::executor::ThreadPool::run) an initial task on it, which
    //! can then spawn further tasks back onto the pool to complete its work:
    //!
    //! ```
    //! use futures::executor::ThreadPool;
    //! # use futures::future::lazy;
    //! # let my_app = lazy(|_| 42);
    //!
    //! // assuming `my_app: Future`
    //! ThreadPool::new().expect("Failed to create threadpool").run(my_app);
    //! ```
    //!
    //! The call to [`run`](crate::executor::ThreadPool::run) will block the
    //! current thread until the future defined by `my_app` completes, and will
    //! return the result of that future.
    //!
    //! # Spawning additional tasks
    //!
    //! Tasks can be spawned onto a spawner by calling its
    //! [`spawn_obj`](crate::task::Spawn::spawn_obj) method directly.
    //! In the case of `!Send` futures,
    //! [`spawn_local_obj`](crate::task::LocalSpawn::spawn_local_obj)
    //! can be used instead.
    //!
    //! # Single-threaded execution
    //!
    //! In addition to thread pools, it's possible to run a task (and the tasks
    //! it spawns) entirely within a single thread via the
    //! [`LocalPool`](crate::executor::LocalPool) executor. Aside from cutting
    //! down on synchronization costs, this executor also makes it possible to
    //! spawn non-`Send` tasks, via
    //! [`spawn_local_obj`](crate::task::LocalSpawn::spawn_local_obj).
    //! The `LocalPool` is best suited for running I/O-bound tasks that do
    //! relatively little work between I/O operations.
    //!
    //! There is also a convenience function,
    //! [`block_on`](crate::executor::block_on), for simply running a future to
    //! completion on the current thread, while routing any spawned tasks
    //! to a global thread pool.
    pub use futures_executor::{
        BlockingStream,
        Enter, EnterError,
        LocalSpawner, LocalPool,
        ThreadPool, ThreadPoolBuilder,
        block_on, block_on_stream, enter,
    };
}

pub mod future {
    //! Asynchronous values.
    //!
    //! This module contains:
    //!
    //! - The [`Future` trait](crate::future::Future).
    //! - The [`FutureExt`](crate::future::FutureExt) trait, which provides
    //!   adapters for chaining and composing futures.
    //! - Top-level future combinators like [`lazy`](crate::future::lazy) which
    //!   creates a future from a closure that defines its return value, and
    //!   [`ready`](crate::future::ready), which constructs a future with an
    //!   immediate defined value.

    pub use futures_core::future::{
        Future, TryFuture, FusedFuture,
        FutureObj, LocalFutureObj, UnsafeFutureObj,
    };

    #[cfg(feature = "alloc")]
    pub use futures_core::future::{BoxFuture, LocalBoxFuture};

    pub use futures_util::future::{
        lazy, Lazy,
        maybe_done, MaybeDone,
        pending, Pending,
        poll_fn, PollFn,
        ready, ok, err, Ready,
        select, Select,
        join, join3, join4, join5,
        Join, Join3, Join4, Join5,
        Either,
        OptionFuture,
        FutureExt,
        FlattenStream, Flatten, Fuse, Inspect, IntoStream, Map, Then, UnitError,
        NeverError,
    };

    #[cfg(feature = "alloc")]
    pub use futures_util::future::{
        join_all, JoinAll,
        select_all, SelectAll,
    };

    #[cfg_attr(
        feature = "cfg-target-has-atomic",
        cfg(all(target_has_atomic = "cas", target_has_atomic = "ptr"))
    )]
    #[cfg(feature = "alloc")]
    pub use futures_util::future::{
        abortable, Abortable, AbortHandle, AbortRegistration, Aborted,
    };

    #[cfg(feature = "std")]
    pub use futures_util::future::{
        Remote, RemoteHandle,
        // For FutureExt:
        CatchUnwind, Shared,
    };

    pub use futures_util::try_future::{
        try_join, try_join3, try_join4, try_join5,
        TryJoin, TryJoin3, TryJoin4, TryJoin5,
        try_select, TrySelect,
        TryFutureExt,
        AndThen, ErrInto, FlattenSink, IntoFuture, MapErr, MapOk, OrElse,
        InspectOk, InspectErr, TryFlattenStream, UnwrapOrElse,
    };

    #[cfg(feature = "alloc")]
    pub use futures_util::try_future::{
        try_join_all, TryJoinAll,
        select_ok, SelectOk,
    };
}

#[cfg(feature = "std")]
pub mod io {
    //! Asynchronous I/O.
    //!
    //! This module is the asynchronous version of `std::io`. It defines four
    //! traits, [`AsyncRead`](crate::io::AsyncRead),
    //! [`AsyncWrite`](crate::io::AsyncWrite),
    //! [`AsyncSeek`](crate::io::AsyncSeek), and
    //! [`AsyncBufRead`](crate::io::AsyncBufRead), which mirror the `Read`,
    //! `Write`, `Seek`, and `BufRead` traits of the standard library. However,
    //! these traits integrate
    //! with the asynchronous task system, so that if an I/O object isn't ready
    //! for reading (or writing), the thread is not blocked, and instead the
    //! current task is queued to be woken when I/O is ready.
    //!
    //! In addition, the [`AsyncReadExt`](crate::io::AsyncReadExt),
    //! [`AsyncWriteExt`](crate::io::AsyncWriteExt),
    //! [`AsyncSeekExt`](crate::io::AsyncSeekExt), and
    //! [`AsyncBufReadExt`](crate::io::AsyncBufReadExt) extension traits offer a
    //! variety of useful combinators for operating with asynchronous I/O
    //! objects, including ways to work with them using futures, streams and
    //! sinks.
    //!
    //! This module is only available when the `std` feature of this
    //! library is activated, and it is activated by default.

    pub use futures_io::{
        AsyncRead, AsyncWrite, AsyncSeek, AsyncBufRead, Error, ErrorKind,
        Initializer, IoSlice, IoSliceMut, Result, SeekFrom,
    };

    pub use futures_util::io::{
        AsyncReadExt, AsyncWriteExt, AsyncSeekExt, AsyncBufReadExt, AllowStdIo,
        BufReader, BufWriter, Chain, Close, CopyInto, CopyBufInto, Flush,
        IntoSink, Lines, Read, ReadExact, ReadHalf, ReadLine, ReadToEnd,
        ReadToString, ReadUntil, ReadVectored, Seek, Take, Window, Write,
        WriteAll, WriteHalf, WriteVectored,
    };
}

#[cfg(feature = "std")]
pub mod lock {
    //! Futures-powered synchronization primitives.
    //!
    //! This module is only available when the `std` feature of this
    //! library is activated, and it is activated by default.

    pub use futures_util::lock::{Mutex, MutexLockFuture, MutexGuard};
}

pub mod prelude {
    //! A "prelude" for crates using the `futures` crate.
    //!
    //! This prelude is similar to the standard library's prelude in that you'll
    //! almost always want to import its entire contents, but unlike the
    //! standard library's prelude you'll have to do so manually:
    //!
    //! ```
    //! # #[allow(unused_imports)]
    //! use futures::prelude::*;
    //! ```
    //!
    //! The prelude may grow over time as additional items see ubiquitous use.

    pub use crate::future::{self, Future, TryFuture};
    pub use crate::stream::{self, Stream, TryStream};
    pub use crate::sink::{self, Sink};

    #[doc(no_inline)]
    pub use crate::future::{FutureExt as _, TryFutureExt as _};
    #[doc(no_inline)]
    pub use crate::stream::{StreamExt as _, TryStreamExt as _};
    #[doc(no_inline)]
    pub use crate::sink::SinkExt as _;

    #[cfg(feature = "std")]
    pub use crate::io::{
        AsyncRead, AsyncWrite, AsyncSeek, AsyncBufRead,
    };
    #[cfg(feature = "std")]
    #[doc(no_inline)]
    pub use crate::io::{
        AsyncReadExt as _, AsyncWriteExt as _, AsyncSeekExt as _,
        AsyncBufReadExt as _,
    };
}

pub mod sink {
    //! Asynchronous sinks.
    //!
    //! This module contains:
    //!
    //! - The [`Sink` trait](crate::sink::Sink), which allows you to
    //!   asynchronously write data.
    //! - The [`SinkExt`](crate::sink::SinkExt) trait, which provides adapters
    //!   for chaining and composing sinks.

    pub use futures_sink::Sink;

    pub use futures_util::sink::{
        Close, Flush, Send, SendAll, SinkErrInto, SinkMapErr, With,
        SinkExt, Fanout, Drain, drain,
        WithFlatMap,
    };

    #[cfg(feature = "alloc")]
    pub use futures_util::sink::Buffer;
}

pub mod stream {
    //! Asynchronous streams.
    //!
    //! This module contains:
    //!
    //! - The [`Stream` trait](crate::stream::Stream), for objects that can
    //!   asynchronously produce a sequence of values.
    //! - The [`StreamExt`](crate::stream::StreamExt) trait, which provides
    //!   adapters for chaining and composing streams.
    //! - Top-level stream constructors like [`iter`](crate::stream::iter)
    //!   which creates a stream from an iterator.
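    //!
    //! A short usage sketch (assuming the default `std` feature, so that
    //! `block_on` from the executor module is available):
    //!
    //! ```
    //! use futures::executor::block_on;
    //! use futures::stream::{self, StreamExt};
    //!
    //! let doubled: Vec<i32> = block_on(stream::iter(vec![1, 2, 3]).map(|x| x * 2).collect());
    //! assert_eq!(doubled, vec![2, 4, 6]);
    //! ```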
pub use futures_core::stream::{ Stream, TryStream, FusedStream, }; #[cfg(feature = "alloc")] pub use futures_core::stream::{BoxStream, LocalBoxStream}; pub use futures_util::stream::{ iter, Iter, repeat, Repeat, empty, Empty, pending, Pending, once, Once, poll_fn, PollFn, select, Select, unfold, Unfold, StreamExt, Chain, Collect, Concat, Enumerate, Filter, FilterMap, Flatten, Fold, Forward, ForEach, Fuse, StreamFuture, Inspect, Map, Next, SelectNextSome, Peekable, Skip, SkipWhile, Take, TakeWhile, Then, Zip }; #[cfg(feature = "alloc")] pub use futures_util::stream::{ // For StreamExt: Chunks, }; #[cfg_attr( feature = "cfg-target-has-atomic", cfg(all(target_has_atomic = "cas", target_has_atomic = "ptr")) )] #[cfg(feature = "alloc")] pub use futures_util::stream::{ FuturesOrdered, futures_unordered, FuturesUnordered, // For StreamExt: BufferUnordered, Buffered, ForEachConcurrent, SplitStream, SplitSink, ReuniteError, select_all, SelectAll, }; #[cfg(feature = "std")] pub use futures_util::stream::{ // For StreamExt: CatchUnwind, }; pub use futures_util::try_stream::{ TryStreamExt, AndThen, ErrInto, MapOk, MapErr, OrElse, InspectOk, InspectErr, TryNext, TryForEach, TryFilter, TryFilterMap, TryFlatten, TryCollect, TryConcat, TryFold, TrySkipWhile, IntoStream, }; #[cfg_attr( feature = "cfg-target-has-atomic", cfg(all(target_has_atomic = "cas", target_has_atomic = "ptr")) )] #[cfg(feature = "alloc")] pub use futures_util::try_stream::{ // For TryStreamExt: TryBufferUnordered, TryForEachConcurrent, }; #[cfg(feature = "std")] pub use futures_util::try_stream::IntoAsyncRead; } pub mod task { //! Tools for working with tasks. //! //! This module contains: //! //! - [`Spawn`](crate::task::Spawn), a trait for spawning new tasks. //! - [`Context`](crate::task::Context), a context of an asynchronous task, //! including a handle for waking up the task. //! - [`Waker`](crate::task::Waker), a handle for waking up a task. //! //! The remaining types and traits in the module are used for implementing //! executors or dealing with synchronization issues around task wakeup. pub use futures_core::task::{ Context, Poll, Spawn, LocalSpawn, SpawnError, Waker, RawWaker, RawWakerVTable }; pub use futures_util::task::noop_waker; #[cfg(feature = "std")] pub use futures_util::task::noop_waker_ref; #[cfg(feature = "alloc")] pub use futures_util::task::{SpawnExt, LocalSpawnExt}; #[cfg_attr( feature = "cfg-target-has-atomic", cfg(all(target_has_atomic = "cas", target_has_atomic = "ptr")) )] #[cfg(feature = "alloc")] pub use futures_util::task::{waker, waker_ref, WakerRef, ArcWake}; #[cfg_attr( feature = "cfg-target-has-atomic", cfg(all(target_has_atomic = "cas", target_has_atomic = "ptr")) )] pub use futures_util::task::AtomicWaker; } pub mod never { //! This module contains the `Never` type. //! //! Values of this type can never be created and will never exist. pub use futures_core::never::Never; } // proc-macro re-export -------------------------------------- #[cfg(feature = "std")] #[cfg(feature = "async-await")] #[doc(hidden)] pub use futures_util::rand_reexport; #[cfg(feature = "std")] #[cfg(feature = "async-await")] #[doc(hidden)] pub mod inner_macro { pub use futures_util::join; pub use futures_util::try_join; pub use futures_util::select; } #[cfg(feature = "std")] #[cfg(feature = "async-await")] futures_util::document_join_macro! { #[macro_export] macro_rules! join { // replace `::futures_util` with `::futures` as the crate path ($($tokens:tt)*) => { $crate::inner_macro::join! 
{ futures_crate_path ( ::futures ) $( $tokens )* } } } #[macro_export] macro_rules! try_join { // replace `::futures_util` with `::futures` as the crate path ($($tokens:tt)*) => { $crate::inner_macro::try_join! { futures_crate_path ( ::futures ) $( $tokens )* } } } } #[cfg(feature = "std")] #[cfg(feature = "async-await")] futures_util::document_select_macro! { #[macro_export] macro_rules! select { // replace `::futures_util` with `::futures` as the crate path ($($tokens:tt)*) => { $crate::inner_macro::select! { futures_crate_path ( ::futures ) $( $tokens )* } } } }
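
// A hedged sketch of how the re-exported `join!` macro reads in user code
// once the crate path has been rewritten to `::futures` (assumes the `std`
// and `async-await` features, matching the cfg gates above):
//
//     use futures::{executor::block_on, join};
//
//     block_on(async {
//         let (a, b) = join!(async { 1 }, async { 2 });
//         assert_eq!((a, b), (1, 2));
//     });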
33.389771
128
0.633689
dedfb3879490fb47e85697aec9dc49fd563d8545
7,482
use crate::runtime::task::RawTask; use std::fmt; use std::future::Future; use std::marker::PhantomData; use std::pin::Pin; use std::task::{Context, Poll}; cfg_rt! { /// An owned permission to join on a task (await its termination). /// /// This can be thought of as the equivalent of [`std::thread::JoinHandle`] for /// a task rather than a thread. /// /// A `JoinHandle` *detaches* the associated task when it is dropped, which /// means that there is no longer any handle to the task, and no way to `join` /// on it. /// /// This `struct` is created by the [`task::spawn`] and [`task::spawn_blocking`] /// functions. /// /// # Examples /// /// Creation from [`task::spawn`]: /// /// ``` /// use tokio::task; /// /// # async fn doc() { /// let join_handle: task::JoinHandle<_> = task::spawn(async { /// // some work here /// }); /// # } /// ``` /// /// Creation from [`task::spawn_blocking`]: /// /// ``` /// use tokio::task; /// /// # async fn doc() { /// let join_handle: task::JoinHandle<_> = task::spawn_blocking(|| { /// // some blocking work here /// }); /// # } /// ``` /// /// The generic parameter `T` in `JoinHandle<T>` is the return type of the spawned task. /// If the return value is an i32, the join handle has type `JoinHandle<i32>`: /// /// ``` /// use tokio::task; /// /// # async fn doc() { /// let join_handle: task::JoinHandle<i32> = task::spawn(async { /// 5 + 3 /// }); /// # } /// /// ``` /// /// If the task does not have a return value, the join handle has type `JoinHandle<()>`: /// /// ``` /// use tokio::task; /// /// # async fn doc() { /// let join_handle: task::JoinHandle<()> = task::spawn(async { /// println!("I return nothing."); /// }); /// # } /// ``` /// /// Note that `handle.await` doesn't give you the return type directly. It is wrapped in a /// `Result` because panics in the spawned task are caught by Tokio. The `?` operator has /// to be double chained to extract the returned value: /// /// ``` /// use tokio::task; /// use std::io; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let join_handle: task::JoinHandle<Result<i32, io::Error>> = tokio::spawn(async { /// Ok(5 + 3) /// }); /// /// let result = join_handle.await??; /// assert_eq!(result, 8); /// Ok(()) /// } /// ``` /// /// If the task panics, the error is a [`JoinError`] that contains the panic: /// /// ``` /// use tokio::task; /// use std::io; /// use std::panic; /// /// #[tokio::main] /// async fn main() -> io::Result<()> { /// let join_handle: task::JoinHandle<Result<i32, io::Error>> = tokio::spawn(async { /// panic!("boom"); /// }); /// /// let err = join_handle.await.unwrap_err(); /// assert!(err.is_panic()); /// Ok(()) /// } /// /// ``` /// Child being detached and outliving its parent: /// /// ```no_run /// use tokio::task; /// use tokio::time; /// use std::time::Duration; /// /// # #[tokio::main] async fn main() { /// let original_task = task::spawn(async { /// let _detached_task = task::spawn(async { /// // Here we sleep to make sure that the first task returns before. /// time::sleep(Duration::from_millis(10)).await; /// // This will be called, even though the JoinHandle is dropped. /// println!("♫ Still alive ♫"); /// }); /// }); /// /// original_task.await.expect("The task being joined has panicked"); /// println!("Original task is joined."); /// /// // We make sure that the new task has time to run, before the main /// // task returns. 
    ///
    ///     time::sleep(Duration::from_millis(1000)).await;
    /// # }
    /// ```
    ///
    /// [`task::spawn`]: crate::task::spawn()
    /// [`task::spawn_blocking`]: crate::task::spawn_blocking
    /// [`std::thread::JoinHandle`]: std::thread::JoinHandle
    /// [`JoinError`]: crate::task::JoinError
    pub struct JoinHandle<T> {
        raw: Option<RawTask>,
        _p: PhantomData<T>,
    }
}

unsafe impl<T: Send> Send for JoinHandle<T> {}
unsafe impl<T: Send> Sync for JoinHandle<T> {}

impl<T> JoinHandle<T> {
    pub(super) fn new(raw: RawTask) -> JoinHandle<T> {
        JoinHandle {
            raw: Some(raw),
            _p: PhantomData,
        }
    }

    /// Abort the task associated with the handle.
    ///
    /// Awaiting a cancelled task might complete as usual if the task was
    /// already completed at the time it was cancelled, but most likely it
    /// will complete with an `Err(JoinError::Cancelled)`.
    ///
    /// ```rust
    /// use tokio::time;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let mut handles = Vec::new();
    ///
    ///     handles.push(tokio::spawn(async {
    ///         time::sleep(time::Duration::from_secs(10)).await;
    ///         true
    ///     }));
    ///
    ///     handles.push(tokio::spawn(async {
    ///         time::sleep(time::Duration::from_secs(10)).await;
    ///         false
    ///     }));
    ///
    ///     for handle in &handles {
    ///         handle.abort();
    ///     }
    ///
    ///     for handle in handles {
    ///         assert!(handle.await.unwrap_err().is_cancelled());
    ///     }
    /// }
    /// ```
    pub fn abort(&self) {
        if let Some(raw) = self.raw {
            raw.shutdown();
        }
    }
}

impl<T> Unpin for JoinHandle<T> {}

impl<T> Future for JoinHandle<T> {
    type Output = super::Result<T>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let mut ret = Poll::Pending;

        // Keep track of task budget
        let coop = ready!(crate::coop::poll_proceed(cx));

        // Raw should always be set. If it is not, this is due to polling after
        // completion
        let raw = self
            .raw
            .as_ref()
            .expect("polling after `JoinHandle` already completed");

        // Try to read the task output. If the task is not yet complete, the
        // waker is stored and is notified once the task does complete.
        //
        // The function must go via the vtable, which requires erasing generic
        // types. To do this, the function "return" is placed on the stack
        // **before** calling the function and is passed into the function using
        // `*mut ()`.
        //
        // Safety:
        //
        // The type of `T` must match the task's output type.
        unsafe {
            raw.try_read_output(&mut ret as *mut _ as *mut (), cx.waker());
        }

        if ret.is_ready() {
            coop.made_progress();
        }

        ret
    }
}

impl<T> Drop for JoinHandle<T> {
    fn drop(&mut self) {
        if let Some(raw) = self.raw.take() {
            if raw.header().state.drop_join_handle_fast().is_ok() {
                return;
            }

            raw.drop_join_handle_slow();
        }
    }
}

impl<T> fmt::Debug for JoinHandle<T>
where
    T: fmt::Debug,
{
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_struct("JoinHandle").finish()
    }
}
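
// A hedged illustration (not tokio internals) of the type-erased "out
// pointer" pattern that `poll` above describes: the callee writes its result
// through a `*mut ()` that actually points at a `Poll<T>` slot on the
// caller's stack, so no generic value has to cross the vtable boundary.
//
//     fn callee(out: *mut ()) {
//         // Safety: the caller guarantees `out` points at a `Poll<u32>`.
//         unsafe { *(out as *mut Poll<u32>) = Poll::Ready(7) };
//     }
//
//     let mut slot: Poll<u32> = Poll::Pending;
//     callee(&mut slot as *mut Poll<u32> as *mut ());
//     assert!(slot.is_ready());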
28.666667
94
0.509489
0ae8d9c24d1f9a338abfc11db0a1d1e943177d13
549
fn main() { let mut cfg = cc::Build::new(); cfg.include("../external/musashi"); cfg.include("../external/musashi/generated"); let files = &[ "../external/musashi/m68kcpu.c", "../external/musashi/m68kdasm.c", "../external/musashi/generated/m68kopac.c", "../external/musashi/generated/m68kopdm.c", "../external/musashi/generated/m68kopnz.c", "../external/musashi/generated/m68kops.c", ]; for f in files.iter() { cfg.file(*f); } cfg.compile("libmusashi.a"); }
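
// A hedged sketch of how the compiled static library is consumed from the
// main crate. The exact prototypes live in musashi's m68k.h; the two names
// below are illustrative assumptions and should be checked against that
// header before use:
//
//     extern "C" {
//         fn m68k_init();
//         fn m68k_execute(num_cycles: std::os::raw::c_int) -> std::os::raw::c_int;
//     }
//
//     unsafe {
//         m68k_init();
//         let _cycles_used = m68k_execute(100_000);
//     }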
24.954545
51
0.571949
751ad89ba1dc3fd29d654e077e51e9ef7101512f
1,514
use vcell::VolatileCell; #[doc = r" Register block"] #[repr(C)] pub struct RegisterBlock { #[doc = "0x00 - Control Register"] pub ctrl: CTRL, #[doc = "0x04 - Input Selection Register"] pub inputsel: INPUTSEL, #[doc = "0x08 - Status Register"] pub status: STATUS, #[doc = "0x0c - Interrupt Enable Register"] pub ien: IEN, #[doc = "0x10 - Interrupt Flag Register"] pub if_: IF, #[doc = "0x14 - Interrupt Flag Set Register"] pub ifs: IFS, #[doc = "0x18 - Interrupt Flag Clear Register"] pub ifc: IFC, } #[doc = "Control Register"] pub struct CTRL { register: VolatileCell<u32>, } #[doc = "Control Register"] pub mod ctrl; #[doc = "Input Selection Register"] pub struct INPUTSEL { register: VolatileCell<u32>, } #[doc = "Input Selection Register"] pub mod inputsel; #[doc = "Status Register"] pub struct STATUS { register: VolatileCell<u32>, } #[doc = "Status Register"] pub mod status; #[doc = "Interrupt Enable Register"] pub struct IEN { register: VolatileCell<u32>, } #[doc = "Interrupt Enable Register"] pub mod ien; #[doc = "Interrupt Flag Register"] pub struct IF { register: VolatileCell<u32>, } #[doc = "Interrupt Flag Register"] pub mod if_; #[doc = "Interrupt Flag Set Register"] pub struct IFS { register: VolatileCell<u32>, } #[doc = "Interrupt Flag Set Register"] pub mod ifs; #[doc = "Interrupt Flag Clear Register"] pub struct IFC { register: VolatileCell<u32>, } #[doc = "Interrupt Flag Clear Register"] pub mod ifc;
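
// A hedged usage sketch: taking a typed view of the peripheral through this
// `#[repr(C)]` layout. The base address below is a placeholder, not taken
// from any SVD; real code would obtain the block from the generated
// peripheral singleton rather than a raw pointer cast.
//
//     const PERIPH_BASE: usize = 0x4000_0000; // hypothetical base address
//     let block = unsafe { &*(PERIPH_BASE as *const RegisterBlock) };
//     let _ien = &block.ien; // Interrupt Enable Register at offset 0x10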
24.419355
51
0.657199
1ac1f27499987816e44105775bd842012bb79b59
2,847
/// Geometry2D - Definition of Line use crate::geometry2d::point::*; #[derive(Debug, Clone, Copy)] pub struct Line(pub Point, pub Point); impl Line { pub fn new(x: Point, y: Point) -> Self { assert!(x != y); Self(x, y) } pub fn distance_from(&self, p: Point) -> f64 { let u = p - self.0; let v = self.1 - self.0; (u.det(&v) / v.norm()).abs() } } impl std::cmp::PartialEq for Line { fn eq(&self, other: &Line) -> bool { let a = Point::zero(); let b = Point(1.0, 0.0); let c = Point(0.0, 1.0); let eps = 1e-6; for p in &[a, b, c] { if (self.distance_from(*p) - other.distance_from(*p)).abs() > eps { return false; } } true } } impl std::cmp::Eq for Line {} #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct LineSegment(pub Point, pub Point); impl LineSegment { pub fn new(x: Point, y: Point) -> Self { assert!(x != y); Self(x, y) } pub fn to_line(&self) -> Line { Line(self.0, self.1) } } impl std::ops::Neg for LineSegment { type Output = Self; fn neg(self) -> Self { Self(self.1, self.0) } } #[macro_export] macro_rules! line { ($x0:expr, $y0:expr; $x1:expr, $y1:expr) => { Line::new(Point($x0, $y0), Point($x1, $y1)) }; ($x0:expr, $y0:expr => $x1:expr, $y1:expr) => { LineSegment::new(Point($x0, $y0), Point($x1, $y1)) }; ($a:expr; $b:expr) => { Line::new($a, $b) }; ($a:expr => $b:expr) => { LineSegment::new($a, $b) }; } #[cfg(test)] mod test_line { use crate::geometry2d::line::*; #[test] fn use_macro() { assert_eq!( line!(0.0, 0.0; 1.0, 1.0), Line::new(Point(0.0, 0.0), Point(1.0, 1.0)) ); assert_eq!( line!(0.0, 0.0 => 1.0, 1.0), LineSegment::new(Point(0.0, 0.0), Point(1.0, 1.0)) ); let p = Point(1.0, 2.0); let q = Point(2.0, -1.0); assert_eq!(line!(p; q), Line::new(p, q)); assert_eq!(line!(p => q), LineSegment::new(p, q)); } #[test] fn line_equality() { assert_eq!(line!(0.0, 0.0; 1.0, 1.0), line!(2.0, 2.0; -1.0, -1.0),); assert_eq!(line!(0.0, 0.0; 1.0, 1.0), line!(1.0, 1.0; 2.0, 2.0),); assert_ne!(line!(0.0, 0.0; 1.0, 1.0), line!(1.0, 1.0; 2.0, 2.01),); } #[test] fn line_segment_equality() { assert_eq!(line!(0.0, 0.0 => 1.0, 1.0), line!(0.0, 0.0 => 1.0, 1.0),); assert_ne!(line!(0.0, 0.0 => 1.0, 1.0), line!(0.0, 0.0 => 1.0, 1.01),); assert_ne!(line!(0.0, 1.0 => 1.0, 0.0), line!(1.0, 0.0 => 0.0, 1.0),); } #[test] fn arithmetic() { assert_eq!(line!(0.0, 1.0 => 1.0, 0.0), -line!(1.0, 0.0 => 0.0, 1.0),); } }
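
// A small added sketch (hedged): worked examples of the point-to-line
// distance formula |u x v| / |v| implemented by `Line::distance_from` above,
// using only items defined in this crate.
#[cfg(test)]
mod test_distance {
    use crate::geometry2d::line::*;
    use crate::geometry2d::point::*;

    #[test]
    fn distance_from_x_axis() {
        // Line through (0, 0) and (1, 0), i.e. the x-axis.
        let l = line!(0.0, 0.0; 1.0, 0.0);
        assert!((l.distance_from(Point(0.0, 1.0)) - 1.0).abs() < 1e-9);
        assert!((l.distance_from(Point(5.0, -2.0)) - 2.0).abs() < 1e-9);
        assert!(l.distance_from(Point(3.0, 0.0)).abs() < 1e-9);
    }
}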
26.361111
79
0.465402
eb82fee6e5e9ad16f688ed3a583d7818247598d1
294
use ink_lang as ink; #[ink::contract] mod noop { #[ink(storage)] pub struct Noop {} impl Noop { #[ink(constructor)] pub fn self_mut_arg(&mut self) -> Self { Self {} } #[ink(message)] pub fn noop(&self) {} } } fn main() {}
14.7
48
0.47619
01d82b247140161ccf1c669794dc5bbff5e2375d
25,135
// Copyright 2019 The Fuchsia Authors. All rights reserved. // // Use of this source code is governed by a BSD-style license that can be // // found in the LICENSE file. use { anyhow::Error, diagnostics_data::InspectData, diagnostics_hierarchy::{self, hierarchy, DiagnosticsHierarchy, InspectHierarchyMatcher}, difference::{ self, Difference::{Add, Rem, Same}, }, fidl_fuchsia_diagnostics::Selector, selectors, std::cmp::{max, min}, std::collections::HashSet, std::convert::TryInto, std::fs::read_to_string, std::io::{stdin, stdout, Write}, std::path::PathBuf, std::sync::Arc, structopt::StructOpt, termion::{ cursor, event::{Event, Key}, input::TermRead, raw::IntoRawMode, }, }; #[derive(Debug, StructOpt)] struct Options { #[structopt(short, long, help = "Inspect JSON file to read")] snapshot: String, #[structopt(subcommand)] command: Command, } #[derive(Debug, StructOpt)] enum Command { #[structopt(name = "generate")] Generate { #[structopt( short, name = "component", help = "Generate selectors for only this component" )] component_name: Option<String>, #[structopt(help = "The output file to generate")] selector_file: String, }, #[structopt(name = "apply")] Apply { #[structopt( short, name = "component", help = "Apply selectors from the provided selector_file for only this component" )] component_name: Option<String>, #[structopt(help = "The selector file to apply to the snapshot")] selector_file: String, }, } #[derive(Debug)] struct Line { value: String, removed: bool, } impl Line { fn new(s: impl ToString) -> Self { Self { value: s.to_string(), removed: false } } fn removed(s: impl ToString) -> Self { Self { value: s.to_string(), removed: true } } fn len(&self) -> usize { self.value.len() } } struct Output { lines: Vec<Line>, offset_top: usize, offset_left: usize, max_line_len: usize, filter_removed: bool, } impl Output { fn new(lines: Vec<Line>) -> Self { let max_line_len = lines.iter().map(|l| l.len()).max().unwrap_or(0); Output { lines, offset_top: 0, offset_left: 0, max_line_len, filter_removed: false } } fn set_lines(&mut self, lines: Vec<Line>) { self.max_line_len = lines.iter().map(|l| l.len()).max().unwrap_or(0); self.lines = lines; self.scroll(0, 0); } fn max_lines() -> i64 { let (_, h) = termion::terminal_size().unwrap(); h as i64 - 2 // Leave 2 lines for info. } fn refresh(&self, stdout: &mut impl Write) { let (w, h) = termion::terminal_size().unwrap(); let max_lines = Output::max_lines() as usize; self.lines .iter() .filter(|l| !self.filter_removed || !l.removed) .skip(self.offset_top) .take(max_lines) .enumerate() .for_each(|(i, line)| { if self.offset_left >= line.value.len() { return; } let end = min(line.value.len(), self.offset_left + w as usize); if line.removed { write!(stdout, "{}", termion::color::Fg(termion::color::Red)).unwrap(); } write!( stdout, "{}{}{}", termion::cursor::Goto(1, (i + 1) as u16), line.value[self.offset_left..end].to_string(), termion::color::Fg(termion::color::Reset), ) .unwrap(); }); write!( stdout, "{}------------------- T: {}/{}, L: {}/{}{}Controls: [Q]uit. [R]efresh. {} filtered data. 
Arrow keys scroll.",
            termion::cursor::Goto(1, h - 1),
            self.offset_top,
            self.visible_line_count(),
            self.offset_left,
            self.max_line_len,
            termion::cursor::Goto(1, h),
            if self.filter_removed { "S[h]ow" } else { "[H]ide" },
        )
        .unwrap();
    }

    fn visible_line_count(&self) -> usize {
        self.lines.iter().filter(|l| !self.filter_removed || !l.removed).count()
    }

    fn scroll(&mut self, down: i64, right: i64) {
        let (w, h) = termion::terminal_size().unwrap();
        self.offset_top = max(0, self.offset_top as i64 + down) as usize;
        self.offset_left = max(0, self.offset_left as i64 + right) as usize;
        self.offset_top =
            min(self.offset_top as i64, max(0, self.visible_line_count() as i64 - h as i64))
                as usize;
        self.offset_left =
            min(self.offset_left as i64, max(0, self.max_line_len as i64 - w as i64)) as usize;
    }

    fn set_filter_removed(&mut self, val: bool) {
        if self.filter_removed == val {
            return;
        }

        self.filter_removed = val;

        if self.filter_removed {
            // Starting to filter, tweak offset_top to remove offsets from newly filtered lines.
            self.offset_top -= self.lines.iter().take(self.offset_top).filter(|l| l.removed).count()
        } else {
            // TODO: Fix this
        }
    }
}

fn filter_json_schema_by_selectors(
    mut schema: InspectData,
    selector_vec: &Vec<Arc<Selector>>,
) -> Option<InspectData> {
    // A failure here implies a malformed snapshot. We want to panic.
    let moniker = selectors::parse_path_to_moniker(&schema.moniker)
        .expect("Snapshot contained an unparsable path.");

    if schema.payload.is_none() {
        schema.payload = Some(hierarchy! {
            root: {
                "filter error": format!("Node hierarchy was missing for {}", schema.moniker),
            }
        });
        return Some(schema);
    }
    let hierarchy = schema.payload.unwrap();
    match selectors::match_component_moniker_against_selectors(&moniker, &selector_vec) {
        Ok(matched_selectors) => {
            if matched_selectors.is_empty() {
                return None;
            }

            let inspect_matcher: InspectHierarchyMatcher =
                (&matched_selectors).try_into().unwrap();

            match diagnostics_hierarchy::filter_hierarchy(hierarchy, &inspect_matcher) {
                Ok(Some(filtered)) => {
                    schema.payload = Some(filtered);
                    Some(schema)
                }
                Ok(None) => {
                    // Ok(None) implies the tree was fully filtered. This means that
                    // it genuinely should not be included in the output.
                    None
                }
                Err(e) => {
                    schema.payload = Some(hierarchy! {
                        root: {
                            "filter error": format!(
                                "Filtering the hierarchy of {}, an error occurred: {:?}",
                                schema.moniker, e
                            ),
                        }
                    });
                    Some(schema)
                }
            }
        }
        Err(e) => {
            schema.payload = Some(hierarchy! {
                root: {
                    "filter error": format!(
                        "Evaluating selectors for {} met an unexpected error condition: {:?}",
                        schema.moniker, e
                    ),
                }
            });
            Some(schema)
        }
    }
}

/// Consumes a file containing Inspect selectors and applies them to an array of node hierarchies
/// which had previously been serialized to their json schema.
///
/// Returns a vector of `Line`s containing the printed diff between the unfiltered and
/// filtered hierarchies, or an Error.
fn filter_data_to_lines(
    selector_file: &str,
    data: &[InspectData],
    requested_name_opt: &Option<String>,
) -> Result<Vec<Line>, Error> {
    let selector_vec: Vec<Arc<Selector>> =
        selectors::parse_selector_file(&PathBuf::from(selector_file))?
            .into_iter()
            .map(Arc::new)
            .collect();

    // Filter the source data that we diff against to only contain the component
    // of interest.
let mut diffable_source: Vec<InspectData> = match requested_name_opt { Some(requested_name) => data .into_iter() .cloned() .filter(|schema| { let moniker = selectors::parse_path_to_moniker(&schema.moniker) .expect("Snapshot contained an unparsable path."); let component_name = moniker .last() .expect("Monikers in provided data dumps are required to be non-empty."); requested_name == component_name }) .collect(), None => data.to_vec(), }; let mut filtered_node_hierarchies: Vec<InspectData> = diffable_source .clone() .into_iter() .filter_map(|schema| filter_json_schema_by_selectors(schema, &selector_vec)) .collect(); let moniker_cmp = |a: &InspectData, b: &InspectData| { a.moniker.partial_cmp(&b.moniker).expect("schema comparison") }; diffable_source.sort_by(moniker_cmp); filtered_node_hierarchies.sort_by(moniker_cmp); let sort_payload = |schema: &mut InspectData| match &mut schema.payload { Some(payload) => { payload.sort(); } _ => {} }; diffable_source.iter_mut().for_each(sort_payload); filtered_node_hierarchies.iter_mut().for_each(sort_payload); let orig_str = serde_json::to_string_pretty(&diffable_source).unwrap(); let new_str = serde_json::to_string_pretty(&filtered_node_hierarchies).unwrap(); let cs = difference::Changeset::new(&orig_str, &new_str, "\n"); // "Added" lines only appear when a property that was once in the middle of a // nested object, and thus ended its line with a comma, becomes the final property // in a node and thus loses the comma. The difference library doesn't expose edit distance // per-line, so we must instead track these "added" lines, and check if any of the "removed" // lines are one of the "added" lines with a comma on the end. let added_line_tracker: HashSet<&str> = cs.diffs.iter().fold(HashSet::new(), |mut acc, change| { if let Add(val) = change { acc.insert(val); } acc }); Ok(cs .diffs .iter() .map(|change| match change { Same(val) | Add(val) => val.split("\n").map(|l| Line::new(l)).collect::<Vec<Line>>(), Rem(val) => val .split("\n") .filter_map(|l| { let last_char_truncated: &str = &l[..l.len() - 1]; if !added_line_tracker.contains(last_char_truncated) { Some(Line::removed(l)) } else { None } }) .collect::<Vec<Line>>(), }) .flatten() .collect()) } fn generate_selectors<'a>( data: Vec<InspectData>, component_name: Option<String>, ) -> Result<String, Error> { struct MatchedHierarchy { moniker: Vec<String>, hierarchy: DiagnosticsHierarchy, } let matching_hierarchies: Vec<MatchedHierarchy> = data .into_iter() .filter_map(|schema| { let moniker = selectors::parse_path_to_moniker(&schema.moniker) .expect("Snapshot contained an unparsable moniker."); let component_name_matches = component_name.is_none() || component_name.as_ref().unwrap() == moniker .last() .expect("Monikers in provided data dumps are required to be non-empty."); schema.payload.and_then(|hierarchy| { if component_name_matches { Some(MatchedHierarchy { moniker, hierarchy }) } else { None } }) }) .collect(); let mut output: Vec<String> = vec![]; for matching_hierarchy in matching_hierarchies { let sanitized_moniker = matching_hierarchy .moniker .iter() .map(|s| selectors::sanitize_string_for_selectors(s)) .collect::<Vec<String>>() .join("/"); for (node_path, property_opt) in matching_hierarchy.hierarchy.property_iter() { match property_opt { Some(property) => { let formatted_node_path = node_path .iter() .map(|s| selectors::sanitize_string_for_selectors(s)) .collect::<Vec<String>>() .join("/"); let sanitized_property = selectors::sanitize_string_for_selectors(property.name()); 
output.push(format!( "{}:{}:{}", sanitized_moniker.clone(), formatted_node_path, sanitized_property )); } None => { continue; } } } } // DiagnosticsHierarchy has an intentionally non-deterministic iteration order, but for client // facing tools we'll want to sort the outputs. output.sort(); Ok(output.join("\n")) } fn interactive_apply( data: Vec<InspectData>, selector_file: &str, component_name: Option<String>, ) -> Result<(), Error> { let stdin = stdin(); let mut stdout = stdout().into_raw_mode().unwrap(); let mut output = Output::new(filter_data_to_lines(&selector_file, &data, &component_name)?); write!(stdout, "{}{}{}", cursor::Restore, cursor::Hide, termion::clear::All).unwrap(); output.refresh(&mut stdout); stdout.flush().unwrap(); for c in stdin.events() { let evt = c.unwrap(); match evt { Event::Key(Key::Char('q')) => break, Event::Key(Key::Char('h')) => output.set_filter_removed(!output.filter_removed), Event::Key(Key::Char('r')) => { output.set_lines(vec![Line::new("Refreshing filtered hierarchies...")]); write!(stdout, "{}", termion::clear::All).unwrap(); output.refresh(&mut stdout); stdout.flush().unwrap(); output.set_lines(filter_data_to_lines(&selector_file, &data, &component_name)?) } Event::Key(Key::PageUp) => { output.scroll(-Output::max_lines(), 0); } Event::Key(Key::PageDown) => { output.scroll(Output::max_lines(), 0); } Event::Key(Key::Up) => { output.scroll(-1, 0); } Event::Key(Key::Down) => { output.scroll(1, 0); } Event::Key(Key::Left) => { output.scroll(0, -1); } Event::Key(Key::Right) => { output.scroll(0, 1); } e => { println!("{:?}", e); } } write!(stdout, "{}", termion::clear::All).unwrap(); output.refresh(&mut stdout); stdout.flush().unwrap(); } write!(stdout, "{}{}{}", cursor::Restore, cursor::Show, termion::clear::All,).unwrap(); stdout.flush().unwrap(); Ok(()) } fn main() -> Result<(), Error> { let opts = Options::from_args(); let filename = &opts.snapshot; let data: Vec<InspectData> = serde_json::from_str( &read_to_string(filename).expect(&format!("Failed to read {} ", filename)), ) .expect(&format!("Failed to parse {} as JSON", filename)); match opts.command { Command::Generate { selector_file, component_name } => { std::fs::write( &selector_file, generate_selectors(data, component_name) .expect(&format!("failed to generate selectors")), )?; } Command::Apply { selector_file, component_name } => { interactive_apply(data, &selector_file, component_name)?; } } Ok(()) } #[cfg(test)] mod tests { use super::*; use tempfile; #[test] fn generate_selectors_test() { let schemas: Vec<InspectData> = serde_json::from_value(get_v1_json_dump()).expect("schemas"); let named_selector_string = generate_selectors(schemas.clone(), Some("account_manager.cmx".to_string())) .expect("Generating selectors with matching name should succeed."); let expected_named_selector_string = "\ realm1/realm2/session5/account_manager.cmx:root/accounts:active\n\ realm1/realm2/session5/account_manager.cmx:root/accounts:total\n\ realm1/realm2/session5/account_manager.cmx:root/auth_providers:types\n\ realm1/realm2/session5/account_manager.cmx:root/listeners:active\n\ realm1/realm2/session5/account_manager.cmx:root/listeners:events\n\ realm1/realm2/session5/account_manager.cmx:root/listeners:total_opened"; assert_eq!(named_selector_string, expected_named_selector_string); assert_eq!( generate_selectors(schemas.clone(), Some("bloop.cmx".to_string())) .expect("Generating selectors with unmatching name should succeed"), "" ); assert_eq!( generate_selectors(schemas, None) .expect("Generating selectors 
with no name should succeed"), expected_named_selector_string ); } fn setup_and_run_selector_filtering( selector_string: &str, source_hierarchy: serde_json::Value, golden_json: serde_json::Value, requested_component: Option<String>, ) { let mut selector_path = tempfile::NamedTempFile::new().expect("Creating tmp selector file should succeed."); selector_path .write_all(selector_string.as_bytes()) .expect("writing selectors to file should be fine..."); let schemas: Vec<InspectData> = serde_json::from_value(source_hierarchy).expect("load schemas"); let filtered_data_string = filter_data_to_lines( &selector_path.path().to_string_lossy(), &schemas, &requested_component, ) .expect("filtering hierarchy should have succeeded.") .into_iter() .filter(|line| !line.removed) .fold(String::new(), |mut acc, line| { acc.push_str(&line.value); acc }); let filtered_json_value: serde_json::Value = serde_json::from_str(&filtered_data_string) .expect(&format!( "Resultant json dump should be parsable json: {}", filtered_data_string )); assert_eq!(filtered_json_value, golden_json); } #[test] fn trailing_comma_diff_test() { let trailing_comma_hierarchy = serde_json::json!( [ { "data_source": "Inspect", "metadata": { "errors": null, "filename": "fuchsia.inspect.Tree", "component_url": "fuchsia-pkg://fuchsia.com/blooper#meta/blooper.cmx", "timestamp": 0 }, "moniker": "blooper.cmx", "payload": { "root": { "a": { "b": 0, "c": 1 } } }, "version": 1 } ] ); let selector = "blooper.cmx:root/a:b"; let mut selector_path = tempfile::NamedTempFile::new().expect("Creating tmp selector file should succeed."); selector_path .write_all(selector.as_bytes()) .expect("writing selectors to file should be fine..."); let mut schemas: Vec<InspectData> = serde_json::from_value(trailing_comma_hierarchy).expect("ok"); for schema in schemas.iter_mut() { if let Some(hierarchy) = &mut schema.payload { hierarchy.sort(); } } let filtered_data_string = filter_data_to_lines( &selector_path.path().to_string_lossy(), &schemas, &Some("blooper.cmx".to_string()), ) .expect("filtering hierarchy should succeed."); let removed_lines = filtered_data_string.iter().fold(HashSet::new(), |mut acc, line| { if line.removed { eprintln!("line removed bloop:{}", line.value.clone()); acc.insert(line.value.clone()); } acc }); assert!(removed_lines.len() == 1); assert!(removed_lines.contains(&r#" "c": 1"#.to_string())); } #[test] fn v1_filter_data_to_lines_test() { let full_tree_selector = "*/realm2/session5/account_manager.cmx:root/accounts:active realm1/realm*/sessio*/account_manager.cmx:root/accounts:total realm1/realm2/session5/account_manager.cmx:root/auth_providers:types realm1/realm2/session5/account_manager.cmx:root/listeners:active realm1/realm2/session5/account_*:root/listeners:events realm1/realm2/session5/account_manager.cmx:root/listeners:total_opened"; setup_and_run_selector_filtering( full_tree_selector, get_v1_json_dump(), get_v1_json_dump(), None, ); setup_and_run_selector_filtering( full_tree_selector, get_v1_json_dump(), get_v1_json_dump(), Some("account_manager.cmx".to_string()), ); let single_value_selector = "realm1/realm2/session5/account_manager.cmx:root/accounts:active"; setup_and_run_selector_filtering( single_value_selector, get_v1_json_dump(), get_v1_single_value_json(), None, ); setup_and_run_selector_filtering( single_value_selector, get_v1_json_dump(), get_v1_single_value_json(), Some("account_manager.cmx".to_string()), ); setup_and_run_selector_filtering( single_value_selector, get_v1_json_dump(), get_empty_value_json(), 
Some("bloop.cmx".to_string()), ); } fn get_v1_json_dump() -> serde_json::Value { serde_json::json!( [ { "data_source":"Inspect", "metadata":{ "errors":null, "filename":"fuchsia.inspect.Tree", "component_url": "fuchsia-pkg://fuchsia.com/account#meta/account_manager.cmx", "timestamp":0 }, "moniker":"realm1/realm2/session5/account_manager.cmx", "payload":{ "root": { "accounts": { "active": 0, "total": 0 }, "auth_providers": { "types": "google" }, "listeners": { "active": 1, "events": 0, "total_opened": 1 } } }, "version":1 } ] ) } fn get_v1_single_value_json() -> serde_json::Value { serde_json::json!( [ { "data_source":"Inspect", "metadata":{ "errors":null, "filename":"fuchsia.inspect.Tree", "component_url": "fuchsia-pkg://fuchsia.com/account#meta/account_manager.cmx", "timestamp":0 }, "moniker":"realm1/realm2/session5/account_manager.cmx", "payload":{ "root": { "accounts": { "active": 0 } } }, "version":1 } ] ) } fn get_empty_value_json() -> serde_json::Value { serde_json::json!([]) } }
33.829071
122
0.519435
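The trailing-comma heuristic in `filter_data_to_lines` above deserves a standalone illustration; this is a sketch, not part of the tool, and `is_comma_only_change` is a hypothetical helper name.

use std::collections::HashSet;

// A "removed" line is ignored when it differs from an "added" line only by the
// comma it lost after becoming the last property of its JSON object.
fn is_comma_only_change(removed: &str, added_lines: &HashSet<&str>) -> bool {
    removed.ends_with(',') && added_lines.contains(&removed[..removed.len() - 1])
}

fn main() {
    let mut added = HashSet::new();
    added.insert(r#"        "b": 0"#);
    // `"b": 0,` -> `"b": 0` is a formatting artifact, not a real removal.
    assert!(is_comma_only_change(r#"        "b": 0,"#, &added));
}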
75ebe814f5c2890a23788764791ab0da36e8b645
7,897
use std::vec; use std::slice; use std::borrow::Borrow; use std::default::Default; use std::hash::Hash; use std::hash::Hasher; use std::iter::FromIterator; use std::iter::IntoIterator; use std::ops::Index; use std::ops::IndexMut; use std::ops::Deref; use std::ops::DerefMut; use std::cmp::Ordering; use std::fmt; use clear::Clear; pub struct RepeatedField<T> { vec: Vec<T>, len: usize, } impl<T> RepeatedField<T> { #[inline] fn len(&self) -> usize { self.len } #[inline] pub fn clear(&mut self) { self.len = 0; } } impl<T> Clear for RepeatedField<T> { #[inline] fn clear(&mut self) { self.len = 0; } } impl<T> Default for RepeatedField<T> { #[inline] fn default() -> RepeatedField<T> { RepeatedField { vec: Vec::new(), len: 0, } } } impl<T> RepeatedField<T> { #[inline] pub fn new() -> RepeatedField<T> { Default::default() } #[inline] pub fn from_vec(vec: Vec<T>) -> RepeatedField<T> { let len = vec.len(); RepeatedField { vec: vec, len: len, } } #[inline] pub fn into_vec(self) -> Vec<T> { let mut vec = self.vec; vec.truncate(self.len); vec } #[inline] pub fn capacity(&self) -> usize { self.vec.capacity() } #[inline] pub fn as_mut_slice<'a>(&'a mut self) -> &'a mut [T] { &mut self.vec[..self.len] } #[inline] pub fn slice<'a>(&'a self, start: usize, end: usize) -> &'a [T] { &self.as_ref()[start..end] } #[inline] pub fn slice_mut<'a>(&'a mut self, start: usize, end: usize) -> &'a mut [T] { &mut self.as_mut_slice()[start..end] } #[inline] pub fn slice_from<'a>(&'a self, start: usize) -> &'a [T] { &self.as_ref()[start..] } #[inline] pub fn slice_from_mut<'a>(&'a mut self, start: usize) -> &'a mut [T] { &mut self.as_mut_slice()[start..] } #[inline] pub fn slice_to<'a>(&'a self, end: usize) -> &'a [T] { &self.as_ref()[..end] } #[inline] pub fn slice_to_mut<'a>(&'a mut self, end: usize) -> &'a mut [T] { &mut self.as_mut_slice()[..end] } #[inline] pub fn split_at<'a>(&'a self, mid: usize) -> (&'a [T], &'a [T]) { self.as_ref().split_at(mid) } #[inline] pub fn split_at_mut<'a>(&'a mut self, mid: usize) -> (&'a mut [T], &'a mut [T]) { self.as_mut_slice().split_at_mut(mid) } #[inline] pub fn tail<'a>(&'a self) -> &'a [T] { &self.as_ref()[1..] 
} #[inline] pub fn last<'a>(&'a self) -> Option<&'a T> { self.as_ref().last() } #[inline] pub fn last_mut<'a>(&'a mut self) -> Option<&'a mut T> { self.as_mut_slice().last_mut() } #[inline] pub fn init<'a>(&'a self) -> &'a [T] { let s = self.as_ref(); &s[0..s.len() - 1] } #[inline] pub fn push(&mut self, value: T) { if self.len == self.vec.len() { self.vec.push(value); } else { self.vec[self.len] = value; } self.len += 1; } #[inline] pub fn pop(&mut self) -> Option<T> { if self.len == 0 { None } else { self.vec.truncate(self.len); self.len -= 1; self.vec.pop() } } #[inline] pub fn insert(&mut self, index: usize, value: T) { assert!(index <= self.len); self.vec.insert(index, value); self.len += 1; } #[inline] pub fn remove(&mut self, index: usize) -> T { assert!(index < self.len); self.len -= 1; self.vec.remove(index) } #[inline] pub fn truncate(&mut self, len: usize) { if self.len > len { self.len = len; } } #[inline] pub fn reverse(&mut self) { self.as_mut_slice().reverse() } #[inline] pub fn into_iter(mut self) -> vec::IntoIter<T> { self.vec.truncate(self.len); self.vec.into_iter() } #[inline] pub fn iter<'a>(&'a self) -> slice::Iter<'a, T> { self.as_ref().iter() } #[inline] pub fn iter_mut<'a>(&'a mut self) -> slice::IterMut<'a, T> { self.as_mut_slice().iter_mut() } #[inline] pub fn sort_by<F>(&mut self, compare: F) where F: Fn(&T, &T) -> Ordering { self.as_mut_slice().sort_by(compare) } #[inline] pub fn as_ptr(&self) -> *const T { self.vec.as_ptr() } #[inline] pub fn as_mut_ptr(&mut self) -> *mut T { self.vec.as_mut_ptr() } } impl<T : Default+Clear> RepeatedField<T> { pub fn push_default<'a>(&'a mut self) -> &'a mut T { if self.len == self.vec.len() { self.vec.push(Default::default()); } else { self.vec[self.len].clear(); } self.len += 1; self.last_mut().unwrap() } } impl<T : Clone> RepeatedField<T> { // TODO: implement to_vec() #[inline] pub fn from_slice(values: &[T]) -> RepeatedField<T> { RepeatedField::from_vec(values.to_vec()) } } impl<T : Clone> Clone for RepeatedField<T> { #[inline] fn clone(&self) -> RepeatedField<T> { RepeatedField { vec: self.to_vec(), len: self.len(), } } } impl<T> FromIterator<T> for RepeatedField<T> { #[inline] fn from_iter<I : IntoIterator<Item = T>>(iter: I) -> RepeatedField<T> { RepeatedField::from_vec(FromIterator::from_iter(iter)) } } impl<'a, T> IntoIterator for &'a RepeatedField<T> { type Item = &'a T; type IntoIter = slice::Iter<'a, T>; fn into_iter(self) -> slice::Iter<'a, T> { self.iter() } } impl<T : PartialEq> PartialEq for RepeatedField<T> { #[inline] fn eq(&self, other: &RepeatedField<T>) -> bool { self.as_ref() == other.as_ref() } } impl<T : Eq> Eq for RepeatedField<T> {} impl<T : PartialEq> RepeatedField<T> { #[inline] pub fn contains(&self, value: &T) -> bool { self.as_ref().contains(value) } } impl<T : Hash> Hash for RepeatedField<T> { fn hash<H : Hasher>(&self, state: &mut H) { self.as_ref().hash(state); } } impl<T> AsRef<[T]> for RepeatedField<T> { #[inline] fn as_ref<'a>(&'a self) -> &'a [T] { &self.vec[..self.len] } } impl<T> Borrow<[T]> for RepeatedField<T> { #[inline] fn borrow(&self) -> &[T] { &self.vec[..self.len] } } impl<T> Deref for RepeatedField<T> { type Target = [T]; #[inline] fn deref(&self) -> &[T] { &self.vec[..self.len] } } impl<T> DerefMut for RepeatedField<T> { #[inline] fn deref_mut(&mut self) -> &mut [T] { &mut self.vec[..self.len] } } impl<T> Index<usize> for RepeatedField<T> { type Output = T; #[inline] fn index<'a>(&'a self, index: usize) -> &'a T { &self.as_ref()[index] } } impl<T> IndexMut<usize> for 
RepeatedField<T> { #[inline] fn index_mut<'a>(&'a mut self, index: usize) -> &'a mut T { &mut self.as_mut_slice()[index] } } impl<T : fmt::Debug> fmt::Debug for RepeatedField<T> { #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.as_ref().fmt(f) } } #[cfg(test)] mod test { use super::RepeatedField; #[test] fn as_mut_slice() { let mut v = RepeatedField::new(); v.push(10); v.push(20); v.clear(); assert_eq!(v.as_mut_slice(), &mut []); v.push(30); assert_eq!(v.as_mut_slice(), &mut [30]); } #[test] fn push_default() { let mut v = RepeatedField::new(); v.push("aa".to_string()); v.push("bb".to_string()); v.clear(); assert_eq!("".to_string(), *v.push_default()); } }
21.459239
85
0.508041
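A short usage sketch of the `RepeatedField` above, assuming the type is exported as in rust-protobuf. The design point is that `clear` only resets `len`, so the backing `Vec` keeps its capacity and later pushes reuse old slots.

fn demo() {
    let mut names: RepeatedField<String> = RepeatedField::new();
    names.push("a".to_string());
    names.push("b".to_string());
    names.clear();                 // resets len; capacity (and old slots) kept
    assert!(names.is_empty());     // slice methods come in via Deref<Target = [T]>
    names.push("c".to_string());   // overwrites the old slot instead of reallocating
    assert_eq!(&names[..], &["c".to_string()][..]);
}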
cc1820ead64a8f664684810b7f578a97a4cb0a81
2,427
use std::fmt::{self, Display, Formatter};
use std::time::{Duration, Instant};

use duration_ext::DurationExt;

#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct FpsCounter {
    pub interval: Duration,
    frame_accum: u64,
    last_time: Instant,
}

#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub struct FpsStats {
    pub frame_accum: u64,
    pub interval: Duration,
}

impl FpsCounter {
    pub fn with_interval(interval: Duration) -> Self {
        Self {
            interval,
            frame_accum: 0,
            last_time: Instant::now(),
        }
    }

    pub fn add_frame(&mut self) {
        self.frame_accum += 1;
    }

    pub fn try_sampling_fps(&mut self) -> Option<FpsStats> {
        debug_assert_ne!(
            self.interval,
            Duration::default(),
            "Sampling over an empty interval will yield incorrect results!"
        );
        if self.last_time.elapsed() < self.interval {
            return None;
        }
        let fps_stats = FpsStats {
            frame_accum: self.frame_accum,
            interval: self.interval,
        };
        self.last_time += self.interval;
        self.frame_accum = 0;
        Some(fps_stats)
    }
}

impl FpsStats {
    pub fn fps(&self) -> f64 {
        self.frame_accum as f64 / self.interval.to_f64_seconds()
    }
}

impl Display for FpsStats {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        write!(
            f,
            concat!(
                "{} frames under {} seconds = ",
                "{} milliseconds/frame = ",
                "{} FPS"
            ),
            self.frame_accum,
            self.interval.to_f64_seconds(),
            1000. / self.fps(),
            self.fps()
        )
    }
}

#[derive(Debug, Copy, Clone, PartialEq)]
pub struct FpsManager {
    pub fps_counter: FpsCounter,
    pub desired_fps_ceil: f64,
    pub enable_fixing_broken_vsync: bool,
}

impl FpsManager {
    pub fn end_main_loop_iteration(&mut self, fps_ceil: &mut Option<f64>) -> Option<FpsStats> {
        self.fps_counter.add_frame();
        if let Some(stats) = self.fps_counter.try_sampling_fps() {
            trace!("Main: New FPS stats: {}", &stats);
            if stats.fps() > self.desired_fps_ceil && self.enable_fixing_broken_vsync {
                warn!(
                    "Main: Broken VSync detected; FPS ceil is now set to {}",
                    self.desired_fps_ceil
                );
                *fps_ceil = Some(self.desired_fps_ceil);
            }
            Some(stats)
        } else {
            None
        }
    }
}
27.579545
126
0.583024
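A minimal sketch of how the counter above is meant to be driven from a main loop; `render_frame` is a placeholder for real per-frame work.

use std::time::Duration;

fn run(mut render_frame: impl FnMut()) {
    let mut counter = FpsCounter::with_interval(Duration::from_secs(1));
    loop {
        render_frame();
        counter.add_frame();
        // At most once per interval, take a sample and reset the accumulator.
        if let Some(stats) = counter.try_sampling_fps() {
            println!("{}", stats); // uses the Display impl of FpsStats
        }
    }
}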
ac530c63c3ac2b361e5b961958fa22a59527a6da
594
extern crate bio;

use bio::seq::min_indices;
use bio::strings::gc_skew;

/// Minimum Skew Problem: Find a position in a genome where the skew diagram attains a minimum.
///  Input: A DNA string Genome.
///  Output: All integer(s) i minimizing Skew_i(Genome) among all values of i (from 0 to |Genome|).
fn main() {
    let mut genome = String::new();
    bio::io::read_line(&mut genome);

    let skew = gc_skew(&genome);
    let mut inds = min_indices(skew);

    // the grading program expects 1-based indexing
    for i in inds.iter_mut() {
        *i += 1;
    }

    bio::io::print_vec(&inds);
}
25.826087
98
0.648148
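For context, a sketch of the quantity the program minimizes, assuming `gc_skew` follows the usual definition: skew[i] is the running count of G minus C over the first i bases, so its minima mark candidate replication-origin positions.

fn skew(genome: &str) -> Vec<i64> {
    let mut out = Vec::with_capacity(genome.len() + 1);
    out.push(0); // Skew_0 is defined as 0
    for b in genome.bytes() {
        let step = match b {
            b'G' => 1,
            b'C' => -1,
            _ => 0, // A and T leave the skew unchanged
        };
        out.push(out.last().copied().unwrap() + step);
    }
    out
}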
4855fa4a204a77a6bb0a13e034c9901180c4957b
11,764
pub mod entry; mod allocation; mod allocator; mod layer; use std::num::NonZeroU32; pub use allocation::Allocation; pub use entry::Entry; pub use layer::Layer; use allocator::Allocator; pub const SIZE: u32 = 2048; #[derive(Debug)] pub struct Atlas { texture: wgpu::Texture, texture_view: wgpu::TextureView, layers: Vec<Layer>, } impl Atlas { pub fn new(device: &wgpu::Device) -> Self { let extent = wgpu::Extent3d { width: SIZE, height: SIZE, depth_or_array_layers: 1, }; let texture = device.create_texture(&wgpu::TextureDescriptor { label: Some("iced_wgpu::image texture atlas"), size: extent, mip_level_count: 1, sample_count: 1, dimension: wgpu::TextureDimension::D2, format: wgpu::TextureFormat::Bgra8UnormSrgb, usage: wgpu::TextureUsage::COPY_DST | wgpu::TextureUsage::COPY_SRC | wgpu::TextureUsage::SAMPLED, }); let texture_view = texture.create_view(&wgpu::TextureViewDescriptor { dimension: Some(wgpu::TextureViewDimension::D2Array), ..Default::default() }); Atlas { texture, texture_view, layers: vec![Layer::Empty], } } pub fn view(&self) -> &wgpu::TextureView { &self.texture_view } pub fn layer_count(&self) -> usize { self.layers.len() } pub fn upload( &mut self, width: u32, height: u32, data: &[u8], device: &wgpu::Device, encoder: &mut wgpu::CommandEncoder, ) -> Option<Entry> { use wgpu::util::DeviceExt; let entry = { let current_size = self.layers.len(); let entry = self.allocate(width, height)?; // We grow the internal texture after allocating if necessary let new_layers = self.layers.len() - current_size; self.grow(new_layers, device, encoder); entry }; log::info!("Allocated atlas entry: {:?}", entry); // It is a webgpu requirement that: // BufferCopyView.layout.bytes_per_row % wgpu::COPY_BYTES_PER_ROW_ALIGNMENT == 0 // So we calculate padded_width by rounding width up to the next // multiple of wgpu::COPY_BYTES_PER_ROW_ALIGNMENT. let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT; let padding = (align - (4 * width) % align) % align; let padded_width = (4 * width + padding) as usize; let padded_data_size = padded_width * height as usize; let mut padded_data = vec![0; padded_data_size]; for row in 0..height as usize { let offset = row * padded_width; padded_data[offset..offset + 4 * width as usize].copy_from_slice( &data[row * 4 * width as usize..(row + 1) * 4 * width as usize], ) } let buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { label: Some("iced_wgpu::image staging buffer"), contents: &padded_data, usage: wgpu::BufferUsage::COPY_SRC, }); match &entry { Entry::Contiguous(allocation) => { self.upload_allocation( &buffer, width, height, padding, 0, &allocation, encoder, ); } Entry::Fragmented { fragments, .. } => { for fragment in fragments { let (x, y) = fragment.position; let offset = (y * padded_width as u32 + 4 * x) as usize; self.upload_allocation( &buffer, width, height, padding, offset, &fragment.allocation, encoder, ); } } } log::info!("Current atlas: {:?}", self); Some(entry) } pub fn remove(&mut self, entry: &Entry) { log::info!("Removing atlas entry: {:?}", entry); match entry { Entry::Contiguous(allocation) => { self.deallocate(allocation); } Entry::Fragmented { fragments, .. 
} => { for fragment in fragments { self.deallocate(&fragment.allocation); } } } } fn allocate(&mut self, width: u32, height: u32) -> Option<Entry> { // Allocate one layer if texture fits perfectly if width == SIZE && height == SIZE { let mut empty_layers = self .layers .iter_mut() .enumerate() .filter(|(_, layer)| layer.is_empty()); if let Some((i, layer)) = empty_layers.next() { *layer = Layer::Full; return Some(Entry::Contiguous(Allocation::Full { layer: i })); } self.layers.push(Layer::Full); return Some(Entry::Contiguous(Allocation::Full { layer: self.layers.len() - 1, })); } // Split big textures across multiple layers if width > SIZE || height > SIZE { let mut fragments = Vec::new(); let mut y = 0; while y < height { let height = std::cmp::min(height - y, SIZE); let mut x = 0; while x < width { let width = std::cmp::min(width - x, SIZE); let allocation = self.allocate(width, height)?; if let Entry::Contiguous(allocation) = allocation { fragments.push(entry::Fragment { position: (x, y), allocation, }); } x += width; } y += height; } return Some(Entry::Fragmented { size: (width, height), fragments, }); } // Try allocating on an existing layer for (i, layer) in self.layers.iter_mut().enumerate() { match layer { Layer::Empty => { let mut allocator = Allocator::new(SIZE); if let Some(region) = allocator.allocate(width, height) { *layer = Layer::Busy(allocator); return Some(Entry::Contiguous(Allocation::Partial { region, layer: i, })); } } Layer::Busy(allocator) => { if let Some(region) = allocator.allocate(width, height) { return Some(Entry::Contiguous(Allocation::Partial { region, layer: i, })); } } _ => {} } } // Create new layer with atlas allocator let mut allocator = Allocator::new(SIZE); if let Some(region) = allocator.allocate(width, height) { self.layers.push(Layer::Busy(allocator)); return Some(Entry::Contiguous(Allocation::Partial { region, layer: self.layers.len() - 1, })); } // We ran out of memory (?) 
None } fn deallocate(&mut self, allocation: &Allocation) { log::info!("Deallocating atlas: {:?}", allocation); match allocation { Allocation::Full { layer } => { self.layers[*layer] = Layer::Empty; } Allocation::Partial { layer, region } => { let layer = &mut self.layers[*layer]; if let Layer::Busy(allocator) = layer { allocator.deallocate(region); if allocator.is_empty() { *layer = Layer::Empty; } } } } } fn upload_allocation( &mut self, buffer: &wgpu::Buffer, image_width: u32, image_height: u32, padding: u32, offset: usize, allocation: &Allocation, encoder: &mut wgpu::CommandEncoder, ) { let (x, y) = allocation.position(); let (width, height) = allocation.size(); let layer = allocation.layer(); let extent = wgpu::Extent3d { width, height, depth_or_array_layers: 1, }; encoder.copy_buffer_to_texture( wgpu::ImageCopyBuffer { buffer, layout: wgpu::ImageDataLayout { offset: offset as u64, bytes_per_row: NonZeroU32::new(4 * image_width + padding), rows_per_image: NonZeroU32::new(image_height), }, }, wgpu::ImageCopyTexture { texture: &self.texture, mip_level: 0, origin: wgpu::Origin3d { x, y, z: layer as u32, }, }, extent, ); } fn grow( &mut self, amount: usize, device: &wgpu::Device, encoder: &mut wgpu::CommandEncoder, ) { if amount == 0 { return; } let new_texture = device.create_texture(&wgpu::TextureDescriptor { label: Some("iced_wgpu::image texture atlas"), size: wgpu::Extent3d { width: SIZE, height: SIZE, depth_or_array_layers: self.layers.len() as u32, }, mip_level_count: 1, sample_count: 1, dimension: wgpu::TextureDimension::D2, format: wgpu::TextureFormat::Bgra8UnormSrgb, usage: wgpu::TextureUsage::COPY_DST | wgpu::TextureUsage::COPY_SRC | wgpu::TextureUsage::SAMPLED, }); let amount_to_copy = self.layers.len() - amount; for (i, layer) in self.layers.iter_mut().take(amount_to_copy).enumerate() { if layer.is_empty() { continue; } encoder.copy_texture_to_texture( wgpu::ImageCopyTexture { texture: &self.texture, mip_level: 0, origin: wgpu::Origin3d { x: 0, y: 0, z: i as u32, }, }, wgpu::ImageCopyTexture { texture: &new_texture, mip_level: 0, origin: wgpu::Origin3d { x: 0, y: 0, z: i as u32, }, }, wgpu::Extent3d { width: SIZE, height: SIZE, depth_or_array_layers: 1, }, ); } self.texture = new_texture; self.texture_view = self.texture.create_view(&wgpu::TextureViewDescriptor { dimension: Some(wgpu::TextureViewDimension::D2Array), ..Default::default() }); } }
29.857868
90
0.454692
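The padding arithmetic in `upload` above is worth a worked example; this assumes the usual wgpu value `COPY_BYTES_PER_ROW_ALIGNMENT == 256`.

fn padded_row(width: u32) -> (u32, u32) {
    let align = 256u32; // wgpu::COPY_BYTES_PER_ROW_ALIGNMENT on current wgpu
    let padding = (align - (4 * width) % align) % align;
    (padding, 4 * width + padding)
}

fn main() {
    // 100 texels = 400 bytes of pixels; rows are padded up to the next multiple of 256.
    assert_eq!(padded_row(100), (112, 512));
    // Already-aligned rows get zero padding thanks to the outer `% align`.
    assert_eq!(padded_row(64), (0, 256));
}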
e2ed023af8e42a26b75f901336564ea3c704dc50
17,366
//! ## feature //! //! * `with-tokio` - default feature run on `tokio` runtime. //! * `with-async-std` - run on `smol` runtime. //! * `with-ntex` - run on `ntex` and `actix` runtime //! //! # Example: //!```ignore //!use std::time::Duration; //! //!use futures_util::TryStreamExt; //!use tokio_postgres_tang::{Builder, PostgresPoolError, PostgresManager}; //! //!#[tokio::main] //!async fn main() -> std::io::Result<()> { //! let db_url = "postgres://postgres:123@localhost/test"; //! //! // setup manager //! let mgr = //! PostgresManager::new_from_stringlike( //! db_url, //! tokio_postgres::NoTls, //! ).unwrap_or_else(|_| panic!("can't make postgres manager")); //! //! //make prepared statements to speed up frequent used queries. It just stores your statement info in a hash map and //! //you can skip this step if you don't need any prepared statement. //! let mgr = mgr //! // alias is used to call according statement later. //! // pass &[tokio_postgres::types::Type] if you want typed statement. pass &[] for no typed statement. //! .prepare_statement("get_topics", "SELECT * FROM topics WHERE id=ANY($1)", &[tokio_postgres::types::Type::OID_ARRAY]) //! .prepare_statement("get_users", "SELECT * FROM posts WHERE id=ANY($1)", &[]); //! //! // make pool //! let pool = Builder::new() //! .always_check(false) // if set true every connection will be checked before checkout. //! .idle_timeout(None) // set idle_timeout and max_lifetime both to None to ignore idle connection drop. //! .max_lifetime(Some(Duration::from_secs(30 * 60))) //! .connection_timeout(Duration::from_secs(5)) // set the timeout when connection to database(used when establish new connection and doing always_check). //! .wait_timeout(Duration::from_secs(5)) // set the timeout when waiting for a connection. //! .min_idle(1) //! .max_size(12) //! .build(mgr) //! .await //! .unwrap_or_else(|_| panic!("can't make pool")); //! //! // wait a bit as the pool spawn connections asynchronously //! tokio::timer::delay(std::time::Instant::now() + std::time::Duration::from_secs(1)).await; //! //! // get a pool ref //! let pool_ref = pool.get().await.expect("can't get pool ref"); //! //! // deref or derefmut to get connection. //! let (client, statements) = &*pool_ref; //! //! /* //! It's possible to insert new statement into statements from pool_ref. //! But be ware the statement will only work on this specific connection and not other connections in the pool. //! The additional statement will be dropped when the connection is dropped from pool. //! A newly spawned connection will not include this additional statement. //! //! * This newly inserted statement most likely can't take advantage of the pipeline query features //! as we didn't join futures when prepare this statement. //! //! * It's suggested that if you want pipelined statements you should join the futures of prepare before calling await on them. //! There is tang_rs::CacheStatement trait for PoolRef<PostgresManager<T>> to help you streamline this operation. //! */ //! //! // use the alias input when building manager to get specific statement. //! let statement = statements.get("get_topics").unwrap(); //! let rows = client.query(statement, &[]).await.expect("Query failed"); //! //! // drop the pool ref to return connection to pool //! drop(pool_ref); //! //! 
Ok(()) //!} //!``` use std::collections::HashMap; use std::fmt; use std::future::Future; use std::pin::Pin; use std::str::FromStr; use std::sync::RwLock; use std::time::Duration; use futures_util::{future::join_all, TryFutureExt}; use tokio_postgres::Client; #[cfg(not(feature = "with-async-std"))] use tokio_postgres::{ tls::{MakeTlsConnect, TlsConnect}, types::Type, Config, Error, Socket, Statement, }; pub use tang_rs::{Builder, Pool, PoolRef, PoolRefOwned}; use tang_rs::{ GarbageCollect, Manager, ManagerFuture, ManagerInterval, ManagerTimeout, ScheduleReaping, SharedManagedPool, }; #[cfg(feature = "with-async-std")] use { async_postgres::{ tls::{MakeTlsConnect, TlsConnect}, types::Type, Config, Error, Socket, Statement, }, async_std::prelude::StreamExt, }; pub struct PostgresManager<Tls> where Tls: MakeTlsConnect<Socket>, { config: Config, tls: Tls, prepares: RwLock<PreparedHashMap>, } impl<Tls> PostgresManager<Tls> where Tls: MakeTlsConnect<Socket>, { /// Create a new `PostgresManager` with the specified `config`. /// prepared statements can be passed when connecting to speed up frequent used queries. pub fn new(config: Config, tls: Tls) -> PostgresManager<Tls> { PostgresManager { config, tls, prepares: RwLock::new(HashMap::new()), } } /// Create a new `PostgresManager`, parsing the config from `params`. pub fn new_from_stringlike<T>(params: T, tls: Tls) -> Result<PostgresManager<Tls>, Error> where T: ToString, { let stringified_params = params.to_string(); let config = Config::from_str(&stringified_params)?; Ok(Self::new(config, tls)) } /// example: /// ```no_run /// use tokio_postgres::types::Type; /// use tokio_postgres::NoTls; /// use tokio_postgres_tang::PostgresManager; /// /// let db_url = "postgres://postgres:123@localhost/test"; /// let mgr = PostgresManager::new_from_stringlike(db_url, NoTls) /// .expect("Can't make manager") /// .prepare_statement("get_table", "SELECT * from table", &[]) /// .prepare_statement("get_table_by_id", "SELECT * from table where id=$1, key=$2", &[Type::OID, Type::VARCHAR]); /// ``` /// alias is used to call specific statement when using the connection. pub fn prepare_statement(self, alias: &str, query: &str, types: &[Type]) -> Self { self.prepares .write() .expect("Failed to lock/write prepared statements") .insert(alias.into(), (query, types).into()); self } } macro_rules! 
manager_interval { ($interval_type: path, $interval_fn: path, $tick_type: path, $tick_method: ident) => { impl<Tls> ManagerInterval for PostgresManager<Tls> where Tls: MakeTlsConnect<Socket> + Send + Sync + Clone + 'static, Tls::Stream: Send, Tls::TlsConnect: Send, <Tls::TlsConnect as TlsConnect<Socket>>::Future: Send, { type Interval = $interval_type; type Tick = $tick_type; fn interval(dur: Duration) -> Self::Interval { $interval_fn(dur) } fn tick(tick: &mut Self::Interval) -> ManagerFuture<'_, Self::Tick> { Box::pin(tick.$tick_method()) } } }; } #[cfg(not(feature = "with-async-std"))] manager_interval!( tokio::time::Interval, tokio::time::interval, tokio::time::Instant, tick ); #[cfg(feature = "with-async-std")] manager_interval!( async_std::stream::Interval, async_std::stream::interval, Option<()>, next ); impl<Tls> ScheduleReaping for PostgresManager<Tls> where Tls: MakeTlsConnect<Socket> + Send + Sync + Clone + 'static, Tls::Stream: Send, Tls::TlsConnect: Send, <Tls::TlsConnect as TlsConnect<Socket>>::Future: Send, { } impl<Tls> GarbageCollect for PostgresManager<Tls> where Tls: MakeTlsConnect<Socket> + Send + Sync + Clone + 'static, Tls::Stream: Send, Tls::TlsConnect: Send, <Tls::TlsConnect as TlsConnect<Socket>>::Future: Send, { } macro_rules! manager { ($connection: ty, $spawn: path, $timeout: path, $timeout_err: ty, $delay_fn: path) => { impl<Tls> Manager for PostgresManager<Tls> where Tls: MakeTlsConnect<Socket> + Send + Sync + Clone + 'static, Tls::Stream: Send, Tls::TlsConnect: Send, <Tls::TlsConnect as TlsConnect<Socket>>::Future: Send, { type Connection = $connection; type Error = PostgresPoolError; type Timeout = $timeout; type TimeoutError = $timeout_err; fn connect(&self) -> ManagerFuture<Result<Self::Connection, Self::Error>> { Box::pin(async move { #[cfg(not(feature = "with-async-std"))] let (c, conn) = self.config.connect(self.tls.clone()).await?; // ToDo: fix this error convertion. #[cfg(feature = "with-async-std")] let (c, conn) = async_postgres::connect_tls(self.config.clone(), self.tls.clone()) .await .map_err(|_| PostgresPoolError::TimeOut)?; $spawn(async move { let _ = conn.await; }); let prepares = self .prepares .read() .expect("Failed to lock/read prepared statements") .clone(); let mut sts = HashMap::with_capacity(prepares.len()); let mut futures = Vec::with_capacity(prepares.len()); // make prepared statements if there is any and set manager prepares for later use. 
for p in prepares.iter() { let (alias, PreparedStatement(query, types)) = p; let alias = alias.to_string(); let future = c.prepare_typed(query, &types).map_ok(|st| (alias, st)); futures.push(future); } for result in join_all(futures).await.into_iter() { let (alias, st) = result?; sts.insert(alias, st); } Ok((c, sts)) }) } fn is_valid<'a>( &self, c: &'a mut Self::Connection, ) -> ManagerFuture<'a, Result<(), Self::Error>> { Box::pin(c.0.simple_query("").map_ok(|_| ()).err_into()) } fn is_closed(&self, conn: &mut Self::Connection) -> bool { conn.0.is_closed() } #[cfg(not(feature = "with-ntex"))] fn spawn<Fut>(&self, fut: Fut) where Fut: Future<Output = ()> + 'static + Send, { $spawn(fut); } #[cfg(feature = "with-ntex")] fn spawn<Fut>(&self, fut: Fut) where Fut: Future<Output = ()> + 'static, { $spawn(fut); } fn timeout<Fut: Future>( &self, fut: Fut, dur: Duration, ) -> ManagerTimeout<Fut, Self::Timeout> { ManagerTimeout::new(fut, $delay_fn(dur)) } fn on_start(&self, shared_pool: &SharedManagedPool<Self>) { self.schedule_reaping(shared_pool); self.garbage_collect(shared_pool); } } }; } #[cfg(feature = "with-ntex")] manager!( (Client, HashMap<String, Statement>), tokio::task::spawn_local, tokio::time::Delay, (), tokio::time::delay_for ); #[cfg(feature = "with-tokio")] manager!( (Client, HashMap<String, Statement>), tokio::spawn, tokio::time::Delay, (), tokio::time::delay_for ); #[cfg(feature = "with-async-std")] manager!( (Client, HashMap<String, Statement>), async_std::task::spawn, smol::Timer, std::time::Instant, smol::Timer::after ); impl<Tls> fmt::Debug for PostgresManager<Tls> where Tls: MakeTlsConnect<Socket>, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("PostgresConnectionManager") .field("config", &self.config) .finish() } } #[cfg(not(feature = "with-ntex"))] type StatementFuture<'a, SELF> = Pin<Box<dyn Future<Output = Result<&'a mut SELF, PostgresPoolError>> + Send + 'a>>; #[cfg(feature = "with-ntex")] type StatementFuture<'a, SELF> = Pin<Box<dyn Future<Output = Result<&'a mut SELF, PostgresPoolError>> + 'a>>; /// helper trait for cached statement for this connection. /// Statements only work on the connection prepare them and not other connections in the pool. pub trait CacheStatement<'a> { /// Only statements with new alias as key in HashMap will be inserted. /// /// The statements with an already existed alias will be ignored. /// /// The format of statement is (<alias str> , <query str>, <tokio_postgres::types::Type>) fn insert_statements( &'a mut self, statements: &'a [(&'a str, &'a str, &'a [Type])], ) -> StatementFuture<'a, Self>; /// Clear the statements of this connection. 
fn clear_statements(&mut self) -> &mut Self; } impl<'a, Tls> CacheStatement<'a> for PoolRef<'_, PostgresManager<Tls>> where Tls: MakeTlsConnect<Socket> + Send + Sync + Clone + 'static, Tls::Stream: Send, Tls::TlsConnect: Send, <Tls::TlsConnect as TlsConnect<Socket>>::Future: Send, { fn insert_statements( &'a mut self, statements: &'a [(&'a str, &'a str, &'a [Type])], ) -> StatementFuture<'a, Self> { Box::pin(async move { let (cli, sts) = &mut **self; let mut futures = Vec::with_capacity(statements.len()); for (alias, query, types) in statements .iter() .map(|(alias, query, types)| (*alias, *query, *types)) { if !sts.contains_key(alias) { let alias = alias.to_owned(); let f = cli.prepare_typed(query, types).map_ok(|st| (alias, st)); futures.push(f); } } for result in join_all(futures).await.into_iter() { let (alias, st) = result?; sts.insert(alias, st); } Ok(self) }) } fn clear_statements(&mut self) -> &mut Self { let (_cli, sts) = &mut **self; sts.clear(); self } } /// helper trait for add/remove prepared statements of PostgresManager. pub trait PrepareStatement { /// The prepared statements will be constructed when new connections spawns into the pool. /// /// This can be achieved by calling `PoolRef.take_conn()` until all connections in pool are dropped. /// /// The format of statement is (<alias str> , <query str>, <tokio_postgres::types::Type>) fn prepare_statements(&mut self, statements: &[(&str, &str, &[Type])]) -> &mut Self; /// Clear the statements of PostgresManager. fn clear_prepared_statements(&mut self) -> &mut Self; } impl<Tls> PrepareStatement for PoolRef<'_, PostgresManager<Tls>> where Tls: MakeTlsConnect<Socket> + Send + Sync + Clone + 'static, Tls::Stream: Send, Tls::TlsConnect: Send, <Tls::TlsConnect as TlsConnect<Socket>>::Future: Send, { fn prepare_statements(&mut self, statements: &[(&str, &str, &[Type])]) -> &mut Self { // ToDo: check this {}. { let mut prepares = self .get_manager() .prepares .write() .expect("Failed to lock/write prepared statements"); for (alias, query, types) in statements.iter() { prepares.insert((*alias).into(), (*query, *types).into()); } } self } fn clear_prepared_statements(&mut self) -> &mut Self { self.get_manager() .prepares .write() .expect("Failed to lock/write prepared statements") .clear(); self } } // type for prepared statement's hash map. key is used as statement's alias type PreparedHashMap = HashMap<String, PreparedStatement>; // wrapper type for prepared statement #[derive(Clone)] pub struct PreparedStatement(String, Vec<Type>); impl From<(&str, &[Type])> for PreparedStatement { fn from((query, types): (&str, &[Type])) -> Self { PreparedStatement(query.into(), types.into()) } } pub enum PostgresPoolError { Inner(Error), TimeOut, } impl fmt::Debug for PostgresPoolError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { PostgresPoolError::Inner(e) => e.fmt(f), PostgresPoolError::TimeOut => f .debug_struct("PostgresError") .field("source", &"Connection Timeout") .finish(), } } } impl From<Error> for PostgresPoolError { fn from(e: Error) -> Self { PostgresPoolError::Inner(e) } } #[cfg(not(feature = "with-async-std"))] impl From<()> for PostgresPoolError { fn from(_: ()) -> PostgresPoolError { PostgresPoolError::TimeOut } } #[cfg(feature = "with-async-std")] impl From<std::time::Instant> for PostgresPoolError { fn from(_: std::time::Instant) -> PostgresPoolError { PostgresPoolError::TimeOut } }
33.332054
161
0.573419
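A small sketch of the alias map that `prepare_statement` fills: each entry is a `PreparedStatement` built through the `From<(&str, &[Type])>` impl at the bottom of the file.

use tokio_postgres::types::Type;

fn demo() {
    let ps: PreparedStatement =
        ("SELECT * FROM topics WHERE id=ANY($1)", &[Type::OID_ARRAY][..]).into();
    let _ = ps; // stored under its alias in the manager's RwLock<PreparedHashMap>
}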
fcb3e3fbe74fb9bec8237a1c8da0b85d26b564e5
1,693
// Copyright (c) SimpleStaking, Viable Systems and Tezedge Contributors
// SPDX-License-Identifier: MIT

use std::convert::TryFrom;

use storage::persistent::{
    KeyValueSchema, Encoder, Decoder, SchemaError,
    database::RocksDbKeyValueSchema,
};
use rocksdb::{ColumnFamilyDescriptor, Cache};

use super::*;

/// WARNING: this index works only with 56-bit indexes, which should be enough
/// * bytes layout: `[initiator(1)][index(7)]`
pub struct Item {
    pub initiator: Initiator,
    pub index: u64,
}

impl Encoder for Item {
    fn encode(&self) -> Result<Vec<u8>, SchemaError> {
        let mut v = self.index.to_be_bytes();
        v[0] = if self.initiator.incoming() { 0xff } else { 0x00 };
        Ok(v.into())
    }
}

impl Decoder for Item {
    fn decode(bytes: &[u8]) -> Result<Self, SchemaError> {
        let mut bytes = <[u8; 8]>::try_from(bytes).map_err(|_| SchemaError::DecodeError)?;
        let initiator = Initiator::new(bytes[0] != 0);
        bytes[0] = 0;
        Ok(Item {
            initiator,
            index: u64::from_be_bytes(bytes),
        })
    }
}

pub struct Schema;

impl KeyValueSchema for Schema {
    type Key = Item;
    type Value = ();
}

impl RocksDbKeyValueSchema for Schema {
    fn descriptor(_cache: &Cache) -> ColumnFamilyDescriptor {
        use rocksdb::{Options, SliceTransform};

        let mut cf_opts = Options::default();
        cf_opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(1));
        cf_opts.set_memtable_prefix_bloom_ratio(0.2);
        ColumnFamilyDescriptor::new(Self::name(), cf_opts)
    }

    fn name() -> &'static str {
        "message_initiator_secondary_index"
    }
}
26.873016
90
0.626108
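An encode/decode round trip makes the `[initiator(1)][index(7)]` layout concrete; it assumes `Initiator::new(true)` marks an incoming peer, as the `decode` impl suggests.

fn demo() -> Result<(), SchemaError> {
    let item = Item { initiator: Initiator::new(true), index: 42 };
    let bytes = item.encode()?;
    assert_eq!(bytes[0], 0xff);                  // the flag byte replaces the high index byte
    assert_eq!(Item::decode(&bytes)?.index, 42); // the remaining 7 bytes carry the index
    Ok(())
}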
14de21dddd92c4dd8d057d4250723c2f918cbf8c
3,005
// Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::marker::PhantomData;

use chrono::DateTime;
use chrono_tz::Tz;
use common_exception::*;
use opensrv_clickhouse::types::column::ArcColumnWrapper;
use opensrv_clickhouse::types::column::ColumnFrom;
use serde_json::Value;

use crate::prelude::*;

#[derive(Debug, Clone)]
pub struct TimestampSerializer<T: PrimitiveType> {
    _marker: PhantomData<T>,
    precision: u32,
    tz: Tz,
}

impl<T: PrimitiveType> TimestampSerializer<T> {
    pub fn create(tz: Tz, precision: u32) -> Self {
        Self {
            _marker: PhantomData,
            precision,
            tz,
        }
    }

    pub fn to_date_time(&self, value: &T) -> DateTime<Tz> {
        let value = value.to_i64().unwrap();
        match T::SIZE {
            4 => value.to_date_time(&self.tz),
            8 => value.to_date_time64(self.precision as usize, &self.tz),
            _ => unreachable!(),
        }
    }
}

const TIME_FMT: &str = "%Y-%m-%d %H:%M:%S";

impl<T: PrimitiveType> TypeSerializer for TimestampSerializer<T> {
    fn serialize_value(&self, value: &DataValue) -> Result<String> {
        let value = DFTryFrom::try_from(value.clone())?;
        let dt = self.to_date_time(&value);
        Ok(dt.format(TIME_FMT).to_string())
    }

    fn serialize_column(&self, column: &ColumnRef) -> Result<Vec<String>> {
        let column: &PrimitiveColumn<T> = Series::check_get(column)?;
        let result: Vec<String> = column
            .iter()
            .map(|v| {
                let dt = self.to_date_time(v);
                dt.format(TIME_FMT).to_string()
            })
            .collect();
        Ok(result)
    }

    fn serialize_json(&self, column: &ColumnRef) -> Result<Vec<Value>> {
        let array: &PrimitiveColumn<T> = Series::check_get(column)?;
        let result: Vec<Value> = array
            .iter()
            .map(|v| {
                let dt = self.to_date_time(v);
                serde_json::to_value(dt.format(TIME_FMT).to_string()).unwrap()
            })
            .collect();
        Ok(result)
    }

    fn serialize_clickhouse_format(
        &self,
        column: &ColumnRef,
    ) -> Result<opensrv_clickhouse::types::column::ArcColumnData> {
        let array: &PrimitiveColumn<T> = Series::check_get(column)?;
        let values: Vec<DateTime<Tz>> = array.iter().map(|v| self.to_date_time(v)).collect();
        Ok(Vec::column_from::<ArcColumnWrapper>(values))
    }
}
31.631579
93
0.611647
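A sketch of the two branches in `to_date_time`: a 4-byte column carries whole seconds while an 8-byte column carries a value scaled by `precision`. The epoch value below is an assumption about how `to_date_time64` interprets precision 6 (microseconds), and it assumes `i64` implements `PrimitiveType` as elsewhere in this crate.

use chrono_tz::Tz;

fn demo() {
    let ser = TimestampSerializer::<i64>::create(Tz::UTC, 6);
    // 2022-01-01T00:00:00Z expressed in microseconds.
    let dt = ser.to_date_time(&1_640_995_200_000_000i64);
    assert_eq!(dt.timestamp(), 1_640_995_200);
}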
7a32e1327792b4d9672d82fa8b7322d8f80b3b16
11,895
use common::Result; use native::mbuf::MBuf; use packets::ip::v6::Ipv6Packet; use packets::ip::ProtocolNumbers; use packets::{buffer, checksum, Fixed, Header, Packet, ParseError}; use std::fmt; pub use self::echo_reply::*; pub use self::echo_request::*; pub use self::ndp::neighbor_advert::*; pub use self::ndp::neighbor_solicit::*; pub use self::ndp::options::*; pub use self::ndp::router_advert::*; pub use self::ndp::router_solicit::*; pub use self::ndp::*; pub use self::too_big::*; pub mod echo_reply; pub mod echo_request; pub mod ndp; pub mod too_big; /* From (https://tools.ietf.org/html/rfc4443) The ICMPv6 messages have the following general format: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type | Code | Checksum | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | + Message Body + | | The type field indicates the type of the message. Its value determines the format of the remaining data. The code field depends on the message type. It is used to create an additional level of message granularity. The checksum field is used to detect data corruption in the ICMPv6 message and parts of the IPv6 header. */ /// Type of ICMPv6 message #[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Hash)] #[repr(C, packed)] pub struct Icmpv6Type(pub u8); impl Icmpv6Type { pub fn new(value: u8) -> Self { Icmpv6Type(value) } } /// Supported ICMPv6 message types #[allow(non_snake_case)] #[allow(non_upper_case_globals)] pub mod Icmpv6Types { use super::Icmpv6Type; pub const PacketTooBig: Icmpv6Type = Icmpv6Type(2); pub const EchoRequest: Icmpv6Type = Icmpv6Type(128); pub const EchoReply: Icmpv6Type = Icmpv6Type(129); // NDP types pub const RouterSolicitation: Icmpv6Type = Icmpv6Type(133); pub const RouterAdvertisement: Icmpv6Type = Icmpv6Type(134); pub const NeighborSolicitation: Icmpv6Type = Icmpv6Type(135); pub const NeighborAdvertisement: Icmpv6Type = Icmpv6Type(136); pub const Redirect: Icmpv6Type = Icmpv6Type(137); } impl fmt::Display for Icmpv6Type { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "{}", match *self { Icmpv6Types::PacketTooBig => "Packet Too Big".to_string(), Icmpv6Types::EchoRequest => "Echo Request".to_string(), Icmpv6Types::EchoReply => "Echo Reply".to_string(), Icmpv6Types::RouterSolicitation => "Router Solicitation".to_string(), Icmpv6Types::RouterAdvertisement => "Router Advertisement".to_string(), Icmpv6Types::NeighborSolicitation => "Neighbor Solicitation".to_string(), Icmpv6Types::NeighborAdvertisement => "Neighbor Advertisement".to_string(), Icmpv6Types::Redirect => "Redirect".to_string(), _ => format!("{}", self.0), } ) } } /// ICMPv6 packet header #[derive(Default, Debug)] #[repr(C, packed)] pub struct Icmpv6Header { msg_type: u8, code: u8, checksum: u16, } impl Header for Icmpv6Header {} /// ICMPv6 packet payload /// /// The ICMPv6 packet may contain a variable length payload. This /// is only the fixed portion. The variable length portion has to /// be parsed separately. 
pub trait Icmpv6Payload: Fixed + Default { /// Returns the ICMPv6 message type that corresponds to the payload fn msg_type() -> Icmpv6Type; } /// ICMPv6 unit payload `()` impl Icmpv6Payload for () { fn msg_type() -> Icmpv6Type { // Unit payload does not have a type unreachable!(); } } /// Common behaviors shared by ICMPv6 packets pub trait Icmpv6Packet<E: Ipv6Packet, P: Icmpv6Payload>: Packet<Header = Icmpv6Header, Envelope = E> { /// Returns a reference to the fixed payload fn payload(&self) -> &P; /// Returns a mutable reference to the fixed payload fn payload_mut(&mut self) -> &mut P; #[inline] fn msg_type(&self) -> Icmpv6Type { Icmpv6Type::new(self.header().msg_type) } #[inline] fn code(&self) -> u8 { self.header().code } #[inline] fn set_code(&mut self, code: u8) { self.header_mut().code = code } #[inline] fn checksum(&self) -> u16 { u16::from_be(self.header().checksum) } #[inline] fn compute_checksum(&mut self) { self.header_mut().checksum = 0; if let Ok(data) = buffer::read_slice(self.mbuf(), self.offset(), self.len()) { let data = unsafe { &(*data) }; let pseudo_header_sum = self .envelope() .pseudo_header(data.len() as u16, ProtocolNumbers::Icmpv6) .sum(); let checksum = checksum::compute(pseudo_header_sum, data); self.header_mut().checksum = u16::to_be(checksum); } else { // we are reading till the end of buffer, should never run out unreachable!() } } } /// ICMPv6 packet #[derive(Debug)] pub struct Icmpv6<E: Ipv6Packet, P: Icmpv6Payload> { envelope: E, mbuf: *mut MBuf, offset: usize, header: *mut Icmpv6Header, payload: *mut P, } /// ICMPv6 packet with unit payload /// /// Use unit payload `()` when the payload type is not known yet. /// /// # Example /// /// ``` /// if ipv6.next_header() == NextHeaders::Icmpv6 { /// let icmpv6 = ipv6.parse::<Icmpv6<()>>().unwrap(); /// } /// ``` impl<E: Ipv6Packet> Icmpv6<E, ()> { /// Downcasts from unit payload to typed payload /// /// # Example /// /// ``` /// if icmpv6.msg_type() == Icmpv6Types::RouterAdvertisement { /// let advert = icmpv6.downcast::<RouterAdvertisement>().unwrap(); /// } /// ``` pub fn downcast<P: Icmpv6Payload>(self) -> Result<Icmpv6<E, P>> { Icmpv6::<E, P>::do_parse(self.envelope) } } impl<E: Ipv6Packet> fmt::Display for Icmpv6<E, ()> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "type: {}, code: {}, checksum: 0x{:04x}", self.msg_type(), self.code(), self.checksum() ) } } impl<E: Ipv6Packet, P: Icmpv6Payload> Icmpv6Packet<E, P> for Icmpv6<E, P> { fn payload(&self) -> &P { unsafe { &(*self.payload) } } fn payload_mut(&mut self) -> &mut P { unsafe { &mut (*self.payload) } } } impl<E: Ipv6Packet, P: Icmpv6Payload> Packet for Icmpv6<E, P> { type Header = Icmpv6Header; type Envelope = E; #[inline] fn envelope(&self) -> &Self::Envelope { &self.envelope } #[inline] fn envelope_mut(&mut self) -> &mut Self::Envelope { &mut self.envelope } #[doc(hidden)] #[inline] fn mbuf(&self) -> *mut MBuf { self.mbuf } #[inline] fn offset(&self) -> usize { self.offset } #[doc(hidden)] #[inline] fn header(&self) -> &Self::Header { unsafe { &(*self.header) } } #[doc(hidden)] #[inline] fn header_mut(&mut self) -> &mut Self::Header { unsafe { &mut (*self.header) } } #[inline] fn header_len(&self) -> usize { Self::Header::size() } #[doc(hidden)] #[inline] fn do_parse(envelope: Self::Envelope) -> Result<Self> { let mbuf = envelope.mbuf(); let offset = envelope.payload_offset(); let header = buffer::read_item::<Self::Header>(mbuf, offset)?; let payload = buffer::read_item::<P>(mbuf, offset + Self::Header::size())?; Ok(Icmpv6 { 
envelope, mbuf, offset, header, payload, }) } #[doc(hidden)] #[inline] fn do_push(envelope: Self::Envelope) -> Result<Self> { let mbuf = envelope.mbuf(); let offset = envelope.payload_offset(); buffer::alloc(mbuf, offset, Self::Header::size() + P::size())?; let header = buffer::write_item::<Self::Header>(mbuf, offset, &Default::default())?; let payload = buffer::write_item::<P>(mbuf, offset + Self::Header::size(), &Default::default())?; unsafe { (*header).msg_type = P::msg_type().0; } Ok(Icmpv6 { envelope, mbuf, offset, header, payload, }) } #[inline] fn remove(self) -> Result<Self::Envelope> { buffer::dealloc(self.mbuf, self.offset, self.header_len())?; Ok(self.envelope) } #[inline] default fn cascade(&mut self) { self.compute_checksum(); self.envelope_mut().cascade(); } #[inline] fn deparse(self) -> Self::Envelope { self.envelope } } /// An ICMPv6 message with parsed payload pub enum Icmpv6Message<E: Ipv6Packet> { EchoRequest(Icmpv6<E, EchoRequest>), EchoReply(Icmpv6<E, EchoReply>), NeighborAdvertisement(Icmpv6<E, NeighborAdvertisement>), NeighborSolicitation(Icmpv6<E, NeighborSolicitation>), RouterAdvertisement(Icmpv6<E, RouterAdvertisement>), RouterSolicitation(Icmpv6<E, RouterSolicitation>), /// an ICMPv6 message with undefined payload Undefined(Icmpv6<E, ()>), } /// ICMPv6 helper functions for IPv6 packets pub trait Icmpv6Parse { type Envelope: Ipv6Packet; /// Parses the payload as an ICMPv6 packet /// /// # Example /// /// ``` /// match ipv6.parse_icmpv6()? { /// Icmpv6Message::RouterAdvertisement(advert) => { /// advert.set_router_lifetime(0); /// }, /// Icmpv6Message::Undefined(icmpv6) => { /// println!("undefined"); /// } /// } /// ``` fn parse_icmpv6(self) -> Result<Icmpv6Message<Self::Envelope>>; } impl<T: Ipv6Packet> Icmpv6Parse for T { type Envelope = T; fn parse_icmpv6(self) -> Result<Icmpv6Message<Self::Envelope>> { if self.next_proto() == ProtocolNumbers::Icmpv6 { let icmpv6 = self.parse::<Icmpv6<Self::Envelope, ()>>()?; match icmpv6.msg_type() { Icmpv6Types::EchoRequest => { let packet = icmpv6.downcast::<EchoRequest>()?; Ok(Icmpv6Message::EchoRequest(packet)) } Icmpv6Types::EchoReply => { let packet = icmpv6.downcast::<EchoReply>()?; Ok(Icmpv6Message::EchoReply(packet)) } Icmpv6Types::NeighborAdvertisement => { let packet = icmpv6.downcast::<NeighborAdvertisement>()?; Ok(Icmpv6Message::NeighborAdvertisement(packet)) } Icmpv6Types::NeighborSolicitation => { let packet = icmpv6.downcast::<NeighborSolicitation>()?; Ok(Icmpv6Message::NeighborSolicitation(packet)) } Icmpv6Types::RouterAdvertisement => { let packet = icmpv6.downcast::<RouterAdvertisement>()?; Ok(Icmpv6Message::RouterAdvertisement(packet)) } Icmpv6Types::RouterSolicitation => { let packet = icmpv6.downcast::<RouterSolicitation>()?; Ok(Icmpv6Message::RouterSolicitation(packet)) } _ => Ok(Icmpv6Message::Undefined(icmpv6)), } } else { Err(ParseError::new("Packet is not ICMPv6").into()) } } }
29.589552
95
0.557041
62535bdcea023a764eb3e71a9b541e07f25e4b18
598
use std::io;
use std::process::ExitStatus;

pub trait StatusExt {
    fn as_result(self) -> io::Result<()>;
}

impl StatusExt for ExitStatus {
    fn as_result(self) -> io::Result<()> {
        if self.success() {
            Ok(())
        } else if let Some(127) = self.code() {
            Err(io::Error::new(
                io::ErrorKind::NotFound,
                "command was not found",
            ))
        } else {
            Err(io::Error::new(
                io::ErrorKind::Other,
                format!("command failed with exit status: {}", self),
            ))
        }
    }
}
23.92
68
0.466555
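Typical call site for the extension trait above: run a command and promote a non-zero exit status into an `io::Error`.

use std::io;
use std::process::Command;

fn sync_disks() -> io::Result<()> {
    // `status()?` surfaces spawn failures; `as_result()` surfaces exit-code failures.
    Command::new("sync").status()?.as_result()
}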
1c73fd98cbcac098cc5e64f18624d4f10e92cc77
5,180
//! egui core library //! //! To quickly get started with egui, you can take a look at [`egui_template`](https://github.com/emilk/egui_template) //! which uses [`eframe`](https://docs.rs/eframe). //! //! To create a GUI using egui you first need a [`CtxRef`] (by convention referred to by `ctx`). //! Use one of [`SidePanel`], [`TopPanel`], [`CentralPanel`], [`Window`] or [`Area`] to //! get access to an [`Ui`] where you can put widgets. For example: //! //! ``` //! # let mut ctx = egui::CtxRef::default(); //! # ctx.begin_frame(Default::default()); //! egui::CentralPanel::default().show(&ctx, |ui| { //! ui.label("Hello"); //! }); //! ``` //! //! //! To write your own integration for egui you need to do this: //! //! ``` ignore //! let mut egui_ctx = egui::CtxRef::default(); //! //! // Game loop: //! loop { //! let raw_input: egui::RawInput = my_integration.gather_input(); //! egui_ctx.begin_frame(raw_input); //! my_app.ui(&egui_ctx); // add panels, windows and widgets to `egui_ctx` here //! let (output, shapes) = egui_ctx.end_frame(); //! let clipped_meshes = egui_ctx.tessellate(shapes); // create triangles to paint //! my_integration.paint(clipped_meshes); //! my_integration.set_cursor_icon(output.cursor_icon); //! // Also see `egui::Output` for more //! } //! ``` #![cfg_attr(not(debug_assertions), deny(warnings))] // Forbid warnings in release builds #![forbid(unsafe_code)] #![warn( clippy::all, clippy::await_holding_lock, clippy::dbg_macro, clippy::doc_markdown, clippy::empty_enum, clippy::enum_glob_use, clippy::exit, clippy::filter_map_next, clippy::fn_params_excessive_bools, clippy::if_let_mutex, clippy::imprecise_flops, clippy::inefficient_to_string, clippy::linkedlist, clippy::lossy_float_literal, clippy::macro_use_imports, clippy::match_on_vec_items, clippy::match_wildcard_for_single_variants, clippy::mem_forget, clippy::mismatched_target_os, clippy::missing_errors_doc, clippy::missing_safety_doc, clippy::needless_borrow, clippy::needless_continue, clippy::needless_pass_by_value, clippy::option_option, clippy::pub_enum_variant_names, clippy::rest_pat_in_fully_bound_structs, clippy::todo, clippy::unimplemented, clippy::unnested_or_patterns, clippy::verbose_file_reads, future_incompatible, missing_crate_level_docs, missing_doc_code_examples, // missing_docs, nonstandard_style, rust_2018_idioms, unused_doc_comments, )] #![allow(clippy::manual_range_contains)] mod animation_manager; pub mod containers; mod context; mod data; pub mod experimental; pub(crate) mod grid; mod id; mod input_state; mod introspection; pub mod layers; mod layout; mod memory; pub mod menu; mod painter; pub(crate) mod placer; mod response; mod sense; pub mod style; mod ui; pub mod util; pub mod widgets; pub use emath as math; pub use epaint as paint; pub use epaint::emath; pub use emath::{ clamp, lerp, pos2, remap, remap_clamp, vec2, Align, Align2, NumExt, Pos2, Rect, Vec2, }; pub use epaint::{ color, mutex, text::{FontDefinitions, FontFamily, TextStyle}, ClippedMesh, Color32, Rgba, Shape, Stroke, Texture, TextureId, }; pub use { containers::*, context::{Context, CtxRef}, data::{input::*, output::*}, grid::Grid, id::Id, input_state::InputState, layers::{LayerId, Order}, layout::*, memory::Memory, painter::Painter, response::Response, sense::Sense, style::Style, ui::Ui, widgets::*, }; // ---------------------------------------------------------------------------- #[cfg(debug_assertions)] pub(crate) const fn has_debug_assertions() -> bool { true } #[cfg(not(debug_assertions))] pub(crate) const fn 
has_debug_assertions() -> bool { false } /// Helper function that adds a label when compiling with debug assertions enabled. pub fn warn_if_debug_build(ui: &mut crate::Ui) { if crate::has_debug_assertions() { ui.label( crate::Label::new("‼ Debug build ‼") .small() .text_color(crate::Color32::RED), ) .on_hover_text("egui was compiled with debug assertions enabled."); } } // ---------------------------------------------------------------------------- /// Create a [`Hyperlink`](crate::Hyperlink) to the current [`file!()`] (and line) on Github /// /// Example: `ui.add(github_link_file_line!("https://github.com/YOUR/PROJECT/blob/master/", "(source code)"));` #[macro_export] macro_rules! github_link_file_line { ($github_url:expr, $label:expr) => {{ let url = format!("{}{}#L{}", $github_url, file!(), line!()); $crate::Hyperlink::new(url).text($label) }}; } /// Create a [`Hyperlink`](crate::Hyperlink) to the current [`file!()`] on github. /// /// Example: `ui.add(github_link_file!("https://github.com/YOUR/PROJECT/blob/master/", "(source code)"));` #[macro_export] macro_rules! github_link_file { ($github_url:expr, $label:expr) => {{ let url = format!("{}{}", $github_url, file!()); $crate::Hyperlink::new(url).text($label) }}; }
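// ----------------------------------------------------------------------------

// A minimal sketch combining `warn_if_debug_build` with the
// `github_link_file_line!` macro defined above; `demo_debug_helpers` is not
// part of the public API and the repository URL is a placeholder.
#[allow(dead_code)]
fn demo_debug_helpers(ui: &mut Ui) {
    warn_if_debug_build(ui); // adds a red "‼ Debug build ‼" label in debug builds
    let _ = ui.add(github_link_file_line!(
        "https://github.com/YOUR/PROJECT/blob/master/",
        "(source code)"
    ));
}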
28.618785
118
0.638031
e4fbafcbf2889c2f4154bc398c8e605eb1da297a
526
//!Request handlers. use context::Context; use response::Response; ///A trait for request handlers. pub trait Handler: Send + Sync + 'static { ///Handle a request from the client. Panicking within this method is ///discouraged, to allow the server to run smoothly. fn handle_request(&self, context: Context, response: Response); } impl<F: Fn(Context, Response) + Send + Sync + 'static> Handler for F { fn handle_request(&self, context: Context, response: Response) { self(context, response); } }
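/// A minimal sketch of a custom handler type; `Greeting` is hypothetical and
/// `Response::send` is assumed to be available, as in rustful's examples.
/// Plain functions and closures of the right signature already implement
/// `Handler` through the blanket implementation above.
#[allow(dead_code)]
struct Greeting {
    message: &'static str,
}

impl Handler for Greeting {
    fn handle_request(&self, _context: Context, response: Response) {
        // Send a fixed message back to the client.
        response.send(self.message);
    }
}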
30.941176
72
0.692015
c1e08b4cb35a8de5579c2c220402ea2d44941ed7
303
fn main() {
    let data = [10, 20, 30, 40, 50];
    // Passing `&data[1..4]` borrows elements 1..4 of the array for the
    // duration of the call.
    use_slice(&data[1..4]);
}

// Takes a slice, i.e. a borrowed view into part of an array of `i32`s.
fn use_slice(slice: &[i32]) {
    println!("length of slice is {}", slice.len());
    println!("{:?}", slice);
}
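// A complementary sketch, not in the original snippet: a mutable slice borrows
// part of the array for writing; once the borrow ends, the array is readable
// again through `data`.
#[allow(dead_code)]
fn double_in_place(slice: &mut [i32]) {
    for x in slice.iter_mut() {
        *x *= 2; // mutate each borrowed element in place
    }
}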
27.545455
63
0.617162
91c76675d93176a717ae3301a18477de973058de
13,775
// Copyright (c) SimpleStaking, Viable Systems and Tezedge Contributors // SPDX-License-Identifier: MIT use std::collections::{BTreeMap, HashMap}; use serde::{Deserialize, Serialize}; use serde_json::Value; use crypto::hash::{BlockHash, OperationHash, ProtocolHash}; use tezos_api::ffi::{Applied, Errored}; use tezos_messages::p2p::encoding::operation::Operation; #[derive(Serialize, Deserialize, Debug, Clone, Default)] pub struct MempoolOperations { applied: Vec<HashMap<String, Value>>, refused: Vec<Value>, branch_refused: Vec<Value>, branch_delayed: Vec<Value>, // TODO: unprocessed - we don't have protocol data, because we can get it just from ffi now unprocessed: Vec<Value>, outdated: Vec<Value>, } fn convert_applied( applied: &[Applied], operations: &BTreeMap<OperationHash, Operation>, ) -> Vec<HashMap<String, Value>> { applied .iter() .filter_map(move |v| { let branch = operations.get(&v.hash)?.branch(); let mut m = serde_json::from_str(&v.protocol_data_json).unwrap_or_else(|err| { let mut m = HashMap::new(); m.insert( "protocol_data_parse_error".to_string(), Value::String(err.to_string()), ); m }); m.insert("hash".to_string(), Value::String(v.hash.to_base58_check())); m.insert( "branch".to_string(), Value::String(branch.to_base58_check()), ); Some(m) }) .collect() } fn convert_errored<'a>( errored: impl IntoIterator<Item = &'a Errored>, operations: &BTreeMap<OperationHash, Operation>, protocol: &ProtocolHash, ) -> Vec<Value> { errored .into_iter() .filter_map(|v| { let operation = match operations.get(&v.hash) { Some(b) => b, None => return None, }; let mut m: HashMap<String, Value> = if v.protocol_data_json.is_empty() { HashMap::new() } else { serde_json::from_str(&v.protocol_data_json).unwrap_or_else(|err| { let mut m = HashMap::new(); m.insert( "protocol_data_parse_error".to_string(), Value::String(err.to_string()), ); m }) }; let error = if v.error_json.is_empty() { Value::Null } else { serde_json::from_str(&v.error_json) .unwrap_or_else(|err| Value::String(err.to_string())) }; m.insert( "protocol".to_string(), Value::String(protocol.to_base58_check()), ); m.insert( "branch".to_string(), Value::String(operation.branch().to_base58_check()), ); m.insert("error".to_string(), error); serde_json::to_value(m) .ok() .map(|json| Value::Array(vec![Value::String(v.hash.to_base58_check()), json])) }) .collect() } impl MempoolOperations { pub fn collect<'a>( applied: &[Applied], refused: impl IntoIterator<Item = &'a Errored>, branch_delayed: impl IntoIterator<Item = &'a Errored>, branch_refused: impl IntoIterator<Item = &'a Errored>, outdated: impl IntoIterator<Item = &'a Errored>, operations: &BTreeMap<OperationHash, Operation>, protocol: &ProtocolHash, ) -> Self { MempoolOperations { applied: convert_applied(applied, operations), refused: convert_errored(refused, operations, protocol), branch_delayed: convert_errored(branch_delayed, operations, protocol), branch_refused: convert_errored(branch_refused, operations, protocol), outdated: convert_errored(outdated, operations, protocol), unprocessed: vec![], } } } #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] pub struct MonitoredOperation<'a> { branch: String, #[serde(flatten)] protocol_data: Value, protocol: &'a str, hash: String, #[serde(skip_serializing_if = "Option::is_none")] error: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] protocol_data_parse_error: Option<String>, } impl<'a> MonitoredOperation<'a> { pub fn new( branch: &BlockHash, protocol_data: Value, protocol: &'a str, hash: &OperationHash, error: 
Option<String>, protocol_data_parse_error: Option<String>, ) -> MonitoredOperation<'a> { MonitoredOperation { branch: branch.to_base58_check(), protocol_data, protocol, hash: hash.to_base58_check(), error, protocol_data_parse_error, } } pub fn collect_applied( applied: impl IntoIterator<Item = &'a Applied> + 'a, operations: &'a BTreeMap<OperationHash, Operation>, protocol_hash: &'a str, ) -> impl Iterator<Item = MonitoredOperation<'a>> + 'a { applied.into_iter().filter_map(move |applied_op| { let op_hash = applied_op.hash.to_base58_check(); let operation = operations.get(&applied_op.hash)?; let (protocol_data, err) = match serde_json::from_str(&applied_op.protocol_data_json) { Ok(protocol_data) => (protocol_data, None), Err(err) => (serde_json::Value::Null, Some(err.to_string())), }; Some(MonitoredOperation { branch: operation.branch().to_base58_check(), protocol: protocol_hash, hash: op_hash, protocol_data, error: None, protocol_data_parse_error: err, }) }) } pub fn collect_errored( errored: impl IntoIterator<Item = &'a Errored> + 'a, operations: &'a BTreeMap<OperationHash, Operation>, protocol_hash: &'a str, ) -> impl Iterator<Item = MonitoredOperation<'a>> + 'a { errored.into_iter().filter_map(move |errored_op| { let op_hash = errored_op.hash.to_base58_check(); let operation = operations.get(&errored_op.hash)?; let json = &errored_op.protocol_data_json; let (protocol_data, err) = match serde_json::from_str(json) { Ok(protocol_data) => (protocol_data, None), Err(err) => (serde_json::Value::Null, Some(err.to_string())), }; let ocaml_err = &errored_op.error_json; Some(MonitoredOperation { branch: operation.branch().to_base58_check(), protocol: protocol_hash, hash: op_hash, protocol_data, error: Some(ocaml_err.clone()), protocol_data_parse_error: err, }) }) } } #[cfg(test)] mod tests { use std::collections::BTreeMap; use std::convert::TryInto; use assert_json_diff::assert_json_eq; use serde_json::json; use tezos_api::ffi::{Applied, Errored}; use tezos_messages::p2p::binary_message::BinaryRead; use tezos_messages::p2p::encoding::operation::Operation; use super::{convert_applied, convert_errored}; #[test] fn test_convert_applied() { let data = vec![ Applied { hash: "onvN8U6QJ6DGJKVYkHXYRtFm3tgBJScj9P5bbPjSZUuFaGzwFuJ".try_into().unwrap(), protocol_data_json: "{ \"contents\": [ { \"kind\": \"endorsement\", \"level\": 459020 } ],\n \"signature\":\n \"siguKbKFVDkXo2m1DqZyftSGg7GZRq43EVLSutfX5yRLXXfWYG5fegXsDT6EUUqawYpjYE1GkyCVHfc2kr3hcaDAvWSAhnV9\" }".to_string(), } ]; let mut operations = BTreeMap::new(); // operation with branch=BKqTKfGwK3zHnVXX33X5PPHy1FDTnbkajj3eFtCXGFyfimQhT1H operations.insert( "onvN8U6QJ6DGJKVYkHXYRtFm3tgBJScj9P5bbPjSZUuFaGzwFuJ".try_into().unwrap(), Operation::from_bytes(hex::decode("10490b79070cf19175cd7e3b9c1ee66f6e85799980404b119132ea7e58a4a97e000008c387fa065a181d45d47a9b78ddc77e92a881779ff2cbabbf9646eade4bf1405a08e00b725ed849eea46953b10b5cdebc518e6fd47e69b82d2ca18c4cf6d2f312dd08").unwrap()).unwrap(), ); let expected_json = json!( [ { "hash" : "onvN8U6QJ6DGJKVYkHXYRtFm3tgBJScj9P5bbPjSZUuFaGzwFuJ", "branch" : "BKqTKfGwK3zHnVXX33X5PPHy1FDTnbkajj3eFtCXGFyfimQhT1H", "contents": [{ "kind": "endorsement", "level": 459020 } ], "signature": "siguKbKFVDkXo2m1DqZyftSGg7GZRq43EVLSutfX5yRLXXfWYG5fegXsDT6EUUqawYpjYE1GkyCVHfc2kr3hcaDAvWSAhnV9" } ] ); // convert let result = convert_applied(&data, &operations); assert_json_eq!(serde_json::to_value(result).unwrap(), expected_json,); } #[test] fn test_convert_errored() { let data = vec![ Errored { hash: 
"onvN8U6QJ6DGJKVYkHXYRtFm3tgBJScj9P5bbPjSZUuFaGzwFuJ".try_into().unwrap(), is_endorsement: false, protocol_data_json: "{ \"contents\": [ { \"kind\": \"endorsement\", \"level\": 459020 } ],\n \"signature\":\n \"siguKbKFVDkXo2m1DqZyftSGg7GZRq43EVLSutfX5yRLXXfWYG5fegXsDT6EUUqawYpjYE1GkyCVHfc2kr3hcaDAvWSAhnV9\" }".to_string(), error_json: "[ { \"kind\": \"temporary\",\n \"id\": \"proto.005-PsBabyM1.operation.wrong_endorsement_predecessor\",\n \"expected\": \"BMDb9PfcJmiibDDEbd6bEEDj4XNG4C7QACG6TWqz29c9FxNgDLL\",\n \"provided\": \"BLd8dLs4X5Ve6a8B37kUu7iJkRycWzfSF5MrskY4z8YaideQAp4\" } ]".to_string(), } ]; let mut operations = BTreeMap::new(); // operation with branch=BKqTKfGwK3zHnVXX33X5PPHy1FDTnbkajj3eFtCXGFyfimQhT1H operations.insert( "onvN8U6QJ6DGJKVYkHXYRtFm3tgBJScj9P5bbPjSZUuFaGzwFuJ".try_into().unwrap(), Operation::from_bytes(hex::decode("10490b79070cf19175cd7e3b9c1ee66f6e85799980404b119132ea7e58a4a97e000008c387fa065a181d45d47a9b78ddc77e92a881779ff2cbabbf9646eade4bf1405a08e00b725ed849eea46953b10b5cdebc518e6fd47e69b82d2ca18c4cf6d2f312dd08").unwrap()).unwrap(), ); let protocol = "PsCARTHAGazKbHtnKfLzQg3kms52kSRpgnDY982a9oYsSXRLQEb" .try_into() .unwrap(); let expected_json = json!( [ [ "onvN8U6QJ6DGJKVYkHXYRtFm3tgBJScj9P5bbPjSZUuFaGzwFuJ", { "protocol" : "PsCARTHAGazKbHtnKfLzQg3kms52kSRpgnDY982a9oYsSXRLQEb", "branch" : "BKqTKfGwK3zHnVXX33X5PPHy1FDTnbkajj3eFtCXGFyfimQhT1H", "contents": [{ "kind": "endorsement", "level": 459020}], "signature": "siguKbKFVDkXo2m1DqZyftSGg7GZRq43EVLSutfX5yRLXXfWYG5fegXsDT6EUUqawYpjYE1GkyCVHfc2kr3hcaDAvWSAhnV9", "error" : [ { "kind": "temporary", "id": "proto.005-PsBabyM1.operation.wrong_endorsement_predecessor", "expected": "BMDb9PfcJmiibDDEbd6bEEDj4XNG4C7QACG6TWqz29c9FxNgDLL", "provided": "BLd8dLs4X5Ve6a8B37kUu7iJkRycWzfSF5MrskY4z8YaideQAp4" } ] } ] ] ); // convert let result = convert_errored(&data, &operations, &protocol); assert_json_eq!(serde_json::to_value(result).unwrap(), expected_json,); } #[test] fn test_convert_errored_missing_protocol_data() { let data = vec![ Errored { hash: "onvN8U6QJ6DGJKVYkHXYRtFm3tgBJScj9P5bbPjSZUuFaGzwFuJ".try_into().unwrap(), is_endorsement: true, protocol_data_json: "".to_string(), error_json: "[ { \"kind\": \"temporary\",\n \"id\": \"proto.005-PsBabyM1.operation.wrong_endorsement_predecessor\",\n \"expected\": \"BMDb9PfcJmiibDDEbd6bEEDj4XNG4C7QACG6TWqz29c9FxNgDLL\",\n \"provided\": \"BLd8dLs4X5Ve6a8B37kUu7iJkRycWzfSF5MrskY4z8YaideQAp4\" } ]".to_string(), } ]; let mut operations = BTreeMap::new(); // operation with branch=BKqTKfGwK3zHnVXX33X5PPHy1FDTnbkajj3eFtCXGFyfimQhT1H operations.insert( "onvN8U6QJ6DGJKVYkHXYRtFm3tgBJScj9P5bbPjSZUuFaGzwFuJ".try_into().unwrap(), Operation::from_bytes(hex::decode("10490b79070cf19175cd7e3b9c1ee66f6e85799980404b119132ea7e58a4a97e000008c387fa065a181d45d47a9b78ddc77e92a881779ff2cbabbf9646eade4bf1405a08e00b725ed849eea46953b10b5cdebc518e6fd47e69b82d2ca18c4cf6d2f312dd08").unwrap()).unwrap(), ); let protocol = "PsCARTHAGazKbHtnKfLzQg3kms52kSRpgnDY982a9oYsSXRLQEb" .try_into() .unwrap(); let expected_json = json!( [ [ "onvN8U6QJ6DGJKVYkHXYRtFm3tgBJScj9P5bbPjSZUuFaGzwFuJ", { "protocol" : "PsCARTHAGazKbHtnKfLzQg3kms52kSRpgnDY982a9oYsSXRLQEb", "branch" : "BKqTKfGwK3zHnVXX33X5PPHy1FDTnbkajj3eFtCXGFyfimQhT1H", "error" : [ { "kind": "temporary", "id": "proto.005-PsBabyM1.operation.wrong_endorsement_predecessor", "expected": "BMDb9PfcJmiibDDEbd6bEEDj4XNG4C7QACG6TWqz29c9FxNgDLL", "provided": "BLd8dLs4X5Ve6a8B37kUu7iJkRycWzfSF5MrskY4z8YaideQAp4" } ] } ] ] ); // convert let result = convert_errored(&data, 
&operations, &protocol); assert_json_eq!(serde_json::to_value(result).unwrap(), expected_json,); } }
42.254601
303
0.603412
d6969321cad4b5e7a1a37e2ee8dfdd787b4cd48b
588
// Checks that an unreachable code warning is emitted when an expression is // preceded by an expression with an uninhabited type. Previously, the // variable liveness analysis was "smarter" than the reachability analysis // in this regard, which led to confusing "unused variable" warnings // without an accompanying explanatory "unreachable expression" warning. // check-pass #![warn(unused_variables,unreachable_code)] enum Foo {} fn f() -> Foo {todo!()} fn main() { let x = f(); //~^ WARNING: unused variable: `x` let _ = x; //~^ WARNING: unreachable expression }
29.4
75
0.712585
d7eccab54ac99cdc2ca48e8951c56f8e22d9ac27
2,594
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::DR { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = r" Value of the field"] pub struct DRR { bits: u32, } impl DRR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } } #[doc = r" Proxy"] pub struct _DRW<'a> { w: &'a mut W, } impl<'a> _DRW<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u32) -> &'a mut W { const MASK: u32 = 4294967295; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bits 0:31 - Data register bits"] #[inline] pub fn dr(&self) -> DRR { let bits = { const MASK: u32 = 4294967295; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u32 }; DRR { bits } } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 4294967295 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bits 0:31 - Data register bits"] #[inline] pub fn dr(&mut self) -> _DRW { _DRW { w: self } } }
24.471698
60
0.471858
e8f575798859c6abdfbefd85047941bf41f74197
5,804
#![no_std] //! Interoperability library for Rust Windowing applications. //! //! This library provides standard types for accessing a window's platform-specific raw window //! handle. This does not provide any utilities for creating and managing windows; instead, it //! provides a common interface that window creation libraries (e.g. Winit, SDL) can use to easily //! talk with graphics libraries (e.g. gfx-hal). //! //! ## Safety guarantees //! //! Please see the docs of [`HasRawWindowHandle`]. //! //! ## Platform handle initialization //! //! Each platform handle struct is purposefully non-exhaustive, so that additional fields may be //! added without breaking backwards compatibility. Each struct provides an `empty` method that may //! be used along with the struct update syntax to construct it. See each specific struct for //! examples. #[cfg(feature = "alloc")] extern crate alloc; mod android; mod appkit; mod redox; mod uikit; mod unix; mod web; mod windows; pub use android::AndroidNdkHandle; pub use appkit::AppKitHandle; pub use redox::OrbitalHandle; pub use uikit::UiKitHandle; pub use unix::{WaylandHandle, XcbHandle, XlibHandle}; pub use web::WebHandle; pub use windows::{Win32Handle, WinRtHandle}; /// Window that wraps around a raw window handle. /// /// # Safety guarantees /// /// Users can safely assume that non-`null`/`0` fields are valid handles, and it is up to the /// implementer of this trait to ensure that condition is upheld. /// /// Despite that qualification, implementers should still make a best-effort attempt to fill in all /// available fields. If an implementation doesn't, and a downstream user needs the field, it should /// try to derive the field from other fields the implementer *does* provide via whatever methods the /// platform provides. /// /// The exact handles returned by `raw_window_handle` must remain consistent between multiple calls /// to `raw_window_handle` as long as not indicated otherwise by platform specific events. pub unsafe trait HasRawWindowHandle { fn raw_window_handle(&self) -> RawWindowHandle; } unsafe impl<'a, T: HasRawWindowHandle> HasRawWindowHandle for &'a T { fn raw_window_handle(&self) -> RawWindowHandle { (*self).raw_window_handle() } } #[cfg(feature = "alloc")] unsafe impl<T: HasRawWindowHandle> HasRawWindowHandle for alloc::rc::Rc<T> { fn raw_window_handle(&self) -> RawWindowHandle { (**self).raw_window_handle() } } #[cfg(feature = "alloc")] unsafe impl<T: HasRawWindowHandle> HasRawWindowHandle for alloc::sync::Arc<T> { fn raw_window_handle(&self) -> RawWindowHandle { (**self).raw_window_handle() } } /// An enum to simply combine the different possible raw window handle variants. /// /// # Variant Availability /// /// Note that all variants are present on all targets (none are disabled behind /// `#[cfg]`s), but see the "Availability Hints" section on each variant for /// some hints on where this variant might be expected. /// /// Note that these "Availability Hints" are not normative. That is to say, a /// [`HasRawWindowHandle`] implementor is completely allowed to return something /// unexpected. (For example, it's legal for someone to return a /// [`RawWindowHandle::Xlib`] on macOS, it would just be weird, and probably /// requires something like XQuartz be used). #[non_exhaustive] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum RawWindowHandle { /// A raw window handle for UIKit (Apple's non-macOS windowing library). 
/// /// ## Availability Hints /// This variant is likely to be used on iOS, tvOS, (in theory) watchOS, and /// Mac Catalyst (`$arch-apple-ios-macabi` targets, which can notably use /// UIKit *or* AppKit), as these are the targets that (currently) support /// UIKit. UiKit(UiKitHandle), /// A raw window handle for AppKit. /// /// ## Availability Hints /// This variant is likely to be used on macOS, although Mac Catalyst /// (`$arch-apple-ios-macabi` targets, which can notably use UIKit *or* /// AppKit) can also use it despite being `target_os = "ios"`. AppKit(AppKitHandle), /// A raw window handle for the Redox operating system. /// /// ## Availability Hints /// This variant is used by the Orbital Windowing System in the Redox /// operating system. Orbital(OrbitalHandle), /// A raw window handle for Xlib. /// /// ## Availability Hints /// This variant is likely to show up anywhere someone manages to get X11 /// working that Xlib can be built for, which is to say, most (but not all) /// Unix systems. Xlib(XlibHandle), /// A raw window handle for Xcb. /// /// ## Availability Hints /// This variant is likely to show up anywhere someone manages to get X11 /// working that XCB can be built for, which is to say, most (but not all) /// Unix systems. Xcb(XcbHandle), /// A raw window handle for Wayland. /// /// ## Availability Hints /// This variant should be expected anywhere Wayland works, which is /// currently some subset of unix systems. Wayland(WaylandHandle), /// A raw window handle for Win32. /// /// ## Availability Hints /// This variant is used on Windows systems. Win32(Win32Handle), /// A raw window handle for WinRT. /// /// ## Availability Hints /// This variant is used on Windows systems. WinRt(WinRtHandle), /// A raw window handle for the Web. /// /// ## Availability Hints /// This variant is used on Wasm or asm.js targets when targeting the Web/HTML5. Web(WebHandle), /// A raw window handle for Android NDK. /// /// ## Availability Hints /// This variant is used on Android targets. AndroidNdk(AndroidNdkHandle), }
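/// A minimal sketch of an implementation for a hypothetical Win32 window type;
/// `MyWin32Window` is not part of this crate, and writing the `hwnd` field
/// follows the `empty()` + struct-update pattern described in the crate docs.
#[allow(dead_code)]
struct MyWin32Window {
    hwnd: *mut core::ffi::c_void,
}

unsafe impl HasRawWindowHandle for MyWin32Window {
    fn raw_window_handle(&self) -> RawWindowHandle {
        let mut handle = Win32Handle::empty();
        // Safety contract: a non-null `hwnd` must be a valid window handle.
        handle.hwnd = self.hwnd;
        RawWindowHandle::Win32(handle)
    }
}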
37.688312
101
0.692626
acbb4fbdccaa5cf00f951cab75d649a92735930e
3,495
// Copyright 2020 Parity Technologies (UK) Ltd. // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. use super::{IfEvent, Incoming, Provider}; use async_io_crate::Async; use futures::future::{BoxFuture, FutureExt}; use std::io; use std::net; use std::task::{Context, Poll}; #[derive(Copy, Clone)] pub enum Tcp {} impl Provider for Tcp { type Stream = Async<net::TcpStream>; type Listener = Async<net::TcpListener>; type IfWatcher = if_watch::IfWatcher; fn if_watcher() -> BoxFuture<'static, io::Result<Self::IfWatcher>> { if_watch::IfWatcher::new().boxed() } fn new_listener(l: net::TcpListener) -> io::Result<Self::Listener> { Async::new(l) } fn new_stream(s: net::TcpStream) -> BoxFuture<'static, io::Result<Self::Stream>> { async move { // Taken from [`Async::connect`]. let stream = Async::new(s)?; // The stream becomes writable when connected. stream.writable().await?; // Check if there was an error while connecting. match stream.get_ref().take_error()? { None => Ok(stream), Some(err) => Err(err), } } .boxed() } fn poll_accept( l: &mut Self::Listener, cx: &mut Context<'_>, ) -> Poll<io::Result<Incoming<Self::Stream>>> { let (stream, remote_addr) = loop { match l.poll_readable(cx) { Poll::Pending => return Poll::Pending, Poll::Ready(Err(err)) => return Poll::Ready(Err(err)), Poll::Ready(Ok(())) => match l.accept().now_or_never() { Some(Err(e)) => return Poll::Ready(Err(e)), Some(Ok(res)) => break res, None => { // Since it doesn't do any harm, account for false positives of // `poll_readable` just in case, i.e. try again. } }, } }; let local_addr = stream.get_ref().local_addr()?; Poll::Ready(Ok(Incoming { stream, local_addr, remote_addr, })) } fn poll_interfaces(w: &mut Self::IfWatcher, cx: &mut Context<'_>) -> Poll<io::Result<IfEvent>> { w.poll_unpin(cx).map_ok(|e| match e { if_watch::IfEvent::Up(a) => IfEvent::Up(a), if_watch::IfEvent::Down(a) => IfEvent::Down(a), }) } }
35.663265
100
0.596567
f465594300732c536fc6a7fb6545f70a5fbaf155
3,015
// Copyright 2021 The Engula Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use engula_futures::io::{RandomRead, SequentialWrite};
use engula_journal::{StreamReader, StreamWriter};

use crate::{async_trait, KernelUpdate, Result, Sequence};

/// A stateful environment for storage engines.
#[async_trait]
pub trait Kernel {
    type UpdateReader: UpdateReader;
    type UpdateWriter: UpdateWriter;
    type StreamReader: StreamReader;
    type StreamWriter: StreamWriter;
    type RandomReader: RandomRead;
    type SequentialWriter: SequentialWrite;

    /// Returns a reader to read updates.
    async fn new_update_reader(&self) -> Result<Self::UpdateReader>;

    /// Returns a writer to update the kernel.
    async fn new_update_writer(&self) -> Result<Self::UpdateWriter>;

    async fn new_stream_reader(&self, stream_name: &str) -> Result<Self::StreamReader>;

    async fn new_stream_writer(&self, stream_name: &str) -> Result<Self::StreamWriter>;

    async fn new_random_reader(
        &self,
        bucket_name: &str,
        object_name: &str,
    ) -> Result<Self::RandomReader>;

    async fn new_sequential_writer(
        &self,
        bucket_name: &str,
        object_name: &str,
    ) -> Result<Self::SequentialWriter>;
}

pub type UpdateEvent = (Sequence, KernelUpdate);

#[async_trait]
pub trait UpdateReader {
    /// Returns the next update event if it is available.
    async fn try_next(&mut self) -> Result<Option<UpdateEvent>>;

    /// Returns the next update event or waits until it is available.
    async fn wait_next(&mut self) -> Result<UpdateEvent>;
}

#[async_trait]
pub trait UpdateWriter {
    /// Appends an update and returns the sequence of the update just appended.
    async fn append(&mut self, update: KernelUpdate) -> Result<Sequence>;

    /// Releases updates up to a sequence (exclusive).
    ///
    /// Some operations of an update are not executed until the update is
    /// released. For example, if an update that deletes an object is appended,
    /// the object will be marked as deleted but it will still be valid for
    /// reads until the update is released. This allows users to finish
    /// ongoing requests even if some objects are marked as deleted. However,
    /// this is not a guarantee but a best-effort optimization. The
    /// implementation can still delete the objects in some cases, for example,
    /// if the user fails to keep alive with the kernel.
    async fn release(&mut self, sequence: Sequence) -> Result<()>;
}
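/// A minimal sketch of the append/release flow described above, generic over
/// any [`Kernel`] implementation; how the [`KernelUpdate`] is built depends on
/// the caller and is elided here.
#[allow(dead_code)]
async fn append_then_release<K: Kernel>(kernel: &K, update: KernelUpdate) -> Result<()> {
    let mut writer = kernel.new_update_writer().await?;
    // Appending returns the sequence assigned to this update.
    let seq = writer.append(update).await?;
    // Release every update *before* ours (the bound is exclusive), letting
    // deferred operations such as object deletions take effect.
    writer.release(seq).await?;
    Ok(())
}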
37.222222
87
0.709784
08894693a3e5853295cc4c6f4f63aebdd5626ba5
55
pub(crate) mod player_map; pub(crate) mod player_side;
18.333333
27
0.781818
bf6746351e8df8c5fee4c5b7fd28bbd45de3429f
55,664
// DO NOT EDIT ! // This file was generated automatically from 'src/mako/cli/main.rs.mako' // DO NOT EDIT ! #![allow(unused_variables, unused_imports, dead_code, unused_mut)] extern crate tokio; #[macro_use] extern crate clap; extern crate yup_oauth2 as oauth2; use std::env; use std::io::{self, Write}; use clap::{App, SubCommand, Arg}; use google_doubleclickbidmanager1::{api, Error}; mod client; use client::{InvalidOptionsError, CLIError, arg_from_str, writer_from_opts, parse_kv_arg, input_file_from_opts, input_mime_from_opts, FieldCursor, FieldError, CallType, UploadProtocol, calltype_from_str, remove_json_null_values, ComplexType, JsonType, JsonTypeInfo}; use std::default::Default; use std::str::FromStr; use serde_json as json; use clap::ArgMatches; enum DoitError { IoError(String, io::Error), ApiError(Error), } struct Engine<'n> { opt: ArgMatches<'n>, hub: api::DoubleClickBidManager, gp: Vec<&'static str>, gpm: Vec<(&'static str, &'static str)>, } impl<'n> Engine<'n> { async fn _lineitems_downloadlineitems(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError) -> Result<(), DoitError> { let mut field_cursor = FieldCursor::default(); let mut object = json::value::Value::Object(Default::default()); for kvarg in opt.values_of("kv").map(|i|i.collect()).unwrap_or(Vec::new()).iter() { let last_errc = err.issues.len(); let (key, value) = parse_kv_arg(&*kvarg, err, false); let mut temp_cursor = field_cursor.clone(); if let Err(field_err) = temp_cursor.set(&*key) { err.issues.push(field_err); } if value.is_none() { field_cursor = temp_cursor.clone(); if err.issues.len() > last_errc { err.issues.remove(last_errc); } continue; } let type_info: Option<(&'static str, JsonTypeInfo)> = match &temp_cursor.to_string()[..] { "file-spec" => Some(("fileSpec", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), "filter-ids" => Some(("filterIds", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })), "filter-type" => Some(("filterType", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), "format" => Some(("format", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), _ => { let suggestion = FieldCursor::did_you_mean(key, &vec!["file-spec", "filter-ids", "filter-type", "format"]); err.issues.push(CLIError::Field(FieldError::Unknown(temp_cursor.to_string(), suggestion, value.map(|v| v.to_string())))); None } }; if let Some((field_cursor_str, type_info)) = type_info { FieldCursor::from(field_cursor_str).set_json_value(&mut object, value.unwrap(), type_info, err, &temp_cursor); } } let mut request: api::DownloadLineItemsRequest = json::value::from_value(object).unwrap(); let mut call = self.hub.lineitems().downloadlineitems(request); for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() { let (key, value) = parse_kv_arg(&*parg, err, false); match key { _ => { let mut found = false; for param in &self.gp { if key == *param { found = true; call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset")); break; } } if !found { err.issues.push(CLIError::UnknownParameter(key.to_string(), {let mut v = Vec::new(); v.extend(self.gp.iter().map(|v|*v)); v } )); } } } } let protocol = CallType::Standard; if dry_run { Ok(()) } else { assert!(err.issues.len() == 0); for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() { call = call.add_scope(scope); } let mut ostream = match writer_from_opts(opt.value_of("out")) { Ok(mut f) => f, Err(io_err) 
=> return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)), }; match match protocol { CallType::Standard => call.doit().await, _ => unreachable!() } { Err(api_err) => Err(DoitError::ApiError(api_err)), Ok((mut response, output_schema)) => { let mut value = json::value::to_value(&output_schema).expect("serde to work"); remove_json_null_values(&mut value); json::to_writer_pretty(&mut ostream, &value).unwrap(); ostream.flush().unwrap(); Ok(()) } } } } async fn _lineitems_uploadlineitems(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError) -> Result<(), DoitError> { let mut field_cursor = FieldCursor::default(); let mut object = json::value::Value::Object(Default::default()); for kvarg in opt.values_of("kv").map(|i|i.collect()).unwrap_or(Vec::new()).iter() { let last_errc = err.issues.len(); let (key, value) = parse_kv_arg(&*kvarg, err, false); let mut temp_cursor = field_cursor.clone(); if let Err(field_err) = temp_cursor.set(&*key) { err.issues.push(field_err); } if value.is_none() { field_cursor = temp_cursor.clone(); if err.issues.len() > last_errc { err.issues.remove(last_errc); } continue; } let type_info: Option<(&'static str, JsonTypeInfo)> = match &temp_cursor.to_string()[..] { "dry-run" => Some(("dryRun", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })), "format" => Some(("format", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), "line-items" => Some(("lineItems", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), _ => { let suggestion = FieldCursor::did_you_mean(key, &vec!["dry-run", "format", "line-items"]); err.issues.push(CLIError::Field(FieldError::Unknown(temp_cursor.to_string(), suggestion, value.map(|v| v.to_string())))); None } }; if let Some((field_cursor_str, type_info)) = type_info { FieldCursor::from(field_cursor_str).set_json_value(&mut object, value.unwrap(), type_info, err, &temp_cursor); } } let mut request: api::UploadLineItemsRequest = json::value::from_value(object).unwrap(); let mut call = self.hub.lineitems().uploadlineitems(request); for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() { let (key, value) = parse_kv_arg(&*parg, err, false); match key { _ => { let mut found = false; for param in &self.gp { if key == *param { found = true; call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset")); break; } } if !found { err.issues.push(CLIError::UnknownParameter(key.to_string(), {let mut v = Vec::new(); v.extend(self.gp.iter().map(|v|*v)); v } )); } } } } let protocol = CallType::Standard; if dry_run { Ok(()) } else { assert!(err.issues.len() == 0); for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() { call = call.add_scope(scope); } let mut ostream = match writer_from_opts(opt.value_of("out")) { Ok(mut f) => f, Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)), }; match match protocol { CallType::Standard => call.doit().await, _ => unreachable!() } { Err(api_err) => Err(DoitError::ApiError(api_err)), Ok((mut response, output_schema)) => { let mut value = json::value::to_value(&output_schema).expect("serde to work"); remove_json_null_values(&mut value); json::to_writer_pretty(&mut ostream, &value).unwrap(); ostream.flush().unwrap(); Ok(()) } } } } async fn _queries_createquery(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError) -> Result<(), DoitError> { let mut 
field_cursor = FieldCursor::default(); let mut object = json::value::Value::Object(Default::default()); for kvarg in opt.values_of("kv").map(|i|i.collect()).unwrap_or(Vec::new()).iter() { let last_errc = err.issues.len(); let (key, value) = parse_kv_arg(&*kvarg, err, false); let mut temp_cursor = field_cursor.clone(); if let Err(field_err) = temp_cursor.set(&*key) { err.issues.push(field_err); } if value.is_none() { field_cursor = temp_cursor.clone(); if err.issues.len() > last_errc { err.issues.remove(last_errc); } continue; } let type_info: Option<(&'static str, JsonTypeInfo)> = match &temp_cursor.to_string()[..] { "kind" => Some(("kind", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), "metadata.data-range" => Some(("metadata.dataRange", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), "metadata.format" => Some(("metadata.format", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), "metadata.google-cloud-storage-path-for-latest-report" => Some(("metadata.googleCloudStoragePathForLatestReport", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), "metadata.google-drive-path-for-latest-report" => Some(("metadata.googleDrivePathForLatestReport", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), "metadata.latest-report-run-time-ms" => Some(("metadata.latestReportRunTimeMs", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), "metadata.locale" => Some(("metadata.locale", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), "metadata.report-count" => Some(("metadata.reportCount", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })), "metadata.running" => Some(("metadata.running", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })), "metadata.send-notification" => Some(("metadata.sendNotification", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })), "metadata.share-email-address" => Some(("metadata.shareEmailAddress", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })), "metadata.title" => Some(("metadata.title", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), "params.group-bys" => Some(("params.groupBys", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })), "params.include-invite-data" => Some(("params.includeInviteData", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })), "params.metrics" => Some(("params.metrics", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })), "params.type" => Some(("params.type", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), "query-id" => Some(("queryId", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), "report-data-end-time-ms" => Some(("reportDataEndTimeMs", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), "report-data-start-time-ms" => Some(("reportDataStartTimeMs", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), "schedule.end-time-ms" => Some(("schedule.endTimeMs", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), "schedule.frequency" => Some(("schedule.frequency", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), "schedule.next-run-minute-of-day" => Some(("schedule.nextRunMinuteOfDay", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })), "schedule.next-run-timezone-code" => Some(("schedule.nextRunTimezoneCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod 
})), "timezone-code" => Some(("timezoneCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), _ => { let suggestion = FieldCursor::did_you_mean(key, &vec!["data-range", "end-time-ms", "format", "frequency", "google-cloud-storage-path-for-latest-report", "google-drive-path-for-latest-report", "group-bys", "include-invite-data", "kind", "latest-report-run-time-ms", "locale", "metadata", "metrics", "next-run-minute-of-day", "next-run-timezone-code", "params", "query-id", "report-count", "report-data-end-time-ms", "report-data-start-time-ms", "running", "schedule", "send-notification", "share-email-address", "timezone-code", "title", "type"]); err.issues.push(CLIError::Field(FieldError::Unknown(temp_cursor.to_string(), suggestion, value.map(|v| v.to_string())))); None } }; if let Some((field_cursor_str, type_info)) = type_info { FieldCursor::from(field_cursor_str).set_json_value(&mut object, value.unwrap(), type_info, err, &temp_cursor); } } let mut request: api::Query = json::value::from_value(object).unwrap(); let mut call = self.hub.queries().createquery(request); for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() { let (key, value) = parse_kv_arg(&*parg, err, false); match key { _ => { let mut found = false; for param in &self.gp { if key == *param { found = true; call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset")); break; } } if !found { err.issues.push(CLIError::UnknownParameter(key.to_string(), {let mut v = Vec::new(); v.extend(self.gp.iter().map(|v|*v)); v } )); } } } } let protocol = CallType::Standard; if dry_run { Ok(()) } else { assert!(err.issues.len() == 0); for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() { call = call.add_scope(scope); } let mut ostream = match writer_from_opts(opt.value_of("out")) { Ok(mut f) => f, Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)), }; match match protocol { CallType::Standard => call.doit().await, _ => unreachable!() } { Err(api_err) => Err(DoitError::ApiError(api_err)), Ok((mut response, output_schema)) => { let mut value = json::value::to_value(&output_schema).expect("serde to work"); remove_json_null_values(&mut value); json::to_writer_pretty(&mut ostream, &value).unwrap(); ostream.flush().unwrap(); Ok(()) } } } } async fn _queries_deletequery(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError) -> Result<(), DoitError> { let mut call = self.hub.queries().deletequery(opt.value_of("query-id").unwrap_or("")); for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() { let (key, value) = parse_kv_arg(&*parg, err, false); match key { _ => { let mut found = false; for param in &self.gp { if key == *param { found = true; call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset")); break; } } if !found { err.issues.push(CLIError::UnknownParameter(key.to_string(), {let mut v = Vec::new(); v.extend(self.gp.iter().map(|v|*v)); v } )); } } } } let protocol = CallType::Standard; if dry_run { Ok(()) } else { assert!(err.issues.len() == 0); for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() { call = call.add_scope(scope); } match match protocol { CallType::Standard => call.doit().await, _ => unreachable!() } { Err(api_err) => Err(DoitError::ApiError(api_err)), Ok(mut response) => { Ok(()) } } } } async fn _queries_getquery(&self, opt: 
&ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError) -> Result<(), DoitError> { let mut call = self.hub.queries().getquery(opt.value_of("query-id").unwrap_or("")); for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() { let (key, value) = parse_kv_arg(&*parg, err, false); match key { _ => { let mut found = false; for param in &self.gp { if key == *param { found = true; call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset")); break; } } if !found { err.issues.push(CLIError::UnknownParameter(key.to_string(), {let mut v = Vec::new(); v.extend(self.gp.iter().map(|v|*v)); v } )); } } } } let protocol = CallType::Standard; if dry_run { Ok(()) } else { assert!(err.issues.len() == 0); for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() { call = call.add_scope(scope); } let mut ostream = match writer_from_opts(opt.value_of("out")) { Ok(mut f) => f, Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)), }; match match protocol { CallType::Standard => call.doit().await, _ => unreachable!() } { Err(api_err) => Err(DoitError::ApiError(api_err)), Ok((mut response, output_schema)) => { let mut value = json::value::to_value(&output_schema).expect("serde to work"); remove_json_null_values(&mut value); json::to_writer_pretty(&mut ostream, &value).unwrap(); ostream.flush().unwrap(); Ok(()) } } } } async fn _queries_listqueries(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError) -> Result<(), DoitError> { let mut call = self.hub.queries().listqueries(); for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() { let (key, value) = parse_kv_arg(&*parg, err, false); match key { _ => { let mut found = false; for param in &self.gp { if key == *param { found = true; call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset")); break; } } if !found { err.issues.push(CLIError::UnknownParameter(key.to_string(), {let mut v = Vec::new(); v.extend(self.gp.iter().map(|v|*v)); v } )); } } } } let protocol = CallType::Standard; if dry_run { Ok(()) } else { assert!(err.issues.len() == 0); for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() { call = call.add_scope(scope); } let mut ostream = match writer_from_opts(opt.value_of("out")) { Ok(mut f) => f, Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)), }; match match protocol { CallType::Standard => call.doit().await, _ => unreachable!() } { Err(api_err) => Err(DoitError::ApiError(api_err)), Ok((mut response, output_schema)) => { let mut value = json::value::to_value(&output_schema).expect("serde to work"); remove_json_null_values(&mut value); json::to_writer_pretty(&mut ostream, &value).unwrap(); ostream.flush().unwrap(); Ok(()) } } } } async fn _queries_runquery(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError) -> Result<(), DoitError> { let mut field_cursor = FieldCursor::default(); let mut object = json::value::Value::Object(Default::default()); for kvarg in opt.values_of("kv").map(|i|i.collect()).unwrap_or(Vec::new()).iter() { let last_errc = err.issues.len(); let (key, value) = parse_kv_arg(&*kvarg, err, false); let mut temp_cursor = field_cursor.clone(); if let Err(field_err) = temp_cursor.set(&*key) { err.issues.push(field_err); } if value.is_none() { field_cursor = temp_cursor.clone(); if 
err.issues.len() > last_errc { err.issues.remove(last_errc); } continue; } let type_info: Option<(&'static str, JsonTypeInfo)> = match &temp_cursor.to_string()[..] { "data-range" => Some(("dataRange", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), "report-data-end-time-ms" => Some(("reportDataEndTimeMs", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), "report-data-start-time-ms" => Some(("reportDataStartTimeMs", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), "timezone-code" => Some(("timezoneCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), _ => { let suggestion = FieldCursor::did_you_mean(key, &vec!["data-range", "report-data-end-time-ms", "report-data-start-time-ms", "timezone-code"]); err.issues.push(CLIError::Field(FieldError::Unknown(temp_cursor.to_string(), suggestion, value.map(|v| v.to_string())))); None } }; if let Some((field_cursor_str, type_info)) = type_info { FieldCursor::from(field_cursor_str).set_json_value(&mut object, value.unwrap(), type_info, err, &temp_cursor); } } let mut request: api::RunQueryRequest = json::value::from_value(object).unwrap(); let mut call = self.hub.queries().runquery(request, opt.value_of("query-id").unwrap_or("")); for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() { let (key, value) = parse_kv_arg(&*parg, err, false); match key { _ => { let mut found = false; for param in &self.gp { if key == *param { found = true; call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset")); break; } } if !found { err.issues.push(CLIError::UnknownParameter(key.to_string(), {let mut v = Vec::new(); v.extend(self.gp.iter().map(|v|*v)); v } )); } } } } let protocol = CallType::Standard; if dry_run { Ok(()) } else { assert!(err.issues.len() == 0); for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() { call = call.add_scope(scope); } match match protocol { CallType::Standard => call.doit().await, _ => unreachable!() } { Err(api_err) => Err(DoitError::ApiError(api_err)), Ok(mut response) => { Ok(()) } } } } async fn _reports_listreports(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError) -> Result<(), DoitError> { let mut call = self.hub.reports().listreports(opt.value_of("query-id").unwrap_or("")); for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() { let (key, value) = parse_kv_arg(&*parg, err, false); match key { _ => { let mut found = false; for param in &self.gp { if key == *param { found = true; call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset")); break; } } if !found { err.issues.push(CLIError::UnknownParameter(key.to_string(), {let mut v = Vec::new(); v.extend(self.gp.iter().map(|v|*v)); v } )); } } } } let protocol = CallType::Standard; if dry_run { Ok(()) } else { assert!(err.issues.len() == 0); for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() { call = call.add_scope(scope); } let mut ostream = match writer_from_opts(opt.value_of("out")) { Ok(mut f) => f, Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)), }; match match protocol { CallType::Standard => call.doit().await, _ => unreachable!() } { Err(api_err) => Err(DoitError::ApiError(api_err)), Ok((mut response, output_schema)) => { let mut value = json::value::to_value(&output_schema).expect("serde to work"); 
remove_json_null_values(&mut value); json::to_writer_pretty(&mut ostream, &value).unwrap(); ostream.flush().unwrap(); Ok(()) } } } } async fn _sdf_download(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError) -> Result<(), DoitError> { let mut field_cursor = FieldCursor::default(); let mut object = json::value::Value::Object(Default::default()); for kvarg in opt.values_of("kv").map(|i|i.collect()).unwrap_or(Vec::new()).iter() { let last_errc = err.issues.len(); let (key, value) = parse_kv_arg(&*kvarg, err, false); let mut temp_cursor = field_cursor.clone(); if let Err(field_err) = temp_cursor.set(&*key) { err.issues.push(field_err); } if value.is_none() { field_cursor = temp_cursor.clone(); if err.issues.len() > last_errc { err.issues.remove(last_errc); } continue; } let type_info: Option<(&'static str, JsonTypeInfo)> = match &temp_cursor.to_string()[..] { "file-types" => Some(("fileTypes", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })), "filter-ids" => Some(("filterIds", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })), "filter-type" => Some(("filterType", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), "version" => Some(("version", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })), _ => { let suggestion = FieldCursor::did_you_mean(key, &vec!["file-types", "filter-ids", "filter-type", "version"]); err.issues.push(CLIError::Field(FieldError::Unknown(temp_cursor.to_string(), suggestion, value.map(|v| v.to_string())))); None } }; if let Some((field_cursor_str, type_info)) = type_info { FieldCursor::from(field_cursor_str).set_json_value(&mut object, value.unwrap(), type_info, err, &temp_cursor); } } let mut request: api::DownloadRequest = json::value::from_value(object).unwrap(); let mut call = self.hub.sdf().download(request); for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() { let (key, value) = parse_kv_arg(&*parg, err, false); match key { _ => { let mut found = false; for param in &self.gp { if key == *param { found = true; call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset")); break; } } if !found { err.issues.push(CLIError::UnknownParameter(key.to_string(), {let mut v = Vec::new(); v.extend(self.gp.iter().map(|v|*v)); v } )); } } } } let protocol = CallType::Standard; if dry_run { Ok(()) } else { assert!(err.issues.len() == 0); for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() { call = call.add_scope(scope); } let mut ostream = match writer_from_opts(opt.value_of("out")) { Ok(mut f) => f, Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)), }; match match protocol { CallType::Standard => call.doit().await, _ => unreachable!() } { Err(api_err) => Err(DoitError::ApiError(api_err)), Ok((mut response, output_schema)) => { let mut value = json::value::to_value(&output_schema).expect("serde to work"); remove_json_null_values(&mut value); json::to_writer_pretty(&mut ostream, &value).unwrap(); ostream.flush().unwrap(); Ok(()) } } } } async fn _doit(&self, dry_run: bool) -> Result<Result<(), DoitError>, Option<InvalidOptionsError>> { let mut err = InvalidOptionsError::new(); let mut call_result: Result<(), DoitError> = Ok(()); let mut err_opt: Option<InvalidOptionsError> = None; match self.opt.subcommand() { ("lineitems", Some(opt)) => { match opt.subcommand() { ("downloadlineitems", Some(opt)) => { call_result = 
self._lineitems_downloadlineitems(opt, dry_run, &mut err).await; }, ("uploadlineitems", Some(opt)) => { call_result = self._lineitems_uploadlineitems(opt, dry_run, &mut err).await; }, _ => { err.issues.push(CLIError::MissingMethodError("lineitems".to_string())); writeln!(io::stderr(), "{}\n", opt.usage()).ok(); } } }, ("queries", Some(opt)) => { match opt.subcommand() { ("createquery", Some(opt)) => { call_result = self._queries_createquery(opt, dry_run, &mut err).await; }, ("deletequery", Some(opt)) => { call_result = self._queries_deletequery(opt, dry_run, &mut err).await; }, ("getquery", Some(opt)) => { call_result = self._queries_getquery(opt, dry_run, &mut err).await; }, ("listqueries", Some(opt)) => { call_result = self._queries_listqueries(opt, dry_run, &mut err).await; }, ("runquery", Some(opt)) => { call_result = self._queries_runquery(opt, dry_run, &mut err).await; }, _ => { err.issues.push(CLIError::MissingMethodError("queries".to_string())); writeln!(io::stderr(), "{}\n", opt.usage()).ok(); } } }, ("reports", Some(opt)) => { match opt.subcommand() { ("listreports", Some(opt)) => { call_result = self._reports_listreports(opt, dry_run, &mut err).await; }, _ => { err.issues.push(CLIError::MissingMethodError("reports".to_string())); writeln!(io::stderr(), "{}\n", opt.usage()).ok(); } } }, ("sdf", Some(opt)) => { match opt.subcommand() { ("download", Some(opt)) => { call_result = self._sdf_download(opt, dry_run, &mut err).await; }, _ => { err.issues.push(CLIError::MissingMethodError("sdf".to_string())); writeln!(io::stderr(), "{}\n", opt.usage()).ok(); } } }, _ => { err.issues.push(CLIError::MissingCommandError); writeln!(io::stderr(), "{}\n", self.opt.usage()).ok(); } } if dry_run { if err.issues.len() > 0 { err_opt = Some(err); } Err(err_opt) } else { Ok(call_result) } } // Please note that this call will fail if any part of the opt can't be handled async fn new(opt: ArgMatches<'n>) -> Result<Engine<'n>, InvalidOptionsError> { let (config_dir, secret) = { let config_dir = match client::assure_config_dir_exists(opt.value_of("folder").unwrap_or("~/.google-service-cli")) { Err(e) => return Err(InvalidOptionsError::single(e, 3)), Ok(p) => p, }; match client::application_secret_from_directory(&config_dir, "doubleclickbidmanager1-secret.json", "{\"installed\":{\"auth_uri\":\"https://accounts.google.com/o/oauth2/auth\",\"client_secret\":\"hCsslbCUyfehWMmbkG8vTYxG\",\"token_uri\":\"https://accounts.google.com/o/oauth2/token\",\"client_email\":\"\",\"redirect_uris\":[\"urn:ietf:wg:oauth:2.0:oob\",\"oob\"],\"client_x509_cert_url\":\"\",\"client_id\":\"620010449518-9ngf7o4dhs0dka470npqvor6dc5lqb9b.apps.googleusercontent.com\",\"auth_provider_x509_cert_url\":\"https://www.googleapis.com/oauth2/v1/certs\"}}") { Ok(secret) => (config_dir, secret), Err(e) => return Err(InvalidOptionsError::single(e, 4)) } }; let auth = yup_oauth2::InstalledFlowAuthenticator::builder( secret, yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect, ).persist_tokens_to_disk(format!("{}/doubleclickbidmanager1", config_dir)).build().await.unwrap(); let client = hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots()); let engine = Engine { opt: opt, hub: api::DoubleClickBidManager::new(client, auth), gp: vec!["$-xgafv", "access-token", "alt", "callback", "fields", "key", "oauth-token", "pretty-print", "quota-user", "upload-type", "upload-protocol"], gpm: vec![ ("$-xgafv", "$.xgafv"), ("access-token", "access_token"), ("oauth-token", "oauth_token"), ("pretty-print", "prettyPrint"), ("quota-user", 
"quotaUser"), ("upload-type", "uploadType"), ("upload-protocol", "upload_protocol"), ] }; match engine._doit(true).await { Err(Some(err)) => Err(err), Err(None) => Ok(engine), Ok(_) => unreachable!(), } } async fn doit(&self) -> Result<(), DoitError> { match self._doit(false).await { Ok(res) => res, Err(_) => unreachable!(), } } } #[tokio::main] async fn main() { let mut exit_status = 0i32; let arg_data = [ ("lineitems", "methods: 'downloadlineitems' and 'uploadlineitems'", vec![ ("downloadlineitems", Some(r##"Retrieves line items in CSV format. YouTube & partners line items are not supported."##), "Details at http://byron.github.io/google-apis-rs/google_doubleclickbidmanager1_cli/lineitems_downloadlineitems", vec![ (Some(r##"kv"##), Some(r##"r"##), Some(r##"Set various fields of the request structure, matching the key=value form"##), Some(true), Some(true)), (Some(r##"v"##), Some(r##"p"##), Some(r##"Set various optional parameters, matching the key=value form"##), Some(false), Some(true)), (Some(r##"out"##), Some(r##"o"##), Some(r##"Specify the file into which to write the program's output"##), Some(false), Some(false)), ]), ("uploadlineitems", Some(r##"Uploads line items in CSV format. YouTube & partners line items are not supported."##), "Details at http://byron.github.io/google-apis-rs/google_doubleclickbidmanager1_cli/lineitems_uploadlineitems", vec![ (Some(r##"kv"##), Some(r##"r"##), Some(r##"Set various fields of the request structure, matching the key=value form"##), Some(true), Some(true)), (Some(r##"v"##), Some(r##"p"##), Some(r##"Set various optional parameters, matching the key=value form"##), Some(false), Some(true)), (Some(r##"out"##), Some(r##"o"##), Some(r##"Specify the file into which to write the program's output"##), Some(false), Some(false)), ]), ]), ("queries", "methods: 'createquery', 'deletequery', 'getquery', 'listqueries' and 'runquery'", vec![ ("createquery", Some(r##"Creates a query."##), "Details at http://byron.github.io/google-apis-rs/google_doubleclickbidmanager1_cli/queries_createquery", vec![ (Some(r##"kv"##), Some(r##"r"##), Some(r##"Set various fields of the request structure, matching the key=value form"##), Some(true), Some(true)), (Some(r##"v"##), Some(r##"p"##), Some(r##"Set various optional parameters, matching the key=value form"##), Some(false), Some(true)), (Some(r##"out"##), Some(r##"o"##), Some(r##"Specify the file into which to write the program's output"##), Some(false), Some(false)), ]), ("deletequery", Some(r##"Deletes a stored query as well as the associated stored reports."##), "Details at http://byron.github.io/google-apis-rs/google_doubleclickbidmanager1_cli/queries_deletequery", vec![ (Some(r##"query-id"##), None, Some(r##"Query ID to delete."##), Some(true), Some(false)), (Some(r##"v"##), Some(r##"p"##), Some(r##"Set various optional parameters, matching the key=value form"##), Some(false), Some(true)), ]), ("getquery", Some(r##"Retrieves a stored query."##), "Details at http://byron.github.io/google-apis-rs/google_doubleclickbidmanager1_cli/queries_getquery", vec![ (Some(r##"query-id"##), None, Some(r##"Query ID to retrieve."##), Some(true), Some(false)), (Some(r##"v"##), Some(r##"p"##), Some(r##"Set various optional parameters, matching the key=value form"##), Some(false), Some(true)), (Some(r##"out"##), Some(r##"o"##), Some(r##"Specify the file into which to write the program's output"##), Some(false), Some(false)), ]), ("listqueries", Some(r##"Retrieves stored queries."##), "Details at 
http://byron.github.io/google-apis-rs/google_doubleclickbidmanager1_cli/queries_listqueries", vec![ (Some(r##"v"##), Some(r##"p"##), Some(r##"Set various optional parameters, matching the key=value form"##), Some(false), Some(true)), (Some(r##"out"##), Some(r##"o"##), Some(r##"Specify the file into which to write the program's output"##), Some(false), Some(false)), ]), ("runquery", Some(r##"Runs a stored query to generate a report."##), "Details at http://byron.github.io/google-apis-rs/google_doubleclickbidmanager1_cli/queries_runquery", vec![ (Some(r##"query-id"##), None, Some(r##"Query ID to run."##), Some(true), Some(false)), (Some(r##"kv"##), Some(r##"r"##), Some(r##"Set various fields of the request structure, matching the key=value form"##), Some(true), Some(true)), (Some(r##"v"##), Some(r##"p"##), Some(r##"Set various optional parameters, matching the key=value form"##), Some(false), Some(true)), ]), ]), ("reports", "methods: 'listreports'", vec![ ("listreports", Some(r##"Retrieves stored reports."##), "Details at http://byron.github.io/google-apis-rs/google_doubleclickbidmanager1_cli/reports_listreports", vec![ (Some(r##"query-id"##), None, Some(r##"Query ID with which the reports are associated."##), Some(true), Some(false)), (Some(r##"v"##), Some(r##"p"##), Some(r##"Set various optional parameters, matching the key=value form"##), Some(false), Some(true)), (Some(r##"out"##), Some(r##"o"##), Some(r##"Specify the file into which to write the program's output"##), Some(false), Some(false)), ]), ]), ("sdf", "methods: 'download'", vec![ ("download", Some(r##"Retrieves entities in SDF format."##), "Details at http://byron.github.io/google-apis-rs/google_doubleclickbidmanager1_cli/sdf_download", vec![ (Some(r##"kv"##), Some(r##"r"##), Some(r##"Set various fields of the request structure, matching the key=value form"##), Some(true), Some(true)), (Some(r##"v"##), Some(r##"p"##), Some(r##"Set various optional parameters, matching the key=value form"##), Some(false), Some(true)), (Some(r##"out"##), Some(r##"o"##), Some(r##"Specify the file into which to write the program's output"##), Some(false), Some(false)), ]), ]), ]; let mut app = App::new("doubleclickbidmanager1") .author("Sebastian Thiel <[email protected]>") .version("2.0.8+20210323") .about("DoubleClick Bid Manager API allows users to manage and create campaigns and reports.") .after_help("All documentation details can be found at http://byron.github.io/google-apis-rs/google_doubleclickbidmanager1_cli") .arg(Arg::with_name("url") .long("scope") .help("Specify the authentication a method should be executed in. Each scope requires the user to grant this application permission to use it.If unset, it defaults to the shortest scope url for a particular method.") .multiple(true) .takes_value(true)) .arg(Arg::with_name("folder") .long("config-dir") .help("A directory into which we will store our persistent data. 
Defaults to a user-writable directory that we will create during the first invocation.[default: ~/.google-service-cli") .multiple(false) .takes_value(true)) .arg(Arg::with_name("debug") .long("debug") .help("Debug print all errors") .multiple(false) .takes_value(false)); for &(main_command_name, about, ref subcommands) in arg_data.iter() { let mut mcmd = SubCommand::with_name(main_command_name).about(about); for &(sub_command_name, ref desc, url_info, ref args) in subcommands { let mut scmd = SubCommand::with_name(sub_command_name); if let &Some(desc) = desc { scmd = scmd.about(desc); } scmd = scmd.after_help(url_info); for &(ref arg_name, ref flag, ref desc, ref required, ref multi) in args { let arg_name_str = match (arg_name, flag) { (&Some(an), _ ) => an, (_ , &Some(f)) => f, _ => unreachable!(), }; let mut arg = Arg::with_name(arg_name_str) .empty_values(false); if let &Some(short_flag) = flag { arg = arg.short(short_flag); } if let &Some(desc) = desc { arg = arg.help(desc); } if arg_name.is_some() && flag.is_some() { arg = arg.takes_value(true); } if let &Some(required) = required { arg = arg.required(required); } if let &Some(multi) = multi { arg = arg.multiple(multi); } scmd = scmd.arg(arg); } mcmd = mcmd.subcommand(scmd); } app = app.subcommand(mcmd); } let matches = app.get_matches(); let debug = matches.is_present("debug"); match Engine::new(matches).await { Err(err) => { exit_status = err.exit_code; writeln!(io::stderr(), "{}", err).ok(); }, Ok(engine) => { if let Err(doit_err) = engine.doit().await { exit_status = 1; match doit_err { DoitError::IoError(path, err) => { writeln!(io::stderr(), "Failed to open output file '{}': {}", path, err).ok(); }, DoitError::ApiError(err) => { if debug { writeln!(io::stderr(), "{:#?}", err).ok(); } else { writeln!(io::stderr(), "{}", err).ok(); } } } } } } std::process::exit(exit_status); }
49.655665
570
0.453686
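// A minimal, hypothetical sketch of the nested clap dispatch pattern the
// generated CLI above uses: a `subcommand()` match per resource, then one per
// method. The "demo" binary name is invented; the clap 2.x calls and the
// "queries"/"runquery" names come from the source above.
use clap::{App, Arg, SubCommand};

fn main() {
    let matches = App::new("demo")
        .subcommand(
            SubCommand::with_name("queries").subcommand(
                SubCommand::with_name("runquery")
                    .arg(Arg::with_name("query-id").required(true)),
            ),
        )
        .get_matches();

    match matches.subcommand() {
        ("queries", Some(opt)) => match opt.subcommand() {
            ("runquery", Some(opt)) => {
                println!("running query {}", opt.value_of("query-id").unwrap())
            }
            _ => eprintln!("missing method for 'queries'"),
        },
        _ => eprintln!("missing command"),
    }
}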
484e52b5c3a498d9ad42842c73014c552f100aa7
1330
// This file was generated by gir (https://github.com/gtk-rs/gir)
// from ../gir-files
// DO NOT EDIT

use glib::object::IsA;
use glib::translate::*;
use std::fmt;

glib::wrapper! {
    #[doc(alias = "ArvXmlSchema")]
    pub struct XmlSchema(Object<ffi::ArvXmlSchema, ffi::ArvXmlSchemaClass>);

    match fn {
        type_ => || ffi::arv_xml_schema_get_type(),
    }
}

impl XmlSchema {
    #[doc(alias = "arv_xml_schema_new_from_file")]
    #[doc(alias = "new_from_file")]
    pub fn from_file<P: IsA<gio::File>>(file: &P) -> XmlSchema {
        assert_initialized_main_thread!();
        unsafe {
            from_glib_full(ffi::arv_xml_schema_new_from_file(
                file.as_ref().to_glib_none().0,
            ))
        }
    }

    #[doc(alias = "arv_xml_schema_new_from_path")]
    #[doc(alias = "new_from_path")]
    pub fn from_path(path: &str) -> XmlSchema {
        assert_initialized_main_thread!();
        unsafe { from_glib_full(ffi::arv_xml_schema_new_from_path(path.to_glib_none().0)) }
    }

    //#[doc(alias = "arv_xml_schema_validate")]
    //pub fn validate(&self, xml: /*Unimplemented*/Option<Fundamental: Pointer>, size: usize, line: i32, column: i32) -> Result<(), glib::Error> {
    //    unsafe { TODO: call ffi:arv_xml_schema_validate() }
    //}
}

unsafe impl Send for XmlSchema {}

impl fmt::Display for XmlSchema {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("XmlSchema")
    }
}
26.6
143
0.682707
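// A minimal usage sketch for the generated binding above. It assumes the crate
// exposes the type as `aravis::XmlSchema` (an assumption about the crate path)
// and that the library has been initialized on the main thread, which the
// binding asserts internally via `assert_initialized_main_thread!`.
fn load_schema(path: &str) -> aravis::XmlSchema {
    let schema = aravis::XmlSchema::from_path(path);
    // The `Display` impl above simply prints the type name.
    println!("loaded: {}", schema);
    schema
}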
48943d21bb5dfee438635ca03563f65e408f375f
1121
// Copyright 2017 Zachary Bush.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! This module contains the error object for `PromptBuffer`

use std::io;
use std::convert;
use std::sync::mpsc;

/// Convenience wrapper for `Result<T, PromptBufferError>`
pub type PromptBufferResult<T> = Result<T, PromptBufferError>;

/// The base error type of `PromptBuffer`
pub enum PromptBufferError {
    /// Error variant for IO errors
    IO(io::Error),

    /// Error variant for channel send errors
    SendError(mpsc::SendError<()>),
}

macro_rules! convert_impl {
    ($($from:ty => $to:ident),+) => {$(
        impl convert::From<$from> for PromptBufferError {
            fn from(error: $from) -> PromptBufferError {
                PromptBufferError::$to(error)
            }
        }
    )+}
}

convert_impl! {
    io::Error => IO,
    mpsc::SendError<()> => SendError
}
28.025
68
0.655665
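// A short sketch of what the `convert_impl!` macro above buys: because `From`
// impls exist for both wrapped error types, `?` converts them into
// `PromptBufferError` automatically. The `flush_and_notify` function and its
// argument are hypothetical; it assumes the types above are in scope.
use std::io::{self, Write};
use std::sync::mpsc;

fn flush_and_notify(done: &mpsc::Sender<()>) -> PromptBufferResult<()> {
    io::stdout().flush()?; // io::Error -> PromptBufferError::IO
    done.send(())?; // mpsc::SendError<()> -> PromptBufferError::SendError
    Ok(())
}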
f93adcb2addee00cde3e1f700c41c3c14da263ce
2786
// iterators3.rs
// This is a bigger exercise than most of the others! You can do it!
// Here is your mission, should you choose to accept it:
// 1. Complete the divide function to get the first four tests to pass.
// 2. Get the remaining tests to pass by completing the result_with_list and
//    list_of_results functions.
// Execute `rustlings hint iterators3` to get some hints!

#[derive(Debug, PartialEq, Eq)]
pub enum DivisionError {
    NotDivisible(NotDivisibleError),
    DivideByZero,
}

#[derive(Debug, PartialEq, Eq)]
pub struct NotDivisibleError {
    dividend: i32,
    divisor: i32,
}

// Calculate `a` divided by `b` if `a` is evenly divisible by `b`.
// Otherwise, return a suitable error.
pub fn divide(a: i32, b: i32) -> Result<i32, DivisionError> {
    if b == 0 {
        Err(DivisionError::DivideByZero)
    } else if a % b != 0 {
        Err(DivisionError::NotDivisible(NotDivisibleError {
            dividend: a,
            divisor: b,
        }))
    } else {
        Ok(a / b)
    }
}

// Desired output: Ok([1, 11, 1426, 3])
fn result_with_list() -> Result<Vec<i32>, DivisionError> {
    let numbers = vec![27, 297, 38502, 81];
    // Collecting into `Result<Vec<_>, _>` short-circuits on the first `Err`.
    numbers.into_iter().map(|n| divide(n, 27)).collect()
}

// Desired output: [Ok(1), Ok(11), Ok(1426), Ok(3)]
fn list_of_results() -> Vec<Result<i32, DivisionError>> {
    let numbers = vec![27, 297, 38502, 81];
    // Collecting into `Vec<Result<_, _>>` keeps every element, `Err`s included.
    numbers.into_iter().map(|n| divide(n, 27)).collect()
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_success() {
        assert_eq!(divide(81, 9), Ok(9));
    }

    #[test]
    fn test_not_divisible() {
        assert_eq!(
            divide(81, 6),
            Err(DivisionError::NotDivisible(NotDivisibleError {
                dividend: 81,
                divisor: 6
            }))
        );
    }

    #[test]
    fn test_divide_by_0() {
        assert_eq!(divide(81, 0), Err(DivisionError::DivideByZero));
    }

    #[test]
    fn test_divide_0_by_something() {
        assert_eq!(divide(0, 81), Ok(0));
    }

    #[test]
    fn test_result_with_list() {
        assert_eq!(format!("{:?}", result_with_list()), "Ok([1, 11, 1426, 3])");
    }

    #[test]
    fn test_list_of_results() {
        assert_eq!(
            format!("{:?}", list_of_results()),
            "[Ok(1), Ok(11), Ok(1426), Ok(3)]"
        );
    }
}

// The divide function needs to return the correct error when even division is not
// possible.
// The division_results variable needs to be collected into a collection type.
// The result_with_list function needs to return a single Result where the success
// case is a vector of integers and the failure case is a DivisionError.
// The list_of_results function needs to return a vector of results.
// See https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.collect for how
// the `FromIterator` trait is used in `collect()`.
28.721649
84
0.635319
c1d32e007ef730c1244428748b612eb8e8dddf6c
1575
mod input;

#[allow(dead_code)]
enum Menu {
    CalculatorMode = 1,
    Exit = 2,
}

fn main() {
    input::clear_terminal();
    println!("Welcome to the Rust Calculator");

    loop {
        println!("------------------------------\n");

        let selection: i32 = input::get_menu_option(
            "Select one of the following options:\n 1) Calculator mode\n 2) Exit\n",
        );

        if selection == Menu::Exit as i32 {
            break;
        };

        input::clear_terminal();

        let num1: f32 = input::get_number("Please input the first number.");
        let num2: f32 = input::get_number("Please input the second number.");
        let operator: String = input::get_operator("Please input the desired operator: +, -, *, /");

        let product: f32 = match perform_calculation(num1, num2, &operator) {
            Ok(product) => product,
            Err(error) => {
                println!("\nERROR: {}", error);
                continue;
            }
        };

        println!("\nResult: {} {} {} = {}", num1, operator, num2, product)
    }

    println!("Shutting down...")
}

fn perform_calculation(num1: f32, num2: f32, operator: &str) -> Result<f32, &'static str> {
    match operator {
        "+" => Ok(num1 + num2),
        "-" => Ok(num1 - num2),
        "*" => Ok(num1 * num2),
        "/" => {
            if num2 == 0.0 {
                Err("You cannot divide a number by 0.")
            } else {
                Ok(num1 / num2)
            }
        }
        _ => Err("Unrecognized operator."),
    }
}
32.142857
100
0.502857
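// A hypothetical test module exercising `perform_calculation` from the program
// above, including the divide-by-zero branch; the assertions mirror the
// behavior documented there and are not part of the original source.
#[cfg(test)]
mod calculation_tests {
    use super::perform_calculation;

    #[test]
    fn basic_operations() {
        assert_eq!(perform_calculation(6.0, 3.0, "+"), Ok(9.0));
        assert_eq!(perform_calculation(6.0, 3.0, "*"), Ok(18.0));
        assert_eq!(perform_calculation(6.0, 3.0, "/"), Ok(2.0));
    }

    #[test]
    fn divide_by_zero_is_an_error() {
        assert!(perform_calculation(6.0, 0.0, "/").is_err());
    }
}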
222438ac1354460cb36248c7557aa7f9c18de648
2094
#![macro_use]
#![feature(generic_associated_types)]
#![feature(type_alias_impl_trait)]

#[cfg(feature = "std")]
mod tests {
    use drogue_device::{actors::button::*, testutil::*, *};
    #[allow(unused_imports)]
    use drogue_device_macros::test as drogue_test;
    use embassy::executor::Spawner;

    #[allow(dead_code)]
    struct TestDevicePressed {
        handler: ActorContext<TestHandler>,
        button: ActorContext<Button<TestPin, Address<TestHandler>>>,
    }

    #[drogue_test]
    #[allow(dead_code)]
    async fn test_pressed(spawner: Spawner, mut context: TestContext<TestDevicePressed>) {
        let pin = context.pin(true);
        let notified = context.signal();
        let device = context.configure(TestDevicePressed {
            handler: ActorContext::new(),
            button: ActorContext::new(),
        });

        let handler_addr = device.handler.mount(spawner, TestHandler::new(notified));
        device.button.mount(spawner, Button::new(pin, handler_addr));

        assert!(notified.message().is_none());
        pin.set_low();
        notified.wait_signaled().await.unwrap();
        assert_eq!(0, notified.message().unwrap().0);
    }

    #[allow(dead_code)]
    struct TestDeviceReleased {
        handler: ActorContext<TestHandler>,
        button: ActorContext<Button<TestPin, Address<TestHandler>>>,
    }

    #[drogue_test]
    #[allow(dead_code)]
    async fn test_released(spawner: Spawner, mut context: TestContext<TestDeviceReleased>) {
        let pin = context.pin(false);
        let notified = context.signal();
        let device = context.configure(TestDeviceReleased {
            handler: ActorContext::new(),
            button: ActorContext::new(),
        });

        let handler_addr = device.handler.mount(spawner, TestHandler::new(notified));
        device.button.mount(spawner, Button::new(pin, handler_addr));

        println!("start");
        assert!(notified.message().is_none());
        pin.set_high();
        notified.wait_signaled().await.unwrap();
        assert_eq!(1, notified.message().unwrap().0);
    }
}
32.71875
92
0.636581
eb0c98077c5b13823516c989786c75fa71270835
18016
use super::{NodeId, NodeState, RenderGraph, RenderGraphError}; use bevy_utils::HashMap; use thiserror::Error; #[derive(Error, Debug)] pub enum StagerError { // This might have to be `:` tagged at the end. #[error("encountered a `RenderGraphError`")] RenderGraphError(#[from] RenderGraphError), } #[derive(Default, Debug, Eq, PartialEq)] pub struct Stage { pub jobs: Vec<OrderedJob>, } #[derive(Default, Debug, Eq, PartialEq)] pub struct OrderedJob { pub nodes: Vec<NodeId>, } #[derive(Default, Debug)] pub struct StageBorrow<'a> { pub jobs: Vec<OrderedJobBorrow<'a>>, } #[derive(Default, Debug)] pub struct OrderedJobBorrow<'a> { pub node_states: Vec<&'a mut NodeState>, } #[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] struct NodeIndices { stage: usize, job: usize, node: usize, } #[derive(Default, Debug)] pub struct Stages { stages: Vec<Stage>, /// a collection of node indices that are used to efficiently borrow render graph nodes node_indices: HashMap<NodeId, NodeIndices>, } impl Stages { pub fn new(stages: Vec<Stage>) -> Self { let mut node_indices = HashMap::default(); for (stage_index, stage) in stages.iter().enumerate() { for (job_index, job) in stage.jobs.iter().enumerate() { for (node_index, node) in job.nodes.iter().enumerate() { node_indices.insert( *node, NodeIndices { stage: stage_index, job: job_index, node: node_index, }, ); } } } Stages { stages, node_indices, } } pub fn borrow<'a>(&self, render_graph: &'a mut RenderGraph) -> Vec<StageBorrow<'a>> { // unfortunately borrowing render graph nodes in a specific order takes a little bit of // gymnastics let mut stage_borrows = Vec::with_capacity(self.stages.len()); let mut node_borrows = Vec::new(); for node in render_graph.iter_nodes_mut() { let indices = self.node_indices.get(&node.id).unwrap(); node_borrows.push((node, indices)); } node_borrows.sort_by_key(|(_node, indices)| <&NodeIndices>::clone(indices)); let mut last_stage = usize::MAX; let mut last_job = usize::MAX; for (node, indices) in node_borrows.drain(..) { if last_stage != indices.stage { stage_borrows.push(StageBorrow::default()); last_job = usize::MAX; } let stage = &mut stage_borrows[indices.stage]; if last_job != indices.job { stage.jobs.push(OrderedJobBorrow::default()); } let job = &mut stage.jobs[indices.job]; job.node_states.push(node); last_stage = indices.stage; last_job = indices.job; } stage_borrows } } /// Produces a collection of `Stages`, which are sets of OrderedJobs that must be run before moving /// on to the next stage pub trait RenderGraphStager { fn get_stages(&mut self, render_graph: &RenderGraph) -> Result<Stages, RenderGraphError>; } // TODO: remove this /// This scheduler ignores dependencies and puts everything in one stage. It shouldn't be used for /// anything :) #[derive(Debug, Default)] pub struct LinearStager; impl RenderGraphStager for LinearStager { fn get_stages(&mut self, render_graph: &RenderGraph) -> Result<Stages, RenderGraphError> { let mut stage = Stage::default(); let mut job = OrderedJob::default(); for node in render_graph.iter_nodes() { job.nodes.push(node.id); } stage.jobs.push(job); Ok(Stages::new(vec![stage])) } } #[derive(Debug, Copy, Clone)] /// Determines the grouping strategy used when constructing graph stages pub enum JobGrouping { /// Default to adding the current node to a new job in its assigned stage. This results /// in a "loose" pack that is easier to parallelize but has more jobs Loose, /// Default to adding the current node into the first job in its assigned stage. 
This results /// in a "tight" pack that is harder to parallelize but results in fewer jobs Tight, } #[derive(Debug)] /// Produces Render Graph stages and jobs in a way that ensures node dependencies are respected. pub struct DependentNodeStager { job_grouping: JobGrouping, } impl DependentNodeStager { pub fn loose_grouping() -> Self { DependentNodeStager { job_grouping: JobGrouping::Loose, } } pub fn tight_grouping() -> Self { DependentNodeStager { job_grouping: JobGrouping::Tight, } } } impl RenderGraphStager for DependentNodeStager { fn get_stages<'a>(&mut self, render_graph: &RenderGraph) -> Result<Stages, RenderGraphError> { // get all nodes without input. this intentionally includes nodes with no outputs let output_only_nodes = render_graph .iter_nodes() .filter(|node| node.input_slots.is_empty()); let mut stages = vec![Stage::default()]; let mut node_stages = HashMap::default(); for output_only_node in output_only_nodes { // each "output only" node should start a new job on the first stage stage_node( render_graph, &mut stages, &mut node_stages, output_only_node, self.job_grouping, ); } Ok(Stages::new(stages)) } } fn stage_node( graph: &RenderGraph, stages: &mut Vec<Stage>, node_stages_and_jobs: &mut HashMap<NodeId, (usize, usize)>, node: &NodeState, job_grouping: JobGrouping, ) { // don't re-visit nodes or visit them before all of their parents have been visited if node_stages_and_jobs.contains_key(&node.id) || node .edges .input_edges .iter() .any(|e| !node_stages_and_jobs.contains_key(&e.get_output_node())) { return; } // by default assume we are creating a new job on a new stage let mut stage_index = 0; let mut job_index = match job_grouping { JobGrouping::Tight => Some(0), JobGrouping::Loose => None, }; // check to see if the current node has a parent. 
if so, grab the parent with the highest stage if let Some((max_parent_stage, max_parent_job)) = node .edges .input_edges .iter() .map(|e| { node_stages_and_jobs .get(&e.get_output_node()) .expect("Already checked that parents were visited.") }) .max() { // count the number of parents that are in the highest stage let max_stage_parent_count = node .edges .input_edges .iter() .filter(|e| { let (max_stage, _) = node_stages_and_jobs .get(&e.get_output_node()) .expect("Already checked that parents were visited."); max_stage == max_parent_stage }) .count(); // if the current node has more than one parent on the highest stage (aka requires // synchronization), then move it to the next stage and start a new job on that // stage if max_stage_parent_count > 1 { stage_index = max_parent_stage + 1; } else { stage_index = *max_parent_stage; job_index = Some(*max_parent_job); } } if stage_index == stages.len() { stages.push(Stage::default()); } let stage = &mut stages[stage_index]; let job_index = job_index.unwrap_or_else(|| stage.jobs.len()); if job_index == stage.jobs.len() { stage.jobs.push(OrderedJob::default()); } let job = &mut stage.jobs[job_index]; job.nodes.push(node.id); node_stages_and_jobs.insert(node.id, (stage_index, job_index)); for (_edge, node) in graph.iter_node_outputs(node.id).unwrap() { stage_node(graph, stages, node_stages_and_jobs, node, job_grouping); } } #[cfg(test)] mod tests { use super::{DependentNodeStager, OrderedJob, RenderGraphStager, Stage}; use crate::{ render_graph::{Node, NodeId, RenderGraph, ResourceSlotInfo, ResourceSlots}, renderer::{RenderContext, RenderResourceType}, }; use bevy_ecs::world::World; struct TestNode { inputs: Vec<ResourceSlotInfo>, outputs: Vec<ResourceSlotInfo>, } impl TestNode { pub fn new(inputs: usize, outputs: usize) -> Self { TestNode { inputs: (0..inputs) .map(|i| ResourceSlotInfo { name: format!("in_{}", i).into(), resource_type: RenderResourceType::Texture, }) .collect(), outputs: (0..outputs) .map(|i| ResourceSlotInfo { name: format!("out_{}", i).into(), resource_type: RenderResourceType::Texture, }) .collect(), } } } impl Node for TestNode { fn input(&self) -> &[ResourceSlotInfo] { &self.inputs } fn output(&self) -> &[ResourceSlotInfo] { &self.outputs } fn update( &mut self, _: &World, _: &mut dyn RenderContext, _: &ResourceSlots, _: &mut ResourceSlots, ) { } } #[test] fn test_render_graph_dependency_stager_loose() { let mut graph = RenderGraph::default(); // Setup graph to look like this: // // A -> B -> C -> D // / / // E F -> G // // H -> I -> J let a_id = graph.add_node("A", TestNode::new(0, 1)); let b_id = graph.add_node("B", TestNode::new(2, 1)); let c_id = graph.add_node("C", TestNode::new(2, 1)); let d_id = graph.add_node("D", TestNode::new(1, 0)); let e_id = graph.add_node("E", TestNode::new(0, 1)); let f_id = graph.add_node("F", TestNode::new(0, 2)); let g_id = graph.add_node("G", TestNode::new(1, 0)); let h_id = graph.add_node("H", TestNode::new(0, 1)); let i_id = graph.add_node("I", TestNode::new(1, 1)); let j_id = graph.add_node("J", TestNode::new(1, 0)); graph.add_node_edge("A", "B").unwrap(); graph.add_node_edge("B", "C").unwrap(); graph.add_node_edge("C", "D").unwrap(); graph.add_node_edge("E", "B").unwrap(); graph.add_node_edge("F", "C").unwrap(); graph.add_node_edge("F", "G").unwrap(); graph.add_node_edge("H", "I").unwrap(); graph.add_node_edge("I", "J").unwrap(); let mut stager = DependentNodeStager::loose_grouping(); let mut stages = stager.get_stages(&graph).unwrap(); // Expected Stages: // (X indicates nodes that are 
not part of that stage) // Stage 1 // A -> X -> X -> X // / / // E F -> G // // H -> I -> J // Stage 2 // X -> B -> C -> D // / / // X X -> X // // X -> X -> X let mut expected_stages = vec![ Stage { jobs: vec![ OrderedJob { nodes: vec![f_id, g_id], }, OrderedJob { nodes: vec![a_id] }, OrderedJob { nodes: vec![e_id] }, OrderedJob { nodes: vec![h_id, i_id, j_id], }, ], }, Stage { jobs: vec![OrderedJob { nodes: vec![b_id, c_id, d_id], }], }, ]; // ensure job order lines up within stages (this can vary due to hash maps) // jobs within a stage are unordered conceptually so this is ok expected_stages .iter_mut() .for_each(|stage| stage.jobs.sort_by_key(|job| job.nodes[0])); stages .stages .iter_mut() .for_each(|stage| stage.jobs.sort_by_key(|job| job.nodes[0])); assert_eq!( stages.stages, expected_stages, "stages should be loosely grouped" ); let mut borrowed = stages.borrow(&mut graph); // ensure job order lines up within stages (this can vary due to hash maps) // jobs within a stage are unordered conceptually so this is ok borrowed .iter_mut() .for_each(|stage| stage.jobs.sort_by_key(|job| job.node_states[0].id)); assert_eq!( borrowed.len(), expected_stages.len(), "same number of stages" ); for (stage_index, borrowed_stage) in borrowed.iter().enumerate() { assert_eq!( borrowed_stage.jobs.len(), stages.stages[stage_index].jobs.len(), "job length matches" ); for (job_index, borrowed_job) in borrowed_stage.jobs.iter().enumerate() { assert_eq!( borrowed_job.node_states.len(), stages.stages[stage_index].jobs[job_index].nodes.len(), "node length matches" ); for (node_index, borrowed_node) in borrowed_job.node_states.iter().enumerate() { assert_eq!( borrowed_node.id, stages.stages[stage_index].jobs[job_index].nodes[node_index] ); } } } } #[test] fn test_render_graph_dependency_stager_tight() { let mut graph = RenderGraph::default(); // Setup graph to look like this: // // A -> B -> C -> D // / / // E F -> G // // H -> I -> J let _a_id = graph.add_node("A", TestNode::new(0, 1)); let b_id = graph.add_node("B", TestNode::new(2, 1)); let c_id = graph.add_node("C", TestNode::new(2, 1)); let d_id = graph.add_node("D", TestNode::new(1, 0)); let _e_id = graph.add_node("E", TestNode::new(0, 1)); let f_id = graph.add_node("F", TestNode::new(0, 2)); let g_id = graph.add_node("G", TestNode::new(1, 0)); let h_id = graph.add_node("H", TestNode::new(0, 1)); let i_id = graph.add_node("I", TestNode::new(1, 1)); let j_id = graph.add_node("J", TestNode::new(1, 0)); graph.add_node_edge("A", "B").unwrap(); graph.add_node_edge("B", "C").unwrap(); graph.add_node_edge("C", "D").unwrap(); graph.add_node_edge("E", "B").unwrap(); graph.add_node_edge("F", "C").unwrap(); graph.add_node_edge("F", "G").unwrap(); graph.add_node_edge("H", "I").unwrap(); graph.add_node_edge("I", "J").unwrap(); let mut stager = DependentNodeStager::tight_grouping(); let mut stages = stager.get_stages(&graph).unwrap(); // Expected Stages: // (X indicates nodes that are not part of that stage) // Stage 1 // A -> X -> X -> X // / / // E F -> G // // H -> I -> J // Stage 2 // X -> B -> C -> D // / / // X X -> X // // X -> X -> X assert_eq!(stages.stages[0].jobs.len(), 1, "expect exactly 1 job"); let job = &stages.stages[0].jobs[0]; assert_eq!(job.nodes.len(), 7, "expect exactly 7 nodes in the job"); // its hard to test the exact order of this job's nodes because of hashing, so instead we'll // test the constraints that must hold true let index = |node_id: NodeId| -> usize { job.nodes.iter().position(|id| *id == node_id).unwrap() }; assert!(index(f_id) < 
index(g_id)); assert!(index(h_id) < index(i_id)); assert!(index(i_id) < index(j_id)); let expected_stage_1 = Stage { jobs: vec![OrderedJob { nodes: vec![b_id, c_id, d_id], }], }; assert_eq!(stages.stages[1], expected_stage_1,); let mut borrowed = stages.borrow(&mut graph); // ensure job order lines up within stages (this can vary due to hash maps) // jobs within a stage are unordered conceptually so this is ok stages .stages .iter_mut() .for_each(|stage| stage.jobs.sort_by_key(|job| job.nodes[0])); borrowed .iter_mut() .for_each(|stage| stage.jobs.sort_by_key(|job| job.node_states[0].id)); assert_eq!(borrowed.len(), 2, "same number of stages"); for (stage_index, borrowed_stage) in borrowed.iter().enumerate() { assert_eq!( borrowed_stage.jobs.len(), stages.stages[stage_index].jobs.len(), "job length matches" ); for (job_index, borrowed_job) in borrowed_stage.jobs.iter().enumerate() { assert_eq!( borrowed_job.node_states.len(), stages.stages[stage_index].jobs[job_index].nodes.len(), "node length matches" ); for (node_index, borrowed_node) in borrowed_job.node_states.iter().enumerate() { assert_eq!( borrowed_node.id, stages.stages[stage_index].jobs[job_index].nodes[node_index] ); } } } } }
32.756364
100
0.536745
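// A standalone sketch (invented names, a plain `HashMap` instead of the render
// graph types) of the core staging rule in `stage_node` above: a node whose
// highest-stage parent is unique can share that parent's stage, but a node
// that must synchronize more than one parent in the highest stage is pushed to
// the next stage.
use std::collections::HashMap;

fn stage_of(parents: &[&str], stages: &HashMap<&str, usize>) -> usize {
    let max = parents.iter().filter_map(|p| stages.get(p).copied()).max();
    match max {
        None => 0, // no parents: first stage
        Some(m) => {
            let parents_at_max = parents
                .iter()
                .filter(|p| stages.get(*p).copied() == Some(m))
                .count();
            if parents_at_max > 1 {
                m + 1 // requires synchronization: start a new stage
            } else {
                m
            }
        }
    }
}

fn main() {
    let mut stages = HashMap::new();
    stages.insert("A", 0);
    stages.insert("E", 0);
    // "B" depends on both "A" and "E", which share stage 0, so it lands in stage 1.
    assert_eq!(stage_of(&["A", "E"], &stages), 1);
}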
f565757bfc2bdf43087c2c47113d202d39dcb9b1
2709
use azure_cosmos::prelude::*;
use futures::stream::StreamExt;
use serde_json::Value;
use std::error::Error;

// This example expects you to have created a collection
// with partitionKey on "id".

#[tokio::main]
async fn main() -> Result<(), Box<dyn Error + Send + Sync>> {
    let database_name = std::env::args()
        .nth(1)
        .expect("please specify the database name as first command line parameter");
    let collection_name = std::env::args()
        .nth(2)
        .expect("please specify the collection name as second command line parameter");
    let partition_key_name = std::env::args()
        .nth(3)
        .expect("please specify the partition key as third command line parameter");

    let master_key =
        std::env::var("COSMOS_MASTER_KEY").expect("Set env variable COSMOS_MASTER_KEY first!");
    let account = std::env::var("COSMOS_ACCOUNT").expect("Set env variable COSMOS_ACCOUNT first!");

    let authorization_token = AuthorizationToken::primary_from_base64(&master_key)?;

    // Next we will create a Cosmos client.
    let client = CosmosClient::new(
        account.clone(),
        authorization_token,
        CosmosOptions::default(),
    );
    let client = client.into_database_client(database_name);
    let client = client.into_collection_client(collection_name);

    let mut documents = Vec::new();
    let stream = client.list_documents();
    let mut stream = Box::pin(stream.stream::<serde_json::Value>());
    while let Some(res) = stream.next().await {
        for doc in res?.documents {
            documents.push(doc);
        }
    }

    for document in documents {
        // find id and partition key from document json
        let doc_as_obj = match document.document {
            Value::Object(map) => map,
            _ => panic!("expected one object"),
        };

        let id = match &doc_as_obj["id"] {
            Value::String(id) => id,
            _ => panic!("cannot find id field as string"),
        };
        let partition_key: String = match &doc_as_obj[&partition_key_name] {
            Value::String(id) => id.to_owned(),
            Value::Number(num) => {
                format!(
                    "{}",
                    num.as_i64().expect("only numbers up to i64 are supported")
                )
            }
            _ => panic!("cannot find supplied partition key as string"),
        };

        println!(
            "deleting id =={:#?}, partition key == {:#?}",
            id, partition_key
        );
        client
            .clone()
            .into_document_client(id.clone(), &partition_key)?
            .delete_document()
            .execute()
            .await?;
    }

    Ok(())
}
32.638554
99
0.580657
f7183f7c861bb1bfd05f3a1b39c0838c661a4cef
17027
// Copyright 2021 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use std::{ collections::VecDeque, mem, sync::{ mpsc::{channel, Receiver, Sender}, Arc, }, thread::{self, JoinHandle}, time::{Duration, Instant}, }; use async_task::{Runnable, Task}; use slab::Slab; use sync::{Condvar, Mutex}; use sys_util::{error, warn}; const DEFAULT_SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(10); struct State { tasks: VecDeque<Runnable>, num_threads: usize, num_idle: usize, num_notified: usize, worker_threads: Slab<JoinHandle<()>>, exited_threads: Option<Receiver<usize>>, exit: Sender<usize>, shutting_down: bool, } fn run_blocking_thread(idx: usize, inner: Arc<Inner>, exit: Sender<usize>) { let mut state = inner.state.lock(); while !state.shutting_down { if let Some(runnable) = state.tasks.pop_front() { drop(state); runnable.run(); state = inner.state.lock(); continue; } // No more tasks so wait for more work. state.num_idle += 1; let (guard, result) = inner .condvar .wait_timeout_while(state, inner.keepalive, |s| { !s.shutting_down && s.num_notified == 0 }); state = guard; // If `state.num_notified > 0` then this was a real wakeup. if state.num_notified > 0 { state.num_notified -= 1; continue; } // Only decrement the idle count if we timed out. Otherwise, it was decremented when new // work was added to `state.tasks`. if result.timed_out() { state.num_idle = state .num_idle .checked_sub(1) .expect("`num_idle` underflow on timeout"); break; } } state.num_threads -= 1; // If we're shutting down then the BlockingPool will take care of joining all the threads. // Otherwise, we need to join the last worker thread that exited here. let last_exited_thread = if let Some(exited_threads) = state.exited_threads.as_mut() { exited_threads .try_recv() .map(|idx| state.worker_threads.remove(idx)) .ok() } else { None }; // Drop the lock before trying to join the last exited thread. drop(state); if let Some(handle) = last_exited_thread { let _ = handle.join(); } if let Err(e) = exit.send(idx) { error!("Failed to send thread exit event on channel: {}", e); } } struct Inner { state: Mutex<State>, condvar: Condvar, max_threads: usize, keepalive: Duration, } impl Inner { fn schedule(self: &Arc<Inner>, runnable: Runnable) { let mut state = self.state.lock(); // If we're shutting down then nothing is going to run this task. if state.shutting_down { return; } state.tasks.push_back(runnable); if state.num_idle == 0 { // There are no idle threads. Spawn a new one if possible. if state.num_threads < self.max_threads { state.num_threads += 1; let exit = state.exit.clone(); let entry = state.worker_threads.vacant_entry(); let idx = entry.key(); let inner = self.clone(); entry.insert( thread::Builder::new() .name(format!("blockingPool{}", idx)) .spawn(move || run_blocking_thread(idx, inner, exit)) .unwrap(), ); } } else { // We have idle threads, wake one up. state.num_idle -= 1; state.num_notified += 1; self.condvar.notify_one(); } } } #[derive(Debug, thiserror::Error)] #[error("{0} BlockingPool threads did not exit in time and will be detached")] pub struct ShutdownTimedOut(usize); /// A thread pool for running work that may block. /// /// It is generally discouraged to do any blocking work inside an async function. However, this is /// sometimes unavoidable when dealing with interfaces that don't provide async variants. 
In this /// case callers may use the `BlockingPool` to run the blocking work on a different thread and /// `await` for its result to finish, which will prevent blocking the main thread of the /// application. /// /// Since the blocking work is sent to another thread, users should be careful when using the /// `BlockingPool` for latency-sensitive operations. Additionally, the `BlockingPool` is intended to /// be used for work that will eventually complete on its own. Users who want to spawn a thread /// should just use `thread::spawn` directly. /// /// There is no way to cancel work once it has been picked up by one of the worker threads in the /// `BlockingPool`. Dropping or shutting down the pool will block up to a timeout (default 10 /// seconds) to wait for any active blocking work to finish. Any threads running tasks that have not /// completed by that time will be detached. /// /// # Examples /// /// Spawn a task to run in the `BlockingPool` and await on its result. /// /// ```edition2018 /// use cros_async::BlockingPool; /// /// # async fn do_it() { /// let pool = BlockingPool::default(); /// /// let res = pool.spawn(move || { /// // Do some CPU-intensive or blocking work here. /// /// 42 /// }).await; /// /// assert_eq!(res, 42); /// # } /// # cros_async::block_on(do_it()); /// ``` pub struct BlockingPool { inner: Arc<Inner>, } impl BlockingPool { /// Create a new `BlockingPool`. /// /// The `BlockingPool` will never spawn more than `max_threads` threads to do work, regardless /// of the number of tasks that are added to it. This value should be set relatively low (for /// example, the number of CPUs on the machine) if the pool is intended to run CPU intensive /// work or it should be set relatively high (128 or more) if the pool is intended to be used /// for various IO operations that cannot be completed asynchronously. The default value is 256. /// /// Worker threads are spawned on demand when new work is added to the pool and will /// automatically exit after being idle for some time so there is no overhead for setting /// `max_threads` to a large value when there is little to no work assigned to the /// `BlockingPool`. `keepalive` determines the idle duration after which the worker thread will /// exit. The default value is 10 seconds. pub fn new(max_threads: usize, keepalive: Duration) -> BlockingPool { let (exit, exited_threads) = channel(); BlockingPool { inner: Arc::new(Inner { state: Mutex::new(State { tasks: VecDeque::new(), num_threads: 0, num_idle: 0, num_notified: 0, worker_threads: Slab::new(), exited_threads: Some(exited_threads), exit, shutting_down: false, }), condvar: Condvar::new(), max_threads, keepalive, }), } } /// Like new but with pre-allocating capacity for up to `max_threads`. pub fn with_capacity(max_threads: usize, keepalive: Duration) -> BlockingPool { let (exit, exited_threads) = channel(); BlockingPool { inner: Arc::new(Inner { state: Mutex::new(State { tasks: VecDeque::new(), num_threads: 0, num_idle: 0, num_notified: 0, worker_threads: Slab::with_capacity(max_threads), exited_threads: Some(exited_threads), exit, shutting_down: false, }), condvar: Condvar::new(), max_threads, keepalive, }), } } /// Spawn a task to run in the `BlockingPool`. /// /// Callers may `await` the returned `Task` to be notified when the work is completed. /// /// # Panics /// /// `await`ing a `Task` after dropping the `BlockingPool` or calling `BlockingPool::shutdown` /// will panic if the work was not completed before the pool was shut down. 
pub fn spawn<F, R>(&self, f: F) -> Task<R> where F: FnOnce() -> R + Send + 'static, R: Send + 'static, { let raw = Arc::downgrade(&self.inner); let schedule = move |runnable| { if let Some(i) = raw.upgrade() { i.schedule(runnable); } }; let (runnable, task) = async_task::spawn(async move { f() }, schedule); runnable.schedule(); task } /// Shut down the `BlockingPool`. /// /// If `deadline` is provided then this will block until either all worker threads exit or the /// deadline is exceeded. If `deadline` is not given then this will block indefinitely until all /// worker threads exit. Any work that was added to the `BlockingPool` but not yet picked up by /// a worker thread will not complete and `await`ing on the `Task` for that work will panic. pub fn shutdown(&self, deadline: Option<Instant>) -> Result<(), ShutdownTimedOut> { let mut state = self.inner.state.lock(); if state.shutting_down { // We've already shut down this BlockingPool. return Ok(()); } state.shutting_down = true; let exited_threads = state.exited_threads.take().expect("exited_threads missing"); let unfinished_tasks = mem::replace(&mut state.tasks, VecDeque::new()); let mut worker_threads = mem::replace(&mut state.worker_threads, Slab::new()); drop(state); self.inner.condvar.notify_all(); // Cancel any unfinished work after releasing the lock. drop(unfinished_tasks); // Now wait for all worker threads to exit. if let Some(deadline) = deadline { let mut now = Instant::now(); while now < deadline && !worker_threads.is_empty() { if let Ok(idx) = exited_threads.recv_timeout(deadline - now) { let _ = worker_threads.remove(idx).join(); } now = Instant::now(); } // Any threads that have not yet joined will just be detached. if !worker_threads.is_empty() { return Err(ShutdownTimedOut(worker_threads.len())); } Ok(()) } else { // Block indefinitely until all worker threads exit. for handle in worker_threads.drain() { let _ = handle.join(); } Ok(()) } } } impl Default for BlockingPool { fn default() -> BlockingPool { BlockingPool::new(256, Duration::from_secs(10)) } } impl Drop for BlockingPool { fn drop(&mut self) { if let Err(e) = self.shutdown(Some(Instant::now() + DEFAULT_SHUTDOWN_TIMEOUT)) { warn!("{}", e); } } } #[cfg(test)] mod test { use std::{ sync::{Arc, Barrier}, thread, time::{Duration, Instant}, }; use futures::{stream::FuturesUnordered, StreamExt}; use sync::{Condvar, Mutex}; use crate::{block_on, BlockingPool}; #[test] fn blocking_sleep() { let pool = BlockingPool::default(); let res = block_on(pool.spawn(|| 42)); assert_eq!(res, 42); } #[test] fn fast_tasks_with_short_keepalive() { let pool = BlockingPool::new(256, Duration::from_millis(1)); let streams = FuturesUnordered::new(); for _ in 0..2 { for _ in 0..256 { let task = pool.spawn(|| ()); streams.push(task); } thread::sleep(Duration::from_millis(1)); } block_on(streams.collect::<Vec<_>>()); // The test passes if there are no panics, which would happen if one of the worker threads // triggered an underflow on `pool.inner.state.num_idle`. 
} #[test] fn more_tasks_than_threads() { let pool = BlockingPool::new(4, Duration::from_secs(10)); let stream = (0..19) .map(|_| pool.spawn(|| thread::sleep(Duration::from_millis(5)))) .collect::<FuturesUnordered<_>>(); let results = block_on(stream.collect::<Vec<_>>()); assert_eq!(results.len(), 19); } #[test] fn shutdown() { let pool = BlockingPool::default(); let stream = (0..19) .map(|_| pool.spawn(|| thread::sleep(Duration::from_millis(5)))) .collect::<FuturesUnordered<_>>(); let results = block_on(stream.collect::<Vec<_>>()); assert_eq!(results.len(), 19); pool.shutdown(Some(Instant::now() + Duration::from_secs(10))) .unwrap(); let state = pool.inner.state.lock(); assert_eq!(state.num_threads, 0); } #[test] fn keepalive_timeout() { // Set the keepalive to a very low value so that threads will exit soon after they run out // of work. let pool = BlockingPool::new(7, Duration::from_millis(1)); let stream = (0..19) .map(|_| pool.spawn(|| thread::sleep(Duration::from_millis(5)))) .collect::<FuturesUnordered<_>>(); let results = block_on(stream.collect::<Vec<_>>()); assert_eq!(results.len(), 19); // Wait for all threads to exit. let deadline = Instant::now() + Duration::from_secs(10); while Instant::now() < deadline { thread::sleep(Duration::from_millis(100)); let state = pool.inner.state.lock(); if state.num_threads == 0 { break; } } { let state = pool.inner.state.lock(); assert_eq!(state.num_threads, 0); assert_eq!(state.num_idle, 0); } } #[test] #[should_panic] fn shutdown_with_pending_work() { let pool = BlockingPool::new(1, Duration::from_secs(10)); let mu = Arc::new(Mutex::new(false)); let cv = Arc::new(Condvar::new()); // First spawn a thread that blocks the pool. let task_mu = mu.clone(); let task_cv = cv.clone(); pool.spawn(move || { let mut ready = task_mu.lock(); while !*ready { ready = task_cv.wait(ready); } }) .detach(); // This task will never finish because we will shut down the pool first. let unfinished = pool.spawn(|| 5); // Spawn a thread to unblock the work we started earlier once it sees that the pool is // shutting down. let inner = pool.inner.clone(); thread::spawn(move || { let mut state = inner.state.lock(); while !state.shutting_down { state = inner.condvar.wait(state); } *mu.lock() = true; cv.notify_all(); }); pool.shutdown(None).unwrap(); // This should panic. assert_eq!(block_on(unfinished), 5); } #[test] fn unfinished_worker_thread() { let pool = BlockingPool::default(); let ready = Arc::new(Mutex::new(false)); let cv = Arc::new(Condvar::new()); let barrier = Arc::new(Barrier::new(2)); let thread_ready = ready.clone(); let thread_barrier = barrier.clone(); let thread_cv = cv.clone(); let task = pool.spawn(move || { thread_barrier.wait(); let mut ready = thread_ready.lock(); while !*ready { ready = thread_cv.wait(ready); } }); // Wait to shut down the pool until after the worker thread has started. barrier.wait(); pool.shutdown(Some(Instant::now() + Duration::from_millis(5))) .unwrap_err(); let num_threads = pool.inner.state.lock().num_threads; assert_eq!(num_threads, 1); // Now wake up the blocked task so we don't leak the thread. *ready.lock() = true; cv.notify_all(); block_on(task); let deadline = Instant::now() + Duration::from_secs(10); while Instant::now() < deadline { thread::sleep(Duration::from_millis(100)); let state = pool.inner.state.lock(); if state.num_threads == 0 { break; } } { let state = pool.inner.state.lock(); assert_eq!(state.num_threads, 0); assert_eq!(state.num_idle, 0); } } }
32.556405
100
0.563282
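// A usage sketch for the pool above, assuming `BlockingPool` and the crate's
// `block_on` executor are in scope; `read_lengths` and its inputs are
// hypothetical. It mirrors the documented semantics: `spawn` returns an
// awaitable `Task`, and `shutdown` with a deadline detaches stragglers.
use std::time::{Duration, Instant};

fn read_lengths(pool: &BlockingPool, paths: Vec<String>) {
    let tasks: Vec<_> = paths
        .into_iter()
        .map(|path| pool.spawn(move || std::fs::read(&path).map(|b| b.len()).unwrap_or(0)))
        .collect();
    for task in tasks {
        // The blocking file IO runs on a worker thread; we only await the result.
        println!("read {} bytes", block_on(task));
    }
    // Returns Err(ShutdownTimedOut) if any worker is still busy at the deadline.
    let _ = pool.shutdown(Some(Instant::now() + Duration::from_secs(5)));
}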
e97b25eb3ab04bc95a79a6d520d474025925462e
56852
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::native_functions::NativeFunction; use bytecode_verifier::{ constants, instantiation_loops::InstantiationLoopChecker, verify_main_signature, CodeUnitVerifier, DependencyChecker, DuplicationChecker, InstructionConsistency, RecursiveStructDefChecker, ResourceTransitiveChecker, SignatureChecker, }; use libra_crypto::HashValue; use libra_logger::prelude::*; use move_core_types::{ identifier::{IdentStr, Identifier}, language_storage::{ModuleId, StructTag, TypeTag}, value::{MoveKind, MoveKindInfo, MoveStructLayout, MoveTypeLayout}, vm_status::StatusCode, }; use move_vm_types::{ data_store::DataStore, loaded_data::{ runtime_types::{StructType, Type, TypeConverter}, types::{FatStructType, FatType}, }, }; use std::{ collections::HashMap, fmt::Debug, hash::Hash, sync::{Arc, Mutex}, }; use vm::{ access::{ModuleAccess, ScriptAccess}, errors::{verification_error, Location, PartialVMError, PartialVMResult, VMResult}, file_format::{ Bytecode, CompiledModule, CompiledScript, Constant, ConstantPoolIndex, FieldHandleIndex, FieldInstantiationIndex, FunctionDefinition, FunctionHandleIndex, FunctionInstantiationIndex, Kind, Signature, SignatureToken, StructDefInstantiationIndex, StructDefinition, StructDefinitionIndex, StructFieldInformation, TableIndex, }, IndexKind, }; // A simple cache that offers both a HashMap and a Vector lookup. // Values are forced into a `Arc` so they can be used from multiple thread. // Access to this cache is always under a `Mutex`. struct BinaryCache<K, V> { id_map: HashMap<K, usize>, binaries: Vec<Arc<V>>, } impl<K, V> BinaryCache<K, V> where K: Eq + Hash, { fn new() -> Self { Self { id_map: HashMap::new(), binaries: vec![], } } fn insert(&mut self, key: K, binary: V) -> &Arc<V> { self.binaries.push(Arc::new(binary)); let idx = self.binaries.len() - 1; self.id_map.insert(key, idx); self.binaries .last() .expect("BinaryCache: last() after push() impossible failure") } fn get(&self, key: &K) -> Option<&Arc<V>> { self.id_map .get(&key) .and_then(|idx| self.binaries.get(*idx)) } } // A script cache is a map from the hash value of a script and the `Script` itself. // Script are added in the cache once verified and so getting a script out the cache // does not require further verification (except for parameters and type parameters) struct ScriptCache { scripts: BinaryCache<HashValue, Script>, } impl ScriptCache { fn new() -> Self { Self { scripts: BinaryCache::new(), } } fn get(&self, hash: &HashValue) -> Option<Arc<Function>> { self.scripts.get(hash).map(|script| script.entry_point()) } fn insert(&mut self, hash: HashValue, script: Script) -> PartialVMResult<Arc<Function>> { match self.get(&hash) { Some(script) => Ok(script), None => Ok(self.scripts.insert(hash, script).entry_point()), } } } // A ModuleCache is the core structure in the Loader. // It holds all Modules, Types and Functions loaded. // Types and Functions are pushed globally to the ModuleCache. // A ModuleCache is accessed under lock. 
pub struct ModuleCache { modules: BinaryCache<ModuleId, Module>, structs: Vec<Arc<StructType>>, functions: Vec<Arc<Function>>, } impl ModuleCache { fn new() -> Self { Self { modules: BinaryCache::new(), structs: vec![], functions: vec![], } } fn get(&self, id: &ModuleId) -> Option<Arc<Module>> { self.modules.get(id).map(|module| Arc::clone(module)) } fn insert(&mut self, id: ModuleId, module: CompiledModule) -> VMResult<Arc<Module>> { self.add_module(&module)?; let module = Module::new(module, self)?; match self.get(&id) { Some(module) => Ok(module), None => Ok(Arc::clone(self.modules.insert(id, module))), } } fn function_at(&self, idx: usize) -> Arc<Function> { self.functions[idx].clone() } fn struct_at(&self, idx: usize) -> Arc<StructType> { Arc::clone(&self.structs[idx]) } fn add_module(&mut self, module: &CompiledModule) -> VMResult<()> { let starting_idx = self.structs.len(); for (idx, struct_def) in module.struct_defs().iter().enumerate() { let st = self.load_type(module, struct_def, StructDefinitionIndex(idx as u16))?; self.structs.push(Arc::new(st)); } self.load_fields(module, starting_idx) .map_err(|e| e.finish(Location::Undefined))?; for func in module.function_defs() { let function = self.load_function(module, func)?; self.functions.push(Arc::new(function)); } Ok(()) } fn load_type( &self, module: &CompiledModule, struct_def: &StructDefinition, idx: StructDefinitionIndex, ) -> VMResult<StructType> { let struct_handle = module.struct_handle_at(struct_def.struct_handle); let is_resource = struct_handle.is_nominal_resource; let name = module.identifier_at(struct_handle.name).to_owned(); let type_parameters = struct_handle.type_parameters.clone(); let module = module.self_id(); Ok(StructType { fields: vec![], is_resource, type_parameters, name, module, struct_def: idx, }) } fn load_fields(&mut self, module: &CompiledModule, starting_idx: usize) -> PartialVMResult<()> { let mut field_types = vec![]; for struct_def in module.struct_defs() { let fields = match &struct_def.field_information { StructFieldInformation::Native => unreachable!("native structs have been removed"), StructFieldInformation::Declared(fields) => fields, }; let mut field_tys = vec![]; for field in fields { let ty = self.make_type(module, &field.signature.0)?; assume!(field_tys.len() < usize::max_value()); field_tys.push(ty); } field_types.push(field_tys); } for (fields, arc_struct_type) in field_types .into_iter() .zip(&mut self.structs[starting_idx..]) { match Arc::get_mut(arc_struct_type) { None => { return Err(PartialVMError::new(StatusCode::INVALID_CODE_CACHE) .with_message("Arc Type should not have any reference".to_string())) } Some(struct_type) => struct_type.fields = fields, } } Ok(()) } fn make_type(&self, module: &CompiledModule, tok: &SignatureToken) -> PartialVMResult<Type> { let res = match tok { SignatureToken::Bool => Type::Bool, SignatureToken::U8 => Type::U8, SignatureToken::U64 => Type::U64, SignatureToken::U128 => Type::U128, SignatureToken::Address => Type::Address, SignatureToken::Signer => Type::Signer, SignatureToken::TypeParameter(idx) => Type::TyParam(*idx as usize), SignatureToken::Vector(inner_tok) => { let inner_type = self.make_type(module, inner_tok)?; Type::Vector(Box::new(inner_type)) } SignatureToken::Reference(inner_tok) => { let inner_type = self.make_type(module, inner_tok)?; Type::Reference(Box::new(inner_type)) } SignatureToken::MutableReference(inner_tok) => { let inner_type = self.make_type(module, inner_tok)?; Type::MutableReference(Box::new(inner_type)) } 
SignatureToken::Struct(sh_idx) => { let struct_handle = module.struct_handle_at(*sh_idx); let struct_name = module.identifier_at(struct_handle.name); let module_handle = module.module_handle_at(struct_handle.module); let module_id = ModuleId::new( *module.address_identifier_at(module_handle.address), module.identifier_at(module_handle.name).to_owned(), ); let def_idx = self.find_struct_by_name(struct_name, &module_id)?.0; Type::Struct(def_idx) } SignatureToken::StructInstantiation(sh_idx, tys) => { let type_parameters: Vec<_> = tys .iter() .map(|tok| self.make_type(module, tok)) .collect::<PartialVMResult<_>>()?; let struct_handle = module.struct_handle_at(*sh_idx); let struct_name = module.identifier_at(struct_handle.name); let module_handle = module.module_handle_at(struct_handle.module); let module_id = ModuleId::new( *module.address_identifier_at(module_handle.address), module.identifier_at(module_handle.name).to_owned(), ); let def_idx = self.find_struct_by_name(struct_name, &module_id)?.0; Type::StructInstantiation(def_idx, type_parameters) } }; Ok(res) } fn find_struct_by_name( &self, struct_name: &IdentStr, module_id: &ModuleId, ) -> PartialVMResult<(usize, Arc<StructType>)> { for (idx, ty) in self.structs.iter().enumerate() { if struct_match(ty, &module_id, struct_name) { return Ok((idx, Arc::clone(ty))); } } Err( PartialVMError::new(StatusCode::TYPE_RESOLUTION_FAILURE).with_message(format!( "Cannot find {:?}::{:?} in cache", module_id, struct_name )), ) } fn resolve_function_handle( &self, func_name: &IdentStr, module_id: &ModuleId, ) -> VMResult<usize> { for (idx, f) in self.functions.iter().enumerate() { if function_match(&f, module_id, func_name) { return Ok(idx); } } Err(PartialVMError::new(StatusCode::FUNCTION_RESOLUTION_FAILURE) .with_message(format!( "Cannot find {:?}::{:?} in cache", module_id, func_name )) .finish(Location::Undefined)) } fn load_function( &self, module: &CompiledModule, func_def: &FunctionDefinition, ) -> VMResult<Function> { Ok(Function::new(func_def, module)) } } struct StructInfo { struct_tag: Option<StructTag>, struct_layout: Option<MoveStructLayout>, kind_info: Option<(MoveKind, Vec<MoveKindInfo>)>, } impl StructInfo { fn new() -> Self { Self { struct_tag: None, struct_layout: None, kind_info: None, } } } pub(crate) struct TypeCache { structs: HashMap<usize, HashMap<Vec<Type>, StructInfo>>, } impl TypeCache { fn new() -> Self { Self { structs: HashMap::new(), } } } // A Loader is responsible to load scripts and modules and holds the cache of all loaded // entities. Each cache is protected by a `Mutex`. Operation in the Loader must be thread safe // (operating on values on the stack) and when cache needs updating the mutex must be taken. // The `pub(crate)` API is what a Loader offers to the runtime. pub(crate) struct Loader { scripts: Mutex<ScriptCache>, module_cache: Mutex<ModuleCache>, type_cache: Mutex<TypeCache>, } impl Loader { pub(crate) fn new() -> Self { //println!("new loader"); Self { scripts: Mutex::new(ScriptCache::new()), module_cache: Mutex::new(ModuleCache::new()), type_cache: Mutex::new(TypeCache::new()), } } // Entry point for function execution (`MoveVM::execute_function`). // Loading verifies the module if it was never loaded. // Type parameters are checked as well after every type is loaded. 
pub(crate) fn load_function( &self, function_name: &IdentStr, module_id: &ModuleId, ty_args: &[TypeTag], data_store: &mut dyn DataStore, ) -> VMResult<(Arc<Function>, Vec<Type>)> { self.load_module(module_id, data_store)?; let idx = self .module_cache .lock() .unwrap() .resolve_function_handle(function_name, module_id)?; let func = self.module_cache.lock().unwrap().function_at(idx); // verify type arguments let mut type_params = vec![]; for ty in ty_args { type_params.push(self.load_type(ty, data_store)?); } self.verify_ty_args(func.type_parameters(), &type_params) .map_err(|e| e.finish(Location::Module(module_id.clone())))?; Ok((func, type_params)) } // Entry point for script execution (`MoveVM::execute_script`). // Verifies the script if it is not in the cache of scripts loaded. // Type parameters are checked as well after every type is loaded. pub(crate) fn load_script( &self, script_blob: &[u8], ty_args: &[TypeTag], data_store: &mut dyn DataStore, ) -> VMResult<(Arc<Function>, Vec<Type>)> { // retrieve or load the script let hash_value = HashValue::sha3_256_of(script_blob); let opt_main = self.scripts.lock().unwrap().get(&hash_value); let main = match opt_main { Some(main) => main, None => { let ver_script = self.deserialize_and_verify_script(script_blob, data_store)?; let script = Script::new(ver_script, &hash_value, &self.module_cache.lock().unwrap())?; self.scripts .lock() .unwrap() .insert(hash_value, script) .map_err(|e| e.finish(Location::Script))? } }; // verify type arguments let mut type_params = vec![]; for ty in ty_args { type_params.push(self.load_type(ty, data_store)?); } self.verify_ty_args(main.type_parameters(), &type_params) .map_err(|e| e.finish(Location::Script))?; Ok((main, type_params)) } // Entry point for module publishing (`MoveVM::publish_module`). // A module to be published must be loadable. // This step performs all verification steps to load the module without loading it. // The module is not added to the code cache. It is simply published to the data cache. 
pub(crate) fn verify_module(&self, module: &CompiledModule) -> VMResult<()> { DuplicationChecker::verify_module(&module)?; SignatureChecker::verify_module(&module)?; InstructionConsistency::verify_module(&module)?; ResourceTransitiveChecker::verify_module(&module)?; constants::verify_module(&module)?; RecursiveStructDefChecker::verify_module(&module)?; InstantiationLoopChecker::verify_module(&module)?; CodeUnitVerifier::verify_module(&module)?; Self::check_natives(&module) } fn verify_script(&self, script: &CompiledScript) -> VMResult<()> { DuplicationChecker::verify_script(&script)?; SignatureChecker::verify_script(&script)?; InstructionConsistency::verify_script(&script)?; constants::verify_script(&script)?; CodeUnitVerifier::verify_script(&script)?; verify_main_signature(&script) } fn load_type(&self, type_tag: &TypeTag, data_store: &mut dyn DataStore) -> VMResult<Type> { Ok(match type_tag { TypeTag::Bool => Type::Bool, TypeTag::U8 => Type::U8, TypeTag::U64 => Type::U64, TypeTag::U128 => Type::U128, TypeTag::Address => Type::Address, TypeTag::Signer => Type::Signer, TypeTag::Vector(tt) => Type::Vector(Box::new(self.load_type(tt, data_store)?)), TypeTag::Struct(struct_tag) => { let module_id = ModuleId::new(struct_tag.address, struct_tag.module.clone()); self.load_module(&module_id, data_store)?; let (idx, struct_type) = self .module_cache .lock() .unwrap() .find_struct_by_name(&struct_tag.name, &module_id) .map_err(|e| e.finish(Location::Undefined))?; if struct_type.type_parameters.is_empty() && struct_tag.type_params.is_empty() { Type::Struct(idx) } else { let mut type_params = vec![]; for ty_param in &struct_tag.type_params { type_params.push(self.load_type(ty_param, data_store)?); } self.verify_ty_args(&struct_type.type_parameters, &type_params) .map_err(|e| e.finish(Location::Undefined))?; Type::StructInstantiation(idx, type_params) } } }) } fn load_module(&self, id: &ModuleId, data_store: &mut dyn DataStore) -> VMResult<Arc<Module>> { if let Some(module) = self.module_cache.lock().unwrap().get(id) { return Ok(module); } let module = self.deserialize_and_verify_module(id, data_store)?; Self::check_natives(&module)?; self.module_cache.lock().unwrap().insert(id.clone(), module) } fn verify_ty_args(&self, constraints: &[Kind], ty_args: &[Type]) -> PartialVMResult<()> { if constraints.len() != ty_args.len() { return Err(PartialVMError::new( StatusCode::NUMBER_OF_TYPE_ARGUMENTS_MISMATCH, )); } for (ty, expected_k) in ty_args.iter().zip(constraints) { let k = if self.is_resource(ty)? { Kind::Resource } else { Kind::Copyable }; if !k.is_sub_kind_of(*expected_k) { return Err(PartialVMError::new(StatusCode::CONSTRAINT_KIND_MISMATCH)); } } Ok(()) } fn check_natives(module: &CompiledModule) -> VMResult<()> { fn check_natives_impl(module: &CompiledModule) -> PartialVMResult<()> { for (idx, native_function) in module .function_defs() .iter() .filter(|fdv| fdv.is_native()) .enumerate() { let fh = module.function_handle_at(native_function.function); let mh = module.module_handle_at(fh.module); NativeFunction::resolve( module.address_identifier_at(mh.address), module.identifier_at(mh.name).as_str(), module.identifier_at(fh.name).as_str(), ) .ok_or_else(|| { verification_error( StatusCode::MISSING_DEPENDENCY, IndexKind::FunctionHandle, idx as TableIndex, ) })?; } // TODO: fix check and error code if we leave something around for native structs. // For now this generates the only error test cases care about... 
for (idx, struct_def) in module.struct_defs().iter().enumerate() { if struct_def.field_information == StructFieldInformation::Native { return Err(verification_error( StatusCode::MISSING_DEPENDENCY, IndexKind::FunctionHandle, idx as TableIndex, )); } } Ok(()) } check_natives_impl(module).map_err(|e| e.finish(Location::Module(module.self_id()))) } pub(crate) fn function_at(&self, idx: usize) -> Arc<Function> { self.module_cache.lock().unwrap().function_at(idx) } fn get_module(&self, idx: &ModuleId) -> Arc<Module> { Arc::clone( self.module_cache .lock() .unwrap() .modules .get(idx) .expect("ModuleId on Function must exist"), ) } fn get_script(&self, hash: &HashValue) -> Arc<Script> { Arc::clone( self.scripts .lock() .unwrap() .scripts .get(hash) .expect("Script hash on Function must exist"), ) } fn struct_at(&self, idx: usize) -> Arc<StructType> { self.module_cache.lock().unwrap().struct_at(idx) } fn is_resource(&self, type_: &Type) -> PartialVMResult<bool> { match type_ { Type::Struct(idx) => Ok(self .module_cache .lock() .unwrap() .struct_at(*idx) .is_resource), Type::StructInstantiation(idx, instantiation) => { if self .module_cache .lock() .unwrap() .struct_at(*idx) .is_resource { Ok(true) } else { for ty in instantiation { if self.is_resource(ty)? { return Ok(true); } } Ok(false) } } Type::Vector(ty) => self.is_resource(ty), _ => Ok(false), } } fn deserialize_and_verify_script( &self, script: &[u8], data_store: &mut dyn DataStore, ) -> VMResult<CompiledScript> { let script = match CompiledScript::deserialize(script) { Ok(script) => script, Err(err) => { error!("[VM] deserializer for script returned error: {:?}", err); let msg = format!("Deserialization error: {:?}", err); return Err(PartialVMError::new(StatusCode::CODE_DESERIALIZATION_ERROR) .with_message(msg) .finish(Location::Script)); } }; match self.verify_script(&script) { Ok(_) => { // verify dependencies let deps = load_script_dependencies(&script); let mut dependencies = vec![]; for dep in &deps { dependencies.push(self.load_module(dep, data_store)?); } self.verify_script_dependencies(script, dependencies) } Err(err) => { error!( "[VM] bytecode verifier returned errors for script: {:?}", err ); Err(err) } } } fn verify_script_dependencies( &self, script: CompiledScript, dependencies: Vec<Arc<Module>>, ) -> VMResult<CompiledScript> { let mut deps = vec![]; for dep in &dependencies { deps.push(dep.module()); } DependencyChecker::verify_script(&script, deps).and_then(|_| Ok(script)) } fn deserialize_and_verify_module( &self, id: &ModuleId, data_store: &mut dyn DataStore, ) -> VMResult<CompiledModule> { let module = match data_store.load_module(id) { Ok(m) => m, Err(err) => { crit!("[VM] Error fetching module with id {:?}", id); return Err(err); } }; self.verify_module(&module)?; self.check_dependencies(&module, data_store)?; Ok(module) } fn check_dependencies( &self, module: &CompiledModule, data_store: &mut dyn DataStore, ) -> VMResult<()> { let deps = load_module_dependencies(module); let mut dependencies = vec![]; for dep in &deps { dependencies.push(self.load_module(dep, data_store)?); } self.verify_module_dependencies(module, dependencies) } fn verify_module_dependencies( &self, module: &CompiledModule, dependencies: Vec<Arc<Module>>, ) -> VMResult<()> { let mut deps = vec![]; for dep in &dependencies { deps.push(dep.module()); } DependencyChecker::verify_module(module, deps) } } // A simple wrapper for a `Module` or a `Script` in the `Resolver` enum BinaryType { Module(Arc<Module>), Script(Arc<Script>), } // A Resolver is a 
simple and small structure allocated on the stack and used by the // interpreter. It's the only API known to the interpreter and it's tailored to the interpreter // needs. pub struct Resolver<'a> { loader: &'a Loader, binary: BinaryType, } impl<'a> Resolver<'a> { fn for_module(loader: &'a Loader, module: Arc<Module>) -> Self { let binary = BinaryType::Module(module); Self { loader, binary } } fn for_script(loader: &'a Loader, script: Arc<Script>) -> Self { let binary = BinaryType::Script(script); Self { loader, binary } } pub(crate) fn constant_at(&self, idx: ConstantPoolIndex) -> &Constant { match &self.binary { BinaryType::Module(module) => module.module.constant_at(idx), BinaryType::Script(script) => script.script.constant_at(idx), } } pub(crate) fn function_at(&self, idx: FunctionHandleIndex) -> Arc<Function> { let idx = match &self.binary { BinaryType::Module(module) => module.function_at(idx.0), BinaryType::Script(script) => script.function_at(idx.0), }; self.loader.function_at(idx) } pub(crate) fn function_instantiation_at( &self, idx: FunctionInstantiationIndex, ) -> &FunctionInstantiation { match &self.binary { BinaryType::Module(module) => module.function_instantiation_at(idx.0), BinaryType::Script(script) => script.function_instantiation_at(idx.0), } } pub(crate) fn struct_at(&self, idx: StructDefinitionIndex) -> Arc<StructType> { match &self.binary { BinaryType::Module(module) => { let gidx = module.struct_at(idx); self.loader.struct_at(gidx) } BinaryType::Script(_) => unreachable!("Scripts cannot have type instructions"), } } pub(crate) fn struct_gidx_at(&self, idx: StructDefinitionIndex) -> usize { match &self.binary { BinaryType::Module(module) => module.struct_at(idx), BinaryType::Script(_) => unreachable!("Scripts cannot have type instructions"), } } pub(crate) fn struct_type_at(&self, gidx: usize) -> Arc<StructType> { self.loader.struct_at(gidx) } pub(crate) fn struct_instantiation_at( &self, idx: StructDefInstantiationIndex, ) -> &StructInstantiation { match &self.binary { BinaryType::Module(module) => module.struct_instantiation_at(idx.0), BinaryType::Script(_) => unreachable!("Scripts cannot have type instructions"), } } pub(crate) fn get_struct_type(&self, idx: StructDefinitionIndex) -> Type { Type::Struct(self.struct_gidx_at(idx)) } pub(crate) fn get_struct_instantiation_type( &self, idx: StructDefInstantiationIndex, ty_args: &[Type], ) -> PartialVMResult<Type> { let si = self.struct_instantiation_at(idx); Ok(Type::StructInstantiation( si.get_def_idx(), si.get_instantiation() .iter() .map(|ty| ty.subst(ty_args)) .collect::<PartialVMResult<_>>()?, )) } pub(crate) fn field_offset(&self, idx: FieldHandleIndex) -> usize { match &self.binary { BinaryType::Module(module) => module.field_offset(idx), BinaryType::Script(_) => unreachable!("Scripts cannot have field instructions"), } } pub(crate) fn field_instantiation_offset(&self, idx: FieldInstantiationIndex) -> usize { match &self.binary { BinaryType::Module(module) => module.field_instantiation_offset(idx), BinaryType::Script(_) => unreachable!("Scripts cannot have field instructions"), } } pub(crate) fn field_count(&self, idx: StructDefinitionIndex) -> u16 { match &self.binary { BinaryType::Module(module) => module.field_count(idx.0), BinaryType::Script(_) => unreachable!("Scripts cannot have type instructions"), } } pub(crate) fn field_instantiation_count(&self, idx: StructDefInstantiationIndex) -> u16 { match &self.binary { BinaryType::Module(module) => module.field_instantiation_count(idx.0), BinaryType::Script(_) => 
unreachable!("Scripts cannot have type instructions"), } } pub fn type_to_fat_type(&self, ty: &Type) -> PartialVMResult<FatType> { self.loader.type_to_fat_type(ty) } pub(crate) fn is_resource(&self, ty: &Type) -> PartialVMResult<bool> { self.loader.is_resource(ty) } pub(crate) fn make_fat_type( &self, token: &SignatureToken, type_context: &[Type], ) -> PartialVMResult<FatType> { match &self.binary { BinaryType::Module(module) => { let binary = &module.module; let ty = self .loader .module_cache .lock() .unwrap() .make_type(binary, token)? .subst(type_context)?; self.loader.type_to_fat_type(&ty) } // TODO: this may not be true at all when it comes to printing (locals for instance) BinaryType::Script(_) => unreachable!("Scripts cannot have type operations"), } } } // This is the unfortunate side effect of our type story. It will have to go soon... impl<'a> TypeConverter for Resolver<'a> { fn type_to_fat_type(&self, ty: &Type) -> PartialVMResult<FatType> { Resolver::type_to_fat_type(self, ty) } } // A Module is very similar to a binary Module but data is "transformed" to a representation // more appropriate to execution. // When code executes indexes in instructions are resolved against those runtime structure // so that any data needed for execution is immediately available #[derive(Debug)] pub struct Module { id: ModuleId, // primitive pools module: CompiledModule, // types as indexes into the Loader type list struct_refs: Vec<usize>, structs: Vec<StructDef>, struct_instantiations: Vec<StructInstantiation>, // functions as indexes into the Loader function list function_refs: Vec<usize>, // materialized instantiations, whether partial or not function_instantiations: Vec<FunctionInstantiation>, // fields as a pair of index, first to the type, second to the field position in that type field_handles: Vec<FieldHandle>, // materialized instantiations, whether partial or not field_instantiations: Vec<FieldInstantiation>, } impl Module { fn new(module: CompiledModule, cache: &ModuleCache) -> VMResult<Self> { let id = module.self_id(); let mut struct_refs = vec![]; for struct_handle in module.struct_handles() { let struct_name = module.identifier_at(struct_handle.name); let module_handle = module.module_handle_at(struct_handle.module); let module_id = module.module_id_for_handle(module_handle); struct_refs.push( cache .find_struct_by_name(struct_name, &module_id) .map_err(|e| e.finish(Location::Module(module.self_id())))? 
.0, ); } let mut structs = vec![]; for struct_def in module.struct_defs() { let idx = struct_refs[struct_def.struct_handle.0 as usize]; let field_count = cache.structs[idx].fields.len() as u16; structs.push(StructDef { idx, field_count }); } let mut struct_instantiations = vec![]; for struct_inst in module.struct_instantiations() { let def = struct_inst.def.0 as usize; let struct_def = &structs[def]; let field_count = struct_def.field_count; let mut instantiation = vec![]; for ty in &module.signature_at(struct_inst.type_parameters).0 { instantiation.push( cache .make_type(&module, ty) .map_err(|e| e.finish(Location::Module(module.self_id())))?, ); } struct_instantiations.push(StructInstantiation { field_count, def: struct_def.idx, instantiation, }); } let mut function_refs = vec![]; for func_handle in module.function_handles() { let func_name = module.identifier_at(func_handle.name); let module_handle = module.module_handle_at(func_handle.module); let module_id = module.module_id_for_handle(module_handle); let ref_idx = cache.resolve_function_handle(func_name, &module_id)?; function_refs.push(ref_idx); } let mut function_instantiations = vec![]; for func_inst in module.function_instantiations() { let handle = function_refs[func_inst.handle.0 as usize]; let mut instantiation = vec![]; for ty in &module.signature_at(func_inst.type_parameters).0 { instantiation.push( cache .make_type(&module, ty) .map_err(|e| e.finish(Location::Module(module.self_id())))?, ); } function_instantiations.push(FunctionInstantiation { handle, instantiation, }); } let mut field_handles = vec![]; for f_handle in module.field_handles() { let def_idx = f_handle.owner; let owner = structs[def_idx.0 as usize].idx; let offset = f_handle.field as usize; field_handles.push(FieldHandle { owner, offset }); } let mut field_instantiations: Vec<FieldInstantiation> = vec![]; for f_inst in module.field_instantiations() { let fh_idx = f_inst.handle; let owner = field_handles[fh_idx.0 as usize].owner; let offset = field_handles[fh_idx.0 as usize].offset; field_instantiations.push(FieldInstantiation { owner, offset }); } Ok(Self { id, module, struct_refs, structs, function_refs, struct_instantiations, function_instantiations, field_handles, field_instantiations, }) } fn struct_at(&self, idx: StructDefinitionIndex) -> usize { self.structs[idx.0 as usize].idx } fn struct_instantiation_at(&self, idx: u16) -> &StructInstantiation { &self.struct_instantiations[idx as usize] } fn function_at(&self, idx: u16) -> usize { self.function_refs[idx as usize] } fn function_instantiation_at(&self, idx: u16) -> &FunctionInstantiation { &self.function_instantiations[idx as usize] } fn field_count(&self, idx: u16) -> u16 { self.structs[idx as usize].field_count } fn field_instantiation_count(&self, idx: u16) -> u16 { self.struct_instantiations[idx as usize].field_count } fn module(&self) -> &CompiledModule { &self.module } fn field_offset(&self, idx: FieldHandleIndex) -> usize { self.field_handles[idx.0 as usize].offset } fn field_instantiation_offset(&self, idx: FieldInstantiationIndex) -> usize { self.field_instantiations[idx.0 as usize].offset } } // A Script is very similar to a binary Script but data is "transformed" to a representation // more appropriate to execution. 
// When code executes indexes in instructions are resolved against those runtime structure // so that any data needed for execution is immediately available #[derive(Debug)] struct Script { // primitive pools script: CompiledScript, // types as indexes into the Loader type list struct_refs: Vec<usize>, // functions as indexes into the Loader function list function_refs: Vec<usize>, // materialized instantiations, whether partial or not function_instantiations: Vec<FunctionInstantiation>, // entry point main: Arc<Function>, } impl Script { fn new(script: CompiledScript, script_hash: &HashValue, cache: &ModuleCache) -> VMResult<Self> { let mut struct_refs = vec![]; for struct_handle in script.struct_handles() { let struct_name = script.identifier_at(struct_handle.name); let module_handle = script.module_handle_at(struct_handle.module); let module_id = ModuleId::new( *script.address_identifier_at(module_handle.address), script.identifier_at(module_handle.name).to_owned(), ); struct_refs.push( cache .find_struct_by_name(struct_name, &module_id) .map_err(|e| e.finish(Location::Script))? .0, ); } let mut function_refs = vec![]; for func_handle in script.function_handles().iter() { let func_name = script.identifier_at(func_handle.name); let module_handle = script.module_handle_at(func_handle.module); let module_id = ModuleId::new( *script.address_identifier_at(module_handle.address), script.identifier_at(module_handle.name).to_owned(), ); let ref_idx = cache.resolve_function_handle(func_name, &module_id)?; function_refs.push(ref_idx); } let mut function_instantiations = vec![]; let (_, module) = script.clone().into_module(); for func_inst in script.function_instantiations() { let handle = function_refs[func_inst.handle.0 as usize]; let mut instantiation = vec![]; for ty in &script.signature_at(func_inst.type_parameters).0 { instantiation.push( cache .make_type(&module, ty) .map_err(|e| e.finish(Location::Script))?, ); } function_instantiations.push(FunctionInstantiation { handle, instantiation, }); } let scope = Scope::Script(*script_hash); let compiled_script = script.as_inner(); let code: Vec<Bytecode> = compiled_script.code.code.clone(); let parameters = script.signature_at(compiled_script.parameters).clone(); let return_ = Signature(vec![]); let locals = Signature( parameters .0 .iter() .chain(script.signature_at(compiled_script.code.locals).0.iter()) .cloned() .collect(), ); let type_parameters = compiled_script.type_parameters.clone(); // TODO: main does not have a name. Revisit. 
let name = Identifier::new("main").unwrap(); let native = None; // Script entries cannot be native let main: Arc<Function> = Arc::new(Function { code, parameters, return_, locals, type_parameters, native, scope, name, }); Ok(Self { script, struct_refs, function_refs, function_instantiations, main, }) } fn entry_point(&self) -> Arc<Function> { self.main.clone() } fn function_at(&self, idx: u16) -> usize { self.function_refs[idx as usize] } fn function_instantiation_at(&self, idx: u16) -> &FunctionInstantiation { &self.function_instantiations[idx as usize] } } // A simple wrapper for the "owner" of the function (Module or Script) #[derive(Debug)] enum Scope { Module(ModuleId), Script(HashValue), } // A runtime function #[derive(Debug)] pub struct Function { code: Vec<Bytecode>, parameters: Signature, return_: Signature, locals: Signature, type_parameters: Vec<Kind>, native: Option<NativeFunction>, scope: Scope, name: Identifier, } impl Function { fn new(def: &FunctionDefinition, module: &CompiledModule) -> Self { let handle = module.function_handle_at(def.function); let name = module.identifier_at(handle.name).to_owned(); let module_id = module.self_id(); let native = if def.is_native() { NativeFunction::resolve( module_id.address(), module_id.name().as_str(), name.as_str(), ) } else { None }; let scope = Scope::Module(module_id); let parameters = module.signature_at(handle.parameters).clone(); // Native functions do not have a code unit let (code, locals) = match &def.code { Some(code) => ( code.code.clone(), Signature( parameters .0 .iter() .chain(module.signature_at(code.locals).0.iter()) .cloned() .collect(), ), ), None => (vec![], Signature(vec![])), }; let return_ = module.signature_at(handle.return_).clone(); let type_parameters = handle.type_parameters.clone(); Self { code, parameters, return_, locals, type_parameters, native, scope, name, } } pub(crate) fn module_id(&self) -> Option<&ModuleId> { match &self.scope { Scope::Module(module_id) => Some(module_id), Scope::Script(_) => None, } } pub(crate) fn get_resolver<'a>(&self, loader: &'a Loader) -> Resolver<'a> { match &self.scope { Scope::Module(module_id) => { let module = loader.get_module(module_id); Resolver::for_module(loader, module) } Scope::Script(script_hash) => { let script = loader.get_script(script_hash); Resolver::for_script(loader, script) } } } pub(crate) fn local_count(&self) -> usize { self.locals.len() } pub(crate) fn locals(&self) -> &Signature { &self.locals } pub(crate) fn arg_count(&self) -> usize { self.parameters.len() } pub(crate) fn name(&self) -> &str { self.name.as_str() } pub(crate) fn code(&self) -> &[Bytecode] { &self.code } pub(crate) fn type_parameters(&self) -> &[Kind] { &self.type_parameters } pub(crate) fn parameters(&self) -> &Signature { &self.parameters } pub(crate) fn pretty_string(&self) -> String { match &self.scope { Scope::Script(_) => "Script::main".into(), Scope::Module(id) => format!( "0x{}::{}::{}", id.address(), id.name().as_str(), self.name.as_str() ), } } pub(crate) fn is_native(&self) -> bool { self.native.is_some() } pub(crate) fn get_native(&self) -> PartialVMResult<NativeFunction> { self.native.ok_or_else(|| { PartialVMError::new(StatusCode::UNREACHABLE) .with_message("Missing Native Function".to_string()) }) } } // A function instantiation. 
#[derive(Debug)]
pub struct FunctionInstantiation {
    handle: usize,
    instantiation: Vec<Type>,
}

impl FunctionInstantiation {
    pub(crate) fn materialize(&self, type_params: &[Type]) -> PartialVMResult<Vec<Type>> {
        let mut instantiation = vec![];
        for ty in &self.instantiation {
            instantiation.push(ty.subst(type_params)?);
        }
        Ok(instantiation)
    }

    pub(crate) fn handle(&self) -> usize {
        self.handle
    }

    pub(crate) fn instantiation_size(&self) -> usize {
        self.instantiation.len()
    }
}

// A struct definition carries an index to the type in the ModuleCache and the field count
// which is the most common information used at runtime
#[derive(Debug)]
struct StructDef {
    field_count: u16,
    idx: usize,
}

// A struct instantiation.
#[derive(Debug)]
pub(crate) struct StructInstantiation {
    field_count: u16,
    def: usize,
    instantiation: Vec<Type>,
}

impl StructInstantiation {
    pub(crate) fn get_def_idx(&self) -> usize {
        self.def
    }

    pub(crate) fn get_instantiation(&self) -> &[Type] {
        &self.instantiation
    }
}

// A field handle. The offset is the only used information when operating on a field
#[derive(Debug)]
struct FieldHandle {
    offset: usize,
    owner: usize,
}

// A field instantiation. The offset is the only used information when operating on a field
#[derive(Debug)]
pub struct FieldInstantiation {
    offset: usize,
    owner: usize,
}

//
// Utility functions
//

fn load_script_dependencies(script: &CompiledScript) -> Vec<ModuleId> {
    let mut deps = vec![];
    for module in script.module_handles() {
        deps.push(ModuleId::new(
            *script.address_identifier_at(module.address),
            script.identifier_at(module.name).to_owned(),
        ));
    }
    deps
}

fn load_module_dependencies(module: &CompiledModule) -> Vec<ModuleId> {
    let self_module = module.self_handle();
    let mut deps = vec![];
    for module_handle in module.module_handles() {
        if module_handle == self_module {
            continue;
        }
        deps.push(ModuleId::new(
            *module.address_identifier_at(module_handle.address),
            module.identifier_at(module_handle.name).to_owned(),
        ));
    }
    deps
}

fn struct_match(struct_: &StructType, module: &ModuleId, name: &IdentStr) -> bool {
    struct_.name.as_ident_str() == name && &struct_.module == module
}

fn function_match(function: &Function, module: &ModuleId, name: &IdentStr) -> bool {
    function.name.as_ident_str() == name && function.module_id() == Some(module)
}

impl Loader {
    fn type_to_fat_type(&self, ty: &Type) -> PartialVMResult<FatType> {
        use Type::*;

        Ok(match ty {
            Bool => FatType::Bool,
            U8 => FatType::U8,
            U64 => FatType::U64,
            U128 => FatType::U128,
            Address => FatType::Address,
            Signer => FatType::Signer,
            Vector(ty) => FatType::Vector(Box::new(self.type_to_fat_type(ty)?)),
            Struct(idx) => FatType::Struct(Box::new(self.struct_to_fat_struct(*idx, vec![])?)),
            StructInstantiation(idx, instantiation) => {
                let mut ty_args = vec![];
                for inst in instantiation {
                    ty_args.push(self.type_to_fat_type(inst)?);
                }
                FatType::Struct(Box::new(self.struct_to_fat_struct(*idx, ty_args)?))
            }
            Reference(ty) => FatType::Reference(Box::new(self.type_to_fat_type(ty)?)),
            MutableReference(ty) => {
                FatType::MutableReference(Box::new(self.type_to_fat_type(ty)?))
            }
            TyParam(idx) => FatType::TyParam(*idx),
        })
    }

    fn struct_gidx_to_type_tag(&self, gidx: usize, ty_args: &[Type]) -> PartialVMResult<StructTag> {
        if let Some(struct_map) = self.type_cache.lock().unwrap().structs.get(&gidx) {
            if let Some(struct_info) = struct_map.get(ty_args) {
                if let Some(struct_tag) = &struct_info.struct_tag {
                    return Ok(struct_tag.clone());
                }
            }
        }

        let ty_arg_tags = ty_args
            .iter()
            .map(|ty| self.type_to_type_tag(ty))
            .collect::<PartialVMResult<Vec<_>>>()?;
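        // Cache miss: materialize the tag from the struct's defining module and
        // name, then memoize it under (gidx, ty_args) so later conversions are
        // plain lookups.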
let struct_type = self.module_cache.lock().unwrap().struct_at(gidx); let struct_tag = StructTag { address: *struct_type.module.address(), module: struct_type.module.name().to_owned(), name: struct_type.name.clone(), type_params: ty_arg_tags, }; self.type_cache .lock() .unwrap() .structs .entry(gidx) .or_insert_with(HashMap::new) .entry(ty_args.to_vec()) .or_insert_with(StructInfo::new) .struct_tag = Some(struct_tag.clone()); Ok(struct_tag) } pub(crate) fn type_to_type_tag(&self, ty: &Type) -> PartialVMResult<TypeTag> { Ok(match ty { Type::Bool => TypeTag::Bool, Type::U8 => TypeTag::U8, Type::U64 => TypeTag::U64, Type::U128 => TypeTag::U128, Type::Address => TypeTag::Address, Type::Signer => TypeTag::Signer, Type::Vector(ty) => TypeTag::Vector(Box::new(self.type_to_type_tag(ty)?)), Type::Struct(gidx) => TypeTag::Struct(self.struct_gidx_to_type_tag(*gidx, &[])?), Type::StructInstantiation(gidx, ty_args) => { TypeTag::Struct(self.struct_gidx_to_type_tag(*gidx, ty_args)?) } Type::Reference(_) | Type::MutableReference(_) | Type::TyParam(_) => { return Err( PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) .with_message(format!("no type tag for {:?}", ty)), ) } }) } fn struct_gidx_to_type_layout( &self, gidx: usize, ty_args: &[Type], ) -> PartialVMResult<MoveStructLayout> { if let Some(struct_map) = self.type_cache.lock().unwrap().structs.get(&gidx) { if let Some(struct_info) = struct_map.get(ty_args) { if let Some(layout) = &struct_info.struct_layout { return Ok(layout.clone()); } } } let struct_type = self.module_cache.lock().unwrap().struct_at(gidx); let field_tys = struct_type .fields .iter() .map(|ty| ty.subst(ty_args)) .collect::<PartialVMResult<Vec<_>>>()?; let field_layouts = field_tys .iter() .map(|ty| self.type_to_type_layout(ty)) .collect::<PartialVMResult<Vec<_>>>()?; let struct_layout = MoveStructLayout::new(field_layouts); self.type_cache .lock() .unwrap() .structs .entry(gidx) .or_insert_with(HashMap::new) .entry(ty_args.to_vec()) .or_insert_with(StructInfo::new) .struct_layout = Some(struct_layout.clone()); Ok(struct_layout) } pub(crate) fn type_to_type_layout(&self, ty: &Type) -> PartialVMResult<MoveTypeLayout> { Ok(match ty { Type::Bool => MoveTypeLayout::Bool, Type::U8 => MoveTypeLayout::U8, Type::U64 => MoveTypeLayout::U64, Type::U128 => MoveTypeLayout::U128, Type::Address => MoveTypeLayout::Address, Type::Signer => MoveTypeLayout::Signer, Type::Vector(ty) => MoveTypeLayout::Vector(Box::new(self.type_to_type_layout(ty)?)), Type::Struct(gidx) => { MoveTypeLayout::Struct(self.struct_gidx_to_type_layout(*gidx, &[])?) } Type::StructInstantiation(gidx, ty_args) => { MoveTypeLayout::Struct(self.struct_gidx_to_type_layout(*gidx, ty_args)?) } Type::Reference(_) | Type::MutableReference(_) | Type::TyParam(_) => { return Err( PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) .with_message(format!("no type layout for {:?}", ty)), ) } }) } fn struct_gidx_to_kind_info( &self, gidx: usize, ty_args: &[Type], ) -> PartialVMResult<(MoveKind, Vec<MoveKindInfo>)> { if let Some(struct_map) = self.type_cache.lock().unwrap().structs.get(&gidx) { if let Some(struct_info) = struct_map.get(ty_args) { if let Some(kind_info) = &struct_info.kind_info { return Ok(kind_info.clone()); } } } let struct_type = self.module_cache.lock().unwrap().struct_at(gidx); let mut is_resource = struct_type.is_resource; if !is_resource { for ty in ty_args { if self.is_resource(ty)? 
{ is_resource = true; } } } let field_tys = struct_type .fields .iter() .map(|ty| ty.subst(ty_args)) .collect::<PartialVMResult<Vec<_>>>()?; let field_kind_info = field_tys .iter() .map(|ty| self.type_to_kind_info(ty)) .collect::<PartialVMResult<Vec<_>>>()?; let kind_info = (MoveKind::from_bool(is_resource), field_kind_info); self.type_cache .lock() .unwrap() .structs .entry(gidx) .or_insert_with(HashMap::new) .entry(ty_args.to_vec()) .or_insert_with(StructInfo::new) .kind_info = Some(kind_info.clone()); Ok(kind_info) } pub(crate) fn type_to_kind_info(&self, ty: &Type) -> PartialVMResult<MoveKindInfo> { Ok(match ty { Type::Bool | Type::U8 | Type::U64 | Type::U128 | Type::Address => { MoveKindInfo::Base(MoveKind::Copyable) } Type::Signer => MoveKindInfo::Base(MoveKind::Resource), Type::Vector(ty) => { let kind_info = self.type_to_kind_info(ty)?; MoveKindInfo::Vector(kind_info.kind(), Box::new(kind_info)) } Type::Struct(gidx) => { let (is_resource, field_kind_info) = self.struct_gidx_to_kind_info(*gidx, &[])?; MoveKindInfo::Struct(is_resource, field_kind_info) } Type::StructInstantiation(gidx, ty_args) => { let (is_resource, field_kind_info) = self.struct_gidx_to_kind_info(*gidx, ty_args)?; MoveKindInfo::Struct(is_resource, field_kind_info) } Type::Reference(_) | Type::MutableReference(_) | Type::TyParam(_) => { return Err( PartialVMError::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR) .with_message(format!("no kind info for {:?}", ty)), ) } }) } fn struct_to_fat_struct( &self, idx: usize, ty_args: Vec<FatType>, ) -> PartialVMResult<FatStructType> { let struct_type = self.module_cache.lock().unwrap().struct_at(idx); let address = *struct_type.module.address(); let module = struct_type.module.name().to_owned(); let name = struct_type.name.clone(); let is_resource = struct_type.is_resource; let mut fields = vec![]; for field_type in &struct_type.fields { fields.push(self.type_to_fat_type(field_type)?); } let mut layout = vec![]; for field in &fields { layout.push(field.subst(&ty_args)?); } Ok(FatStructType { address, module, name, is_resource, ty_args, layout, }) } }
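
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file: how crate-internal code
// might drive the conversion helpers above. The `loader` argument is a
// hypothetical, already-populated `Loader`. Primitive and vector types convert
// structurally without touching `type_cache`; struct cases would go through
// the memoized `struct_gidx_to_type_tag` / `struct_gidx_to_type_layout` paths.
#[allow(dead_code)]
fn example_vector_u64_conversions(
    loader: &Loader,
) -> PartialVMResult<(TypeTag, MoveTypeLayout)> {
    // `vector<u64>` as the interpreter represents it...
    let ty = Type::Vector(Box::new(Type::U64));
    // ...and as external clients see it: a `TypeTag` for (de)serialization and
    // a `MoveTypeLayout` describing the shape of runtime values.
    Ok((loader.type_to_type_tag(&ty)?, loader.type_to_type_layout(&ty)?))
}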
34.771865
100
0.563815
9b8d7031f8e7507d7cfacbbc89deef69637a84ec
62543
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use core::iter::*; use core::{i8, i16, isize}; use core::usize; #[test] fn test_lt() { let empty: [isize; 0] = []; let xs = [1,2,3]; let ys = [1,2,0]; assert!(!xs.iter().lt(ys.iter())); assert!(!xs.iter().le(ys.iter())); assert!( xs.iter().gt(ys.iter())); assert!( xs.iter().ge(ys.iter())); assert!( ys.iter().lt(xs.iter())); assert!( ys.iter().le(xs.iter())); assert!(!ys.iter().gt(xs.iter())); assert!(!ys.iter().ge(xs.iter())); assert!( empty.iter().lt(xs.iter())); assert!( empty.iter().le(xs.iter())); assert!(!empty.iter().gt(xs.iter())); assert!(!empty.iter().ge(xs.iter())); // Sequence with NaN let u = [1.0f64, 2.0]; let v = [0.0f64/0.0, 3.0]; assert!(!u.iter().lt(v.iter())); assert!(!u.iter().le(v.iter())); assert!(!u.iter().gt(v.iter())); assert!(!u.iter().ge(v.iter())); let a = [0.0f64/0.0]; let b = [1.0f64]; let c = [2.0f64]; assert!(a.iter().lt(b.iter()) == (a[0] < b[0])); assert!(a.iter().le(b.iter()) == (a[0] <= b[0])); assert!(a.iter().gt(b.iter()) == (a[0] > b[0])); assert!(a.iter().ge(b.iter()) == (a[0] >= b[0])); assert!(c.iter().lt(b.iter()) == (c[0] < b[0])); assert!(c.iter().le(b.iter()) == (c[0] <= b[0])); assert!(c.iter().gt(b.iter()) == (c[0] > b[0])); assert!(c.iter().ge(b.iter()) == (c[0] >= b[0])); } #[test] fn test_multi_iter() { let xs = [1,2,3,4]; let ys = [4,3,2,1]; assert!(xs.iter().eq(ys.iter().rev())); assert!(xs.iter().lt(xs.iter().skip(2))); } #[test] fn test_counter_from_iter() { let it = (0..).step_by(5).take(10); let xs: Vec<isize> = FromIterator::from_iter(it); assert_eq!(xs, [0, 5, 10, 15, 20, 25, 30, 35, 40, 45]); } #[test] fn test_iterator_chain() { let xs = [0, 1, 2, 3, 4, 5]; let ys = [30, 40, 50, 60]; let expected = [0, 1, 2, 3, 4, 5, 30, 40, 50, 60]; let it = xs.iter().chain(&ys); let mut i = 0; for &x in it { assert_eq!(x, expected[i]); i += 1; } assert_eq!(i, expected.len()); let ys = (30..).step_by(10).take(4); let it = xs.iter().cloned().chain(ys); let mut i = 0; for x in it { assert_eq!(x, expected[i]); i += 1; } assert_eq!(i, expected.len()); } #[test] fn test_iterator_chain_nth() { let xs = [0, 1, 2, 3, 4, 5]; let ys = [30, 40, 50, 60]; let zs = []; let expected = [0, 1, 2, 3, 4, 5, 30, 40, 50, 60]; for (i, x) in expected.iter().enumerate() { assert_eq!(Some(x), xs.iter().chain(&ys).nth(i)); } assert_eq!(zs.iter().chain(&xs).nth(0), Some(&0)); let mut it = xs.iter().chain(&zs); assert_eq!(it.nth(5), Some(&5)); assert_eq!(it.next(), None); } #[test] fn test_iterator_chain_last() { let xs = [0, 1, 2, 3, 4, 5]; let ys = [30, 40, 50, 60]; let zs = []; assert_eq!(xs.iter().chain(&ys).last(), Some(&60)); assert_eq!(zs.iter().chain(&ys).last(), Some(&60)); assert_eq!(ys.iter().chain(&zs).last(), Some(&60)); assert_eq!(zs.iter().chain(&zs).last(), None); } #[test] fn test_iterator_chain_count() { let xs = [0, 1, 2, 3, 4, 5]; let ys = [30, 40, 50, 60]; let zs = []; assert_eq!(xs.iter().chain(&ys).count(), 10); assert_eq!(zs.iter().chain(&ys).count(), 4); } #[test] fn test_iterator_chain_find() { let xs = [0, 1, 2, 3, 4, 5]; let ys = [30, 40, 50, 60]; let mut iter = xs.iter().chain(&ys); 
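    // `find` consumes elements up to and including the match, so the next call
    // resumes from the element right after it, even across the chain boundary.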
assert_eq!(iter.find(|&&i| i == 4), Some(&4)); assert_eq!(iter.next(), Some(&5)); assert_eq!(iter.find(|&&i| i == 40), Some(&40)); assert_eq!(iter.next(), Some(&50)); assert_eq!(iter.find(|&&i| i == 100), None); assert_eq!(iter.next(), None); } #[test] fn test_zip_nth() { let xs = [0, 1, 2, 4, 5]; let ys = [10, 11, 12]; let mut it = xs.iter().zip(&ys); assert_eq!(it.nth(0), Some((&0, &10))); assert_eq!(it.nth(1), Some((&2, &12))); assert_eq!(it.nth(0), None); let mut it = xs.iter().zip(&ys); assert_eq!(it.nth(3), None); let mut it = ys.iter().zip(&xs); assert_eq!(it.nth(3), None); } #[test] fn test_zip_nth_side_effects() { let mut a = Vec::new(); let mut b = Vec::new(); let value = [1, 2, 3, 4, 5, 6].iter().cloned() .map(|n| { a.push(n); n * 10 }) .zip([2, 3, 4, 5, 6, 7, 8].iter().cloned().map(|n| { b.push(n * 100); n * 1000 })) .skip(1) .nth(3); assert_eq!(value, Some((50, 6000))); assert_eq!(a, vec![1, 2, 3, 4, 5]); assert_eq!(b, vec![200, 300, 400, 500, 600]); } #[test] fn test_iterator_step_by() { // Identity let mut it = (0..).step_by(1).take(3); assert_eq!(it.next(), Some(0)); assert_eq!(it.next(), Some(1)); assert_eq!(it.next(), Some(2)); assert_eq!(it.next(), None); let mut it = (0..).step_by(3).take(4); assert_eq!(it.next(), Some(0)); assert_eq!(it.next(), Some(3)); assert_eq!(it.next(), Some(6)); assert_eq!(it.next(), Some(9)); assert_eq!(it.next(), None); } #[test] fn test_iterator_step_by_nth() { let mut it = (0..16).step_by(5); assert_eq!(it.nth(0), Some(0)); assert_eq!(it.nth(0), Some(5)); assert_eq!(it.nth(0), Some(10)); assert_eq!(it.nth(0), Some(15)); assert_eq!(it.nth(0), None); let it = (0..18).step_by(5); assert_eq!(it.clone().nth(0), Some(0)); assert_eq!(it.clone().nth(1), Some(5)); assert_eq!(it.clone().nth(2), Some(10)); assert_eq!(it.clone().nth(3), Some(15)); assert_eq!(it.clone().nth(4), None); assert_eq!(it.clone().nth(42), None); } #[test] fn test_iterator_step_by_nth_overflow() { #[cfg(target_pointer_width = "8")] type Bigger = u16; #[cfg(target_pointer_width = "16")] type Bigger = u32; #[cfg(target_pointer_width = "32")] type Bigger = u64; #[cfg(target_pointer_width = "64")] type Bigger = u128; #[derive(Clone)] struct Test(Bigger); impl<'a> Iterator for &'a mut Test { type Item = i32; fn next(&mut self) -> Option<Self::Item> { Some(21) } fn nth(&mut self, n: usize) -> Option<Self::Item> { self.0 += n as Bigger + 1; Some(42) } } let mut it = Test(0); let root = usize::MAX >> (::std::mem::size_of::<usize>() * 8 / 2); let n = root + 20; (&mut it).step_by(n).nth(n); assert_eq!(it.0, n as Bigger * n as Bigger); // large step let mut it = Test(0); (&mut it).step_by(usize::MAX).nth(5); assert_eq!(it.0, (usize::MAX as Bigger) * 5); // n + 1 overflows let mut it = Test(0); (&mut it).step_by(2).nth(usize::MAX); assert_eq!(it.0, (usize::MAX as Bigger) * 2); // n + 1 overflows let mut it = Test(0); (&mut it).step_by(1).nth(usize::MAX); assert_eq!(it.0, (usize::MAX as Bigger) * 1); } #[test] #[should_panic] fn test_iterator_step_by_zero() { let mut it = (0..).step_by(0); it.next(); } #[test] fn test_iterator_step_by_size_hint() { struct StubSizeHint(usize, Option<usize>); impl Iterator for StubSizeHint { type Item = (); fn next(&mut self) -> Option<()> { self.0 -= 1; if let Some(ref mut upper) = self.1 { *upper -= 1; } Some(()) } fn size_hint(&self) -> (usize, Option<usize>) { (self.0, self.1) } } // The two checks in each case are needed because the logic // is different before the first call to `next()`. 
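    // identity step leaves the hint unchanged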
let mut it = StubSizeHint(10, Some(10)).step_by(1); assert_eq!(it.size_hint(), (10, Some(10))); it.next(); assert_eq!(it.size_hint(), (9, Some(9))); // exact multiple let mut it = StubSizeHint(10, Some(10)).step_by(3); assert_eq!(it.size_hint(), (4, Some(4))); it.next(); assert_eq!(it.size_hint(), (3, Some(3))); // larger base range, but not enough to get another element let mut it = StubSizeHint(12, Some(12)).step_by(3); assert_eq!(it.size_hint(), (4, Some(4))); it.next(); assert_eq!(it.size_hint(), (3, Some(3))); // smaller base range, so fewer resulting elements let mut it = StubSizeHint(9, Some(9)).step_by(3); assert_eq!(it.size_hint(), (3, Some(3))); it.next(); assert_eq!(it.size_hint(), (2, Some(2))); // infinite upper bound let mut it = StubSizeHint(usize::MAX, None).step_by(1); assert_eq!(it.size_hint(), (usize::MAX, None)); it.next(); assert_eq!(it.size_hint(), (usize::MAX-1, None)); // still infinite with larger step let mut it = StubSizeHint(7, None).step_by(3); assert_eq!(it.size_hint(), (3, None)); it.next(); assert_eq!(it.size_hint(), (2, None)); // propagates ExactSizeIterator let a = [1,2,3,4,5]; let it = a.iter().step_by(2); assert_eq!(it.len(), 3); // Cannot be TrustedLen as a step greater than one makes an iterator // with (usize::MAX, None) no longer meet the safety requirements trait TrustedLenCheck { fn test(self) -> bool; } impl<T:Iterator> TrustedLenCheck for T { default fn test(self) -> bool { false } } impl<T:TrustedLen> TrustedLenCheck for T { fn test(self) -> bool { true } } assert!(TrustedLenCheck::test(a.iter())); assert!(!TrustedLenCheck::test(a.iter().step_by(1))); } #[test] fn test_filter_map() { let it = (0..).step_by(1).take(10) .filter_map(|x| if x % 2 == 0 { Some(x*x) } else { None }); assert_eq!(it.collect::<Vec<usize>>(), [0*0, 2*2, 4*4, 6*6, 8*8]); } #[test] fn test_filter_map_fold() { let xs = [0, 1, 2, 3, 4, 5, 6, 7, 8]; let ys = [0*0, 2*2, 4*4, 6*6, 8*8]; let it = xs.iter().filter_map(|&x| if x % 2 == 0 { Some(x*x) } else { None }); let i = it.fold(0, |i, x| { assert_eq!(x, ys[i]); i + 1 }); assert_eq!(i, ys.len()); let it = xs.iter().filter_map(|&x| if x % 2 == 0 { Some(x*x) } else { None }); let i = it.rfold(ys.len(), |i, x| { assert_eq!(x, ys[i - 1]); i - 1 }); assert_eq!(i, 0); } #[test] fn test_iterator_enumerate() { let xs = [0, 1, 2, 3, 4, 5]; let it = xs.iter().enumerate(); for (i, &x) in it { assert_eq!(i, x); } } #[test] fn test_iterator_enumerate_nth() { let xs = [0, 1, 2, 3, 4, 5]; for (i, &x) in xs.iter().enumerate() { assert_eq!(i, x); } let mut it = xs.iter().enumerate(); while let Some((i, &x)) = it.nth(0) { assert_eq!(i, x); } let mut it = xs.iter().enumerate(); while let Some((i, &x)) = it.nth(1) { assert_eq!(i, x); } let (i, &x) = xs.iter().enumerate().nth(3).unwrap(); assert_eq!(i, x); assert_eq!(i, 3); } #[test] fn test_iterator_enumerate_count() { let xs = [0, 1, 2, 3, 4, 5]; assert_eq!(xs.iter().enumerate().count(), 6); } #[test] fn test_iterator_enumerate_fold() { let xs = [0, 1, 2, 3, 4, 5]; let mut it = xs.iter().enumerate(); // steal a couple to get an interesting offset assert_eq!(it.next(), Some((0, &0))); assert_eq!(it.next(), Some((1, &1))); let i = it.fold(2, |i, (j, &x)| { assert_eq!(i, j); assert_eq!(x, xs[j]); i + 1 }); assert_eq!(i, xs.len()); let mut it = xs.iter().enumerate(); assert_eq!(it.next(), Some((0, &0))); let i = it.rfold(xs.len() - 1, |i, (j, &x)| { assert_eq!(i, j); assert_eq!(x, xs[j]); i - 1 }); assert_eq!(i, 0); } #[test] fn test_iterator_filter_count() { let xs = [0, 1, 2, 3, 4, 5, 6, 7, 8]; 
assert_eq!(xs.iter().filter(|&&x| x % 2 == 0).count(), 5); } #[test] fn test_iterator_filter_fold() { let xs = [0, 1, 2, 3, 4, 5, 6, 7, 8]; let ys = [0, 2, 4, 6, 8]; let it = xs.iter().filter(|&&x| x % 2 == 0); let i = it.fold(0, |i, &x| { assert_eq!(x, ys[i]); i + 1 }); assert_eq!(i, ys.len()); let it = xs.iter().filter(|&&x| x % 2 == 0); let i = it.rfold(ys.len(), |i, &x| { assert_eq!(x, ys[i - 1]); i - 1 }); assert_eq!(i, 0); } #[test] fn test_iterator_peekable() { let xs = vec![0, 1, 2, 3, 4, 5]; let mut it = xs.iter().cloned().peekable(); assert_eq!(it.len(), 6); assert_eq!(it.peek().unwrap(), &0); assert_eq!(it.len(), 6); assert_eq!(it.next().unwrap(), 0); assert_eq!(it.len(), 5); assert_eq!(it.next().unwrap(), 1); assert_eq!(it.len(), 4); assert_eq!(it.next().unwrap(), 2); assert_eq!(it.len(), 3); assert_eq!(it.peek().unwrap(), &3); assert_eq!(it.len(), 3); assert_eq!(it.peek().unwrap(), &3); assert_eq!(it.len(), 3); assert_eq!(it.next().unwrap(), 3); assert_eq!(it.len(), 2); assert_eq!(it.next().unwrap(), 4); assert_eq!(it.len(), 1); assert_eq!(it.peek().unwrap(), &5); assert_eq!(it.len(), 1); assert_eq!(it.next().unwrap(), 5); assert_eq!(it.len(), 0); assert!(it.peek().is_none()); assert_eq!(it.len(), 0); assert!(it.next().is_none()); assert_eq!(it.len(), 0); } #[test] fn test_iterator_peekable_count() { let xs = [0, 1, 2, 3, 4, 5]; let ys = [10]; let zs: [i32; 0] = []; assert_eq!(xs.iter().peekable().count(), 6); let mut it = xs.iter().peekable(); assert_eq!(it.peek(), Some(&&0)); assert_eq!(it.count(), 6); assert_eq!(ys.iter().peekable().count(), 1); let mut it = ys.iter().peekable(); assert_eq!(it.peek(), Some(&&10)); assert_eq!(it.count(), 1); assert_eq!(zs.iter().peekable().count(), 0); let mut it = zs.iter().peekable(); assert_eq!(it.peek(), None); } #[test] fn test_iterator_peekable_nth() { let xs = [0, 1, 2, 3, 4, 5]; let mut it = xs.iter().peekable(); assert_eq!(it.peek(), Some(&&0)); assert_eq!(it.nth(0), Some(&0)); assert_eq!(it.peek(), Some(&&1)); assert_eq!(it.nth(1), Some(&2)); assert_eq!(it.peek(), Some(&&3)); assert_eq!(it.nth(2), Some(&5)); assert_eq!(it.next(), None); } #[test] fn test_iterator_peekable_last() { let xs = [0, 1, 2, 3, 4, 5]; let ys = [0]; let mut it = xs.iter().peekable(); assert_eq!(it.peek(), Some(&&0)); assert_eq!(it.last(), Some(&5)); let mut it = ys.iter().peekable(); assert_eq!(it.peek(), Some(&&0)); assert_eq!(it.last(), Some(&0)); let mut it = ys.iter().peekable(); assert_eq!(it.next(), Some(&0)); assert_eq!(it.peek(), None); assert_eq!(it.last(), None); } #[test] fn test_iterator_peekable_fold() { let xs = [0, 1, 2, 3, 4, 5]; let mut it = xs.iter().peekable(); assert_eq!(it.peek(), Some(&&0)); let i = it.fold(0, |i, &x| { assert_eq!(x, xs[i]); i + 1 }); assert_eq!(i, xs.len()); } /// This is an iterator that follows the Iterator contract, /// but it is not fused. After having returned None once, it will start /// producing elements if .next() is called again. 
pub struct CycleIter<'a, T: 'a> { index: usize, data: &'a [T], } pub fn cycle<T>(data: &[T]) -> CycleIter<T> { CycleIter { index: 0, data, } } impl<'a, T> Iterator for CycleIter<'a, T> { type Item = &'a T; fn next(&mut self) -> Option<Self::Item> { let elt = self.data.get(self.index); self.index += 1; self.index %= 1 + self.data.len(); elt } } #[test] fn test_iterator_peekable_remember_peek_none_1() { // Check that the loop using .peek() terminates let data = [1, 2, 3]; let mut iter = cycle(&data).peekable(); let mut n = 0; while let Some(_) = iter.next() { let is_the_last = iter.peek().is_none(); assert_eq!(is_the_last, n == data.len() - 1); n += 1; if n > data.len() { break; } } assert_eq!(n, data.len()); } #[test] fn test_iterator_peekable_remember_peek_none_2() { let data = [0]; let mut iter = cycle(&data).peekable(); iter.next(); assert_eq!(iter.peek(), None); assert_eq!(iter.last(), None); } #[test] fn test_iterator_peekable_remember_peek_none_3() { let data = [0]; let mut iter = cycle(&data).peekable(); iter.peek(); assert_eq!(iter.nth(0), Some(&0)); let mut iter = cycle(&data).peekable(); iter.next(); assert_eq!(iter.peek(), None); assert_eq!(iter.nth(0), None); } #[test] fn test_iterator_take_while() { let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19]; let ys = [0, 1, 2, 3, 5, 13]; let it = xs.iter().take_while(|&x| *x < 15); let mut i = 0; for x in it { assert_eq!(*x, ys[i]); i += 1; } assert_eq!(i, ys.len()); } #[test] fn test_iterator_skip_while() { let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19]; let ys = [15, 16, 17, 19]; let it = xs.iter().skip_while(|&x| *x < 15); let mut i = 0; for x in it { assert_eq!(*x, ys[i]); i += 1; } assert_eq!(i, ys.len()); } #[test] fn test_iterator_skip_while_fold() { let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19]; let ys = [15, 16, 17, 19]; let it = xs.iter().skip_while(|&x| *x < 15); let i = it.fold(0, |i, &x| { assert_eq!(x, ys[i]); i + 1 }); assert_eq!(i, ys.len()); let mut it = xs.iter().skip_while(|&x| *x < 15); assert_eq!(it.next(), Some(&ys[0])); // process skips before folding let i = it.fold(1, |i, &x| { assert_eq!(x, ys[i]); i + 1 }); assert_eq!(i, ys.len()); } #[test] fn test_iterator_skip() { let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30]; let ys = [13, 15, 16, 17, 19, 20, 30]; let mut it = xs.iter().skip(5); let mut i = 0; while let Some(&x) = it.next() { assert_eq!(x, ys[i]); i += 1; assert_eq!(it.len(), xs.len()-5-i); } assert_eq!(i, ys.len()); assert_eq!(it.len(), 0); } #[test] fn test_iterator_skip_doubleended() { let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30]; let mut it = xs.iter().rev().skip(5); assert_eq!(it.next(), Some(&15)); assert_eq!(it.by_ref().rev().next(), Some(&0)); assert_eq!(it.next(), Some(&13)); assert_eq!(it.by_ref().rev().next(), Some(&1)); assert_eq!(it.next(), Some(&5)); assert_eq!(it.by_ref().rev().next(), Some(&2)); assert_eq!(it.next(), Some(&3)); assert_eq!(it.next(), None); let mut it = xs.iter().rev().skip(5).rev(); assert_eq!(it.next(), Some(&0)); assert_eq!(it.rev().next(), Some(&15)); let mut it_base = xs.iter(); { let mut it = it_base.by_ref().skip(5).rev(); assert_eq!(it.next(), Some(&30)); assert_eq!(it.next(), Some(&20)); assert_eq!(it.next(), Some(&19)); assert_eq!(it.next(), Some(&17)); assert_eq!(it.next(), Some(&16)); assert_eq!(it.next(), Some(&15)); assert_eq!(it.next(), Some(&13)); assert_eq!(it.next(), None); } // make sure the skipped parts have not been consumed assert_eq!(it_base.next(), Some(&0)); assert_eq!(it_base.next(), Some(&1)); assert_eq!(it_base.next(), Some(&2)); 
assert_eq!(it_base.next(), Some(&3)); assert_eq!(it_base.next(), Some(&5)); assert_eq!(it_base.next(), None); let it = xs.iter().skip(5).rev(); assert_eq!(it.last(), Some(&13)); } #[test] fn test_iterator_skip_nth() { let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30]; let mut it = xs.iter().skip(0); assert_eq!(it.nth(0), Some(&0)); assert_eq!(it.nth(1), Some(&2)); let mut it = xs.iter().skip(5); assert_eq!(it.nth(0), Some(&13)); assert_eq!(it.nth(1), Some(&16)); let mut it = xs.iter().skip(12); assert_eq!(it.nth(0), None); } #[test] fn test_iterator_skip_count() { let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30]; assert_eq!(xs.iter().skip(0).count(), 12); assert_eq!(xs.iter().skip(1).count(), 11); assert_eq!(xs.iter().skip(11).count(), 1); assert_eq!(xs.iter().skip(12).count(), 0); assert_eq!(xs.iter().skip(13).count(), 0); } #[test] fn test_iterator_skip_last() { let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30]; assert_eq!(xs.iter().skip(0).last(), Some(&30)); assert_eq!(xs.iter().skip(1).last(), Some(&30)); assert_eq!(xs.iter().skip(11).last(), Some(&30)); assert_eq!(xs.iter().skip(12).last(), None); assert_eq!(xs.iter().skip(13).last(), None); let mut it = xs.iter().skip(5); assert_eq!(it.next(), Some(&13)); assert_eq!(it.last(), Some(&30)); } #[test] fn test_iterator_skip_fold() { let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19, 20, 30]; let ys = [13, 15, 16, 17, 19, 20, 30]; let it = xs.iter().skip(5); let i = it.fold(0, |i, &x| { assert_eq!(x, ys[i]); i + 1 }); assert_eq!(i, ys.len()); let mut it = xs.iter().skip(5); assert_eq!(it.next(), Some(&ys[0])); // process skips before folding let i = it.fold(1, |i, &x| { assert_eq!(x, ys[i]); i + 1 }); assert_eq!(i, ys.len()); let it = xs.iter().skip(5); let i = it.rfold(ys.len(), |i, &x| { let i = i - 1; assert_eq!(x, ys[i]); i }); assert_eq!(i, 0); let mut it = xs.iter().skip(5); assert_eq!(it.next(), Some(&ys[0])); // process skips before folding let i = it.rfold(ys.len(), |i, &x| { let i = i - 1; assert_eq!(x, ys[i]); i }); assert_eq!(i, 1); } #[test] fn test_iterator_take() { let xs = [0, 1, 2, 3, 5, 13, 15, 16, 17, 19]; let ys = [0, 1, 2, 3, 5]; let mut it = xs.iter().take(5); let mut i = 0; assert_eq!(it.len(), 5); while let Some(&x) = it.next() { assert_eq!(x, ys[i]); i += 1; assert_eq!(it.len(), 5-i); } assert_eq!(i, ys.len()); assert_eq!(it.len(), 0); } #[test] fn test_iterator_take_nth() { let xs = [0, 1, 2, 4, 5]; let mut it = xs.iter(); { let mut take = it.by_ref().take(3); let mut i = 0; while let Some(&x) = take.nth(0) { assert_eq!(x, i); i += 1; } } assert_eq!(it.nth(1), Some(&5)); assert_eq!(it.nth(0), None); let xs = [0, 1, 2, 3, 4]; let mut it = xs.iter().take(7); let mut i = 1; while let Some(&x) = it.nth(1) { assert_eq!(x, i); i += 2; } } #[test] fn test_iterator_take_short() { let xs = [0, 1, 2, 3]; let ys = [0, 1, 2, 3]; let mut it = xs.iter().take(5); let mut i = 0; assert_eq!(it.len(), 4); while let Some(&x) = it.next() { assert_eq!(x, ys[i]); i += 1; assert_eq!(it.len(), 4-i); } assert_eq!(i, ys.len()); assert_eq!(it.len(), 0); } #[test] fn test_iterator_scan() { // test the type inference fn add(old: &mut isize, new: &usize) -> Option<f64> { *old += *new as isize; Some(*old as f64) } let xs = [0, 1, 2, 3, 4]; let ys = [0f64, 1.0, 3.0, 6.0, 10.0]; let it = xs.iter().scan(0, add); let mut i = 0; for x in it { assert_eq!(x, ys[i]); i += 1; } assert_eq!(i, ys.len()); } #[test] fn test_iterator_flat_map() { let xs = [0, 3, 6]; let ys = [0, 1, 2, 3, 4, 5, 6, 7, 8]; let it = xs.iter().flat_map(|&x| 
(x..).step_by(1).take(3)); let mut i = 0; for x in it { assert_eq!(x, ys[i]); i += 1; } assert_eq!(i, ys.len()); } /// Test `FlatMap::fold` with items already picked off the front and back, /// to make sure all parts of the `FlatMap` are folded correctly. #[test] fn test_iterator_flat_map_fold() { let xs = [0, 3, 6]; let ys = [1, 2, 3, 4, 5, 6, 7]; let mut it = xs.iter().flat_map(|&x| x..x+3); assert_eq!(it.next(), Some(0)); assert_eq!(it.next_back(), Some(8)); let i = it.fold(0, |i, x| { assert_eq!(x, ys[i]); i + 1 }); assert_eq!(i, ys.len()); let mut it = xs.iter().flat_map(|&x| x..x+3); assert_eq!(it.next(), Some(0)); assert_eq!(it.next_back(), Some(8)); let i = it.rfold(ys.len(), |i, x| { assert_eq!(x, ys[i - 1]); i - 1 }); assert_eq!(i, 0); } #[test] fn test_iterator_flatten() { let xs = [0, 3, 6]; let ys = [0, 1, 2, 3, 4, 5, 6, 7, 8]; let it = xs.iter().map(|&x| (x..).step_by(1).take(3)).flatten(); let mut i = 0; for x in it { assert_eq!(x, ys[i]); i += 1; } assert_eq!(i, ys.len()); } /// Test `Flatten::fold` with items already picked off the front and back, /// to make sure all parts of the `Flatten` are folded correctly. #[test] fn test_iterator_flatten_fold() { let xs = [0, 3, 6]; let ys = [1, 2, 3, 4, 5, 6, 7]; let mut it = xs.iter().map(|&x| x..x+3).flatten(); assert_eq!(it.next(), Some(0)); assert_eq!(it.next_back(), Some(8)); let i = it.fold(0, |i, x| { assert_eq!(x, ys[i]); i + 1 }); assert_eq!(i, ys.len()); let mut it = xs.iter().map(|&x| x..x+3).flatten(); assert_eq!(it.next(), Some(0)); assert_eq!(it.next_back(), Some(8)); let i = it.rfold(ys.len(), |i, x| { assert_eq!(x, ys[i - 1]); i - 1 }); assert_eq!(i, 0); } #[test] fn test_inspect() { let xs = [1, 2, 3, 4]; let mut n = 0; let ys = xs.iter() .cloned() .inspect(|_| n += 1) .collect::<Vec<usize>>(); assert_eq!(n, xs.len()); assert_eq!(&xs[..], &ys[..]); } #[test] fn test_inspect_fold() { let xs = [1, 2, 3, 4]; let mut n = 0; { let it = xs.iter().inspect(|_| n += 1); let i = it.fold(0, |i, &x| { assert_eq!(x, xs[i]); i + 1 }); assert_eq!(i, xs.len()); } assert_eq!(n, xs.len()); let mut n = 0; { let it = xs.iter().inspect(|_| n += 1); let i = it.rfold(xs.len(), |i, &x| { assert_eq!(x, xs[i - 1]); i - 1 }); assert_eq!(i, 0); } assert_eq!(n, xs.len()); } #[test] fn test_cycle() { let cycle_len = 3; let it = (0..).step_by(1).take(cycle_len).cycle(); assert_eq!(it.size_hint(), (usize::MAX, None)); for (i, x) in it.take(100).enumerate() { assert_eq!(i % cycle_len, x); } let mut it = (0..).step_by(1).take(0).cycle(); assert_eq!(it.size_hint(), (0, Some(0))); assert_eq!(it.next(), None); } #[test] fn test_iterator_nth() { let v: &[_] = &[0, 1, 2, 3, 4]; for i in 0..v.len() { assert_eq!(v.iter().nth(i).unwrap(), &v[i]); } assert_eq!(v.iter().nth(v.len()), None); } #[test] fn test_iterator_last() { let v: &[_] = &[0, 1, 2, 3, 4]; assert_eq!(v.iter().last().unwrap(), &4); assert_eq!(v[..1].iter().last().unwrap(), &0); } #[test] fn test_iterator_len() { let v: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; assert_eq!(v[..4].iter().count(), 4); assert_eq!(v[..10].iter().count(), 10); assert_eq!(v[..0].iter().count(), 0); } #[test] fn test_iterator_sum() { let v: &[i32] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; assert_eq!(v[..4].iter().cloned().sum::<i32>(), 6); assert_eq!(v.iter().cloned().sum::<i32>(), 55); assert_eq!(v[..0].iter().cloned().sum::<i32>(), 0); } #[test] fn test_iterator_sum_result() { let v: &[Result<i32, ()>] = &[Ok(1), Ok(2), Ok(3), Ok(4)]; assert_eq!(v.iter().cloned().sum::<Result<i32, _>>(), Ok(10)); let v: 
&[Result<i32, ()>] = &[Ok(1), Err(()), Ok(3), Ok(4)]; assert_eq!(v.iter().cloned().sum::<Result<i32, _>>(), Err(())); } #[test] fn test_iterator_product() { let v: &[i32] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; assert_eq!(v[..4].iter().cloned().product::<i32>(), 0); assert_eq!(v[1..5].iter().cloned().product::<i32>(), 24); assert_eq!(v[..0].iter().cloned().product::<i32>(), 1); } #[test] fn test_iterator_product_result() { let v: &[Result<i32, ()>] = &[Ok(1), Ok(2), Ok(3), Ok(4)]; assert_eq!(v.iter().cloned().product::<Result<i32, _>>(), Ok(24)); let v: &[Result<i32, ()>] = &[Ok(1), Err(()), Ok(3), Ok(4)]; assert_eq!(v.iter().cloned().product::<Result<i32, _>>(), Err(())); } #[test] fn test_iterator_max() { let v: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; assert_eq!(v[..4].iter().cloned().max(), Some(3)); assert_eq!(v.iter().cloned().max(), Some(10)); assert_eq!(v[..0].iter().cloned().max(), None); } #[test] fn test_iterator_min() { let v: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; assert_eq!(v[..4].iter().cloned().min(), Some(0)); assert_eq!(v.iter().cloned().min(), Some(0)); assert_eq!(v[..0].iter().cloned().min(), None); } #[test] fn test_iterator_size_hint() { let c = (0..).step_by(1); let v: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; let v2 = &[10, 11, 12]; let vi = v.iter(); assert_eq!((0..).size_hint(), (usize::MAX, None)); assert_eq!(c.size_hint(), (usize::MAX, None)); assert_eq!(vi.clone().size_hint(), (10, Some(10))); assert_eq!(c.clone().take(5).size_hint(), (5, Some(5))); assert_eq!(c.clone().skip(5).size_hint().1, None); assert_eq!(c.clone().take_while(|_| false).size_hint(), (0, None)); assert_eq!(c.clone().skip_while(|_| false).size_hint(), (0, None)); assert_eq!(c.clone().enumerate().size_hint(), (usize::MAX, None)); assert_eq!(c.clone().chain(vi.clone().cloned()).size_hint(), (usize::MAX, None)); assert_eq!(c.clone().zip(vi.clone()).size_hint(), (10, Some(10))); assert_eq!(c.clone().scan(0, |_,_| Some(0)).size_hint(), (0, None)); assert_eq!(c.clone().filter(|_| false).size_hint(), (0, None)); assert_eq!(c.clone().map(|_| 0).size_hint(), (usize::MAX, None)); assert_eq!(c.filter_map(|_| Some(0)).size_hint(), (0, None)); assert_eq!(vi.clone().take(5).size_hint(), (5, Some(5))); assert_eq!(vi.clone().take(12).size_hint(), (10, Some(10))); assert_eq!(vi.clone().skip(3).size_hint(), (7, Some(7))); assert_eq!(vi.clone().skip(12).size_hint(), (0, Some(0))); assert_eq!(vi.clone().take_while(|_| false).size_hint(), (0, Some(10))); assert_eq!(vi.clone().skip_while(|_| false).size_hint(), (0, Some(10))); assert_eq!(vi.clone().enumerate().size_hint(), (10, Some(10))); assert_eq!(vi.clone().chain(v2).size_hint(), (13, Some(13))); assert_eq!(vi.clone().zip(v2).size_hint(), (3, Some(3))); assert_eq!(vi.clone().scan(0, |_,_| Some(0)).size_hint(), (0, Some(10))); assert_eq!(vi.clone().filter(|_| false).size_hint(), (0, Some(10))); assert_eq!(vi.clone().map(|&i| i+1).size_hint(), (10, Some(10))); assert_eq!(vi.filter_map(|_| Some(0)).size_hint(), (0, Some(10))); } #[test] fn test_collect() { let a = vec![1, 2, 3, 4, 5]; let b: Vec<isize> = a.iter().cloned().collect(); assert!(a == b); } #[test] fn test_all() { let v: Box<[isize]> = Box::new([1, 2, 3, 4, 5]); assert!(v.iter().all(|&x| x < 10)); assert!(!v.iter().all(|&x| x % 2 == 0)); assert!(!v.iter().all(|&x| x > 100)); assert!(v[..0].iter().all(|_| panic!())); } #[test] fn test_any() { let v: Box<[isize]> = Box::new([1, 2, 3, 4, 5]); assert!(v.iter().any(|&x| x < 10)); assert!(v.iter().any(|&x| x % 2 == 0)); assert!(!v.iter().any(|&x| x > 
100)); assert!(!v[..0].iter().any(|_| panic!())); } #[test] fn test_find() { let v: &[isize] = &[1, 3, 9, 27, 103, 14, 11]; assert_eq!(*v.iter().find(|&&x| x & 1 == 0).unwrap(), 14); assert_eq!(*v.iter().find(|&&x| x % 3 == 0).unwrap(), 3); assert!(v.iter().find(|&&x| x % 12 == 0).is_none()); } #[test] fn test_find_map() { let xs: &[isize] = &[]; assert_eq!(xs.iter().find_map(half_if_even), None); let xs: &[isize] = &[3, 5]; assert_eq!(xs.iter().find_map(half_if_even), None); let xs: &[isize] = &[4, 5]; assert_eq!(xs.iter().find_map(half_if_even), Some(2)); let xs: &[isize] = &[3, 6]; assert_eq!(xs.iter().find_map(half_if_even), Some(3)); let xs: &[isize] = &[1, 2, 3, 4, 5, 6, 7]; let mut iter = xs.iter(); assert_eq!(iter.find_map(half_if_even), Some(1)); assert_eq!(iter.find_map(half_if_even), Some(2)); assert_eq!(iter.find_map(half_if_even), Some(3)); assert_eq!(iter.next(), Some(&7)); fn half_if_even(x: &isize) -> Option<isize> { if x % 2 == 0 { Some(x / 2) } else { None } } } #[test] fn test_position() { let v = &[1, 3, 9, 27, 103, 14, 11]; assert_eq!(v.iter().position(|x| *x & 1 == 0).unwrap(), 5); assert_eq!(v.iter().position(|x| *x % 3 == 0).unwrap(), 1); assert!(v.iter().position(|x| *x % 12 == 0).is_none()); } #[test] fn test_count() { let xs = &[1, 2, 2, 1, 5, 9, 0, 2]; assert_eq!(xs.iter().filter(|x| **x == 2).count(), 3); assert_eq!(xs.iter().filter(|x| **x == 5).count(), 1); assert_eq!(xs.iter().filter(|x| **x == 95).count(), 0); } #[test] fn test_max_by_key() { let xs: &[isize] = &[-3, 0, 1, 5, -10]; assert_eq!(*xs.iter().max_by_key(|x| x.abs()).unwrap(), -10); } #[test] fn test_max_by() { let xs: &[isize] = &[-3, 0, 1, 5, -10]; assert_eq!(*xs.iter().max_by(|x, y| x.abs().cmp(&y.abs())).unwrap(), -10); } #[test] fn test_min_by_key() { let xs: &[isize] = &[-3, 0, 1, 5, -10]; assert_eq!(*xs.iter().min_by_key(|x| x.abs()).unwrap(), 0); } #[test] fn test_min_by() { let xs: &[isize] = &[-3, 0, 1, 5, -10]; assert_eq!(*xs.iter().min_by(|x, y| x.abs().cmp(&y.abs())).unwrap(), 0); } #[test] fn test_by_ref() { let mut xs = 0..10; // sum the first five values let partial_sum = xs.by_ref().take(5).fold(0, |a, b| a + b); assert_eq!(partial_sum, 10); assert_eq!(xs.next(), Some(5)); } #[test] fn test_rev() { let xs = [2, 4, 6, 8, 10, 12, 14, 16]; let mut it = xs.iter(); it.next(); it.next(); assert!(it.rev().cloned().collect::<Vec<isize>>() == vec![16, 14, 12, 10, 8, 6]); } #[test] fn test_cloned() { let xs = [2, 4, 6, 8]; let mut it = xs.iter().cloned(); assert_eq!(it.len(), 4); assert_eq!(it.next(), Some(2)); assert_eq!(it.len(), 3); assert_eq!(it.next(), Some(4)); assert_eq!(it.len(), 2); assert_eq!(it.next_back(), Some(8)); assert_eq!(it.len(), 1); assert_eq!(it.next_back(), Some(6)); assert_eq!(it.len(), 0); assert_eq!(it.next_back(), None); } #[test] fn test_double_ended_map() { let xs = [1, 2, 3, 4, 5, 6]; let mut it = xs.iter().map(|&x| x * -1); assert_eq!(it.next(), Some(-1)); assert_eq!(it.next(), Some(-2)); assert_eq!(it.next_back(), Some(-6)); assert_eq!(it.next_back(), Some(-5)); assert_eq!(it.next(), Some(-3)); assert_eq!(it.next_back(), Some(-4)); assert_eq!(it.next(), None); } #[test] fn test_double_ended_enumerate() { let xs = [1, 2, 3, 4, 5, 6]; let mut it = xs.iter().cloned().enumerate(); assert_eq!(it.next(), Some((0, 1))); assert_eq!(it.next(), Some((1, 2))); assert_eq!(it.next_back(), Some((5, 6))); assert_eq!(it.next_back(), Some((4, 5))); assert_eq!(it.next_back(), Some((3, 4))); assert_eq!(it.next_back(), Some((2, 3))); assert_eq!(it.next(), None); } #[test] fn 
test_double_ended_zip() { let xs = [1, 2, 3, 4, 5, 6]; let ys = [1, 2, 3, 7]; let a = xs.iter().cloned(); let b = ys.iter().cloned(); let mut it = a.zip(b); assert_eq!(it.next(), Some((1, 1))); assert_eq!(it.next(), Some((2, 2))); assert_eq!(it.next_back(), Some((4, 7))); assert_eq!(it.next_back(), Some((3, 3))); assert_eq!(it.next(), None); } #[test] fn test_double_ended_filter() { let xs = [1, 2, 3, 4, 5, 6]; let mut it = xs.iter().filter(|&x| *x & 1 == 0); assert_eq!(it.next_back().unwrap(), &6); assert_eq!(it.next_back().unwrap(), &4); assert_eq!(it.next().unwrap(), &2); assert_eq!(it.next_back(), None); } #[test] fn test_double_ended_filter_map() { let xs = [1, 2, 3, 4, 5, 6]; let mut it = xs.iter().filter_map(|&x| if x & 1 == 0 { Some(x * 2) } else { None }); assert_eq!(it.next_back().unwrap(), 12); assert_eq!(it.next_back().unwrap(), 8); assert_eq!(it.next().unwrap(), 4); assert_eq!(it.next_back(), None); } #[test] fn test_double_ended_chain() { let xs = [1, 2, 3, 4, 5]; let ys = [7, 9, 11]; let mut it = xs.iter().chain(&ys).rev(); assert_eq!(it.next().unwrap(), &11); assert_eq!(it.next().unwrap(), &9); assert_eq!(it.next_back().unwrap(), &1); assert_eq!(it.next_back().unwrap(), &2); assert_eq!(it.next_back().unwrap(), &3); assert_eq!(it.next_back().unwrap(), &4); assert_eq!(it.next_back().unwrap(), &5); assert_eq!(it.next_back().unwrap(), &7); assert_eq!(it.next_back(), None); // test that .chain() is well behaved with an unfused iterator struct CrazyIterator(bool); impl CrazyIterator { fn new() -> CrazyIterator { CrazyIterator(false) } } impl Iterator for CrazyIterator { type Item = i32; fn next(&mut self) -> Option<i32> { if self.0 { Some(99) } else { self.0 = true; None } } } impl DoubleEndedIterator for CrazyIterator { fn next_back(&mut self) -> Option<i32> { self.next() } } assert_eq!(CrazyIterator::new().chain(0..10).rev().last(), Some(0)); assert!((0..10).chain(CrazyIterator::new()).rev().any(|i| i == 0)); } #[test] fn test_rposition() { fn f(xy: &(isize, char)) -> bool { let (_x, y) = *xy; y == 'b' } fn g(xy: &(isize, char)) -> bool { let (_x, y) = *xy; y == 'd' } let v = [(0, 'a'), (1, 'b'), (2, 'c'), (3, 'b')]; assert_eq!(v.iter().rposition(f), Some(3)); assert!(v.iter().rposition(g).is_none()); } #[test] #[should_panic] fn test_rposition_panic() { let v: [(Box<_>, Box<_>); 4] = [(box 0, box 0), (box 0, box 0), (box 0, box 0), (box 0, box 0)]; let mut i = 0; v.iter().rposition(|_elt| { if i == 2 { panic!() } i += 1; false }); } #[test] fn test_double_ended_flat_map() { let u = [0,1]; let v = [5,6,7,8]; let mut it = u.iter().flat_map(|x| &v[*x..v.len()]); assert_eq!(it.next_back().unwrap(), &8); assert_eq!(it.next().unwrap(), &5); assert_eq!(it.next_back().unwrap(), &7); assert_eq!(it.next_back().unwrap(), &6); assert_eq!(it.next_back().unwrap(), &8); assert_eq!(it.next().unwrap(), &6); assert_eq!(it.next_back().unwrap(), &7); assert_eq!(it.next_back(), None); assert_eq!(it.next(), None); assert_eq!(it.next_back(), None); } #[test] fn test_double_ended_flatten() { let u = [0,1]; let v = [5,6,7,8]; let mut it = u.iter().map(|x| &v[*x..v.len()]).flatten(); assert_eq!(it.next_back().unwrap(), &8); assert_eq!(it.next().unwrap(), &5); assert_eq!(it.next_back().unwrap(), &7); assert_eq!(it.next_back().unwrap(), &6); assert_eq!(it.next_back().unwrap(), &8); assert_eq!(it.next().unwrap(), &6); assert_eq!(it.next_back().unwrap(), &7); assert_eq!(it.next_back(), None); assert_eq!(it.next(), None); assert_eq!(it.next_back(), None); } #[test] fn test_double_ended_range() { 
assert_eq!((11..14).rev().collect::<Vec<_>>(), [13, 12, 11]); for _ in (10..0).rev() { panic!("unreachable"); } assert_eq!((11..14).rev().collect::<Vec<_>>(), [13, 12, 11]); for _ in (10..0).rev() { panic!("unreachable"); } } #[test] fn test_range() { assert_eq!((0..5).collect::<Vec<_>>(), [0, 1, 2, 3, 4]); assert_eq!((-10..-1).collect::<Vec<_>>(), [-10, -9, -8, -7, -6, -5, -4, -3, -2]); assert_eq!((0..5).rev().collect::<Vec<_>>(), [4, 3, 2, 1, 0]); assert_eq!((200..-5).count(), 0); assert_eq!((200..-5).rev().count(), 0); assert_eq!((200..200).count(), 0); assert_eq!((200..200).rev().count(), 0); assert_eq!((0..100).size_hint(), (100, Some(100))); // this test is only meaningful when sizeof usize < sizeof u64 assert_eq!((usize::MAX - 1..usize::MAX).size_hint(), (1, Some(1))); assert_eq!((-10..-1).size_hint(), (9, Some(9))); assert_eq!((-1..-10).size_hint(), (0, Some(0))); assert_eq!((-70..58).size_hint(), (128, Some(128))); assert_eq!((-128..127).size_hint(), (255, Some(255))); assert_eq!((-2..isize::MAX).size_hint(), (isize::MAX as usize + 2, Some(isize::MAX as usize + 2))); } #[test] fn test_range_exhaustion() { let mut r = 10..10; assert!(r.is_empty()); assert_eq!(r.next(), None); assert_eq!(r.next_back(), None); assert_eq!(r, 10..10); let mut r = 10..12; assert_eq!(r.next(), Some(10)); assert_eq!(r.next(), Some(11)); assert!(r.is_empty()); assert_eq!(r, 12..12); assert_eq!(r.next(), None); let mut r = 10..12; assert_eq!(r.next_back(), Some(11)); assert_eq!(r.next_back(), Some(10)); assert!(r.is_empty()); assert_eq!(r, 10..10); assert_eq!(r.next_back(), None); let mut r = 100..10; assert!(r.is_empty()); assert_eq!(r.next(), None); assert_eq!(r.next_back(), None); assert_eq!(r, 100..10); } #[test] fn test_range_inclusive_exhaustion() { let mut r = 10..=10; assert_eq!(r.next(), Some(10)); assert!(r.is_empty()); assert_eq!(r.next(), None); assert_eq!(r.next(), None); let mut r = 10..=10; assert_eq!(r.next_back(), Some(10)); assert!(r.is_empty()); assert_eq!(r.next_back(), None); let mut r = 10..=12; assert_eq!(r.next(), Some(10)); assert_eq!(r.next(), Some(11)); assert_eq!(r.next(), Some(12)); assert!(r.is_empty()); assert_eq!(r.next(), None); let mut r = 10..=12; assert_eq!(r.next_back(), Some(12)); assert_eq!(r.next_back(), Some(11)); assert_eq!(r.next_back(), Some(10)); assert!(r.is_empty()); assert_eq!(r.next_back(), None); let mut r = 10..=12; assert_eq!(r.nth(2), Some(12)); assert!(r.is_empty()); assert_eq!(r.next(), None); let mut r = 10..=12; assert_eq!(r.nth(5), None); assert!(r.is_empty()); assert_eq!(r.next(), None); let mut r = 100..=10; assert_eq!(r.next(), None); assert!(r.is_empty()); assert_eq!(r.next(), None); assert_eq!(r.next(), None); assert_eq!(r, 100..=10); let mut r = 100..=10; assert_eq!(r.next_back(), None); assert!(r.is_empty()); assert_eq!(r.next_back(), None); assert_eq!(r.next_back(), None); assert_eq!(r, 100..=10); } #[test] fn test_range_nth() { assert_eq!((10..15).nth(0), Some(10)); assert_eq!((10..15).nth(1), Some(11)); assert_eq!((10..15).nth(4), Some(14)); assert_eq!((10..15).nth(5), None); let mut r = 10..20; assert_eq!(r.nth(2), Some(12)); assert_eq!(r, 13..20); assert_eq!(r.nth(2), Some(15)); assert_eq!(r, 16..20); assert_eq!(r.nth(10), None); assert_eq!(r, 20..20); } #[test] fn test_range_from_nth() { assert_eq!((10..).nth(0), Some(10)); assert_eq!((10..).nth(1), Some(11)); assert_eq!((10..).nth(4), Some(14)); let mut r = 10..; assert_eq!(r.nth(2), Some(12)); assert_eq!(r, 13..); assert_eq!(r.nth(2), Some(15)); assert_eq!(r, 16..); 
assert_eq!(r.nth(10), Some(26)); assert_eq!(r, 27..); assert_eq!((0..).size_hint(), (usize::MAX, None)); } fn is_trusted_len<I: TrustedLen>(_: I) {} #[test] fn test_range_from_take() { let mut it = (0..).take(3); assert_eq!(it.next(), Some(0)); assert_eq!(it.next(), Some(1)); assert_eq!(it.next(), Some(2)); assert_eq!(it.next(), None); is_trusted_len((0..).take(3)); assert_eq!((0..).take(3).size_hint(), (3, Some(3))); assert_eq!((0..).take(0).size_hint(), (0, Some(0))); assert_eq!((0..).take(usize::MAX).size_hint(), (usize::MAX, Some(usize::MAX))); } #[test] fn test_range_from_take_collect() { let v: Vec<_> = (0..).take(3).collect(); assert_eq!(v, vec![0, 1, 2]); } #[test] fn test_range_inclusive_nth() { assert_eq!((10..=15).nth(0), Some(10)); assert_eq!((10..=15).nth(1), Some(11)); assert_eq!((10..=15).nth(5), Some(15)); assert_eq!((10..=15).nth(6), None); let mut r = 10_u8..=20; assert_eq!(r.nth(2), Some(12)); assert_eq!(r, 13..=20); assert_eq!(r.nth(2), Some(15)); assert_eq!(r, 16..=20); assert_eq!(r.is_empty(), false); assert_eq!(ExactSizeIterator::is_empty(&r), false); assert_eq!(r.nth(10), None); assert_eq!(r.is_empty(), true); assert_eq!(ExactSizeIterator::is_empty(&r), true); } #[test] fn test_range_step() { #![allow(deprecated)] assert_eq!((0..20).step_by(5).collect::<Vec<isize>>(), [0, 5, 10, 15]); assert_eq!((1..21).rev().step_by(5).collect::<Vec<isize>>(), [20, 15, 10, 5]); assert_eq!((1..21).rev().step_by(6).collect::<Vec<isize>>(), [20, 14, 8, 2]); assert_eq!((200..255).step_by(50).collect::<Vec<u8>>(), [200, 250]); assert_eq!((200..-5).step_by(1).collect::<Vec<isize>>(), []); assert_eq!((200..200).step_by(1).collect::<Vec<isize>>(), []); assert_eq!((0..20).step_by(1).size_hint(), (20, Some(20))); assert_eq!((0..20).step_by(21).size_hint(), (1, Some(1))); assert_eq!((0..20).step_by(5).size_hint(), (4, Some(4))); assert_eq!((1..21).rev().step_by(5).size_hint(), (4, Some(4))); assert_eq!((1..21).rev().step_by(6).size_hint(), (4, Some(4))); assert_eq!((20..-5).step_by(1).size_hint(), (0, Some(0))); assert_eq!((20..20).step_by(1).size_hint(), (0, Some(0))); assert_eq!((i8::MIN..i8::MAX).step_by(-(i8::MIN as i32) as usize).size_hint(), (2, Some(2))); assert_eq!((i16::MIN..i16::MAX).step_by(i16::MAX as usize).size_hint(), (3, Some(3))); assert_eq!((isize::MIN..isize::MAX).step_by(1).size_hint(), (usize::MAX, Some(usize::MAX))); } #[test] fn test_range_last_max() { assert_eq!((0..20).last(), Some(19)); assert_eq!((-20..0).last(), Some(-1)); assert_eq!((5..5).last(), None); assert_eq!((0..20).max(), Some(19)); assert_eq!((-20..0).max(), Some(-1)); assert_eq!((5..5).max(), None); } #[test] fn test_range_inclusive_last_max() { assert_eq!((0..=20).last(), Some(20)); assert_eq!((-20..=0).last(), Some(0)); assert_eq!((5..=5).last(), Some(5)); let mut r = 10..=10; r.next(); assert_eq!(r.last(), None); assert_eq!((0..=20).max(), Some(20)); assert_eq!((-20..=0).max(), Some(0)); assert_eq!((5..=5).max(), Some(5)); let mut r = 10..=10; r.next(); assert_eq!(r.max(), None); } #[test] fn test_range_min() { assert_eq!((0..20).min(), Some(0)); assert_eq!((-20..0).min(), Some(-20)); assert_eq!((5..5).min(), None); } #[test] fn test_range_inclusive_min() { assert_eq!((0..=20).min(), Some(0)); assert_eq!((-20..=0).min(), Some(-20)); assert_eq!((5..=5).min(), Some(5)); let mut r = 10..=10; r.next(); assert_eq!(r.min(), None); } #[test] fn test_range_inclusive_folds() { assert_eq!((1..=10).sum::<i32>(), 55); assert_eq!((1..=10).rev().sum::<i32>(), 55); let mut it = 40..=50; assert_eq!(it.try_fold(0, 
i8::checked_add), None); assert_eq!(it, 44..=50); assert_eq!(it.try_rfold(0, i8::checked_add), None); assert_eq!(it, 44..=47); let mut it = 10..=20; assert_eq!(it.try_fold(0, |a,b| Some(a+b)), Some(165)); assert!(it.is_empty()); let mut it = 10..=20; assert_eq!(it.try_rfold(0, |a,b| Some(a+b)), Some(165)); assert!(it.is_empty()); } #[test] fn test_repeat() { let mut it = repeat(42); assert_eq!(it.next(), Some(42)); assert_eq!(it.next(), Some(42)); assert_eq!(it.next(), Some(42)); assert_eq!(repeat(42).size_hint(), (usize::MAX, None)); } #[test] fn test_repeat_take() { let mut it = repeat(42).take(3); assert_eq!(it.next(), Some(42)); assert_eq!(it.next(), Some(42)); assert_eq!(it.next(), Some(42)); assert_eq!(it.next(), None); is_trusted_len(repeat(42).take(3)); assert_eq!(repeat(42).take(3).size_hint(), (3, Some(3))); assert_eq!(repeat(42).take(0).size_hint(), (0, Some(0))); assert_eq!(repeat(42).take(usize::MAX).size_hint(), (usize::MAX, Some(usize::MAX))); } #[test] fn test_repeat_take_collect() { let v: Vec<_> = repeat(42).take(3).collect(); assert_eq!(v, vec![42, 42, 42]); } #[test] fn test_repeat_with() { #[derive(PartialEq, Debug)] struct NotClone(usize); let mut it = repeat_with(|| NotClone(42)); assert_eq!(it.next(), Some(NotClone(42))); assert_eq!(it.next(), Some(NotClone(42))); assert_eq!(it.next(), Some(NotClone(42))); assert_eq!(repeat_with(|| NotClone(42)).size_hint(), (usize::MAX, None)); } #[test] fn test_repeat_with_take() { let mut it = repeat_with(|| 42).take(3); assert_eq!(it.next(), Some(42)); assert_eq!(it.next(), Some(42)); assert_eq!(it.next(), Some(42)); assert_eq!(it.next(), None); is_trusted_len(repeat_with(|| 42).take(3)); assert_eq!(repeat_with(|| 42).take(3).size_hint(), (3, Some(3))); assert_eq!(repeat_with(|| 42).take(0).size_hint(), (0, Some(0))); assert_eq!(repeat_with(|| 42).take(usize::MAX).size_hint(), (usize::MAX, Some(usize::MAX))); } #[test] fn test_repeat_with_take_collect() { let mut curr = 1; let v: Vec<_> = repeat_with(|| { let tmp = curr; curr *= 2; tmp }) .take(5).collect(); assert_eq!(v, vec![1, 2, 4, 8, 16]); } #[test] fn test_fuse() { let mut it = 0..3; assert_eq!(it.len(), 3); assert_eq!(it.next(), Some(0)); assert_eq!(it.len(), 2); assert_eq!(it.next(), Some(1)); assert_eq!(it.len(), 1); assert_eq!(it.next(), Some(2)); assert_eq!(it.len(), 0); assert_eq!(it.next(), None); assert_eq!(it.len(), 0); assert_eq!(it.next(), None); assert_eq!(it.len(), 0); assert_eq!(it.next(), None); assert_eq!(it.len(), 0); } #[test] fn test_fuse_nth() { let xs = [0, 1, 2]; let mut it = xs.iter(); assert_eq!(it.len(), 3); assert_eq!(it.nth(2), Some(&2)); assert_eq!(it.len(), 0); assert_eq!(it.nth(2), None); assert_eq!(it.len(), 0); } #[test] fn test_fuse_last() { let xs = [0, 1, 2]; let it = xs.iter(); assert_eq!(it.len(), 3); assert_eq!(it.last(), Some(&2)); } #[test] fn test_fuse_count() { let xs = [0, 1, 2]; let it = xs.iter(); assert_eq!(it.len(), 3); assert_eq!(it.count(), 3); // Can't check len now because count consumes. 
} #[test] fn test_fuse_fold() { let xs = [0, 1, 2]; let it = xs.iter(); // `FusedIterator` let i = it.fuse().fold(0, |i, &x| { assert_eq!(x, xs[i]); i + 1 }); assert_eq!(i, xs.len()); let it = xs.iter(); // `FusedIterator` let i = it.fuse().rfold(xs.len(), |i, &x| { assert_eq!(x, xs[i - 1]); i - 1 }); assert_eq!(i, 0); let it = xs.iter().scan((), |_, &x| Some(x)); // `!FusedIterator` let i = it.fuse().fold(0, |i, x| { assert_eq!(x, xs[i]); i + 1 }); assert_eq!(i, xs.len()); } #[test] fn test_once() { let mut it = once(42); assert_eq!(it.next(), Some(42)); assert_eq!(it.next(), None); } #[test] fn test_empty() { let mut it = empty::<i32>(); assert_eq!(it.next(), None); } #[test] fn test_chain_fold() { let xs = [1, 2, 3]; let ys = [1, 2, 0]; let mut iter = xs.iter().chain(&ys); iter.next(); let mut result = Vec::new(); iter.fold((), |(), &elt| result.push(elt)); assert_eq!(&[2, 3, 1, 2, 0], &result[..]); } #[test] fn test_step_replace_unsigned() { let mut x = 4u32; let y = x.replace_zero(); assert_eq!(x, 0); assert_eq!(y, 4); x = 5; let y = x.replace_one(); assert_eq!(x, 1); assert_eq!(y, 5); } #[test] fn test_step_replace_signed() { let mut x = 4i32; let y = x.replace_zero(); assert_eq!(x, 0); assert_eq!(y, 4); x = 5; let y = x.replace_one(); assert_eq!(x, 1); assert_eq!(y, 5); } #[test] fn test_step_replace_no_between() { let mut x = 4u128; let y = x.replace_zero(); assert_eq!(x, 0); assert_eq!(y, 4); x = 5; let y = x.replace_one(); assert_eq!(x, 1); assert_eq!(y, 5); } #[test] fn test_rev_try_folds() { let f = &|acc, x| i32::checked_add(2*acc, x); assert_eq!((1..10).rev().try_fold(7, f), (1..10).try_rfold(7, f)); assert_eq!((1..10).rev().try_rfold(7, f), (1..10).try_fold(7, f)); let a = [10, 20, 30, 40, 100, 60, 70, 80, 90]; let mut iter = a.iter().rev(); assert_eq!(iter.try_fold(0_i8, |acc, &x| acc.checked_add(x)), None); assert_eq!(iter.next(), Some(&70)); let mut iter = a.iter().rev(); assert_eq!(iter.try_rfold(0_i8, |acc, &x| acc.checked_add(x)), None); assert_eq!(iter.next_back(), Some(&60)); } #[test] fn test_cloned_try_folds() { let a = [1, 2, 3, 4, 5, 6, 7, 8, 9]; let f = &|acc, x| i32::checked_add(2*acc, x); let f_ref = &|acc, &x| i32::checked_add(2*acc, x); assert_eq!(a.iter().cloned().try_fold(7, f), a.iter().try_fold(7, f_ref)); assert_eq!(a.iter().cloned().try_rfold(7, f), a.iter().try_rfold(7, f_ref)); let a = [10, 20, 30, 40, 100, 60, 70, 80, 90]; let mut iter = a.iter().cloned(); assert_eq!(iter.try_fold(0_i8, |acc, x| acc.checked_add(x)), None); assert_eq!(iter.next(), Some(60)); let mut iter = a.iter().cloned(); assert_eq!(iter.try_rfold(0_i8, |acc, x| acc.checked_add(x)), None); assert_eq!(iter.next_back(), Some(70)); } #[test] fn test_chain_try_folds() { let c = || (0..10).chain(10..20); let f = &|acc, x| i32::checked_add(2*acc, x); assert_eq!(c().try_fold(7, f), (0..20).try_fold(7, f)); assert_eq!(c().try_rfold(7, f), (0..20).rev().try_fold(7, f)); let mut iter = c(); assert_eq!(iter.position(|x| x == 5), Some(5)); assert_eq!(iter.next(), Some(6), "stopped in front, state Both"); assert_eq!(iter.position(|x| x == 13), Some(6)); assert_eq!(iter.next(), Some(14), "stopped in back, state Back"); assert_eq!(iter.try_fold(0, |acc, x| Some(acc+x)), Some((15..20).sum())); let mut iter = c().rev(); // use rev to access try_rfold assert_eq!(iter.position(|x| x == 15), Some(4)); assert_eq!(iter.next(), Some(14), "stopped in back, state Both"); assert_eq!(iter.position(|x| x == 5), Some(8)); assert_eq!(iter.next(), Some(4), "stopped in front, state Front"); 
assert_eq!(iter.try_fold(0, |acc, x| Some(acc+x)), Some((0..4).sum())); let mut iter = c(); iter.by_ref().rev().nth(14); // skip the last 15, ending in state Front assert_eq!(iter.try_fold(7, f), (0..5).try_fold(7, f)); let mut iter = c(); iter.nth(14); // skip the first 15, ending in state Back assert_eq!(iter.try_rfold(7, f), (15..20).try_rfold(7, f)); } #[test] fn test_map_try_folds() { let f = &|acc, x| i32::checked_add(2*acc, x); assert_eq!((0..10).map(|x| x+3).try_fold(7, f), (3..13).try_fold(7, f)); assert_eq!((0..10).map(|x| x+3).try_rfold(7, f), (3..13).try_rfold(7, f)); let mut iter = (0..40).map(|x| x+10); assert_eq!(iter.try_fold(0, i8::checked_add), None); assert_eq!(iter.next(), Some(20)); assert_eq!(iter.try_rfold(0, i8::checked_add), None); assert_eq!(iter.next_back(), Some(46)); } #[test] fn test_filter_try_folds() { fn p(&x: &i32) -> bool { 0 <= x && x < 10 } let f = &|acc, x| i32::checked_add(2*acc, x); assert_eq!((-10..20).filter(p).try_fold(7, f), (0..10).try_fold(7, f)); assert_eq!((-10..20).filter(p).try_rfold(7, f), (0..10).try_rfold(7, f)); let mut iter = (0..40).filter(|&x| x % 2 == 1); assert_eq!(iter.try_fold(0, i8::checked_add), None); assert_eq!(iter.next(), Some(25)); assert_eq!(iter.try_rfold(0, i8::checked_add), None); assert_eq!(iter.next_back(), Some(31)); } #[test] fn test_filter_map_try_folds() { let mp = &|x| if 0 <= x && x < 10 { Some(x*2) } else { None }; let f = &|acc, x| i32::checked_add(2*acc, x); assert_eq!((-9..20).filter_map(mp).try_fold(7, f), (0..10).map(|x| 2*x).try_fold(7, f)); assert_eq!((-9..20).filter_map(mp).try_rfold(7, f), (0..10).map(|x| 2*x).try_rfold(7, f)); let mut iter = (0..40).filter_map(|x| if x%2 == 1 { None } else { Some(x*2 + 10) }); assert_eq!(iter.try_fold(0, i8::checked_add), None); assert_eq!(iter.next(), Some(38)); assert_eq!(iter.try_rfold(0, i8::checked_add), None); assert_eq!(iter.next_back(), Some(78)); } #[test] fn test_enumerate_try_folds() { let f = &|acc, (i, x)| usize::checked_add(2*acc, x/(i+1) + i); assert_eq!((9..18).enumerate().try_fold(7, f), (0..9).map(|i| (i, i+9)).try_fold(7, f)); assert_eq!((9..18).enumerate().try_rfold(7, f), (0..9).map(|i| (i, i+9)).try_rfold(7, f)); let mut iter = (100..200).enumerate(); let f = &|acc, (i, x)| u8::checked_add(acc, u8::checked_div(x, i as u8 + 1)?); assert_eq!(iter.try_fold(0, f), None); assert_eq!(iter.next(), Some((7, 107))); assert_eq!(iter.try_rfold(0, f), None); assert_eq!(iter.next_back(), Some((11, 111))); } #[test] fn test_peek_try_fold() { let f = &|acc, x| i32::checked_add(2*acc, x); assert_eq!((1..20).peekable().try_fold(7, f), (1..20).try_fold(7, f)); let mut iter = (1..20).peekable(); assert_eq!(iter.peek(), Some(&1)); assert_eq!(iter.try_fold(7, f), (1..20).try_fold(7, f)); let mut iter = [100, 20, 30, 40, 50, 60, 70].iter().cloned().peekable(); assert_eq!(iter.peek(), Some(&100)); assert_eq!(iter.try_fold(0, i8::checked_add), None); assert_eq!(iter.peek(), Some(&40)); } #[test] fn test_skip_while_try_fold() { let f = &|acc, x| i32::checked_add(2*acc, x); fn p(&x: &i32) -> bool { (x % 10) <= 5 } assert_eq!((1..20).skip_while(p).try_fold(7, f), (6..20).try_fold(7, f)); let mut iter = (1..20).skip_while(p); assert_eq!(iter.nth(5), Some(11)); assert_eq!(iter.try_fold(7, f), (12..20).try_fold(7, f)); let mut iter = (0..50).skip_while(|&x| (x % 20) < 15); assert_eq!(iter.try_fold(0, i8::checked_add), None); assert_eq!(iter.next(), Some(23)); } #[test] fn test_take_while_folds() { let f = &|acc, x| i32::checked_add(2*acc, x); 
assert_eq!((1..20).take_while(|&x| x != 10).try_fold(7, f), (1..10).try_fold(7, f)); let mut iter = (1..20).take_while(|&x| x != 10); assert_eq!(iter.try_fold(0, |x, y| Some(x+y)), Some((1..10).sum())); assert_eq!(iter.next(), None, "flag should be set"); let iter = (1..20).take_while(|&x| x != 10); assert_eq!(iter.fold(0, |x, y| x+y), (1..10).sum()); let mut iter = (10..50).take_while(|&x| x != 40); assert_eq!(iter.try_fold(0, i8::checked_add), None); assert_eq!(iter.next(), Some(20)); } #[test] fn test_skip_try_folds() { let f = &|acc, x| i32::checked_add(2*acc, x); assert_eq!((1..20).skip(9).try_fold(7, f), (10..20).try_fold(7, f)); assert_eq!((1..20).skip(9).try_rfold(7, f), (10..20).try_rfold(7, f)); let mut iter = (0..30).skip(10); assert_eq!(iter.try_fold(0, i8::checked_add), None); assert_eq!(iter.next(), Some(20)); assert_eq!(iter.try_rfold(0, i8::checked_add), None); assert_eq!(iter.next_back(), Some(24)); } #[test] fn test_take_try_folds() { let f = &|acc, x| i32::checked_add(2*acc, x); assert_eq!((10..30).take(10).try_fold(7, f), (10..20).try_fold(7, f)); //assert_eq!((10..30).take(10).try_rfold(7, f), (10..20).try_rfold(7, f)); let mut iter = (10..30).take(20); assert_eq!(iter.try_fold(0, i8::checked_add), None); assert_eq!(iter.next(), Some(20)); //assert_eq!(iter.try_rfold(0, i8::checked_add), None); //assert_eq!(iter.next_back(), Some(24)); } #[test] fn test_flat_map_try_folds() { let f = &|acc, x| i32::checked_add(acc*2/3, x); let mr = &|x| (5*x)..(5*x + 5); assert_eq!((0..10).flat_map(mr).try_fold(7, f), (0..50).try_fold(7, f)); assert_eq!((0..10).flat_map(mr).try_rfold(7, f), (0..50).try_rfold(7, f)); let mut iter = (0..10).flat_map(mr); iter.next(); iter.next_back(); // have front and back iters in progress assert_eq!(iter.try_rfold(7, f), (1..49).try_rfold(7, f)); let mut iter = (0..10).flat_map(|x| (4*x)..(4*x + 4)); assert_eq!(iter.try_fold(0, i8::checked_add), None); assert_eq!(iter.next(), Some(17)); assert_eq!(iter.try_rfold(0, i8::checked_add), None); assert_eq!(iter.next_back(), Some(35)); } #[test] fn test_flatten_try_folds() { let f = &|acc, x| i32::checked_add(acc*2/3, x); let mr = &|x| (5*x)..(5*x + 5); assert_eq!((0..10).map(mr).flatten().try_fold(7, f), (0..50).try_fold(7, f)); assert_eq!((0..10).map(mr).flatten().try_rfold(7, f), (0..50).try_rfold(7, f)); let mut iter = (0..10).map(mr).flatten(); iter.next(); iter.next_back(); // have front and back iters in progress assert_eq!(iter.try_rfold(7, f), (1..49).try_rfold(7, f)); let mut iter = (0..10).map(|x| (4*x)..(4*x + 4)).flatten(); assert_eq!(iter.try_fold(0, i8::checked_add), None); assert_eq!(iter.next(), Some(17)); assert_eq!(iter.try_rfold(0, i8::checked_add), None); assert_eq!(iter.next_back(), Some(35)); } #[test] fn test_functor_laws() { // identity: fn identity<T>(x: T) -> T { x } assert_eq!((0..10).map(identity).sum::<usize>(), (0..10).sum()); // composition: fn f(x: usize) -> usize { x + 3 } fn g(x: usize) -> usize { x * 2 } fn h(x: usize) -> usize { g(f(x)) } assert_eq!((0..10).map(f).map(g).sum::<usize>(), (0..10).map(h).sum()); } #[test] fn test_monad_laws_left_identity() { fn f(x: usize) -> impl Iterator<Item = usize> { (0..10).map(move |y| x * y) } assert_eq!(once(42).flat_map(f.clone()).sum::<usize>(), f(42).sum()); } #[test] fn test_monad_laws_right_identity() { assert_eq!((0..10).flat_map(|x| once(x)).sum::<usize>(), (0..10).sum()); } #[test] fn test_monad_laws_associativity() { fn f(x: usize) -> impl Iterator<Item = usize> { 0..x } fn g(x: usize) -> impl Iterator<Item = usize> { 
(0..x).rev() } assert_eq!((0..10).flat_map(f).flat_map(g).sum::<usize>(), (0..10).flat_map(|x| f(x).flat_map(g)).sum::<usize>()); }
29.239364
97
0.535104
d961efa5f734bdcaa3784dd00f92b9043ca6cd04
1329
mod optimization;
mod parser;
mod transpiler;

use std::fs::File;
use std::io::{BufReader, Read, Result};
use std::path::{Path, PathBuf};
use structopt::StructOpt;

#[derive(StructOpt, Debug)]
#[structopt(name = "bfc")]
struct Options {
    #[structopt(parse(from_os_str))]
    /// input file
    input: PathBuf,

    #[structopt(short, long, default_value = "out.c", parse(from_os_str))]
    /// output file
    out: PathBuf,
}

fn read_file<P: AsRef<Path>>(path: P) -> Result<String> {
    let f = File::open(path)?;
    let mut buf_reader = BufReader::new(f);
    let mut contents = String::new();
    buf_reader.read_to_string(&mut contents)?;
    Ok(contents)
}

fn main() {
    let opt: Options = Options::from_args();
    let r = read_file(opt.input);
    if let Ok(contents) = r {
        let parsed = parser::parse(contents.as_str());
        match parsed {
            Ok(tree) => {
                let mut tree = tree;
                tree = optimization::optimize(tree);
                let mut transpiler = transpiler::Transpiler::new(tree, opt.out);
                transpiler.transpile();
            }
            Err(e) => {
                eprintln!("error: {}", e.message);
                return;
            }
        }
    } else if let Err(e) = r {
        eprintln!("error: {}", e.to_string());
    }
}
25.075472
80
0.5538
e8c9169a61f5df4ffec13380014630aaea930d31
3404
// SPDX-License-Identifier: MIT

/// Mirred action
///
/// The mirred action allows packet mirroring (copying) or
/// redirecting (stealing) the packet it receives. Mirroring is what
/// is sometimes referred to as Switch Port Analyzer (SPAN) and is
/// commonly used to analyze and/or debug flows.
use crate::{
    nlas::{self, DefaultNla, NlaBuffer},
    tc::{constants::*, TC_GEN_BUF_LEN},
    traits::{Emitable, Parseable},
    DecodeError,
};

pub const KIND: &str = "mirred";
pub const TC_MIRRED_BUF_LEN: usize = TC_GEN_BUF_LEN + 8;

#[derive(Debug, PartialEq, Eq, Clone)]
pub enum Nla {
    Unspec(Vec<u8>),
    Tm(Vec<u8>),
    Parms(TcMirred),
    Other(DefaultNla),
}

impl nlas::Nla for Nla {
    fn value_len(&self) -> usize {
        use self::Nla::*;
        match self {
            Unspec(bytes) | Tm(bytes) => bytes.len(),
            Parms(_) => TC_MIRRED_BUF_LEN,
            Other(attr) => attr.value_len(),
        }
    }

    fn emit_value(&self, buffer: &mut [u8]) {
        use self::Nla::*;
        match self {
            Unspec(bytes) | Tm(bytes) => buffer.copy_from_slice(bytes.as_slice()),
            Parms(p) => p.emit(buffer),
            Other(attr) => attr.emit_value(buffer),
        }
    }

    fn kind(&self) -> u16 {
        use self::Nla::*;
        match self {
            Unspec(_) => TCA_MIRRED_UNSPEC,
            Tm(_) => TCA_MIRRED_TM,
            Parms(_) => TCA_MIRRED_PARMS,
            Other(nla) => nla.kind(),
        }
    }
}

impl<'a, T: AsRef<[u8]> + ?Sized> Parseable<NlaBuffer<&'a T>> for Nla {
    fn parse(buf: &NlaBuffer<&'a T>) -> Result<Self, DecodeError> {
        use self::Nla::*;
        let payload = buf.value();
        Ok(match buf.kind() {
            TCA_MIRRED_UNSPEC => Unspec(payload.to_vec()),
            TCA_MIRRED_TM => Tm(payload.to_vec()),
            TCA_MIRRED_PARMS => Parms(TcMirred::parse(&TcMirredBuffer::new_checked(payload)?)?),
            _ => Other(DefaultNla::parse(buf)?),
        })
    }
}

#[derive(Debug, PartialEq, Eq, Clone, Default)]
pub struct TcMirred {
    pub index: u32,
    pub capab: u32,
    pub action: i32,
    pub refcnt: i32,
    pub bindcnt: i32,
    pub eaction: i32,
    pub ifindex: u32,
}

buffer!(TcMirredBuffer(TC_MIRRED_BUF_LEN) {
    index: (u32, 0..4),
    capab: (u32, 4..8),
    action: (i32, 8..12),
    refcnt: (i32, 12..16),
    bindcnt: (i32, 16..20),
    eaction: (i32, TC_GEN_BUF_LEN..(TC_GEN_BUF_LEN + 4)),
    ifindex: (u32, (TC_GEN_BUF_LEN + 4)..TC_MIRRED_BUF_LEN),
});

impl Emitable for TcMirred {
    fn buffer_len(&self) -> usize {
        TC_MIRRED_BUF_LEN
    }

    fn emit(&self, buffer: &mut [u8]) {
        let mut packet = TcMirredBuffer::new(buffer);
        packet.set_index(self.index);
        packet.set_capab(self.capab);
        packet.set_action(self.action);
        packet.set_refcnt(self.refcnt);
        packet.set_bindcnt(self.bindcnt);
        packet.set_eaction(self.eaction);
        packet.set_ifindex(self.ifindex);
    }
}

impl<T: AsRef<[u8]>> Parseable<TcMirredBuffer<T>> for TcMirred {
    fn parse(buf: &TcMirredBuffer<T>) -> Result<Self, DecodeError> {
        Ok(Self {
            index: buf.index(),
            capab: buf.capab(),
            action: buf.action(),
            refcnt: buf.refcnt(),
            bindcnt: buf.bindcnt(),
            eaction: buf.eaction(),
            ifindex: buf.ifindex(),
        })
    }
}
27.901639
96
0.571387
71ffdde5ef623f033ecb5240181d46c7ed022424
271
#![feature(phase)]

#[phase(plugin, link)]
extern crate stainless;

describe! top_level {
    it "should be less specific" {
        assert_eq!(1u, 1u);
    }

    describe! nested {
        it "should be more specific" {
            assert_eq!(2u, 2u);
        }
    }
}
16.9375
38
0.546125
6a3513aa2dc40a8c5af5d1107ec436ce030228cd
813
extern crate ed25519_dalek;
extern crate ewasm_api;
extern crate sha2;

mod verify;

#[cfg(not(test))]
#[no_mangle]
pub extern "C" fn main() {
    ewasm_api::consume_gas(2000);

    // NOTE: EIP-665 doesn't clarify what should happen if the input is shorter or longer.
    // This seems to be the best approach, consider it an error.
    if ewasm_api::calldata_size() != 128 {
        ewasm_api::revert();
    }

    let mut tmp = [0u8; 128];
    ewasm_api::unsafe_calldata_copy(0, 128, &mut tmp);

    match verify::verify(&tmp) {
        Ok(true) => {
            ewasm_api::finish_data(&[0x00u8; 4]);
        }
        Ok(false) => {
            ewasm_api::finish_data(&[0xffu8; 4]);
        }
        Err(_) => {
            // FIXME: send the error message?
            ewasm_api::revert();
        }
    }
}
24.636364
90
0.570726
03a7dd1ba33bc1ccd8148c340b162ffcb820f858
3209
//! This example will run an interactive command inside the container using `docker exec`,
//! passing through input and output into the tty running inside the container

use bollard::container::{Config, RemoveContainerOptions};
use bollard::Docker;

use bollard::exec::{CreateExecOptions, ResizeExecOptions, StartExecResults};
use bollard::image::CreateImageOptions;

use futures_util::{StreamExt, TryStreamExt};

use std::io::{stdout, Read, Write};
use std::time::Duration;

#[cfg(not(windows))]
use termion::raw::IntoRawMode;
#[cfg(not(windows))]
use termion::{async_stdin, terminal_size};

use tokio::io::AsyncWriteExt;
use tokio::task::spawn;
use tokio::time::sleep;

const IMAGE: &'static str = "alpine:3";

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + 'static>> {
    let docker = Docker::connect_with_socket_defaults().unwrap();

    #[cfg(not(windows))]
    let tty_size = terminal_size()?;

    docker
        .create_image(
            Some(CreateImageOptions {
                from_image: IMAGE,
                ..Default::default()
            }),
            None,
            None,
        )
        .try_collect::<Vec<_>>()
        .await?;

    let alpine_config = Config {
        image: Some(IMAGE),
        tty: Some(true),
        ..Default::default()
    };

    let id = docker
        .create_container::<&str, &str>(None, alpine_config)
        .await?
        .id;
    docker.start_container::<String>(&id, None).await?;

    let exec = docker
        .create_exec(
            &id,
            CreateExecOptions {
                attach_stdout: Some(true),
                attach_stderr: Some(true),
                attach_stdin: Some(true),
                tty: Some(true),
                cmd: Some(vec!["sh"]),
                ..Default::default()
            },
        )
        .await?
        .id;

    #[cfg(not(windows))]
    if let StartExecResults::Attached {
        mut output,
        mut input,
    } = docker.start_exec(&exec, None).await?
    {
        // pipe stdin into the docker exec stream input
        spawn(async move {
            let mut stdin = async_stdin().bytes();
            loop {
                if let Some(Ok(byte)) = stdin.next() {
                    input.write(&[byte]).await.ok();
                } else {
                    sleep(Duration::from_nanos(10)).await;
                }
            }
        });

        docker
            .resize_exec(
                &exec,
                ResizeExecOptions {
                    height: tty_size.1,
                    width: tty_size.0,
                },
            )
            .await?;

        // set stdout in raw mode so we can do tty stuff
        let stdout = stdout();
        let mut stdout = stdout.lock().into_raw_mode()?;

        // pipe docker exec output into stdout
        while let Some(Ok(output)) = output.next().await {
            stdout.write(output.into_bytes().as_ref())?;
            stdout.flush()?;
        }
    }

    docker
        .remove_container(
            &id,
            Some(RemoveContainerOptions {
                force: true,
                ..Default::default()
            }),
        )
        .await?;

    Ok(())
}
27.194915
89
0.520411
0e966f1d51c03ef88993787bb3d4a92ee7ed4287
10753
//! Implementation of [RedJubjub], a specialization of RedDSA to the Jubjub //! curve. //! //! [RedJubjub]: https://zips.z.cash/protocol/protocol.pdf#concretereddsa use ff::{Field, PrimeField}; use group::GroupEncoding; use jubjub::{ExtendedPoint, SubgroupPoint}; use rand_core::RngCore; use std::io::{self, Read, Write}; use std::ops::{AddAssign, MulAssign, Neg}; use crate::util::hash_to_scalar; fn read_scalar<R: Read>(mut reader: R) -> io::Result<jubjub::Fr> { let mut s_repr = [0u8; 32]; reader.read_exact(s_repr.as_mut())?; Option::from(jubjub::Fr::from_repr(s_repr)) .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "scalar is not in field")) } fn write_scalar<W: Write>(s: &jubjub::Fr, mut writer: W) -> io::Result<()> { writer.write_all(s.to_repr().as_ref()) } fn h_star(a: &[u8], b: &[u8]) -> jubjub::Fr { hash_to_scalar(b"Zcash_RedJubjubH", a, b) } #[derive(Copy, Clone, Debug)] pub struct Signature { rbar: [u8; 32], sbar: [u8; 32], } pub struct PrivateKey(pub jubjub::Fr); #[derive(Debug)] pub struct PublicKey(pub ExtendedPoint); impl Signature { pub fn read<R: Read>(mut reader: R) -> io::Result<Self> { let mut rbar = [0u8; 32]; let mut sbar = [0u8; 32]; reader.read_exact(&mut rbar)?; reader.read_exact(&mut sbar)?; Ok(Signature { rbar, sbar }) } pub fn write<W: Write>(&self, mut writer: W) -> io::Result<()> { writer.write_all(&self.rbar)?; writer.write_all(&self.sbar) } } impl PrivateKey { pub fn randomize(&self, alpha: jubjub::Fr) -> Self { let mut tmp = self.0; tmp.add_assign(&alpha); PrivateKey(tmp) } pub fn read<R: Read>(reader: R) -> io::Result<Self> { let pk = read_scalar::<R>(reader)?; Ok(PrivateKey(pk)) } pub fn write<W: Write>(&self, writer: W) -> io::Result<()> { write_scalar::<W>(&self.0, writer) } pub fn sign<R: RngCore>(&self, msg: &[u8], rng: &mut R, p_g: SubgroupPoint) -> Signature { // T = (l_H + 128) bits of randomness // For H*, l_H = 512 bits let mut t = [0u8; 80]; rng.fill_bytes(&mut t[..]); // r = H*(T || M) let r = h_star(&t[..], msg); // R = r . P_G let r_g = p_g * r; let rbar = r_g.to_bytes(); // S = r + H*(Rbar || M) . sk let mut s = h_star(&rbar[..], msg); s.mul_assign(&self.0); s.add_assign(&r); let mut sbar = [0u8; 32]; write_scalar::<&mut [u8]>(&s, &mut sbar[..]) .expect("Jubjub scalars should serialize to 32 bytes"); Signature { rbar, sbar } } } impl PublicKey { pub fn from_private(privkey: &PrivateKey, p_g: SubgroupPoint) -> Self { PublicKey((p_g * privkey.0).into()) } pub fn randomize(&self, alpha: jubjub::Fr, p_g: SubgroupPoint) -> Self { PublicKey(ExtendedPoint::from(p_g * alpha) + self.0) } pub fn read<R: Read>(mut reader: R) -> io::Result<Self> { let mut bytes = [0u8; 32]; reader.read_exact(&mut bytes)?; let p = ExtendedPoint::from_bytes(&bytes).map(PublicKey); if p.is_some().into() { Ok(p.unwrap()) } else { Err(io::Error::new( io::ErrorKind::InvalidInput, "invalid RedJubjub public key", )) } } pub fn write<W: Write>(&self, mut writer: W) -> io::Result<()> { writer.write_all(&self.0.to_bytes()) } pub fn verify(&self, msg: &[u8], sig: &Signature, p_g: SubgroupPoint) -> bool { // c = H*(Rbar || M) let c = h_star(&sig.rbar[..], msg); // Signature checks: // R != invalid let r = { let r = ExtendedPoint::from_bytes(&sig.rbar); if r.is_none().into() { return false; } r.unwrap() }; // S < order(G) // (jubjub::Scalar guarantees its representation is in the field) let s = match read_scalar::<&[u8]>(&sig.sbar[..]) { Ok(s) => s, Err(_) => return false, }; // 0 = h_G(-S . P_G + R + c . 
vk) ((self.0 * c) + r - (p_g * s)) .mul_by_cofactor() .is_identity() .into() } } pub struct BatchEntry<'a> { vk: PublicKey, msg: &'a [u8], sig: Signature, } // TODO: #82: This is a naive implementation currently, // and doesn't use multiexp. pub fn batch_verify<'a, R: RngCore>( mut rng: &mut R, batch: &[BatchEntry<'a>], p_g: SubgroupPoint, ) -> bool { let mut acc = ExtendedPoint::identity(); for entry in batch { let mut r = { let r = ExtendedPoint::from_bytes(&entry.sig.rbar); if r.is_none().into() { return false; } r.unwrap() }; let mut s = match read_scalar::<&[u8]>(&entry.sig.sbar[..]) { Ok(s) => s, Err(_) => return false, }; let mut c = h_star(&entry.sig.rbar[..], entry.msg); let z = jubjub::Fr::random(&mut rng); s.mul_assign(&z); s = s.neg(); r = r * z; c.mul_assign(&z); acc = acc + r + (&entry.vk.0 * c) + (p_g * s); } acc = acc.mul_by_cofactor().into(); acc.is_identity().into() } #[cfg(test)] mod tests { use group::Group; use rand_core::SeedableRng; use rand_xorshift::XorShiftRng; use super::*; use crate::constants::SPENDING_KEY_GENERATOR; #[test] fn test_batch_verify() { let mut rng = XorShiftRng::from_seed([ 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, 0xe5, ]); let p_g = SPENDING_KEY_GENERATOR; let sk1 = PrivateKey(jubjub::Fr::random(&mut rng)); let vk1 = PublicKey::from_private(&sk1, p_g); let msg1 = b"Foo bar"; let sig1 = sk1.sign(msg1, &mut rng, p_g); assert!(vk1.verify(msg1, &sig1, p_g)); let sk2 = PrivateKey(jubjub::Fr::random(&mut rng)); let vk2 = PublicKey::from_private(&sk2, p_g); let msg2 = b"Foo bar"; let sig2 = sk2.sign(msg2, &mut rng, p_g); assert!(vk2.verify(msg2, &sig2, p_g)); let mut batch = vec![ BatchEntry { vk: vk1, msg: msg1, sig: sig1, }, BatchEntry { vk: vk2, msg: msg2, sig: sig2, }, ]; assert!(batch_verify(&mut rng, &batch, p_g)); batch[0].sig = sig2; assert!(!batch_verify(&mut rng, &batch, p_g)); } #[test] fn cofactor_check() { let mut rng = XorShiftRng::from_seed([ 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, 0xe5, ]); let zero = jubjub::ExtendedPoint::identity(); let p_g = SPENDING_KEY_GENERATOR; let jubjub_modulus_bytes = [ 0xb7, 0x2c, 0xf7, 0xd6, 0x5e, 0x0e, 0x97, 0xd0, 0x82, 0x10, 0xc8, 0xcc, 0x93, 0x20, 0x68, 0xa6, 0x00, 0x3b, 0x34, 0x01, 0x01, 0x3b, 0x67, 0x06, 0xa9, 0xaf, 0x33, 0x65, 0xea, 0xb4, 0x7d, 0x0e, ]; // Get a point of order 8 let p8 = loop { let r = jubjub::ExtendedPoint::random(&mut rng) .to_niels() .multiply_bits(&jubjub_modulus_bytes); let r2 = r.double(); let r4 = r2.double(); let r8 = r4.double(); if r2 != zero && r4 != zero && r8 == zero { break r; } }; let sk = PrivateKey(jubjub::Fr::random(&mut rng)); let vk = PublicKey::from_private(&sk, p_g); // TODO: This test will need to change when #77 is fixed let msg = b"Foo bar"; let sig = sk.sign(msg, &mut rng, p_g); assert!(vk.verify(msg, &sig, p_g)); let vktorsion = PublicKey(vk.0 + p8); assert!(vktorsion.verify(msg, &sig, p_g)); } #[test] fn round_trip_serialization() { let mut rng = XorShiftRng::from_seed([ 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, 0xe5, ]); let p_g = SPENDING_KEY_GENERATOR; for _ in 0..1000 { let sk = PrivateKey(jubjub::Fr::random(&mut rng)); let vk = PublicKey::from_private(&sk, p_g); let msg = b"Foo bar"; let sig = sk.sign(msg, &mut rng, p_g); let mut sk_bytes = [0u8; 32]; let mut vk_bytes = [0u8; 32]; let mut sig_bytes = [0u8; 64]; sk.write(&mut sk_bytes[..]).unwrap(); vk.write(&mut vk_bytes[..]).unwrap(); sig.write(&mut 
sig_bytes[..]).unwrap(); let sk_2 = PrivateKey::read(&sk_bytes[..]).unwrap(); let vk_2 = PublicKey::from_private(&sk_2, p_g); let mut vk_2_bytes = [0u8; 32]; vk_2.write(&mut vk_2_bytes[..]).unwrap(); assert!(vk_bytes == vk_2_bytes); let vk_2 = PublicKey::read(&vk_bytes[..]).unwrap(); let sig_2 = Signature::read(&sig_bytes[..]).unwrap(); assert!(vk.verify(msg, &sig_2, p_g)); assert!(vk_2.verify(msg, &sig, p_g)); assert!(vk_2.verify(msg, &sig_2, p_g)); } } #[test] fn random_signatures() { let mut rng = XorShiftRng::from_seed([ 0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, 0xe5, ]); let p_g = SPENDING_KEY_GENERATOR; for _ in 0..1000 { let sk = PrivateKey(jubjub::Fr::random(&mut rng)); let vk = PublicKey::from_private(&sk, p_g); let msg1 = b"Foo bar"; let msg2 = b"Spam eggs"; let sig1 = sk.sign(msg1, &mut rng, p_g); let sig2 = sk.sign(msg2, &mut rng, p_g); assert!(vk.verify(msg1, &sig1, p_g)); assert!(vk.verify(msg2, &sig2, p_g)); assert!(!vk.verify(msg1, &sig2, p_g)); assert!(!vk.verify(msg2, &sig1, p_g)); let alpha = jubjub::Fr::random(&mut rng); let rsk = sk.randomize(alpha); let rvk = vk.randomize(alpha, p_g); let sig1 = rsk.sign(msg1, &mut rng, p_g); let sig2 = rsk.sign(msg2, &mut rng, p_g); assert!(rvk.verify(msg1, &sig1, p_g)); assert!(rvk.verify(msg2, &sig2, p_g)); assert!(!rvk.verify(msg1, &sig2, p_g)); assert!(!rvk.verify(msg2, &sig1, p_g)); } } }
29.70442
95
0.515763
e2e4d4a010013dfd571bb7d5aa87d68fb70ceb5d
140
//! Mun Test
//!
//! Mun Test contains shared functionality for testing Mun crates.
#![warn(missing_docs)]

pub use driver::*;

mod driver;
15.555556
66
0.692857
26ffef9ebe44bafd628d6580ab5cef9b42052f6d
2328
// Copyright 2014-2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! lint on if expressions with an else if, but without a final else branch

use crate::rustc::lint::{EarlyContext, EarlyLintPass, LintArray, LintPass, in_external_macro, LintContext};
use crate::rustc::{declare_tool_lint, lint_array};
use crate::syntax::ast::*;
use crate::utils::span_lint_and_sugg;

/// **What it does:** Checks for usage of if expressions with an `else if` branch,
/// but without a final `else` branch.
///
/// **Why is this bad?** Some coding guidelines require this (e.g. MISRA-C:2004 Rule 14.10).
///
/// **Known problems:** None.
///
/// **Example:**
/// ```rust
/// if x.is_positive() {
///     a();
/// } else if x.is_negative() {
///     b();
/// }
/// ```
///
/// Could be written:
///
/// ```rust
/// if x.is_positive() {
///     a();
/// } else if x.is_negative() {
///     b();
/// } else {
///     // we don't care about zero
/// }
/// ```
declare_clippy_lint! {
    pub ELSE_IF_WITHOUT_ELSE,
    restriction,
    "if expression with an `else if`, but without a final `else` branch"
}

#[derive(Copy, Clone)]
pub struct ElseIfWithoutElse;

impl LintPass for ElseIfWithoutElse {
    fn get_lints(&self) -> LintArray {
        lint_array!(ELSE_IF_WITHOUT_ELSE)
    }
}

impl EarlyLintPass for ElseIfWithoutElse {
    fn check_expr(&mut self, cx: &EarlyContext<'_>, mut item: &Expr) {
        if in_external_macro(cx.sess(), item.span) {
            return;
        }

        while let ExprKind::If(_, _, Some(ref els)) = item.node {
            if let ExprKind::If(_, _, None) = els.node {
                span_lint_and_sugg(
                    cx,
                    ELSE_IF_WITHOUT_ELSE,
                    els.span,
                    "if expression with an `else if`, but without a final `else`",
                    "add an `else` block here",
                    String::new()
                );
            }

            item = els;
        }
    }
}
28.048193
107
0.592784
0e88ad68d75d9dbd9935b9ffd7f60d7721a9ad7a
3837
//! Actix web Diesel integration example
//!
//! Diesel does not support tokio, so we have to run it in separate threads using the web::block
//! function which offloads blocking code (like Diesel's) in order to not block the server's thread.

#[macro_use]
extern crate diesel;

use actix_web::{get, web, App, Error, HttpResponse, HttpServer}; //, post , middleware
use diesel::pg::PgConnection;
use diesel::r2d2::{self, ConnectionManager};
// use uuid::Uuid;

mod actions;
mod models;
mod schema;

type DbPool = r2d2::Pool<ConnectionManager<PgConnection>>;

/// Finds retailer by UID.
#[get("/retailer/{r_id}")]
async fn get_retailer(
    pool: web::Data<DbPool>,
    r_id: web::Path<i32>,
) -> Result<HttpResponse, Error> {
    let r_id = r_id.into_inner();
    let conn = pool.get().expect("couldn't get db connection from pool");

    // use web::block to offload blocking Diesel code without blocking server thread
    let retailer = web::block(move || actions::find_retailer_by_id(r_id, &conn))
        .await
        .map_err(|e| {
            eprintln!("{}", e);
            HttpResponse::InternalServerError().finish()
        })?;

    if let Some(retailer) = retailer {
        Ok(HttpResponse::Ok().json(retailer))
    } else {
        let res = HttpResponse::NotFound()
            .body(format!("No retailer found with uid: {}", r_id));
        Ok(res)
    }
}

/// Get ALL retailers (100 in the test DB)
#[get("/retailers")]
async fn get_retailers(
    pool: web::Data<DbPool>
) -> Result<HttpResponse, Error> {
    let conn = pool.get().expect("couldn't get db connection from pool");

    // use web::block to offload blocking Diesel code without blocking server thread
    let retailers = web::block(move || actions::get_all_retailers(&conn))
        .await
        .map_err(|e| {
            eprintln!("{}", e);
            HttpResponse::InternalServerError().finish()
        })?;

    if let Some(retailer) = retailers {
        Ok(HttpResponse::Ok().json(retailer))
    } else {
        let res = HttpResponse::NotFound()
            .body(format!("No retailers found."));
        Ok(res)
    }
}

#[get("/")]
async fn get_status() -> Result<HttpResponse, Error> {
    Ok(HttpResponse::Ok().json(models::StatusMsg {
        status: 200,
        msg: String::from("Ok"),
    }))
}

/// Inserts new user with name defined in form.
// #[post("/user")]
// async fn add_user(
//     pool: web::Data<DbPool>,
//     form: web::Json<models::NewUser>,
// ) -> Result<HttpResponse, Error> {
//     let conn = pool.get().expect("couldn't get db connection from pool");

//     // use web::block to offload blocking Diesel code without blocking server thread
//     let user = web::block(move || actions::insert_new_user(&form.name, &conn))
//         .await
//         .map_err(|e| {
//             eprintln!("{}", e);
//             HttpResponse::InternalServerError().finish()
//         })?;

//     Ok(HttpResponse::Ok().json(user))
// }

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    // std::env::set_var("RUST_LOG", "actix_web=info");
    // env_logger::init();
    dotenv::dotenv().ok();

    // set up database connection pool
    let connspec = std::env::var("DATABASE_URL").expect("DATABASE_URL");
    let manager = ConnectionManager::<PgConnection>::new(connspec);
    let pool = r2d2::Pool::builder()
        .build(manager)
        .expect("Failed to create pool.");

    let bind = "127.0.0.1:8000";

    println!("Starting server at: {}", &bind);

    // Start HTTP server
    HttpServer::new(move || {
        App::new()
            // set up DB pool to be used with web::Data<Pool> extractor
            .data(pool.clone())
            // .wrap(middleware::Logger::default())
            .service(get_retailers)
            .service(get_retailer)
            .service(get_status)
    })
    .bind(&bind)?
    .run()
    .await
}
29.290076
100
0.607506
112497a9575ef3ddf67305c499800180fd9723dc
11933
use itertools::Itertools; use rustc_hash::FxHashMap as HashMap; use std::fmt; use fp::prime::ValidPrime; use fp::vector::{FpVector, SliceMut}; use once::OnceVec; use crate::algebra::combinatorics::TruncatedPolynomialMonomialBasis; use crate::algebra::Algebra; #[derive(Clone, Debug, Eq, PartialEq)] pub struct PolynomialAlgebraMonomial { pub degree: i32, pub poly: FpVector, pub ext: FpVector, pub valid: bool, } impl fmt::Display for PolynomialAlgebraMonomial { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( f, "UAM(degree={}, valid={}, poly={}, ext={})", self.degree, self.valid, self.poly, self.ext )?; Ok(()) } } impl PolynomialAlgebraMonomial { pub fn new(p: ValidPrime) -> Self { Self { degree: 0xFEDCBA9, // Looks invalid to me! poly: FpVector::new(p, 0), ext: FpVector::new(ValidPrime::new(2), 0), valid: true, } } } #[derive(Default)] pub struct PolynomialAlgebraTableEntry { pub index_to_monomial: Vec<PolynomialAlgebraMonomial>, // degree -> index -> AdemBasisElement pub monomial_to_index: HashMap<PolynomialAlgebraMonomial, usize>, // degree -> AdemBasisElement -> index } impl PolynomialAlgebraTableEntry { pub fn new() -> Self { Self::default() } } #[allow(clippy::derive_hash_xor_eq)] impl std::hash::Hash for PolynomialAlgebraMonomial { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.poly.hash(state); self.ext.hash(state); } } pub trait PolynomialAlgebra: std::fmt::Display + Sized + Send + Sync { fn prime(&self) -> ValidPrime; fn polynomial_monomials(&self) -> &TruncatedPolynomialMonomialBasis; fn exterior_monomials(&self) -> &TruncatedPolynomialMonomialBasis; fn min_degree(&self) -> i32 { 0 } fn polynomial_generators_in_degree(&self, degree: i32) -> usize; fn exterior_generators_in_degree(&self, degree: i32) -> usize; fn repr_poly_generator(&self, degree: i32, _index: usize) -> (String, u32); fn repr_ext_generator(&self, degree: i32, _index: usize) -> String; fn basis_table(&self) -> &OnceVec<PolynomialAlgebraTableEntry>; fn frobenius_on_generator(&self, degree: i32, index: usize) -> Option<usize>; fn compute_generating_set(&self, degree: i32); fn compute_basis_step(&self, degree: i32) { assert!(degree as usize == self.basis_table().len()); let num_poly_gens = self.polynomial_generators_in_degree(degree); let num_ext_gens = self.exterior_generators_in_degree(degree); let poly_parts = self.polynomial_monomials(); let ext_parts = self.exterior_monomials(); if degree > 0 { poly_parts.add_gens_and_calculate_parts(degree, num_poly_gens); ext_parts.add_gens_and_calculate_parts(degree, num_ext_gens); } let mut table = PolynomialAlgebraTableEntry::new(); for poly_deg in 0..=degree { let ext_deg = degree - poly_deg; for p in poly_parts.parts(poly_deg) { for e in ext_parts.parts(ext_deg) { let index = table.index_to_monomial.len(); let mut m = PolynomialAlgebraMonomial { degree, poly: p.clone(), ext: e.clone(), valid: true, }; self.set_monomial_degree(&mut m, degree); table.monomial_to_index.insert(m.clone(), index); table.index_to_monomial.push(m); } } } self.basis_table().push(table); } fn monomial_to_index(&self, monomial: &PolynomialAlgebraMonomial) -> usize { self.basis_table()[monomial.degree as usize] .monomial_to_index .get(monomial) .copied() .unwrap_or_else(|| panic!("Didn't find monomial: {}", monomial)) } fn index_to_monomial(&self, degree: i32, index: usize) -> &PolynomialAlgebraMonomial { &self.basis_table()[degree as usize].index_to_monomial[index] } fn frobenius_monomial(&self, target: &mut FpVector, source: &FpVector) { let p = *self.prime() as i32; for (i, 
c) in source.iter_nonzero() { let (gen_degree, gen_index) = self.polynomial_monomials().internal_idx_to_gen_deg(i); let frob = self.frobenius_on_generator(gen_degree, gen_index); if let Some(e) = frob { let out_idx = self .polynomial_monomials() .gen_deg_idx_to_internal_idx(p * gen_degree, e); target.add_basis_element(out_idx, c); } } } fn multiply_monomials( &self, target: &mut PolynomialAlgebraMonomial, source: &PolynomialAlgebraMonomial, ) -> Option<u32> { let minus_one = *self.prime() - 1; self.set_monomial_degree(target, target.degree + source.degree); let mut temp_source_ext = source.ext.clone(); temp_source_ext.set_scratch_vector_size(target.ext.len()); // If we made sign_rule handle vectors of different lengths, we could avoid cloning ext here. let coeff = if target.ext.sign_rule(&temp_source_ext) { minus_one } else { 1 }; target.ext.add_truncate(&temp_source_ext, 1)?; let mut carry_vec = [FpVector::new(self.prime(), target.poly.len())]; let mut source_vec = source.poly.clone(); source_vec.set_scratch_vector_size(target.poly.len()); let mut carry_q = true; while carry_q { carry_q = target.poly.add_carry(&source_vec, 1, &mut carry_vec); if carry_q { source_vec.set_to_zero(); self.frobenius_monomial(&mut source_vec, &carry_vec[0]); carry_vec[0].set_to_zero(); } } Some(coeff) } fn multiply_polynomials( &self, target: &mut FpVector, coeff: u32, left_degree: i32, left: &FpVector, right_degree: i32, right: &FpVector, ) { let p = *self.prime(); target.set_scratch_vector_size(self.dimension(left_degree + right_degree)); for (left_idx, left_entry) in left.iter_nonzero() { for (right_idx, right_entry) in right.iter_nonzero() { let mut target_mono = self.index_to_monomial(left_degree, left_idx).clone(); let source_mono = self.index_to_monomial(right_degree, right_idx); let nonzero_result = self.multiply_monomials(&mut target_mono, source_mono); if let Some(c) = nonzero_result { let idx = self.monomial_to_index(&target_mono); target.add_basis_element(idx, (left_entry * right_entry * c * coeff) % p); } } } } fn multiply_polynomial_by_monomial( &self, target: &mut FpVector, coeff: u32, left_degree: i32, left: &FpVector, right_mono: &PolynomialAlgebraMonomial, ) { let p = *self.prime(); target.extend_len(self.dimension(left_degree + right_mono.degree)); for (left_idx, left_entry) in left.iter_nonzero() { let mut target_mono = self.index_to_monomial(left_degree, left_idx).clone(); // Could reduce cloning a bit but probably best not to worry. let nonzero_result = self.multiply_monomials(&mut target_mono, right_mono); if let Some(c) = nonzero_result { let idx = self.monomial_to_index(&target_mono); target.add_basis_element(idx, (left_entry * c * coeff) % p); } } } // At p=2 this is redundant but at odd primes one must worry about signs. fn multiply_monomial_by_polynomial( &self, target: &mut FpVector, coeff: u32, left_mono: &PolynomialAlgebraMonomial, right_degree: i32, right: &FpVector, ) { let p = *self.prime(); target.extend_len(self.dimension(right_degree + left_mono.degree)); for (right_idx, right_entry) in right.iter_nonzero() { let mut target_mono = left_mono.clone(); // Could reduce cloning a bit but probably best not to worry. 
let right_mono = self.index_to_monomial(right_degree, right_idx); let nonzero_result = self.multiply_monomials(&mut target_mono, right_mono); if let Some(c) = nonzero_result { let idx = self.monomial_to_index(&target_mono); target.add_basis_element(idx, (right_entry * c * coeff) % p); } } } fn set_monomial_degree(&self, mono: &mut PolynomialAlgebraMonomial, degree: i32) { mono.degree = degree; mono.ext.set_scratch_vector_size( self.exterior_monomials() .generators_up_to_degree(mono.degree), ); mono.poly.set_scratch_vector_size( self.polynomial_monomials() .generators_up_to_degree(mono.degree), ); } fn max_computed_degree(&self) -> i32 { self.basis_table().len() as i32 - 1 } } impl<A: PolynomialAlgebra> Algebra for A { fn prime(&self) -> ValidPrime { self.prime() } fn compute_basis(&self, degree: i32) { self.compute_generating_set(degree); for i in self.max_computed_degree() + 1..=degree { self.compute_basis_step(i); } } fn dimension(&self, degree: i32) -> usize { if degree < 0 { 0 } else { self.basis_table()[degree as usize].index_to_monomial.len() } } fn basis_element_to_string(&self, degree: i32, index: usize) -> String { let mono = self.index_to_monomial(degree, index); let mut exp_map = HashMap::default(); for (i, e) in mono.poly.iter_nonzero() { let (gen_deg, gen_idx) = self.polynomial_monomials().internal_idx_to_gen_deg(i); let (var, var_exp) = self.repr_poly_generator(gen_deg, gen_idx); let entry = exp_map.entry(var).or_insert((0, gen_deg / var_exp as i32)); entry.0 += (e * var_exp) as i32; } let result = exp_map .iter() .sorted_by_key(|(_, &(_, gen_deg))| gen_deg) .map(|(var, &(var_exp, gen_deg))| { let pow = if var_exp > 1 { format!("^{{{}}}", var_exp) } else { "".to_string() }; let s = format!("{}{}", var, pow); (s, gen_deg) }) .merge_by( mono.ext.iter_nonzero().map(|(i, _)| { let (gen_deg, gen_idx) = self.exterior_monomials().internal_idx_to_gen_deg(i); let var = self.repr_ext_generator(gen_deg, gen_idx); (var, gen_deg) }), |x, y| x.1 < y.1, ) .map(|(v, _gen_deg)| v) .join(" "); if result.is_empty() { "1".to_string() } else { result } } fn multiply_basis_elements( &self, mut result: SliceMut, coeff: u32, left_degree: i32, left_idx: usize, right_degree: i32, right_idx: usize, ) { if coeff == 0 { return; } let mut target = self.index_to_monomial(left_degree, left_idx).clone(); let source = self.index_to_monomial(right_degree, right_idx); if self.multiply_monomials(&mut target, source).is_some() { let idx = self.monomial_to_index(&target); result.add_basis_element(idx, coeff); } } }
35.942771
150
0.579318
1d5d5930731179a07486822ebdc62230c27dac80
450
fn main() {
    [1; <Multiply<Five, Five>>::VAL];
    //~^ ERROR: constant expression depends on a generic parameter
}

trait TypeVal<T> {
    const VAL: T;
}

struct Five;

struct Multiply<N, M> {
    _n: PhantomData, //~ ERROR cannot find type `PhantomData` in this scope
}

impl<N, M> TypeVal<usize> for Multiply<N, M> where N: TypeVal<VAL> {}
//~^ ERROR cannot find type `VAL` in this scope
//~| ERROR not all trait items implemented, missing: `VAL`
30
75
0.664444
89af327ba30834369fa2803f996f831b67295c0b
2246
use super::super::parameters::alternative_id::AlternativeID;
use super::super::parameters::any::Any;
use super::super::parameters::media_type::MediaType;
use super::super::parameters::preference::Preference;
use super::super::parameters::property_id::PropertyID;
use super::super::parameters::typ::Type;
use super::super::parameters::Parameter;
use super::super::values::uri::URI;
use super::super::values::Value;
use super::super::Set;
use super::*;

use std::fmt::{self, Display, Formatter, Write};

use validators::{Validated, ValidatedWrapper};

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct CalendarAddressURI {
    pub typ: Option<Type>,
    pub media_type: Option<MediaType>,
    pub property_id: Option<PropertyID>,
    pub preference: Option<Preference>,
    pub alternative_id: Option<AlternativeID>,
    pub any: Option<Set<Any>>,
    pub value: URI,
}

impl CalendarAddressURI {
    pub fn from_uri(uri: URI) -> CalendarAddressURI {
        CalendarAddressURI {
            typ: None,
            media_type: None,
            property_id: None,
            preference: None,
            alternative_id: None,
            any: None,
            value: uri,
        }
    }
}

impl Property for CalendarAddressURI {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        f.write_str("CALADRURI")?;

        macro_rules! fmt {
            ($c:tt, $p:ident) => {
                fmt_g!($c, Parameter, self, $p, f);
            };
        }

        fmt!(0, typ);
        fmt!(0, media_type);
        fmt!(0, property_id);
        fmt!(0, preference);
        fmt!(0, alternative_id);
        fmt!(2, any);

        f.write_char(':')?;

        Value::fmt(&self.value, f)?;

        f.write_str("\r\n")?;

        Ok(())
    }
}

impl Display for CalendarAddressURI {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        Property::fmt(self, f)
    }
}

impl Validated for CalendarAddressURI {}

impl ValidatedWrapper for CalendarAddressURI {
    type Error = &'static str;

    fn from_string(_from_string_input: String) -> Result<Self, Self::Error> {
        unimplemented!();
    }

    fn from_str(_from_str_input: &str) -> Result<Self, Self::Error> {
        unimplemented!();
    }
}
25.235955
77
0.604185
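A short usage sketch for the property type above. The record does not show how a `URI` value is built, so `URI::from_str` is an assumption about the `validators`-backed value type:

// Assumed: `URI::from_str` exists on the validators-backed value type.
let uri = URI::from_str("http://example.com/calendar/jdoe").unwrap();
let prop = CalendarAddressURI::from_uri(uri);
// `Display` delegates to `Property::fmt`, so this prints a vCard line like
// CALADRURI:http://example.com/calendar/jdoe   (CRLF-terminated)
print!("{}", prop);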
bf7c1c90974757a27d1c3b9273ea02e9a98a2cbe
56,062
//! Rusqlite is an ergonomic wrapper for using SQLite from Rust. It attempts to //! expose an interface similar to [rust-postgres](https://github.com/sfackler/rust-postgres). //! //! ```rust //! use rusqlite::{params, Connection, Result}; //! use time::Timespec; //! //! #[derive(Debug)] //! struct Person { //! id: i32, //! name: String, //! time_created: Timespec, //! data: Option<Vec<u8>>, //! } //! //! fn main() -> Result<()> { //! let conn = Connection::open_in_memory()?; //! //! conn.execute( //! "CREATE TABLE person ( //! id INTEGER PRIMARY KEY, //! name TEXT NOT NULL, //! time_created TEXT NOT NULL, //! data BLOB //! )", //! params![], //! )?; //! let me = Person { //! id: 0, //! name: "Steven".to_string(), //! time_created: time::get_time(), //! data: None, //! }; //! conn.execute( //! "INSERT INTO person (name, time_created, data) //! VALUES (?1, ?2, ?3)", //! params![me.name, me.time_created, me.data], //! )?; //! //! let mut stmt = conn.prepare("SELECT id, name, time_created, data FROM person")?; //! let person_iter = stmt.query_map(params![], |row| { //! Ok(Person { //! id: row.get(0)?, //! name: row.get(1)?, //! time_created: row.get(2)?, //! data: row.get(3)?, //! }) //! })?; //! //! for person in person_iter { //! println!("Found person {:?}", person.unwrap()); //! } //! Ok(()) //! } //! ``` #![allow(unknown_lints)] pub use libsqlite3_sys as ffi; use std::cell::RefCell; use std::convert; use std::default::Default; use std::ffi::{CStr, CString}; use std::fmt; use std::os::raw::{c_char, c_int}; use std::path::{Path, PathBuf}; use std::result; use std::str; use std::sync::atomic::Ordering; use std::sync::{Arc, Mutex}; use crate::cache::StatementCache; use crate::inner_connection::{InnerConnection, BYPASS_SQLITE_INIT}; use crate::raw_statement::RawStatement; use crate::types::ValueRef; pub use crate::cache::CachedStatement; pub use crate::column::Column; pub use crate::error::Error; pub use crate::ffi::ErrorCode; #[cfg(feature = "hooks")] pub use crate::hooks::Action; #[cfg(feature = "load_extension")] pub use crate::load_extension_guard::LoadExtensionGuard; pub use crate::row::{AndThenRows, MappedRows, Row, RowIndex, Rows}; pub use crate::statement::{Statement, StatementStatus}; pub use crate::transaction::{DropBehavior, Savepoint, Transaction, TransactionBehavior}; pub use crate::types::ToSql; pub use crate::version::*; #[macro_use] mod error; #[cfg(feature = "backup")] pub mod backup; #[cfg(feature = "blob")] pub mod blob; mod busy; mod cache; #[cfg(feature = "collation")] mod collation; mod column; pub mod config; #[cfg(any(feature = "functions", feature = "vtab"))] mod context; #[cfg(feature = "functions")] pub mod functions; #[cfg(feature = "hooks")] mod hooks; mod inner_connection; #[cfg(feature = "limits")] pub mod limits; #[cfg(feature = "load_extension")] mod load_extension_guard; mod pragma; mod raw_statement; mod row; #[cfg(feature = "session")] pub mod session; mod statement; #[cfg(feature = "trace")] pub mod trace; mod transaction; pub mod types; mod unlock_notify; mod version; #[cfg(feature = "vtab")] pub mod vtab; // Number of cached prepared statements we'll hold on to. const STATEMENT_CACHE_DEFAULT_CAPACITY: usize = 16; /// To be used when your statement has no [parameter](https://sqlite.org/lang_expr.html#varparam). pub const NO_PARAMS: &[&dyn ToSql] = &[]; /// A macro making it more convenient to pass heterogeneous lists /// of parameters as a `&[&dyn ToSql]`. 
/// /// # Example /// /// ```rust,no_run /// # use rusqlite::{Result, Connection, params}; /// /// struct Person { /// name: String, /// age_in_years: u8, /// data: Option<Vec<u8>>, /// } /// /// fn add_person(conn: &Connection, person: &Person) -> Result<()> { /// conn.execute("INSERT INTO person (name, age_in_years, data) /// VALUES (?1, ?2, ?3)", /// params![person.name, person.age_in_years, person.data])?; /// Ok(()) /// } /// ``` #[macro_export] macro_rules! params { () => { $crate::NO_PARAMS }; ($($param:expr),+ $(,)?) => { &[$(&$param as &dyn $crate::ToSql),+] }; } /// A macro making it more convenient to pass lists of named parameters /// as a `&[(&str, &dyn ToSql)]`. /// /// # Example /// /// ```rust,no_run /// # use rusqlite::{Result, Connection, named_params}; /// /// struct Person { /// name: String, /// age_in_years: u8, /// data: Option<Vec<u8>>, /// } /// /// fn add_person(conn: &Connection, person: &Person) -> Result<()> { /// conn.execute_named( /// "INSERT INTO person (name, age_in_years, data) /// VALUES (:name, :age, :data)", /// named_params!{ /// ":name": person.name, /// ":age": person.age_in_years, /// ":data": person.data, /// } /// )?; /// Ok(()) /// } /// ``` #[macro_export] macro_rules! named_params { () => { &[] }; // Note: It's a lot more work to support this as part of the same macro as // `params!`, unfortunately. ($($param_name:literal: $param_val:expr),+ $(,)?) => { &[$(($param_name, &$param_val as &dyn $crate::ToSql)),+] }; } /// A typedef of the result returned by many methods. pub type Result<T> = result::Result<T, Error>; /// See the [method documentation](#tymethod.optional). pub trait OptionalExtension<T> { /// Converts a `Result<T>` into a `Result<Option<T>>`. /// /// By default, Rusqlite treats 0 rows being returned from a query that is /// expected to return 1 row as an error. This method will /// handle that error, and give you back an `Option<T>` instead. fn optional(self) -> Result<Option<T>>; } impl<T> OptionalExtension<T> for Result<T> { fn optional(self) -> Result<Option<T>> { match self { Ok(value) => Ok(Some(value)), Err(Error::QueryReturnedNoRows) => Ok(None), Err(e) => Err(e), } } } unsafe fn errmsg_to_string(errmsg: *const c_char) -> String { let c_slice = CStr::from_ptr(errmsg).to_bytes(); String::from_utf8_lossy(c_slice).into_owned() } fn str_to_cstring(s: &str) -> Result<CString> { Ok(CString::new(s)?) } /// Returns `Ok((string ptr, len as c_int, SQLITE_STATIC | SQLITE_TRANSIENT))` /// normally. /// Returns errors if the string has embedded nuls or is too large for sqlite. /// The `sqlite3_destructor_type` item is always `SQLITE_TRANSIENT` unless /// the string was empty (in which case it's `SQLITE_STATIC`, and the ptr is /// static). fn str_for_sqlite(s: &[u8]) -> Result<(*const c_char, c_int, ffi::sqlite3_destructor_type)> { let len = len_as_c_int(s.len())?; if memchr::memchr(0, s).is_none() { let (ptr, dtor_info) = if len != 0 { (s.as_ptr() as *const c_char, ffi::SQLITE_TRANSIENT()) } else { // Return a pointer guaranteed to live forever ("".as_ptr() as *const c_char, ffi::SQLITE_STATIC()) }; Ok((ptr, len, dtor_info)) } else { // There's an embedded nul, so we fabricate a NulError. let e = CString::new(s); Err(Error::NulError(e.unwrap_err())) } } // Helper to cast to c_int safely, returning the correct error type if the cast // failed. 
fn len_as_c_int(len: usize) -> Result<c_int> { if len >= (c_int::max_value() as usize) { Err(Error::SqliteFailure( ffi::Error::new(ffi::SQLITE_TOOBIG), None, )) } else { Ok(len as c_int) } } fn path_to_cstring(p: &Path) -> Result<CString> { let s = p.to_str().ok_or_else(|| Error::InvalidPath(p.to_owned()))?; str_to_cstring(s) } /// Name for a database within a SQLite connection. #[derive(Copy, Clone)] pub enum DatabaseName<'a> { /// The main database. Main, /// The temporary database (e.g., any "CREATE TEMPORARY TABLE" tables). Temp, /// A database that has been attached via "ATTACH DATABASE ...". Attached(&'a str), } // Currently DatabaseName is only used by the backup and blob mods, so hide // this (private) impl to avoid dead code warnings. #[cfg(any( feature = "backup", feature = "blob", feature = "session", feature = "bundled" ))] impl DatabaseName<'_> { fn to_cstring(&self) -> Result<CString> { use self::DatabaseName::{Attached, Main, Temp}; match *self { Main => str_to_cstring("main"), Temp => str_to_cstring("temp"), Attached(s) => str_to_cstring(s), } } } /// A connection to a SQLite database. pub struct Connection { db: RefCell<InnerConnection>, cache: StatementCache, path: Option<PathBuf>, } unsafe impl Send for Connection {} impl Drop for Connection { fn drop(&mut self) { self.flush_prepared_statement_cache(); } } impl Connection { /// Open a new connection to a SQLite database. /// /// `Connection::open(path)` is equivalent to /// `Connection::open_with_flags(path, /// OpenFlags::SQLITE_OPEN_READ_WRITE | /// OpenFlags::SQLITE_OPEN_CREATE)`. /// /// ```rust,no_run /// # use rusqlite::{Connection, Result}; /// fn open_my_db() -> Result<()> { /// let path = "./my_db.db3"; /// let db = Connection::open(&path)?; /// println!("{}", db.is_autocommit()); /// Ok(()) /// } /// ``` /// /// # Failure /// /// Will return `Err` if `path` cannot be converted to a C-compatible /// string or if the underlying SQLite open call fails. pub fn open<P: AsRef<Path>>(path: P) -> Result<Connection> { let flags = OpenFlags::default(); Connection::open_with_flags(path, flags) } /// Open a new connection to an in-memory SQLite database. /// /// # Failure /// /// Will return `Err` if the underlying SQLite open call fails. pub fn open_in_memory() -> Result<Connection> { let flags = OpenFlags::default(); Connection::open_in_memory_with_flags(flags) } /// Open a new connection to a SQLite database. /// /// [Database Connection](http://www.sqlite.org/c3ref/open.html) for a description of valid /// flag combinations. /// /// # Failure /// /// Will return `Err` if `path` cannot be converted to a C-compatible /// string or if the underlying SQLite open call fails. pub fn open_with_flags<P: AsRef<Path>>(path: P, flags: OpenFlags) -> Result<Connection> { let c_path = path_to_cstring(path.as_ref())?; InnerConnection::open_with_flags(&c_path, flags).map(|db| Connection { db: RefCell::new(db), cache: StatementCache::with_capacity(STATEMENT_CACHE_DEFAULT_CAPACITY), path: Some(path.as_ref().to_path_buf()), }) } /// Open a new connection to an in-memory SQLite database. /// /// [Database Connection](http://www.sqlite.org/c3ref/open.html) for a description of valid /// flag combinations. /// /// # Failure /// /// Will return `Err` if the underlying SQLite open call fails. 
pub fn open_in_memory_with_flags(flags: OpenFlags) -> Result<Connection> { let c_memory = str_to_cstring(":memory:")?; InnerConnection::open_with_flags(&c_memory, flags).map(|db| Connection { db: RefCell::new(db), cache: StatementCache::with_capacity(STATEMENT_CACHE_DEFAULT_CAPACITY), path: None, }) } /// Convenience method to run multiple SQL statements (that cannot take any /// parameters). /// /// Uses [sqlite3_exec](http://www.sqlite.org/c3ref/exec.html) under the hood. /// /// ## Example /// /// ```rust,no_run /// # use rusqlite::{Connection, Result}; /// fn create_tables(conn: &Connection) -> Result<()> { /// conn.execute_batch( /// "BEGIN; /// CREATE TABLE foo(x INTEGER); /// CREATE TABLE bar(y TEXT); /// COMMIT;", /// ) /// } /// ``` /// /// # Failure /// /// Will return `Err` if `sql` cannot be converted to a C-compatible string /// or if the underlying SQLite call fails. pub fn execute_batch(&self, sql: &str) -> Result<()> { self.db.borrow_mut().execute_batch(sql) } /// Convenience method to prepare and execute a single SQL statement. /// /// On success, returns the number of rows that were changed or inserted or /// deleted (via `sqlite3_changes`). /// /// ## Example /// /// ```rust,no_run /// # use rusqlite::{Connection}; /// fn update_rows(conn: &Connection) { /// match conn.execute("UPDATE foo SET bar = 'baz' WHERE qux = ?", &[1i32]) { /// Ok(updated) => println!("{} rows were updated", updated), /// Err(err) => println!("update failed: {}", err), /// } /// } /// ``` /// /// # Failure /// /// Will return `Err` if `sql` cannot be converted to a C-compatible string /// or if the underlying SQLite call fails. pub fn execute<P>(&self, sql: &str, params: P) -> Result<usize> where P: IntoIterator, P::Item: ToSql, { self.prepare(sql) .and_then(|mut stmt| stmt.check_no_tail().and_then(|_| stmt.execute(params))) } /// Convenience method to prepare and execute a single SQL statement with /// named parameter(s). /// /// On success, returns the number of rows that were changed or inserted or /// deleted (via `sqlite3_changes`). /// /// ## Example /// /// ```rust,no_run /// # use rusqlite::{Connection, Result}; /// fn insert(conn: &Connection) -> Result<usize> { /// conn.execute_named( /// "INSERT INTO test (name) VALUES (:name)", /// &[(":name", &"one")], /// ) /// } /// ``` /// /// # Failure /// /// Will return `Err` if `sql` cannot be converted to a C-compatible string /// or if the underlying SQLite call fails. pub fn execute_named(&self, sql: &str, params: &[(&str, &dyn ToSql)]) -> Result<usize> { self.prepare(sql).and_then(|mut stmt| { stmt.check_no_tail() .and_then(|_| stmt.execute_named(params)) }) } /// Get the SQLite rowid of the most recent successful INSERT. /// /// Uses [sqlite3_last_insert_rowid](https://www.sqlite.org/c3ref/last_insert_rowid.html) under /// the hood. pub fn last_insert_rowid(&self) -> i64 { self.db.borrow_mut().last_insert_rowid() } /// Convenience method to execute a query that is expected to return a /// single row. /// /// ## Example /// /// ```rust,no_run /// # use rusqlite::{Result,Connection, NO_PARAMS}; /// fn preferred_locale(conn: &Connection) -> Result<String> { /// conn.query_row( /// "SELECT value FROM preferences WHERE name='locale'", /// NO_PARAMS, /// |row| row.get(0), /// ) /// } /// ``` /// /// If the query returns more than one row, all rows except the first are /// ignored. /// /// Returns `Err(QueryReturnedNoRows)` if no results are returned. 
If the /// query truly is optional, you can call `.optional()` on the result of /// this to get a `Result<Option<T>>`. /// /// # Failure /// /// Will return `Err` if `sql` cannot be converted to a C-compatible string /// or if the underlying SQLite call fails. pub fn query_row<T, P, F>(&self, sql: &str, params: P, f: F) -> Result<T> where P: IntoIterator, P::Item: ToSql, F: FnOnce(&Row<'_>) -> Result<T>, { let mut stmt = self.prepare(sql)?; stmt.check_no_tail()?; stmt.query_row(params, f) } /// Convenience method to execute a query with named parameter(s) that is /// expected to return a single row. /// /// If the query returns more than one row, all rows except the first are /// ignored. /// /// Returns `Err(QueryReturnedNoRows)` if no results are returned. If the /// query truly is optional, you can call `.optional()` on the result of /// this to get a `Result<Option<T>>`. /// /// # Failure /// /// Will return `Err` if `sql` cannot be converted to a C-compatible string /// or if the underlying SQLite call fails. pub fn query_row_named<T, F>(&self, sql: &str, params: &[(&str, &dyn ToSql)], f: F) -> Result<T> where F: FnOnce(&Row<'_>) -> Result<T>, { let mut stmt = self.prepare(sql)?; stmt.check_no_tail()?; stmt.query_row_named(params, f) } /// Convenience method to execute a query that is expected to return a /// single row, and execute a mapping via `f` on that returned row with /// the possibility of failure. The `Result` type of `f` must implement /// `std::convert::From<Error>`. /// /// ## Example /// /// ```rust,no_run /// # use rusqlite::{Result,Connection, NO_PARAMS}; /// fn preferred_locale(conn: &Connection) -> Result<String> { /// conn.query_row_and_then( /// "SELECT value FROM preferences WHERE name='locale'", /// NO_PARAMS, /// |row| row.get(0), /// ) /// } /// ``` /// /// If the query returns more than one row, all rows except the first are /// ignored. /// /// # Failure /// /// Will return `Err` if `sql` cannot be converted to a C-compatible string /// or if the underlying SQLite call fails. pub fn query_row_and_then<T, E, P, F>(&self, sql: &str, params: P, f: F) -> result::Result<T, E> where P: IntoIterator, P::Item: ToSql, F: FnOnce(&Row<'_>) -> result::Result<T, E>, E: convert::From<Error>, { let mut stmt = self.prepare(sql)?; stmt.check_no_tail()?; let mut rows = stmt.query(params)?; rows.get_expected_row().map_err(E::from).and_then(|r| f(&r)) } /// Prepare a SQL statement for execution. /// /// ## Example /// /// ```rust,no_run /// # use rusqlite::{Connection, Result}; /// fn insert_new_people(conn: &Connection) -> Result<()> { /// let mut stmt = conn.prepare("INSERT INTO People (name) VALUES (?)")?; /// stmt.execute(&["Joe Smith"])?; /// stmt.execute(&["Bob Jones"])?; /// Ok(()) /// } /// ``` /// /// # Failure /// /// Will return `Err` if `sql` cannot be converted to a C-compatible string /// or if the underlying SQLite call fails. pub fn prepare(&self, sql: &str) -> Result<Statement<'_>> { self.db.borrow_mut().prepare(self, sql) } /// Close the SQLite connection. /// /// This is functionally equivalent to the `Drop` implementation for /// `Connection` except that on failure, it returns an error and the /// connection itself (presumably so closing can be attempted again). /// /// # Failure /// /// Will return `Err` if the underlying SQLite call fails. 
pub fn close(self) -> std::result::Result<(), (Connection, Error)> { self.flush_prepared_statement_cache(); let r = self.db.borrow_mut().close(); r.map_err(move |err| (self, err)) } /// Enable loading of SQLite extensions. Strongly consider using /// `LoadExtensionGuard` instead of this function. /// /// ## Example /// /// ```rust,no_run /// # use rusqlite::{Connection, Result}; /// # use std::path::{Path}; /// fn load_my_extension(conn: &Connection) -> Result<()> { /// conn.load_extension_enable()?; /// conn.load_extension(Path::new("my_sqlite_extension"), None)?; /// conn.load_extension_disable() /// } /// ``` /// /// # Failure /// /// Will return `Err` if the underlying SQLite call fails. #[cfg(feature = "load_extension")] pub fn load_extension_enable(&self) -> Result<()> { self.db.borrow_mut().enable_load_extension(1) } /// Disable loading of SQLite extensions. /// /// See `load_extension_enable` for an example. /// /// # Failure /// /// Will return `Err` if the underlying SQLite call fails. #[cfg(feature = "load_extension")] pub fn load_extension_disable(&self) -> Result<()> { self.db.borrow_mut().enable_load_extension(0) } /// Load the SQLite extension at `dylib_path`. `dylib_path` is passed /// through to `sqlite3_load_extension`, which may attempt OS-specific /// modifications if the file cannot be loaded directly. /// /// If `entry_point` is `None`, SQLite will attempt to find the entry /// point. If it is not `None`, the entry point will be passed through /// to `sqlite3_load_extension`. /// /// ## Example /// /// ```rust,no_run /// # use rusqlite::{Connection, Result, LoadExtensionGuard}; /// # use std::path::{Path}; /// fn load_my_extension(conn: &Connection) -> Result<()> { /// let _guard = LoadExtensionGuard::new(conn)?; /// /// conn.load_extension("my_sqlite_extension", None) /// } /// ``` /// /// # Failure /// /// Will return `Err` if the underlying SQLite call fails. #[cfg(feature = "load_extension")] pub fn load_extension<P: AsRef<Path>>( &self, dylib_path: P, entry_point: Option<&str>, ) -> Result<()> { self.db .borrow_mut() .load_extension(dylib_path.as_ref(), entry_point) } /// Get access to the underlying SQLite database connection handle. /// /// # Warning /// /// You should not need to use this function. If you do need to, please /// [open an issue on the rusqlite repository](https://github.com/jgallagher/rusqlite/issues) and describe /// your use case. /// /// # Safety /// /// This function is unsafe because it gives you raw access /// to the SQLite connection, and what you do with it could impact the /// safety of this `Connection`. pub unsafe fn handle(&self) -> *mut ffi::sqlite3 { self.db.borrow().db() } /// Create a `Connection` from a raw handle. /// /// The underlying SQLite database connection handle will not be closed when /// the returned connection is dropped/closed. pub unsafe fn from_handle(db: *mut ffi::sqlite3) -> Result<Connection> { let db_path = db_filename(db); let db = InnerConnection::new(db, false); Ok(Connection { db: RefCell::new(db), cache: StatementCache::with_capacity(STATEMENT_CACHE_DEFAULT_CAPACITY), path: db_path, }) } /// Get access to a handle that can be used to interrupt long running /// queries from another thread. 
pub fn get_interrupt_handle(&self) -> InterruptHandle {
        self.db.borrow().get_interrupt_handle()
    }

    fn decode_result(&self, code: c_int) -> Result<()> {
        self.db.borrow_mut().decode_result(code)
    }

    /// Return the number of rows modified, inserted or deleted by the most
    /// recently completed INSERT, UPDATE or DELETE statement on the database
    /// connection.
    fn changes(&self) -> usize {
        self.db.borrow_mut().changes()
    }

    /// Test for auto-commit mode.
    /// Autocommit mode is on by default.
    pub fn is_autocommit(&self) -> bool {
        self.db.borrow().is_autocommit()
    }

    /// Determine if all associated prepared statements have been reset.
    #[cfg(feature = "bundled")]
    pub fn is_busy(&self) -> bool {
        self.db.borrow().is_busy()
    }
}

impl fmt::Debug for Connection {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Connection")
            .field("path", &self.path)
            .finish()
    }
}

bitflags::bitflags! {
    #[doc = "Flags for opening SQLite database connections."]
    #[doc = "See [sqlite3_open_v2](http://www.sqlite.org/c3ref/open.html) for details."]
    #[repr(C)]
    pub struct OpenFlags: ::std::os::raw::c_int {
        const SQLITE_OPEN_READ_ONLY     = ffi::SQLITE_OPEN_READONLY;
        const SQLITE_OPEN_READ_WRITE    = ffi::SQLITE_OPEN_READWRITE;
        const SQLITE_OPEN_CREATE        = ffi::SQLITE_OPEN_CREATE;
        const SQLITE_OPEN_URI           = 0x0000_0040;
        const SQLITE_OPEN_MEMORY        = 0x0000_0080;
        const SQLITE_OPEN_NO_MUTEX      = ffi::SQLITE_OPEN_NOMUTEX;
        const SQLITE_OPEN_FULL_MUTEX    = ffi::SQLITE_OPEN_FULLMUTEX;
        const SQLITE_OPEN_SHARED_CACHE  = 0x0002_0000;
        const SQLITE_OPEN_PRIVATE_CACHE = 0x0004_0000;
    }
}

impl Default for OpenFlags {
    fn default() -> OpenFlags {
        OpenFlags::SQLITE_OPEN_READ_WRITE
            | OpenFlags::SQLITE_OPEN_CREATE
            | OpenFlags::SQLITE_OPEN_NO_MUTEX
            | OpenFlags::SQLITE_OPEN_URI
    }
}

/// rusqlite's check for a safe SQLite threading mode requires SQLite 3.7.0 or
/// later. If you are running against a SQLite older than that, rusqlite
/// attempts to ensure safety by performing configuration and initialization of
/// SQLite itself the first time you
/// attempt to open a connection. By default, rusqlite panics if that
/// initialization fails, since that could mean SQLite has been initialized in
/// single-thread mode.
///
/// If you are encountering that panic _and_ can ensure that SQLite has been
/// initialized in either multi-thread or serialized mode, call this function
/// prior to attempting to open a connection and rusqlite's initialization
/// process will be skipped.
///
/// # Safety
///
/// This function is unsafe because if you call it and SQLite has actually been
/// configured to run in single-thread mode,
/// you may encounter memory errors or data corruption or any number of terrible
/// things that should not be possible when you're using Rust.
pub unsafe fn bypass_sqlite_initialization() {
    BYPASS_SQLITE_INIT.store(true, Ordering::Relaxed);
}

/// rusqlite performs a one-time check that the runtime SQLite version is at
/// least as new as the version of SQLite found when rusqlite was built.
/// Bypassing this check may be dangerous; e.g., if you use features of SQLite
/// that are not present in the runtime version.
///
/// # Safety
///
/// If you are sure the runtime version is compatible with the
/// build-time version for your usage, you can bypass the version check by
/// calling this function before your first connection attempt.
pub unsafe fn bypass_sqlite_version_check() {
    #[cfg(not(feature = "bundled"))]
    inner_connection::BYPASS_VERSION_CHECK.store(true, Ordering::Relaxed);
}

/// Allows interrupting a long-running computation.
pub struct InterruptHandle { db_lock: Arc<Mutex<*mut ffi::sqlite3>>, } unsafe impl Send for InterruptHandle {} unsafe impl Sync for InterruptHandle {} impl InterruptHandle { /// Interrupt the query currently executing on another thread. This will /// cause that query to fail with a `SQLITE3_INTERRUPT` error. pub fn interrupt(&self) { let db_handle = self.db_lock.lock().unwrap(); if !db_handle.is_null() { unsafe { ffi::sqlite3_interrupt(*db_handle) } } } } #[cfg(feature = "bundled")] // 3.7.10 unsafe fn db_filename(db: *mut ffi::sqlite3) -> Option<PathBuf> { let db_name = DatabaseName::Main.to_cstring().unwrap(); let db_filename = ffi::sqlite3_db_filename(db, db_name.as_ptr()); if db_filename.is_null() { None } else { CStr::from_ptr(db_filename).to_str().ok().map(PathBuf::from) } } #[cfg(not(feature = "bundled"))] unsafe fn db_filename(_: *mut ffi::sqlite3) -> Option<PathBuf> { None } #[cfg(test)] mod test { use super::*; use crate::ffi; use fallible_iterator::FallibleIterator; use std::error::Error as StdError; use std::fmt; use tempdir::TempDir; // this function is never called, but is still type checked; in // particular, calls with specific instantiations will require // that those types are `Send`. #[allow(dead_code, unconditional_recursion)] fn ensure_send<T: Send>() { ensure_send::<Connection>(); ensure_send::<InterruptHandle>(); } #[allow(dead_code, unconditional_recursion)] fn ensure_sync<T: Sync>() { ensure_sync::<InterruptHandle>(); } pub fn checked_memory_handle() -> Connection { Connection::open_in_memory().unwrap() } #[test] fn test_concurrent_transactions_busy_commit() { use std::time::Duration; let tmp = TempDir::new("locked").unwrap(); let path = tmp.path().join("transactions.db3"); Connection::open(&path) .expect("create temp db") .execute_batch( " BEGIN; CREATE TABLE foo(x INTEGER); INSERT INTO foo VALUES(42); END;", ) .expect("create temp db"); let mut db1 = Connection::open_with_flags(&path, OpenFlags::SQLITE_OPEN_READ_WRITE).unwrap(); let mut db2 = Connection::open_with_flags(&path, OpenFlags::SQLITE_OPEN_READ_ONLY).unwrap(); db1.busy_timeout(Duration::from_millis(0)).unwrap(); db2.busy_timeout(Duration::from_millis(0)).unwrap(); { let tx1 = db1.transaction().unwrap(); let tx2 = db2.transaction().unwrap(); // SELECT first makes sqlite lock with a shared lock tx1.query_row("SELECT x FROM foo LIMIT 1", NO_PARAMS, |_| Ok(())) .unwrap(); tx2.query_row("SELECT x FROM foo LIMIT 1", NO_PARAMS, |_| Ok(())) .unwrap(); tx1.execute("INSERT INTO foo VALUES(?1)", &[1]).unwrap(); let _ = tx2.execute("INSERT INTO foo VALUES(?1)", &[2]); let _ = tx1.commit(); let _ = tx2.commit(); } let _ = db1 .transaction() .expect("commit should have closed transaction"); let _ = db2 .transaction() .expect("commit should have closed transaction"); } #[test] fn test_persistence() { let temp_dir = TempDir::new("test_open_file").unwrap(); let path = temp_dir.path().join("test.db3"); { let db = Connection::open(&path).unwrap(); let sql = "BEGIN; CREATE TABLE foo(x INTEGER); INSERT INTO foo VALUES(42); END;"; db.execute_batch(sql).unwrap(); } let path_string = path.to_str().unwrap(); let db = Connection::open(&path_string).unwrap(); let the_answer: Result<i64> = db.query_row("SELECT x FROM foo", NO_PARAMS, |r| r.get(0)); assert_eq!(42i64, the_answer.unwrap()); } #[test] fn test_open() { assert!(Connection::open_in_memory().is_ok()); let db = checked_memory_handle(); assert!(db.close().is_ok()); } #[test] fn test_open_failure() { let filename = "no_such_file.db"; let result = 
Connection::open_with_flags(filename, OpenFlags::SQLITE_OPEN_READ_ONLY); assert!(!result.is_ok()); let err = result.err().unwrap(); if let Error::SqliteFailure(e, Some(msg)) = err { assert_eq!(ErrorCode::CannotOpen, e.code); assert_eq!(ffi::SQLITE_CANTOPEN, e.extended_code); assert!( msg.contains(filename), "error message '{}' does not contain '{}'", msg, filename ); } else { panic!("SqliteFailure expected"); } } #[test] fn test_close_retry() { let db = checked_memory_handle(); // force the DB to be busy by preparing a statement; this must be done at the // FFI level to allow us to call .close() without dropping the prepared // statement first. let raw_stmt = { use super::str_to_cstring; use std::mem::MaybeUninit; use std::os::raw::c_int; use std::ptr; let raw_db = db.db.borrow_mut().db; let sql = "SELECT 1"; let mut raw_stmt = MaybeUninit::uninit(); let cstring = str_to_cstring(sql).unwrap(); let rc = unsafe { ffi::sqlite3_prepare_v2( raw_db, cstring.as_ptr(), (sql.len() + 1) as c_int, raw_stmt.as_mut_ptr(), ptr::null_mut(), ) }; assert_eq!(rc, ffi::SQLITE_OK); let raw_stmt: *mut ffi::sqlite3_stmt = unsafe { raw_stmt.assume_init() }; raw_stmt }; // now that we have an open statement, trying (and retrying) to close should // fail. let (db, _) = db.close().unwrap_err(); let (db, _) = db.close().unwrap_err(); let (db, _) = db.close().unwrap_err(); // finalize the open statement so a final close will succeed assert_eq!(ffi::SQLITE_OK, unsafe { ffi::sqlite3_finalize(raw_stmt) }); db.close().unwrap(); } #[test] fn test_open_with_flags() { for bad_flags in &[ OpenFlags::empty(), OpenFlags::SQLITE_OPEN_READ_ONLY | OpenFlags::SQLITE_OPEN_READ_WRITE, OpenFlags::SQLITE_OPEN_READ_ONLY | OpenFlags::SQLITE_OPEN_CREATE, ] { assert!(Connection::open_in_memory_with_flags(*bad_flags).is_err()); } } #[test] fn test_execute_batch() { let db = checked_memory_handle(); let sql = "BEGIN; CREATE TABLE foo(x INTEGER); INSERT INTO foo VALUES(1); INSERT INTO foo VALUES(2); INSERT INTO foo VALUES(3); INSERT INTO foo VALUES(4); END;"; db.execute_batch(sql).unwrap(); db.execute_batch("UPDATE foo SET x = 3 WHERE x < 3") .unwrap(); assert!(db.execute_batch("INVALID SQL").is_err()); } #[test] fn test_execute() { let db = checked_memory_handle(); db.execute_batch("CREATE TABLE foo(x INTEGER)").unwrap(); assert_eq!( 1, db.execute("INSERT INTO foo(x) VALUES (?)", &[1i32]) .unwrap() ); assert_eq!( 1, db.execute("INSERT INTO foo(x) VALUES (?)", &[2i32]) .unwrap() ); assert_eq!( 3i32, db.query_row::<i32, _, _>("SELECT SUM(x) FROM foo", NO_PARAMS, |r| r.get(0)) .unwrap() ); } #[test] fn test_execute_select() { let db = checked_memory_handle(); let err = db.execute("SELECT 1 WHERE 1 < ?", &[1i32]).unwrap_err(); if err != Error::ExecuteReturnedResults { panic!("Unexpected error: {}", err); } } #[test] #[cfg(feature = "extra_check")] fn test_execute_multiple() { let db = checked_memory_handle(); let err = db .execute( "CREATE TABLE foo(x INTEGER); CREATE TABLE foo(x INTEGER)", NO_PARAMS, ) .unwrap_err(); match err { Error::MultipleStatement => (), _ => panic!("Unexpected error: {}", err), } } #[test] fn test_prepare_column_names() { let db = checked_memory_handle(); db.execute_batch("CREATE TABLE foo(x INTEGER);").unwrap(); let stmt = db.prepare("SELECT * FROM foo").unwrap(); assert_eq!(stmt.column_count(), 1); assert_eq!(stmt.column_names(), vec!["x"]); let stmt = db.prepare("SELECT x AS a, x AS b FROM foo").unwrap(); assert_eq!(stmt.column_count(), 2); assert_eq!(stmt.column_names(), vec!["a", "b"]); } #[test] fn 
test_prepare_execute() { let db = checked_memory_handle(); db.execute_batch("CREATE TABLE foo(x INTEGER);").unwrap(); let mut insert_stmt = db.prepare("INSERT INTO foo(x) VALUES(?)").unwrap(); assert_eq!(insert_stmt.execute(&[1i32]).unwrap(), 1); assert_eq!(insert_stmt.execute(&[2i32]).unwrap(), 1); assert_eq!(insert_stmt.execute(&[3i32]).unwrap(), 1); assert_eq!(insert_stmt.execute(&["hello".to_string()]).unwrap(), 1); assert_eq!(insert_stmt.execute(&["goodbye".to_string()]).unwrap(), 1); assert_eq!(insert_stmt.execute(&[types::Null]).unwrap(), 1); let mut update_stmt = db.prepare("UPDATE foo SET x=? WHERE x<?").unwrap(); assert_eq!(update_stmt.execute(&[3i32, 3i32]).unwrap(), 2); assert_eq!(update_stmt.execute(&[3i32, 3i32]).unwrap(), 0); assert_eq!(update_stmt.execute(&[8i32, 8i32]).unwrap(), 3); } #[test] fn test_prepare_query() { let db = checked_memory_handle(); db.execute_batch("CREATE TABLE foo(x INTEGER);").unwrap(); let mut insert_stmt = db.prepare("INSERT INTO foo(x) VALUES(?)").unwrap(); assert_eq!(insert_stmt.execute(&[1i32]).unwrap(), 1); assert_eq!(insert_stmt.execute(&[2i32]).unwrap(), 1); assert_eq!(insert_stmt.execute(&[3i32]).unwrap(), 1); let mut query = db .prepare("SELECT x FROM foo WHERE x < ? ORDER BY x DESC") .unwrap(); { let mut rows = query.query(&[4i32]).unwrap(); let mut v = Vec::<i32>::new(); while let Some(row) = rows.next().unwrap() { v.push(row.get(0).unwrap()); } assert_eq!(v, [3i32, 2, 1]); } { let mut rows = query.query(&[3i32]).unwrap(); let mut v = Vec::<i32>::new(); while let Some(row) = rows.next().unwrap() { v.push(row.get(0).unwrap()); } assert_eq!(v, [2i32, 1]); } } #[test] fn test_query_map() { let db = checked_memory_handle(); let sql = "BEGIN; CREATE TABLE foo(x INTEGER, y TEXT); INSERT INTO foo VALUES(4, \"hello\"); INSERT INTO foo VALUES(3, \", \"); INSERT INTO foo VALUES(2, \"world\"); INSERT INTO foo VALUES(1, \"!\"); END;"; db.execute_batch(sql).unwrap(); let mut query = db.prepare("SELECT x, y FROM foo ORDER BY x DESC").unwrap(); let results: Result<Vec<String>> = query .query(NO_PARAMS) .unwrap() .map(|row| row.get(1)) .collect(); assert_eq!(results.unwrap().concat(), "hello, world!"); } #[test] fn test_query_row() { let db = checked_memory_handle(); let sql = "BEGIN; CREATE TABLE foo(x INTEGER); INSERT INTO foo VALUES(1); INSERT INTO foo VALUES(2); INSERT INTO foo VALUES(3); INSERT INTO foo VALUES(4); END;"; db.execute_batch(sql).unwrap(); assert_eq!( 10i64, db.query_row::<i64, _, _>("SELECT SUM(x) FROM foo", NO_PARAMS, |r| r.get(0)) .unwrap() ); let result: Result<i64> = db.query_row("SELECT x FROM foo WHERE x > 5", NO_PARAMS, |r| r.get(0)); match result.unwrap_err() { Error::QueryReturnedNoRows => (), err => panic!("Unexpected error {}", err), } let bad_query_result = db.query_row("NOT A PROPER QUERY; test123", NO_PARAMS, |_| Ok(())); assert!(bad_query_result.is_err()); } #[test] fn test_optional() { let db = checked_memory_handle(); let result: Result<i64> = db.query_row("SELECT 1 WHERE 0 <> 0", NO_PARAMS, |r| r.get(0)); let result = result.optional(); match result.unwrap() { None => (), _ => panic!("Unexpected result"), } let result: Result<i64> = db.query_row("SELECT 1 WHERE 0 == 0", NO_PARAMS, |r| r.get(0)); let result = result.optional(); match result.unwrap() { Some(1) => (), _ => panic!("Unexpected result"), } let bad_query_result: Result<i64> = db.query_row("NOT A PROPER QUERY", NO_PARAMS, |r| r.get(0)); let bad_query_result = bad_query_result.optional(); assert!(bad_query_result.is_err()); } #[test] fn test_pragma_query_row() 
{ let db = checked_memory_handle(); assert_eq!( "memory", db.query_row::<String, _, _>("PRAGMA journal_mode", NO_PARAMS, |r| r.get(0)) .unwrap() ); assert_eq!( "off", db.query_row::<String, _, _>("PRAGMA journal_mode=off", NO_PARAMS, |r| r.get(0)) .unwrap() ); } #[test] fn test_prepare_failures() { let db = checked_memory_handle(); db.execute_batch("CREATE TABLE foo(x INTEGER);").unwrap(); let err = db.prepare("SELECT * FROM does_not_exist").unwrap_err(); assert!(format!("{}", err).contains("does_not_exist")); } #[test] fn test_last_insert_rowid() { let db = checked_memory_handle(); db.execute_batch("CREATE TABLE foo(x INTEGER PRIMARY KEY)") .unwrap(); db.execute_batch("INSERT INTO foo DEFAULT VALUES").unwrap(); assert_eq!(db.last_insert_rowid(), 1); let mut stmt = db.prepare("INSERT INTO foo DEFAULT VALUES").unwrap(); for _ in 0i32..9 { stmt.execute(NO_PARAMS).unwrap(); } assert_eq!(db.last_insert_rowid(), 10); } #[test] fn test_is_autocommit() { let db = checked_memory_handle(); assert!( db.is_autocommit(), "autocommit expected to be active by default" ); } #[test] #[cfg(feature = "bundled")] fn test_is_busy() { let db = checked_memory_handle(); assert!(!db.is_busy()); let mut stmt = db.prepare("PRAGMA schema_version").unwrap(); assert!(!db.is_busy()); { let mut rows = stmt.query(NO_PARAMS).unwrap(); assert!(!db.is_busy()); let row = rows.next().unwrap(); assert!(db.is_busy()); assert!(row.is_some()); } assert!(!db.is_busy()); } #[test] fn test_statement_debugging() { let db = checked_memory_handle(); let query = "SELECT 12345"; let stmt = db.prepare(query).unwrap(); assert!(format!("{:?}", stmt).contains(query)); } #[test] fn test_notnull_constraint_error() { // extended error codes for constraints were added in SQLite 3.7.16; if we're // running on our bundled version, we know the extended error code exists. #[cfg(feature = "bundled")] fn check_extended_code(extended_code: c_int) { assert_eq!(extended_code, ffi::SQLITE_CONSTRAINT_NOTNULL); } #[cfg(not(feature = "bundled"))] fn check_extended_code(_extended_code: c_int) {} let db = checked_memory_handle(); db.execute_batch("CREATE TABLE foo(x NOT NULL)").unwrap(); let result = db.execute("INSERT INTO foo (x) VALUES (NULL)", NO_PARAMS); assert!(result.is_err()); match result.unwrap_err() { Error::SqliteFailure(err, _) => { assert_eq!(err.code, ErrorCode::ConstraintViolation); check_extended_code(err.extended_code); } err => panic!("Unexpected error {}", err), } } #[test] fn test_version_string() { let n = version_number(); let major = n / 1_000_000; let minor = (n % 1_000_000) / 1_000; let patch = n % 1_000; assert!(version().contains(&format!("{}.{}.{}", major, minor, patch))); } #[test] #[cfg(feature = "functions")] fn test_interrupt() { let db = checked_memory_handle(); let interrupt_handle = db.get_interrupt_handle(); db.create_scalar_function("interrupt", 0, false, move |_| { interrupt_handle.interrupt(); Ok(0) }) .unwrap(); let mut stmt = db .prepare("SELECT interrupt() FROM (SELECT 1 UNION SELECT 2 UNION SELECT 3)") .unwrap(); let result: Result<Vec<i32>> = stmt.query(NO_PARAMS).unwrap().map(|r| r.get(0)).collect(); match result.unwrap_err() { Error::SqliteFailure(err, _) => { assert_eq!(err.code, ErrorCode::OperationInterrupted); } err => { panic!("Unexpected error {}", err); } } } #[test] fn test_interrupt_close() { let db = checked_memory_handle(); let handle = db.get_interrupt_handle(); handle.interrupt(); db.close().unwrap(); handle.interrupt(); // Look at it's internals to see if we cleared it out properly. 
let db_guard = handle.db_lock.lock().unwrap(); assert!(db_guard.is_null()); // It would be nice to test that we properly handle close/interrupt // running at the same time, but it seems impossible to do with any // degree of reliability. } #[test] fn test_get_raw() { let db = checked_memory_handle(); db.execute_batch("CREATE TABLE foo(i, x);").unwrap(); let vals = ["foobar", "1234", "qwerty"]; let mut insert_stmt = db.prepare("INSERT INTO foo(i, x) VALUES(?, ?)").unwrap(); for (i, v) in vals.iter().enumerate() { let i_to_insert = i as i64; assert_eq!(insert_stmt.execute(params![i_to_insert, v]).unwrap(), 1); } let mut query = db.prepare("SELECT i, x FROM foo").unwrap(); let mut rows = query.query(NO_PARAMS).unwrap(); while let Some(row) = rows.next().unwrap() { let i = row.get_raw(0).as_i64().unwrap(); let expect = vals[i as usize]; let x = row.get_raw("x").as_str().unwrap(); assert_eq!(x, expect); } } #[test] fn test_from_handle() { let db = checked_memory_handle(); let handle = unsafe { db.handle() }; { let db = unsafe { Connection::from_handle(handle) }.unwrap(); db.execute_batch("PRAGMA VACUUM").unwrap(); } db.close().unwrap(); } mod query_and_then_tests { use super::*; #[derive(Debug)] enum CustomError { SomeError, Sqlite(Error), } impl fmt::Display for CustomError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> ::std::result::Result<(), fmt::Error> { match *self { CustomError::SomeError => write!(f, "{}", self.description()), CustomError::Sqlite(ref se) => write!(f, "{}: {}", self.description(), se), } } } impl StdError for CustomError { fn description(&self) -> &str { "my custom error" } fn cause(&self) -> Option<&dyn StdError> { match *self { CustomError::SomeError => None, CustomError::Sqlite(ref se) => Some(se), } } } impl From<Error> for CustomError { fn from(se: Error) -> CustomError { CustomError::Sqlite(se) } } type CustomResult<T> = ::std::result::Result<T, CustomError>; #[test] fn test_query_and_then() { let db = checked_memory_handle(); let sql = "BEGIN; CREATE TABLE foo(x INTEGER, y TEXT); INSERT INTO foo VALUES(4, \"hello\"); INSERT INTO foo VALUES(3, \", \"); INSERT INTO foo VALUES(2, \"world\"); INSERT INTO foo VALUES(1, \"!\"); END;"; db.execute_batch(sql).unwrap(); let mut query = db.prepare("SELECT x, y FROM foo ORDER BY x DESC").unwrap(); let results: Result<Vec<String>> = query .query_and_then(NO_PARAMS, |row| row.get(1)) .unwrap() .collect(); assert_eq!(results.unwrap().concat(), "hello, world!"); } #[test] fn test_query_and_then_fails() { let db = checked_memory_handle(); let sql = "BEGIN; CREATE TABLE foo(x INTEGER, y TEXT); INSERT INTO foo VALUES(4, \"hello\"); INSERT INTO foo VALUES(3, \", \"); INSERT INTO foo VALUES(2, \"world\"); INSERT INTO foo VALUES(1, \"!\"); END;"; db.execute_batch(sql).unwrap(); let mut query = db.prepare("SELECT x, y FROM foo ORDER BY x DESC").unwrap(); let bad_type: Result<Vec<f64>> = query .query_and_then(NO_PARAMS, |row| row.get(1)) .unwrap() .collect(); match bad_type.unwrap_err() { Error::InvalidColumnType(..) 
=> (), err => panic!("Unexpected error {}", err), } let bad_idx: Result<Vec<String>> = query .query_and_then(NO_PARAMS, |row| row.get(3)) .unwrap() .collect(); match bad_idx.unwrap_err() { Error::InvalidColumnIndex(_) => (), err => panic!("Unexpected error {}", err), } } #[test] fn test_query_and_then_custom_error() { let db = checked_memory_handle(); let sql = "BEGIN; CREATE TABLE foo(x INTEGER, y TEXT); INSERT INTO foo VALUES(4, \"hello\"); INSERT INTO foo VALUES(3, \", \"); INSERT INTO foo VALUES(2, \"world\"); INSERT INTO foo VALUES(1, \"!\"); END;"; db.execute_batch(sql).unwrap(); let mut query = db.prepare("SELECT x, y FROM foo ORDER BY x DESC").unwrap(); let results: CustomResult<Vec<String>> = query .query_and_then(NO_PARAMS, |row| row.get(1).map_err(CustomError::Sqlite)) .unwrap() .collect(); assert_eq!(results.unwrap().concat(), "hello, world!"); } #[test] fn test_query_and_then_custom_error_fails() { let db = checked_memory_handle(); let sql = "BEGIN; CREATE TABLE foo(x INTEGER, y TEXT); INSERT INTO foo VALUES(4, \"hello\"); INSERT INTO foo VALUES(3, \", \"); INSERT INTO foo VALUES(2, \"world\"); INSERT INTO foo VALUES(1, \"!\"); END;"; db.execute_batch(sql).unwrap(); let mut query = db.prepare("SELECT x, y FROM foo ORDER BY x DESC").unwrap(); let bad_type: CustomResult<Vec<f64>> = query .query_and_then(NO_PARAMS, |row| row.get(1).map_err(CustomError::Sqlite)) .unwrap() .collect(); match bad_type.unwrap_err() { CustomError::Sqlite(Error::InvalidColumnType(..)) => (), err => panic!("Unexpected error {}", err), } let bad_idx: CustomResult<Vec<String>> = query .query_and_then(NO_PARAMS, |row| row.get(3).map_err(CustomError::Sqlite)) .unwrap() .collect(); match bad_idx.unwrap_err() { CustomError::Sqlite(Error::InvalidColumnIndex(_)) => (), err => panic!("Unexpected error {}", err), } let non_sqlite_err: CustomResult<Vec<String>> = query .query_and_then(NO_PARAMS, |_| Err(CustomError::SomeError)) .unwrap() .collect(); match non_sqlite_err.unwrap_err() { CustomError::SomeError => (), err => panic!("Unexpected error {}", err), } } #[test] fn test_query_row_and_then_custom_error() { let db = checked_memory_handle(); let sql = "BEGIN; CREATE TABLE foo(x INTEGER, y TEXT); INSERT INTO foo VALUES(4, \"hello\"); END;"; db.execute_batch(sql).unwrap(); let query = "SELECT x, y FROM foo ORDER BY x DESC"; let results: CustomResult<String> = db.query_row_and_then(query, NO_PARAMS, |row| { row.get(1).map_err(CustomError::Sqlite) }); assert_eq!(results.unwrap(), "hello"); } #[test] fn test_query_row_and_then_custom_error_fails() { let db = checked_memory_handle(); let sql = "BEGIN; CREATE TABLE foo(x INTEGER, y TEXT); INSERT INTO foo VALUES(4, \"hello\"); END;"; db.execute_batch(sql).unwrap(); let query = "SELECT x, y FROM foo ORDER BY x DESC"; let bad_type: CustomResult<f64> = db.query_row_and_then(query, NO_PARAMS, |row| { row.get(1).map_err(CustomError::Sqlite) }); match bad_type.unwrap_err() { CustomError::Sqlite(Error::InvalidColumnType(..)) => (), err => panic!("Unexpected error {}", err), } let bad_idx: CustomResult<String> = db.query_row_and_then(query, NO_PARAMS, |row| { row.get(3).map_err(CustomError::Sqlite) }); match bad_idx.unwrap_err() { CustomError::Sqlite(Error::InvalidColumnIndex(_)) => (), err => panic!("Unexpected error {}", err), } let non_sqlite_err: CustomResult<String> = db.query_row_and_then(query, NO_PARAMS, |_| Err(CustomError::SomeError)); match non_sqlite_err.unwrap_err() { CustomError::SomeError => (), err => panic!("Unexpected error {}", err), } } #[test] fn 
test_dynamic() { let db = checked_memory_handle(); let sql = "BEGIN; CREATE TABLE foo(x INTEGER, y TEXT); INSERT INTO foo VALUES(4, \"hello\"); END;"; db.execute_batch(sql).unwrap(); db.query_row("SELECT * FROM foo", params![], |r| { assert_eq!(2, r.column_count()); Ok(()) }) .unwrap(); } #[test] fn test_dyn_box() { let db = checked_memory_handle(); db.execute_batch("CREATE TABLE foo(x INTEGER);").unwrap(); let b: Box<dyn ToSql> = Box::new(5); db.execute("INSERT INTO foo VALUES(?)", &[b]).unwrap(); db.query_row("SELECT x FROM foo", params![], |r| { assert_eq!(5, r.get_unwrap::<_, i32>(0)); Ok(()) }) .unwrap(); } } }
33.231772
110
0.552799
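A compact usage sketch of the connection API documented in the record above, using only calls that appear there (`open_in_memory`, `execute_batch`, `execute` with `params!`, `query_row` with `NO_PARAMS`):

use rusqlite::{params, Connection, Result, NO_PARAMS};

fn smoke_test() -> Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch("CREATE TABLE foo (x INTEGER)")?;
    conn.execute("INSERT INTO foo (x) VALUES (?1)", params![42i32])?;
    // query_row returns Err(QueryReturnedNoRows) on empty results; chain
    // `.optional()` (from OptionalExtension) when zero rows are expected.
    let sum: i64 = conn.query_row("SELECT SUM(x) FROM foo", NO_PARAMS, |r| r.get(0))?;
    assert_eq!(sum, 42);
    Ok(())
}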
8f31d219b966125e494ae113598c3a808aa1bcbc
4,330
#![allow(non_camel_case_types)]
#![allow(non_upper_case_globals)]
#![allow(unused_imports)]
#![allow(dead_code)]
#![allow(unused_variables)]
#![allow(unused_macros)]

pub mod assembler;
pub mod assembler_x64;
pub mod avx;
pub mod constants_x64;
pub mod dseg;
pub mod generic;
pub mod utils;

pub use self::utils::*;

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[repr(C)]
pub enum MachineMode {
    Int8,
    Int32,
    Int64,
    Float32,
    Float64,
    Ptr,
}

impl MachineMode {
    pub extern "C" fn size(self) -> usize {
        match self {
            MachineMode::Int8 => 1,
            MachineMode::Int32 => 4,
            MachineMode::Int64 => 8,
            MachineMode::Ptr => 8,
            MachineMode::Float32 => 4,
            MachineMode::Float64 => 8,
        }
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[repr(C)]
pub enum CondCode {
    Zero,
    NonZero,
    Equal,
    NotEqual,
    Greater,
    GreaterEq,
    Less,
    LessEq,
    UnsignedGreater,
    UnsignedGreaterEq,
    UnsignedLess,
    UnsignedLessEq,
}

const PAGE_SIZE: usize = 4096;

use core::mem;

#[cfg(target_family = "unix")]
fn setup(size: usize) -> *mut u8 {
    unsafe {
        let size = size * PAGE_SIZE;
        // Map a fresh anonymous RWX region directly. The previous version read
        // an uninitialized pointer (`mem::uninitialized`) as the mmap hint and
        // leaked a `posix_memalign` allocation; neither is needed, since
        // MAP_ANONYMOUS already returns page-aligned memory.
        let result = libc::mmap(
            core::ptr::null_mut(),
            size,
            libc::PROT_EXEC | libc::PROT_READ | libc::PROT_WRITE,
            libc::MAP_PRIVATE | libc::MAP_ANONYMOUS,
            -1,
            0,
        );
        assert_ne!(result, libc::MAP_FAILED, "mmap failed");
        result as *mut u8
    }
}

#[cfg(target_family = "windows")]
fn setup(size: usize) -> *mut u8 {
    unsafe {
        let _size = size * PAGE_SIZE;
        let mem: *mut u8 = mem::transmute(winapi::um::memoryapi::VirtualAlloc(
            ::std::ptr::null_mut(),
            _size,
            winapi::um::winnt::MEM_COMMIT,
            winapi::um::winnt::PAGE_EXECUTE_READWRITE,
        ));
        mem
    }
}

#[derive(Copy, Clone)]
#[repr(C)]
pub struct Memory {
    start: *const u8,
    end: *const u8,
    pointer: *const u8,
    size: usize,
}

impl Memory {
    pub extern "C" fn new(ptr: *const u8) -> Memory {
        Memory {
            start: ptr,
            end: ptr,
            pointer: ptr,
            // Sentinel; the real size is filled in by `get_executable_memory`.
            size: 0xdead,
        }
    }

    pub extern "C" fn start(&self) -> *const u8 {
        self.start
    }

    pub extern "C" fn end(&self) -> *const u8 {
        self.end
    }

    pub extern "C" fn ptr(&self) -> *const u8 {
        self.pointer
    }

    pub extern "C" fn size(&self) -> usize {
        self.size
    }
}

use self::assembler::Assembler;

#[no_mangle]
pub extern "C" fn get_executable_memory(buf: &Assembler) -> Memory {
    let data = copy_vec(buf.data());
    let dseg = &buf.dseg;
    let total_size = data.len() + dseg.size() as usize;
    let ptr = setup(total_size);
    // Data segment is laid out first, followed by the machine code.
    dseg.finish(ptr);
    let start;
    unsafe {
        start = ptr.offset(dseg.size() as isize);
        ::core::ptr::copy_nonoverlapping(data.as_ptr(), start as *mut u8, data.len());
    };
    let memory = Memory {
        start,
        end: unsafe { ptr.offset(total_size as isize) },
        pointer: ptr,
        size: total_size,
    };
    memory
}

pub mod c_api {
    use crate::constants_x64::*;
    use crate::assembler::Mem;
    use crate::assembler::*;

    use super::MachineMode;

    #[no_mangle]
    pub extern "C" fn mem_base(reg: Register, off: i32) -> Mem {
        Mem::Base(reg, off)
    }

    #[no_mangle]
    pub extern "C" fn mem_local(off: i32) -> Mem {
        Mem::Local(off)
    }

    #[no_mangle]
    pub extern "C" fn mem_offset(reg: Register, v1: i32, v2: i32) -> Mem {
        Mem::Offset(reg, v1, v2)
    }

    #[no_mangle]
    pub extern "C" fn mem_index(reg: Register, reg2: Register, v1: i32, v2: i32) -> Mem {
        Mem::Index(reg, reg2, v1, v2)
    }

    #[no_mangle]
    pub extern "C" fn asm_load_int(buf: &mut Assembler, mode: MachineMode, imm: i64, dst: Register) {
        buf.load_int_const(mode, dst, imm);
    }

    #[no_mangle]
    pub extern "C" fn asm_load_float(buf: &mut Assembler, mode: MachineMode, imm: f64, dst: XMMRegister) {
        buf.load_float_const(mode, dst, imm);
    }
}
24.602273
103
0.55612
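A heavily hedged sketch of driving this JIT surface end to end. Only `load_int_const` (via `asm_load_int`) and `get_executable_memory` appear in the record; `Assembler::new`, a `ret` emitter, and the register name `RAX` are assumptions about the `assembler` and `constants_x64` modules:

use crate::assembler::Assembler;
use crate::constants_x64::RAX; // assumed register constant
use crate::{get_executable_memory, MachineMode};

let mut asm = Assembler::new(); // assumed constructor
asm.load_int_const(MachineMode::Int64, RAX, 42);
asm.ret(); // assumed `ret` emitter
let memory = get_executable_memory(&asm);
// The code starts after the data segment, at `memory.start()`.
let f: extern "C" fn() -> i64 = unsafe { core::mem::transmute(memory.start()) };
assert_eq!(f(), 42);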
26f433595f96e8cd347534c59a6fe6c118d09af6
2,870
use core::mem::size_of; use std::io::Cursor; use castflip::{DecastIO, NE, SE, LE, BE}; use crate::{IData1, IVals1}; #[test] fn idata1() { let idata1 = IData1::gen(); let mut ne_output = Cursor::new(vec![0_u8; size_of::<IVals1>()]); let mut se_output = Cursor::new(vec![0_u8; size_of::<IVals1>()]); let mut le_output = Cursor::new(vec![0_u8; size_of::<IVals1>()]); let mut be_output = Cursor::new(vec![0_u8; size_of::<IVals1>()]); let mut ne_size = 0; ne_size += ne_output.decastf::<i8>(&idata1.ne_vals.val1_i8, NE).unwrap(); ne_size += ne_output.decastf::<i8>(&idata1.ne_vals.val2_i8, NE).unwrap(); ne_size += ne_output.decastf::<i16>(&idata1.ne_vals.val_i16, NE).unwrap(); ne_size += ne_output.decastf::<i32>(&idata1.ne_vals.val_i32, NE).unwrap(); ne_size += ne_output.decastf::<i64>(&idata1.ne_vals.val_i64, NE).unwrap(); ne_size += ne_output.decastf::<i128>(&idata1.ne_vals.val_i128, NE).unwrap(); let mut se_size = 0; se_size += se_output.decastf::<i8>(&idata1.ne_vals.val1_i8, SE).unwrap(); se_size += se_output.decastf::<i8>(&idata1.ne_vals.val2_i8, SE).unwrap(); se_size += se_output.decastf::<i16>(&idata1.ne_vals.val_i16, SE).unwrap(); se_size += se_output.decastf::<i32>(&idata1.ne_vals.val_i32, SE).unwrap(); se_size += se_output.decastf::<i64>(&idata1.ne_vals.val_i64, SE).unwrap(); se_size += se_output.decastf::<i128>(&idata1.ne_vals.val_i128, SE).unwrap(); let mut le_size = 0; le_size += le_output.decastf::<i8>(&idata1.ne_vals.val1_i8, LE).unwrap(); le_size += le_output.decastf::<i8>(&idata1.ne_vals.val2_i8, LE).unwrap(); le_size += le_output.decastf::<i16>(&idata1.ne_vals.val_i16, LE).unwrap(); le_size += le_output.decastf::<i32>(&idata1.ne_vals.val_i32, LE).unwrap(); le_size += le_output.decastf::<i64>(&idata1.ne_vals.val_i64, LE).unwrap(); le_size += le_output.decastf::<i128>(&idata1.ne_vals.val_i128, LE).unwrap(); let mut be_size = 0; be_size += be_output.decastf::<i8>(&idata1.ne_vals.val1_i8, BE).unwrap(); be_size += be_output.decastf::<i8>(&idata1.ne_vals.val2_i8, BE).unwrap(); be_size += be_output.decastf::<i16>(&idata1.ne_vals.val_i16, BE).unwrap(); be_size += be_output.decastf::<i32>(&idata1.ne_vals.val_i32, BE).unwrap(); be_size += be_output.decastf::<i64>(&idata1.ne_vals.val_i64, BE).unwrap(); be_size += be_output.decastf::<i128>(&idata1.ne_vals.val_i128, BE).unwrap(); assert_eq!(ne_size, size_of::<IVals1>()); assert_eq!(se_size, size_of::<IVals1>()); assert_eq!(le_size, size_of::<IVals1>()); assert_eq!(be_size, size_of::<IVals1>()); assert_eq!(ne_output.into_inner(), idata1.ne_bytes); assert_eq!(se_output.into_inner(), idata1.se_bytes); assert_eq!(le_output.into_inner(), idata1.le_bytes); assert_eq!(be_output.into_inner(), idata1.be_bytes); }
48.644068
80
0.66899
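The pattern exercised by the test above, distilled to one value per byte order, using only the `DecastIO` calls the test itself makes:

use std::io::Cursor;
use castflip::{DecastIO, LE, BE};

let mut le_out = Cursor::new(vec![0u8; 4]);
let mut be_out = Cursor::new(vec![0u8; 4]);
// `decastf` encodes the value in the requested byte order and returns the
// number of bytes written.
le_out.decastf::<i32>(&0x1234_5678, LE).unwrap();
be_out.decastf::<i32>(&0x1234_5678, BE).unwrap();
assert_eq!(le_out.into_inner(), [0x78, 0x56, 0x34, 0x12]);
assert_eq!(be_out.into_inner(), [0x12, 0x34, 0x56, 0x78]);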
bb34d8b21c61db4dea12d8aa1933529592e2fcf1
535
mod address; mod connection; mod delivery_type; mod interleaved; mod layers; mod mikey; mod mode; mod setup; pub use self::address::{Address, AddressError, ExtensionAddress, HostPort}; pub use self::connection::{Connection, ConnectionError}; pub use self::delivery_type::{DeliveryType, DeliveryTypeError}; pub use self::interleaved::{Interleaved, InterleavedError}; pub use self::layers::{Layers, LayersError}; pub use self::mikey::{MIKEYError, MIKEY}; pub use self::mode::{Mode, ModeError}; pub use self::setup::{Setup, SetupError};
29.722222
75
0.766355
2fdd51f44fa22592ea6d3042b81edb9532cce8e7
2,104
// Copyright 2020-2021 The Datafuse Authors. // // SPDX-License-Identifier: Apache-2.0. #[test] fn test_to_type_name_function() -> anyhow::Result<()> { use std::sync::Arc; use common_datablocks::*; use common_datavalues::*; use pretty_assertions::assert_eq; use crate::udfs::*; use crate::*; #[allow(dead_code)] struct Test { name: &'static str, display: &'static str, nullable: bool, block: DataBlock, expect: DataArrayRef, error: &'static str, func: Box<dyn IFunction> } let schema = Arc::new(DataSchema::new(vec![DataField::new( "a", DataType::Boolean, false )])); let field_a = ColumnFunction::try_create("a")?; let tests = vec![Test { name: "to_type_name-example-passed", display: "toTypeName(a)", nullable: false, func: ToTypeNameFunction::try_create("toTypeName", &[field_a.clone()])?, block: DataBlock::create(schema.clone(), vec![Arc::new(BooleanArray::from(vec![ true, true, true, false, ]))]), expect: Arc::new(StringArray::from(vec![ "Boolean", "Boolean", "Boolean", "Boolean", ])), error: "" }]; for t in tests { let func = t.func; println!("{:?}", t.name); if let Err(e) = func.eval(&t.block) { assert_eq!(t.error, e.to_string()); } // Display check. let expect_display = t.display.to_string(); let actual_display = format!("{}", func); assert_eq!(expect_display, actual_display); // Nullable check. let expect_null = t.nullable; let actual_null = func.nullable(t.block.schema())?; assert_eq!(expect_null, actual_null); let ref v = func.eval(&t.block)?; // Type check. let expect_type = func.return_type(t.block.schema())?; let actual_type = v.data_type(); assert_eq!(expect_type, actual_type); assert_eq!(v.to_array(t.block.num_rows())?.as_ref(), t.expect.as_ref()); } Ok(()) }
28.053333
87
0.562738
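Distilled, the `IFunction` contract the test above walks through is a three-step protocol. All names below come from the record; `block` stands for the `DataBlock` built in the test, so this is a sketch rather than a standalone test:

// Sketch of the evaluation protocol, not runnable on its own.
let field_a = ColumnFunction::try_create("a")?;
let func = ToTypeNameFunction::try_create("toTypeName", &[field_a])?;
let nullable = func.nullable(block.schema())?;        // false here
let return_type = func.return_type(block.schema())?;  // a string type, per the expected StringArray
let result = func.eval(&block)?;                      // array of "Boolean" strings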
502e33de8411306288d771616c3367374fdc4425
29,055
// Copyright 2018-2021 Parity Technologies (UK) Ltd. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::{ generator, GenerateCode, GenerateCodeUsing as _, }; use derive_more::From; use ir::{ Callable as _, HexLiteral as _, }; use proc_macro2::{ Ident, TokenStream as TokenStream2, }; use quote::{ format_ident, quote, quote_spanned, }; use syn::spanned::Spanned as _; /// Generates code for the message and constructor dispatcher. /// /// This code efficiently selects the dispatched ink! constructor or message /// by inspecting the first four bytes (selector) of the given input bytes. /// /// As this happens on every contract execution this code must be highly optimized. /// For that purpose a so-called dispatch enum is being generated that has a /// specialized `scale::Decode` implementation taking the first four bytes of /// the input stream in order to identify the enum variant that it is going to /// produce out of the rest of the input buffer. /// /// The rest of the input buffer is then automatically decoded directly into the /// expected input types of the respective ink! constructor or message. #[derive(From)] pub struct Dispatch<'a> { contract: &'a ir::Contract, } impl_as_ref_for_generator!(Dispatch); impl GenerateCode for Dispatch<'_> { fn generate_code(&self) -> TokenStream2 { let no_cross_calling_cfg = self.generate_code_using::<generator::CrossCallingConflictCfg>(); let entry_points = self.generate_entry_points(); let dispatch_using_mode = self.generate_dispatch_using_mode(); let dispatch_trait_impl_namespaces = self.generate_trait_impl_namespaces(); let dispatch_trait_impls = self.generate_dispatch_trait_impls(); let message_dispatch_enum = self.generate_message_dispatch_enum(); let constructor_dispatch_enum = self.generate_constructor_dispatch_enum(); quote! { // We do not generate contract dispatch code while the contract // is being tested or the contract is a dependency of another // since both resulting compilations do not require dispatching. #[cfg(not(test))] #no_cross_calling_cfg const _: () = { #entry_points #dispatch_using_mode #dispatch_trait_impl_namespaces #dispatch_trait_impls #message_dispatch_enum #constructor_dispatch_enum }; } } } impl Dispatch<'_> { /// Generates the static ink! contract entry points. /// /// # Note /// /// Those are expected to exist by the smart contracts host module. /// They guide the dispatch, set-up and tear-down of a smart contract. fn generate_entry_points(&self) -> TokenStream2 { let storage_ident = self.contract.module().storage().ident(); let all_messages_deny_payment = self.all_messages_deny_payment(); quote! 
{ #[cfg(not(test))] #[no_mangle] fn deploy() { <#storage_ident as ::ink_lang::DispatchUsingMode>::dispatch_using_mode( ::ink_lang::DispatchMode::Instantiate, ).unwrap_or_else(|error| { ::core::panic!("dispatching constructor failed: {}", error) }) } #[cfg(not(test))] #[no_mangle] fn call() { if #all_messages_deny_payment { ::ink_lang::deny_payment::<<#storage_ident as ::ink_lang::ContractEnv>::Env>() .expect("caller transferred value even though all ink! message deny payments") } <#storage_ident as ::ink_lang::DispatchUsingMode>::dispatch_using_mode( ::ink_lang::DispatchMode::Call, ).unwrap_or_else(|error| { ::core::panic!("dispatching message failed: {}", error) }) } } } /// Generates the `DispatchUsingMode` trait implementation to guide contract dispatch. fn generate_dispatch_using_mode(&self) -> TokenStream2 { let storage_ident = self.contract.module().storage().ident(); quote! { impl ::ink_lang::DispatchUsingMode for #storage_ident { #[allow(unused_parens)] fn dispatch_using_mode( mode: ::ink_lang::DispatchMode ) -> ::core::result::Result<(), ::ink_lang::DispatchError> { match mode { ::ink_lang::DispatchMode::Instantiate => { <<#storage_ident as ::ink_lang::ConstructorDispatcher>::Type as ::ink_lang::Execute>::execute( ::ink_env::decode_input::<<#storage_ident as ::ink_lang::ConstructorDispatcher>::Type>() .map_err(|_| ::ink_lang::DispatchError::CouldNotReadInput)? ) } ::ink_lang::DispatchMode::Call => { <<#storage_ident as ::ink_lang::MessageDispatcher>::Type as ::ink_lang::Execute>::execute( ::ink_env::decode_input::<<#storage_ident as ::ink_lang::MessageDispatcher>::Type>() .map_err(|_| ::ink_lang::DispatchError::CouldNotReadInput)? ) } } } } } } /// Returns the generated ink! namespace identifier for the given callable kind. fn dispatch_trait_impl_namespace(kind: ir::CallableKind) -> Ident { match kind { ir::CallableKind::Constructor => format_ident!("__ink_ConstructorInfo"), ir::CallableKind::Message => format_ident!("__ink_MessageInfo"), } } /// Generates utility types to emulate namespaces to disambiguate dispatch trait /// implementations for ink! messages and ink! constructors with overlapping /// selectors. fn generate_trait_impl_namespaces(&self) -> TokenStream2 { let message_namespace = Self::dispatch_trait_impl_namespace(ir::CallableKind::Message); let constructor_namespace = Self::dispatch_trait_impl_namespace(ir::CallableKind::Constructor); quote! { // Selector namespace for ink! messages of the root smart contract. // // # Note // // - We have separate namespaces for ink! messages and constructors to // allow for overlapping selectors between them. // - The `ID` const parameter uniquely identifies one of the ink! messages // implemented by the root smart contract. #[doc(hidden)] pub struct #message_namespace<const ID: ::core::primitive::u32> {} // Selector namespace for ink! constructors of the root smart contract. // // # Note // // - We have separate namespaces for ink! messages and constructors to // allow for overlapping selectors between them. // - The `ID` const parameter uniquely identifies one of the ink! constructors // implemented by the root smart contract. #[doc(hidden)] pub struct #constructor_namespace<const ID: ::core::primitive::u32> {} } } /// Generates code for the dispatch trait implementations for a generic ink! callable. 
fn generate_trait_impls_for_callable<C>( &self, cws: ir::CallableWithSelector<'_, C>, ) -> TokenStream2 where C: ir::Callable + quote::ToTokens, { let callable = cws.callable(); let callable_span = callable.span(); let selector = cws.composed_selector(); let (selector_bytes, selector_id) = ( selector.hex_lits(), selector.into_be_u32().hex_padded_suffixed(), ); let input_types = callable .inputs() .map(|pat_type| &pat_type.ty) .collect::<Vec<_>>(); let storage_ident = self.contract.module().storage().ident(); let namespace = Self::dispatch_trait_impl_namespace(cws.kind()); let input_types_tuple = if input_types.len() != 1 { // Pack all types into a tuple if they are not exactly 1. // This results in `()` for zero input types. quote! { ( #( #input_types ),* ) } } else { // Return the single type without turning it into a tuple. quote! { #( #input_types )* } }; let fn_input_impl = quote_spanned!(callable.inputs_span() => impl ::ink_lang::FnInput for #namespace::<#selector_id> { type Input = #input_types_tuple; } ); let fn_selector_impl = quote_spanned!(callable_span => impl ::ink_lang::FnSelector for #namespace::<#selector_id> { const SELECTOR: ::ink_env::call::Selector = ::ink_env::call::Selector::new([ #( #selector_bytes ),* ]); } ); let fn_state_impl = quote_spanned!(callable_span => impl ::ink_lang::FnState for #namespace::<#selector_id> { type State = #storage_ident; } ); quote! { #fn_input_impl #fn_selector_impl #fn_state_impl } } /// Returns a tuple of: /// /// - Vector over the generated identifier bindings (`__ink_binding_N`) for all inputs. /// - `TokenStream` representing the binding identifiers as tuple (for equal to two or more inputs), /// as single identifier (for exactly one input) or as wildcard (`_`) if there are /// no input bindings. /// /// # Examples /// /// **No inputs:** /// ``` /// # use quote::quote; /// # let x: (Vec<()>, _) = /// ( vec![], /// quote! { _ } ) /// # ; /// ``` /// /// **Exactly one input:** /// ``` /// # use quote::quote; /// # let __ink_binding_0 = (); /// ( vec![__ink_binding_0], /// quote! { __ink_binding_0 } ) /// # ; /// ``` /// /// **Multiple (equal to two or more) inputs:** /// ``` /// # use quote::quote; /// # let __ink_binding_0 = (); /// # let __ink_binding_1 = (); /// ( vec![__ink_binding_0, __ink_binding_1, /* ... */], /// quote! { (__ink_binding_0, __ink_binding_1, ..) } ) /// # ; /// ``` fn generate_input_bindings<C>(callable: &C) -> (Vec<Ident>, TokenStream2) where C: ir::Callable, { let input_bindings = callable .inputs() .enumerate() .map(|(n, _pat_type)| format_ident!("__ink_binding_{}", n)) .collect::<Vec<_>>(); let inputs_as_tuple_or_wildcard = match input_bindings.len() { 0 => quote! { _ }, 1 => quote! { #( #input_bindings ),* }, _ => quote! { ( #( #input_bindings ),* ) }, }; (input_bindings, inputs_as_tuple_or_wildcard) } /// Generates all the dispatch trait implementations for the given ink! message. fn generate_trait_impls_for_message( &self, cws: ir::CallableWithSelector<'_, ir::Message>, ) -> TokenStream2 { let message = cws.callable(); let message_span = message.span(); let selector = cws.composed_selector(); let selector_id = selector.into_be_u32().hex_padded_suffixed(); let output_tokens = message .output() .map(quote::ToTokens::to_token_stream) .unwrap_or_else(|| quote! 
{ () }); let is_mut = message.receiver().is_ref_mut(); let storage_ident = self.contract.module().storage().ident(); let message_ident = message.ident(); let namespace = Self::dispatch_trait_impl_namespace(ir::CallableKind::Message); let fn_output_impl = quote_spanned!(message.output().span() => impl ::ink_lang::FnOutput for #namespace::<#selector_id> { #[allow(unused_parens)] type Output = #output_tokens; } ); let callable_impl = self.generate_trait_impls_for_callable(cws); let (mut_token, message_trait_ident) = if is_mut { ( Some(syn::token::Mut::default()), format_ident!("MessageMut"), ) } else { (None, format_ident!("MessageRef")) }; let (input_bindings, inputs_as_tuple_or_wildcard) = Self::generate_input_bindings(message); let as_trait = cws.item_impl().trait_path().map(|trait_path| { quote_spanned!(message_span => as #trait_path ) }); let message_impl = quote_spanned!(message_span => impl ::ink_lang::#message_trait_ident for #namespace::<#selector_id> { const CALLABLE: fn( &#mut_token <Self as ::ink_lang::FnState>::State, <Self as ::ink_lang::FnInput>::Input ) -> <Self as ::ink_lang::FnOutput>::Output = |state, #inputs_as_tuple_or_wildcard| { <#storage_ident #as_trait>::#message_ident(state, #( #input_bindings ),* ) }; } ); quote_spanned!(message_span => #callable_impl #fn_output_impl #message_impl ) } /// Generates all the dispatch trait implementations for the given ink! constructor. fn generate_trait_impls_for_constructor( &self, cws: ir::CallableWithSelector<'_, ir::Constructor>, ) -> TokenStream2 { let constructor = cws.callable(); let constructor_span = constructor.span(); let selector = cws.composed_selector(); let selector_id = selector.into_be_u32().hex_padded_suffixed(); let storage_ident = self.contract.module().storage().ident(); let constructor_ident = constructor.ident(); let namespace = Self::dispatch_trait_impl_namespace(ir::CallableKind::Constructor); let callable_impl = self.generate_trait_impls_for_callable(cws); let (input_bindings, inputs_as_tuple_or_wildcard) = Self::generate_input_bindings(constructor); let as_trait = cws.item_impl().trait_path().map(|trait_path| { quote_spanned!(constructor_span => as #trait_path ) }); let constructor_impl = quote_spanned!(constructor_span => impl ::ink_lang::Constructor for #namespace::<#selector_id> { const CALLABLE: fn( <Self as ::ink_lang::FnInput>::Input ) -> <Self as ::ink_lang::FnState>::State = |#inputs_as_tuple_or_wildcard| { <#storage_ident #as_trait>::#constructor_ident(#( #input_bindings ),* ) }; } ); quote_spanned!(constructor_span => #callable_impl #constructor_impl ) } /// Generate all dispatch trait implementations for ink! messages and ink! constructors. fn generate_dispatch_trait_impls(&self) -> TokenStream2 { let message_impls = self .contract_messages() .map(|message| self.generate_trait_impls_for_message(message)); let constructor_impls = self .contract_constructors() .map(|constructor| self.generate_trait_impls_for_constructor(constructor)); quote! { #( #message_impls )* #( #constructor_impls )* } } /// Generates variant identifiers for the generated dispatch enum. /// /// Since we want to avoid generating random names we generate identifiers /// in terms of the selectors of the associated ink! messages or constructors. /// /// ## Example /// /// Given prefix of `"Message"` and selector with bytes `0xDEADBEEF` we /// generate the following identifier: `__ink_Message_0xdeadbeef` /// /// This way it is clear that this is an ink! 
generated identifier and even /// encodes the unique selector bytes to make the identifier unique. fn generate_dispatch_variant_ident<C>( &self, cws: ir::CallableWithSelector<'_, C>, ) -> Ident where C: ir::Callable, { let selector_bytes = cws.composed_selector().as_bytes().to_owned(); let prefix = match cws.callable().kind() { ir::CallableKind::Message => "Message", ir::CallableKind::Constructor => "Constructor", }; quote::format_ident!( "__ink_{}_0x{:02X}{:02X}{:02X}{:02X}", prefix, selector_bytes[0], selector_bytes[1], selector_bytes[2], selector_bytes[3] ) } /// Generates one match arm of the dispatch `scale::Decode` implementation. /// /// # Note /// /// There is one match arm per ink! constructor or message for the dispatch /// `scale::Decode` implementation. fn generate_dispatch_variant_decode<C>( &self, cws: ir::CallableWithSelector<'_, C>, ) -> TokenStream2 where C: ir::Callable, { let selector_bytes = cws.composed_selector().hex_lits(); let variant_ident = self.generate_dispatch_variant_ident(cws); let variant_types = cws.callable().inputs().map(|arg| &arg.ty); quote! { [ #( #selector_bytes ),* ] => { ::core::result::Result::Ok(Self::#variant_ident( #( <#variant_types as ::scale::Decode>::decode(input)? ),* )) } } } /// Generates one match arm of the dispatch variant enum. /// /// # Note /// /// There is one match arm per ink! constructor or message for the dispatch /// `scale::Decode` implementation. fn generate_dispatch_variant_arm<C>( &self, cws: ir::CallableWithSelector<'_, C>, ) -> TokenStream2 where C: ir::Callable, { let input_types = cws.callable().inputs().map(|arg| &arg.ty); let variant_ident = self.generate_dispatch_variant_ident(cws); quote! { #variant_ident(#(#input_types),*) } } /// Returns `true` if all ink! messages of `self` deny payments. /// /// # Note /// /// This information is used to produce better code in this scenario. fn all_messages_deny_payment(&self) -> bool { self.contract .module() .impls() .flat_map(ir::ItemImpl::iter_messages) .all(|message| !message.is_payable()) } /// Generates one match arm of the dispatch message for the `execute` implementation. /// /// # Note /// /// This is basically the code per ink! message that is going to be executed after /// the dispatch has already taken place. fn generate_dispatch_execute_message_arm( &self, cws: ir::CallableWithSelector<'_, ir::Message>, ) -> TokenStream2 { let storage_ident = self.contract.module().storage().ident(); let ident = self.generate_dispatch_variant_ident(cws); let message = cws.callable(); let arg_pats = message.inputs().map(|arg| &arg.pat).collect::<Vec<_>>(); let arg_inputs = if arg_pats.len() == 1 { quote! { #(#arg_pats),* } } else { quote! { ( #(#arg_pats),* ) } }; let (mut_mod, msg_trait, exec_fn) = match message.receiver() { ir::Receiver::RefMut => { ( Some(quote! { mut }), quote! { MessageMut }, quote! { execute_message_mut }, ) } ir::Receiver::Ref => { (None, quote! { MessageRef }, quote! { execute_message }) } }; let selector_id = cws.composed_selector().into_be_u32().hex_padded_suffixed(); let namespace = Self::dispatch_trait_impl_namespace(ir::CallableKind::Message); // If all ink! messages deny payment we can move the payment check to before // the message dispatch which is more efficient. let accepts_payments = cws.is_payable() || self.all_messages_deny_payment(); let is_dynamic_storage_allocation_enabled = self .contract .config() .is_dynamic_storage_allocator_enabled(); quote! 
{ Self::#ident(#(#arg_pats),*) => { ::ink_lang::#exec_fn::<<#storage_ident as ::ink_lang::ContractEnv>::Env, #namespace::<#selector_id>, _>( ::ink_lang::AcceptsPayments(#accepts_payments), ::ink_lang::EnablesDynamicStorageAllocator(#is_dynamic_storage_allocation_enabled), move |state: &#mut_mod #storage_ident| { <#namespace::<#selector_id> as ::ink_lang::#msg_trait>::CALLABLE( state, #arg_inputs ) } ) } } } /// Returns an iterator over all ink! messages of the ink! contract. fn contract_messages( &self, ) -> impl Iterator<Item = ir::CallableWithSelector<ir::Message>> { self.contract .module() .impls() .map(|impl_item| impl_item.iter_messages()) .flatten() } /// Generates the entire dispatch variant enum for all ink! messages. fn generate_message_dispatch_enum(&self) -> TokenStream2 { let storage_ident = self.contract.module().storage().ident(); let message_variants = self .contract_messages() .map(|message| self.generate_dispatch_variant_arm(message)); let decode_message = self .contract_messages() .map(|message| self.generate_dispatch_variant_decode(message)); let execute_variants = self .contract_messages() .map(|message| self.generate_dispatch_execute_message_arm(message)); quote! { const _: () = { #[doc(hidden)] pub enum __ink_MessageDispatchEnum { #( #message_variants ),* } impl ::ink_lang::MessageDispatcher for #storage_ident { type Type = __ink_MessageDispatchEnum; } impl ::scale::Decode for __ink_MessageDispatchEnum { fn decode<I: ::scale::Input>(input: &mut I) -> ::core::result::Result<Self, ::scale::Error> { match <[::core::primitive::u8; 4usize] as ::scale::Decode>::decode(input)? { #( #decode_message )* _invalid => ::core::result::Result::Err( <::scale::Error as ::core::convert::From<&'static ::core::primitive::str>>::from( "encountered unknown ink! message selector" ) ) } } } impl ::ink_lang::Execute for __ink_MessageDispatchEnum { fn execute(self) -> ::core::result::Result<(), ::ink_lang::DispatchError> { match self { #( #execute_variants )* } } } }; } } /// Generates one match arm of the dispatch constructor for the `execute` implementation. /// /// # Note /// /// This is basically the code per ink! constructor that is going to be executed after /// the dispatch has already taken place. fn generate_dispatch_execute_constructor_arm( &self, cws: ir::CallableWithSelector<'_, ir::Constructor>, ) -> TokenStream2 { let ident = self.generate_dispatch_variant_ident(cws); let constructor = cws.callable(); let arg_pats = constructor.inputs().map(|arg| &arg.pat).collect::<Vec<_>>(); let arg_inputs = if arg_pats.len() == 1 { quote! { #(#arg_pats),* } } else { quote! { ( #(#arg_pats),* ) } }; let selector_id = cws.composed_selector().into_be_u32().hex_padded_suffixed(); let namespace = Self::dispatch_trait_impl_namespace(ir::CallableKind::Constructor); let is_dynamic_storage_allocation_enabled = self .contract .config() .is_dynamic_storage_allocator_enabled(); quote! { Self::#ident(#(#arg_pats),*) => { ::ink_lang::execute_constructor::<#namespace::<#selector_id>, _>( ::ink_lang::EnablesDynamicStorageAllocator(#is_dynamic_storage_allocation_enabled), move || { <#namespace::<#selector_id> as ::ink_lang::Constructor>::CALLABLE( #arg_inputs ) } ) } } } /// Returns an iterator over all ink! constructors of the ink! contract. fn contract_constructors( &self, ) -> impl Iterator<Item = ir::CallableWithSelector<ir::Constructor>> { self.contract .module() .impls() .map(|impl_item| impl_item.iter_constructors()) .flatten() } /// Generates the entire dispatch variant enum for all ink! constructors. 
fn generate_constructor_dispatch_enum(&self) -> TokenStream2 { let storage_ident = self.contract.module().storage().ident(); let message_variants = self .contract_constructors() .map(|message| self.generate_dispatch_variant_arm(message)); let decode_message = self .contract_constructors() .map(|message| self.generate_dispatch_variant_decode(message)); let execute_variants = self .contract_constructors() .map(|cws| self.generate_dispatch_execute_constructor_arm(cws)); quote! { const _: () = { #[doc(hidden)] pub enum __ink_ConstructorDispatchEnum { #( #message_variants ),* } impl ::ink_lang::ConstructorDispatcher for #storage_ident { type Type = __ink_ConstructorDispatchEnum; } impl ::scale::Decode for __ink_ConstructorDispatchEnum { fn decode<I: ::scale::Input>(input: &mut I) -> ::core::result::Result<Self, ::scale::Error> { match <[::core::primitive::u8; 4usize] as ::scale::Decode>::decode(input)? { #( #decode_message )* _invalid => ::core::result::Result::Err( <::scale::Error as ::core::convert::From<&'static ::core::primitive::str>>::from( "encountered unknown ink! constructor selector" ) ) } } } impl ::ink_lang::Execute for __ink_ConstructorDispatchEnum { fn execute(self) -> ::core::result::Result<(), ::ink_lang::DispatchError> { match self { #( #execute_variants )* } } } }; } } }
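// Illustrative sketch, not part of the generator above: the selector-based
// dispatch pattern that the emitted code follows, reduced to plain Rust with
// no ink!/scale dependencies. Every name below (`MiniDispatch`, `decode_call`)
// and both selector byte values are invented purely for illustration.
#[derive(Debug, PartialEq)]
enum MiniDispatch {
    /// Variant for a callable with selector `0xDEADBEEF` and no inputs.
    Get,
    /// Variant for a callable with selector `0xCAFEBABE` and one `u32` input.
    Set(u32),
}

fn decode_call(input: &[u8]) -> Result<MiniDispatch, &'static str> {
    // The first four bytes identify the callable; the rest of the buffer is
    // decoded directly into that callable's input types, mirroring the
    // specialized `scale::Decode` implementation generated above.
    if input.len() < 4 {
        return Err("missing selector");
    }
    let (selector, rest) = input.split_at(4);
    match selector {
        [0xDE, 0xAD, 0xBE, 0xEF] => Ok(MiniDispatch::Get),
        [0xCA, 0xFE, 0xBA, 0xBE] => {
            if rest.len() != 4 {
                return Err("bad input length");
            }
            let arg = u32::from_le_bytes([rest[0], rest[1], rest[2], rest[3]]);
            Ok(MiniDispatch::Set(arg))
        }
        _ => Err("encountered unknown selector"),
    }
}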
41.212766
123
0.543177
507b0bfd3d3c6805bd8264c66eef47b005574a6c
1,615
#![no_std] #![no_main] extern crate itsybitsy_m0 as hal; extern crate panic_halt; use hal::clock::GenericClockController; use hal::delay::Delay; use hal::entry; use hal::pac::{CorePeripherals, Peripherals}; use hal::prelude::*; use hal::timer::SpinTimer; use smart_leds::{hsv::RGB8, SmartLedsWrite}; fn rgb_wheel(position: u8) -> RGB8 { match position { 0..=85 => { RGB8 { r: (255 - position * 3), g: (position * 3), b: 0 } } 86..=170 => { let position = position - 85; RGB8 { r: 0, g: (255 - position * 3), b: (position * 3) } } _ => { let position = position - 170; RGB8 { r: (position * 3), g: 0, b: (255 - position * 3) } } } } #[entry] fn main() -> ! { let mut peripherals = Peripherals::take().unwrap(); let core = CorePeripherals::take().unwrap(); let mut clocks = GenericClockController::with_internal_32kosc( peripherals.GCLK, &mut peripherals.PM, &mut peripherals.SYSCTRL, &mut peripherals.NVMCTRL, ); let mut delay = Delay::new(core.SYST, &mut clocks); let mut pins = hal::Pins::new(peripherals.PORT).split(); let mut rgb = pins.dotstar.init(SpinTimer::new(12), &mut pins.port); let mut val = 0; loop { // `val` is a `u8`, so a plain `val + 1` (or a modulo against 256) would overflow at 255; wrap back to zero manually instead val = match val { 255 => 0, _ => val + 1 }; let color: [RGB8; 1] = [rgb_wheel(val)]; rgb.write(color.iter().cloned()).unwrap(); delay.delay_ms(60u8); } }
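// Sketch of an equivalent, more idiomatic counter update: because `val` is a
// `u8`, wrapping arithmetic rolls 255 back over to 0 without an overflow
// panic, so the manual match above could also be written as shown here.
// (Illustration only; behaviour is identical.)
fn next_wheel_position(val: u8) -> u8 {
    val.wrapping_add(1)
}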
26.47541
85
0.555418
9ca77637ff7802a56b1c45fa60334d7626ab8870
16,404
/*! Representations for the space usage of various parts of a Tantivy index. This can be used programmatically, and will also be exposed in a human readable fashion in tantivy-cli. One important caveat for all of this functionality is that none of it currently takes storage-level details into consideration. For example, if your file system block size is 4096 bytes, we can under-count actual resultant space usage by up to 4095 bytes per file. */ use schema::Field; use std::collections::HashMap; use SegmentComponent; /// Indicates space usage in bytes pub type ByteCount = usize; /// Enum containing any of the possible space usage results for segment components. pub enum ComponentSpaceUsage { /// Data is stored per field in a uniform way PerField(PerFieldSpaceUsage), /// Data is stored in separate pieces in the store Store(StoreSpaceUsage), /// Some sort of raw byte count Basic(ByteCount), } /// Represents combined space usage of an entire searcher and its component segments. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct SearcherSpaceUsage { segments: Vec<SegmentSpaceUsage>, total: ByteCount, } impl SearcherSpaceUsage { pub(crate) fn new() -> SearcherSpaceUsage { SearcherSpaceUsage { segments: Vec::new(), total: 0, } } /// Add a segment, to `self`. /// Performs no deduplication or other intelligence. pub(crate) fn add_segment(&mut self, segment: SegmentSpaceUsage) { self.total += segment.total(); self.segments.push(segment); } /// Per segment space usage pub fn segments(&self) -> &[SegmentSpaceUsage] { &self.segments[..] } /// Returns total byte usage of this searcher, including all large subcomponents. /// Does not account for smaller things like `meta.json`. pub fn total(&self) -> ByteCount { self.total } } /// Represents combined space usage for all of the large components comprising a segment. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct SegmentSpaceUsage { num_docs: u32, termdict: PerFieldSpaceUsage, postings: PerFieldSpaceUsage, positions: PerFieldSpaceUsage, positions_idx: PerFieldSpaceUsage, fast_fields: PerFieldSpaceUsage, fieldnorms: PerFieldSpaceUsage, store: StoreSpaceUsage, deletes: ByteCount, total: ByteCount, } impl SegmentSpaceUsage { #[allow(clippy::too_many_arguments)] pub(crate) fn new( num_docs: u32, termdict: PerFieldSpaceUsage, postings: PerFieldSpaceUsage, positions: PerFieldSpaceUsage, positions_idx: PerFieldSpaceUsage, fast_fields: PerFieldSpaceUsage, fieldnorms: PerFieldSpaceUsage, store: StoreSpaceUsage, deletes: ByteCount, ) -> SegmentSpaceUsage { let total = termdict.total() + postings.total() + positions.total() + fast_fields.total() + fieldnorms.total() + store.total() + deletes; SegmentSpaceUsage { num_docs, termdict, postings, positions, positions_idx, fast_fields, fieldnorms, store, deletes, total, } } /// Space usage for the given component /// /// Clones the underlying data. /// Use the components directly if this is somehow in performance critical code. 
pub fn component(&self, component: SegmentComponent) -> ComponentSpaceUsage { use self::ComponentSpaceUsage::*; use SegmentComponent::*; match component { POSTINGS => PerField(self.postings().clone()), POSITIONS => PerField(self.positions().clone()), POSITIONSSKIP => PerField(self.positions_skip_idx().clone()), FASTFIELDS => PerField(self.fast_fields().clone()), FIELDNORMS => PerField(self.fieldnorms().clone()), TERMS => PerField(self.termdict().clone()), STORE => Store(self.store().clone()), DELETE => Basic(self.deletes()), } } /// Num docs in segment pub fn num_docs(&self) -> u32 { self.num_docs } /// Space usage for term dictionary pub fn termdict(&self) -> &PerFieldSpaceUsage { &self.termdict } /// Space usage for postings list pub fn postings(&self) -> &PerFieldSpaceUsage { &self.postings } /// Space usage for positions pub fn positions(&self) -> &PerFieldSpaceUsage { &self.positions } /// Space usage for positions skip idx pub fn positions_skip_idx(&self) -> &PerFieldSpaceUsage { &self.positions_idx } /// Space usage for fast fields pub fn fast_fields(&self) -> &PerFieldSpaceUsage { &self.fast_fields } /// Space usage for field norms pub fn fieldnorms(&self) -> &PerFieldSpaceUsage { &self.fieldnorms } /// Space usage for stored documents pub fn store(&self) -> &StoreSpaceUsage { &self.store } /// Space usage for document deletions pub fn deletes(&self) -> ByteCount { self.deletes } /// Total space usage in bytes for this segment. pub fn total(&self) -> ByteCount { self.total } } /// Represents space usage for the Store for this segment. /// /// This is composed of two parts. /// `data` represents the compressed data itself. /// `offsets` represents a lookup to find the start of a block. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct StoreSpaceUsage { data: ByteCount, offsets: ByteCount, } impl StoreSpaceUsage { pub(crate) fn new(data: ByteCount, offsets: ByteCount) -> StoreSpaceUsage { StoreSpaceUsage { data, offsets } } /// Space usage for the data part of the store pub fn data_usage(&self) -> ByteCount { self.data } /// Space usage for the offsets part of the store (doc ID -> offset) pub fn offsets_usage(&self) -> ByteCount { self.offsets } /// Total space usage in bytes for this Store pub fn total(&self) -> ByteCount { self.data + self.offsets } } /// Represents space usage for all of the (field, index) pairs that appear in a CompositeFile. /// /// A field can appear with a single index (typically 0) or with multiple indexes. /// Multiple indexes are used to handle variable length things, where each piece of a field's /// data is stored under its own index. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct PerFieldSpaceUsage { fields: HashMap<Field, FieldUsage>, total: ByteCount, } impl PerFieldSpaceUsage { pub(crate) fn new(fields: HashMap<Field, FieldUsage>) -> PerFieldSpaceUsage { let total = fields.values().map(|x| x.total()).sum(); PerFieldSpaceUsage { fields, total } } /// Per field space usage pub fn fields(&self) -> impl Iterator<Item = (&Field, &FieldUsage)> { self.fields.iter() } /// Bytes used by the represented file pub fn total(&self) -> ByteCount { self.total } } /// Represents space usage of a given field, breaking it down into the (field, index) pairs that /// comprise it. /// /// See documentation for PerFieldSpaceUsage for slightly more information. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct FieldUsage { field: Field, num_bytes: ByteCount, /// A field can be composed of more than one piece. /// These pieces are indexed by arbitrary numbers starting at zero. 
/// `self.num_bytes` includes all of `self.sub_num_bytes`. sub_num_bytes: Vec<Option<ByteCount>>, } impl FieldUsage { pub(crate) fn empty(field: Field) -> FieldUsage { FieldUsage { field, num_bytes: 0, sub_num_bytes: Vec::new(), } } pub(crate) fn add_field_idx(&mut self, idx: usize, size: ByteCount) { if self.sub_num_bytes.len() < idx + 1 { self.sub_num_bytes.resize(idx + 1, None); } assert!(self.sub_num_bytes[idx].is_none()); self.sub_num_bytes[idx] = Some(size); self.num_bytes += size } /// Field pub fn field(&self) -> Field { self.field } /// Space usage for each index pub fn sub_num_bytes(&self) -> &[Option<ByteCount>] { &self.sub_num_bytes[..] } /// Total bytes used for this field in this context pub fn total(&self) -> ByteCount { self.num_bytes } } #[cfg(test)] mod test { use core::Index; use schema::Field; use schema::Schema; use schema::STORED; use schema::{FAST, INT_INDEXED, TEXT}; use space_usage::ByteCount; use space_usage::PerFieldSpaceUsage; use Term; #[test] fn test_empty() { let schema = Schema::builder().build(); let index = Index::create_in_ram(schema.clone()); index.load_searchers().unwrap(); let searcher = index.searcher(); let searcher_space_usage = searcher.space_usage(); assert_eq!(0, searcher_space_usage.total()); } fn expect_single_field( field_space: &PerFieldSpaceUsage, field: &Field, min_size: ByteCount, max_size: ByteCount, ) { assert!(field_space.total() >= min_size); assert!(field_space.total() <= max_size); assert_eq!( vec![(field, field_space.total())], field_space .fields() .map(|(x, y)| (x, y.total())) .collect::<Vec<_>>() ); } #[test] fn test_fast_indexed() { let mut schema_builder = Schema::builder(); let name = schema_builder.add_u64_field("name", FAST | INT_INDEXED); let schema = schema_builder.build(); let index = Index::create_in_ram(schema.clone()); { let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); index_writer.add_document(doc!(name => 1u64)); index_writer.add_document(doc!(name => 2u64)); index_writer.add_document(doc!(name => 10u64)); index_writer.add_document(doc!(name => 20u64)); index_writer.commit().unwrap(); } index.load_searchers().unwrap(); let searcher = index.searcher(); let searcher_space_usage = searcher.space_usage(); assert!(searcher_space_usage.total() > 0); assert_eq!(1, searcher_space_usage.segments().len()); let segment = &searcher_space_usage.segments()[0]; assert!(segment.total() > 0); assert_eq!(4, segment.num_docs()); expect_single_field(segment.termdict(), &name, 1, 512); expect_single_field(segment.postings(), &name, 1, 512); assert_eq!(0, segment.positions().total()); assert_eq!(0, segment.positions_skip_idx().total()); expect_single_field(segment.fast_fields(), &name, 1, 512); expect_single_field(segment.fieldnorms(), &name, 1, 512); // TODO: understand why the following fails // assert_eq!(0, segment.store().total()); assert_eq!(0, segment.deletes()); } #[test] fn test_text() { let mut schema_builder = Schema::builder(); let name = schema_builder.add_text_field("name", TEXT); let schema = schema_builder.build(); let index = Index::create_in_ram(schema.clone()); { let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); index_writer.add_document(doc!(name => "hi")); index_writer.add_document(doc!(name => "this is a test")); index_writer.add_document( doc!(name => "some more documents with some word overlap with the other test"), ); index_writer.add_document(doc!(name => "hello hi goodbye")); index_writer.commit().unwrap(); } index.load_searchers().unwrap(); let searcher = 
index.searcher(); let searcher_space_usage = searcher.space_usage(); assert!(searcher_space_usage.total() > 0); assert_eq!(1, searcher_space_usage.segments().len()); let segment = &searcher_space_usage.segments()[0]; assert!(segment.total() > 0); assert_eq!(4, segment.num_docs()); expect_single_field(segment.termdict(), &name, 1, 512); expect_single_field(segment.postings(), &name, 1, 512); expect_single_field(segment.positions(), &name, 1, 512); expect_single_field(segment.positions_skip_idx(), &name, 1, 512); assert_eq!(0, segment.fast_fields().total()); expect_single_field(segment.fieldnorms(), &name, 1, 512); // TODO: understand why the following fails // assert_eq!(0, segment.store().total()); assert_eq!(0, segment.deletes()); } #[test] fn test_store() { let mut schema_builder = Schema::builder(); let name = schema_builder.add_text_field("name", STORED); let schema = schema_builder.build(); let index = Index::create_in_ram(schema.clone()); { let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); index_writer.add_document(doc!(name => "hi")); index_writer.add_document(doc!(name => "this is a test")); index_writer.add_document( doc!(name => "some more documents with some word overlap with the other test"), ); index_writer.add_document(doc!(name => "hello hi goodbye")); index_writer.commit().unwrap(); } index.load_searchers().unwrap(); let searcher = index.searcher(); let searcher_space_usage = searcher.space_usage(); assert!(searcher_space_usage.total() > 0); assert_eq!(1, searcher_space_usage.segments().len()); let segment = &searcher_space_usage.segments()[0]; assert!(segment.total() > 0); assert_eq!(4, segment.num_docs()); assert_eq!(0, segment.termdict().total()); assert_eq!(0, segment.postings().total()); assert_eq!(0, segment.positions().total()); assert_eq!(0, segment.positions_skip_idx().total()); assert_eq!(0, segment.fast_fields().total()); assert_eq!(0, segment.fieldnorms().total()); assert!(segment.store().total() > 0); assert!(segment.store().total() < 512); assert_eq!(0, segment.deletes()); } #[test] fn test_deletes() { let mut schema_builder = Schema::builder(); let name = schema_builder.add_u64_field("name", INT_INDEXED); let schema = schema_builder.build(); let index = Index::create_in_ram(schema.clone()); { let mut index_writer = index.writer_with_num_threads(1, 3_000_000).unwrap(); index_writer.add_document(doc!(name => 1u64)); index_writer.add_document(doc!(name => 2u64)); index_writer.add_document(doc!(name => 3u64)); index_writer.add_document(doc!(name => 4u64)); index_writer.commit().unwrap(); } { let mut index_writer2 = index.writer(50_000_000).unwrap(); index_writer2.delete_term(Term::from_field_u64(name, 2u64)); index_writer2.delete_term(Term::from_field_u64(name, 3u64)); // ok, now we should have a deleted doc index_writer2.commit().unwrap(); } index.load_searchers().unwrap(); let searcher = index.searcher(); let searcher_space_usage = searcher.space_usage(); assert!(searcher_space_usage.total() > 0); assert_eq!(1, searcher_space_usage.segments().len()); let segment = &searcher_space_usage.segments()[0]; assert!(segment.total() > 0); assert_eq!(2, segment.num_docs()); expect_single_field(segment.termdict(), &name, 1, 512); expect_single_field(segment.postings(), &name, 1, 512); assert_eq!(0, segment.positions().total()); assert_eq!(0, segment.positions_skip_idx().total()); assert_eq!(0, segment.fast_fields().total()); expect_single_field(segment.fieldnorms(), &name, 1, 512); // TODO: understand why the following fails // assert_eq!(0, 
segment.store().total()); assert!(segment.deletes() > 0); } }
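// Sketch of consuming the report (illustration only, not part of the module):
// given a `SearcherSpaceUsage` such as the one returned by
// `searcher.space_usage()` in the tests above, the accessors defined in this
// module are enough to print a simple per-segment breakdown.
fn print_breakdown(usage: &SearcherSpaceUsage) {
    for (i, segment) in usage.segments().iter().enumerate() {
        println!(
            "segment {}: {} docs, {} bytes (store {}, termdict {}, postings {})",
            i,
            segment.num_docs(),
            segment.total(),
            segment.store().total(),
            segment.termdict().total(),
            segment.postings().total()
        );
    }
    println!("searcher total: {} bytes", usage.total());
}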
32.939759
99
0.617471
6120cab253daf2929afaf2b4f1f4412e1c5f600c
1,663
use jsonwebtoken::{Algorithm, decode, DecodingKey, Validation}; use serde::Deserialize; use crate::authentication::error::error_type::AuthenticationError; use crate::authentication::scheme::bearer::jwt::token::Claims; use crate::authentication::scheme::bearer::jwt::token::decoder::TokenDecoder; pub trait RsaKeyComponents { fn get_n(&self) -> String; fn get_e(&self) -> String; } #[derive(Clone)] pub struct RsaJwtDecoder { algorithm: Algorithm, decoding_keys: Vec<DecodingKey<'static>>, } impl RsaJwtDecoder { pub fn new(algorithm: Algorithm, rsa_keys: Vec<Box<dyn RsaKeyComponents>>) -> RsaJwtDecoder { let mut decoding_keys: Vec<DecodingKey<'static>> = Vec::new(); for rsa_key in &rsa_keys { let n: String = rsa_key.get_n(); let e: String = rsa_key.get_e(); // `into_static()` is essential here: it makes the key own its data, so the resulting `DecodingKey` does not borrow from the local `n` and `e` strings. let decoding_key = DecodingKey::from_rsa_components(n.as_ref(), e.as_ref()).into_static(); decoding_keys.push(decoding_key); } RsaJwtDecoder { algorithm, decoding_keys, } } } impl<T: for<'b> Deserialize<'b> + Claims> TokenDecoder<T> for RsaJwtDecoder { fn decode_token(&self, token: &str) -> Result<Box<T>, AuthenticationError> { for key in &self.decoding_keys { let result = decode::<T>(token, key, &Validation::new(self.algorithm)); if let Ok(decoded_token) = result { return Ok(Box::new(decoded_token.claims)); } } Err(AuthenticationError::InvalidToken) } }
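// Sketch of wiring the decoder up. `StaticRsaKey` is a hypothetical holder
// for the base64url-encoded modulus and exponent of a JWKS key, and
// `MyClaims` stands for any type implementing `serde::Deserialize` plus this
// crate's `Claims` trait; neither name exists in this crate.
struct StaticRsaKey {
    n: String,
    e: String,
}

impl RsaKeyComponents for StaticRsaKey {
    fn get_n(&self) -> String {
        self.n.clone()
    }

    fn get_e(&self) -> String {
        self.e.clone()
    }
}

// Usage (illustrative):
// let key = StaticRsaKey { n: modulus_from_jwks, e: "AQAB".to_string() };
// let decoder = RsaJwtDecoder::new(Algorithm::RS256, vec![Box::new(key)]);
// let claims: Box<MyClaims> = decoder.decode_token(&raw_token)?;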
34.645833
113
0.64101
fb81651ac6fe92cb1b0309eab22393cc895203c2
38,554
#[cfg(feature = "std")] use core::f64; use core::i64; use anyhow::Result; use crate::{RoundMode::*, *}; mod macros; #[test] fn from_decimal() -> Result<()> { test_fixed_point! { case (numerator | Layout, denominator | i32, expected | FixedPoint) => { assert_eq!(FixedPoint::from_decimal(numerator, denominator)?, expected); }, all { (5_000_000_000, -9, fp!(5)); (1, 0, fp!(1)); (1, 1, fp!(10)); }, fp128 { (5_000_000_000_000_000_000, -18, fp!(5)); }, }; Ok(()) } #[test] #[cfg(feature = "std")] fn display() -> Result<()> { test_fixed_point! { case (x | FixedPoint, expected | &str) => { assert_eq!(format!("{}", x), String::from(expected)); #[cfg(feature = "serde")] assert_eq!(serde_json::to_string(&x).unwrap(), format!("\"{}\"", expected)); }, all { (fp!(0), "0.0"); (fp!(10.042), "10.042"); (fp!(-10.042), "-10.042"); (fp!(0.000000001), "0.000000001"); (fp!(-0.000000001), "-0.000000001"); (fp!(9223372036.854775807), "9223372036.854775807"); (fp!(-9223372036.854775808), "-9223372036.854775808"); }, fp128 { (fp!(0.000000000000000001), "0.000000000000000001"); (fp!(-0.000000000000000001), "-0.000000000000000001"); (fp!(170141183460469231731.687303715884105727), "170141183460469231731.687303715884105727"); (fp!(-170141183460469231731.687303715884105728), "-170141183460469231731.687303715884105728"); }, }; Ok(()) } #[test] #[allow(overflowing_literals)] fn from_good_str() -> Result<()> { test_fixed_point! { case (input | &str, expected | Layout) => { let expected = FixedPoint::from_bits(expected); let input: FixedPoint = input.parse()?; assert_eq!(input, expected); #[cfg(all(feature = "std", feature = "serde"))] assert_eq!( serde_json::from_str::<FixedPoint>(&format!("\"{}\"", input)).unwrap(), expected ); }, fp64 { ("1", 1000000000); ("1.1", 1100000000); ("1.02", 1020000000); ("-1.02", -1020000000); ("+1.02", 1020000000); ("0.1234", 123400000); ("-0.1234", -123400000); ("123456789.123456789", 123456789123456789); ("9223372036.854775807", 9223372036854775807); ("-9223372036.854775808", -9223372036854775808); }, fp128 { ("1", 1000000000000000000); ("1.1", 1100000000000000000); ("1.02", 1020000000000000000); ("-1.02", -1020000000000000000); ("+1.02", 1020000000000000000); ("0.1234", 123400000000000000); ("-0.1234", -123400000000000000); ("123456789.123456789", 123456789123456789000000000); ("9223372036.854775807", 9223372036854775807000000000); ("-9223372036.854775808", -9223372036854775808000000000); ("170141183460469231731.687303715884105727", 170141183460469231731687303715884105727); ("-170141183460469231731.687303715884105728", -170141183460469231731687303715884105728); }, }; Ok(()) } #[test] fn from_bad_str() -> Result<()> { test_fixed_point! { case (bad_str | &str) => { let result: Result<FixedPoint, ConvertError> = bad_str.parse(); assert!(result.is_err(), "must not parse '{}'", bad_str); #[cfg(all(feature = "std", feature = "serde"))] assert!(serde_json::from_str::<FixedPoint>(&format!("\"{}\"", bad_str)).is_err()); }, all { (""); ("7.02e5"); ("a.12"); ("12.a"); ("13.9999999999999999999999999999999999999999999999999999999999999"); ("100000000000000000000000"); ("170141183460469231731687303715.884105728"); ("13.0000000000000000001"); ("13.1000000000000000001"); ("9223372036.8547758204856183567"); }, fp64 { ("13.0000000001"); ("13.1000000001"); ("9223372036.854775808"); }, }; Ok(()) } #[test] #[cfg(all(feature = "std", feature = "serde"))] fn serde_with() -> Result<()> { test_fixed_point! 
{ case (input | f64, expected | FixedPoint) => { #[derive(::serde::Serialize, ::serde::Deserialize)] struct Struct { #[serde(with = "crate::serde::as_f64")] number: FixedPoint, } let actual = serde_json::from_str::<Struct>(&format!(r#"{{"number":{}}}"#, input)).unwrap().number; assert_eq!(expected, actual); }, all { (1., fp!(1.0)); (1.1, fp!(1.1)); (1.02, fp!(1.02)); (-1.02, fp!(-1.02)); (0.1234, fp!(0.1234)); (-0.1234, fp!(-0.1234)); }, }; Ok(()) } #[test] #[allow(clippy::assertions_on_constants)] fn exp_and_coef_should_agree() -> Result<()> { test_fixed_point! { case () => { assert!(FixedPoint::PRECISION > 0); const TEN: Layout = 10; assert_eq!(FixedPoint::COEF, TEN.pow(FixedPoint::PRECISION as u32)); }, }; Ok(()) } #[test] fn cmul_overflow() -> Result<()> { test_fixed_point! { case () => { let result = FixedPoint::MAX.cmul(Layout::MAX); assert_eq!(result, Err(ArithmeticError::Overflow)); let result = FixedPoint::MAX.cmul(Layout::MIN); assert_eq!(result, Err(ArithmeticError::Overflow)); }, }; Ok(()) } #[test] fn rmul_exact() -> Result<()> { test_fixed_point! { case (a | FixedPoint, b | FixedPoint, expected | FixedPoint) => { // Check the result assert_eq!(a.rmul(b, Floor)?, expected); // Check the commutative property assert_eq!(b.rmul(a, Floor)?, expected); // Check that round mode doesn't matter assert_eq!(a.rmul(b, Ceil)?, expected); assert_eq!(b.rmul(a, Ceil)?, expected); }, all { (fp!(525), fp!(10), fp!(5250)); (fp!(-525), fp!(10), fp!(-5250)); (fp!(-525), fp!(-10), fp!(5250)); (fp!(525), fp!(-10), fp!(-5250)); (fp!(525), fp!(0.0001), fp!(0.0525)); (fp!(-525), fp!(0.0001), fp!(-0.0525)); (fp!(-525), fp!(-0.0001), fp!(0.0525)); (FixedPoint::MAX, FixedPoint::ONE, FixedPoint::MAX); (FixedPoint::MIN, FixedPoint::ONE, FixedPoint::MIN); (FixedPoint::ONE, fp!(0.000000001), fp!(0.000000001)); (fp!(-1), fp!(-0.000000001), fp!(0.000000001)); ( FixedPoint::from_bits(Layout::MAX / 10 * 10), fp!(0.1), FixedPoint::from_bits(Layout::MAX / 10), ); ( FixedPoint::from_bits(Layout::MIN / 10 * 10), fp!(0.1), FixedPoint::from_bits(Layout::MIN / 10), ); }, fp128 { (fp!(13043817825.332782), fp!(13043817825.332782), fp!(170141183460469226191.989043859524)); }, }; Ok(()) } #[test] fn rmul_round() -> Result<()> { test_fixed_point! { case ( a | FixedPoint, b | FixedPoint, expected_floor | FixedPoint, expected_ceil | FixedPoint, ) => { // Check the result assert_eq!(a.rmul(b, Floor)?, expected_floor); assert_eq!(a.rmul(b, Ceil)?, expected_ceil); // Check the commutative property assert_eq!(b.rmul(a, Floor)?, expected_floor); assert_eq!(b.rmul(a, Ceil)?, expected_ceil); // Arguments' negation doesn't change the result assert_eq!(b.cneg()?.rmul(a.cneg()?, Floor)?, expected_floor); assert_eq!(b.cneg()?.rmul(a.cneg()?, Ceil)?, expected_ceil); }, fp64 { (fp!(0.1), fp!(0.000000001), fp!(0), fp!(0.000000001)); (fp!(-0.1), fp!(0.000000001), fp!(-0.000000001), fp!(0)); (fp!(0.000000001), fp!(0.000000001), fp!(0), fp!(0.000000001)); (fp!(-0.000000001), fp!(0.000000001), fp!(-0.000000001), fp!(0)); }, fp128 { (fp!(0.1), fp!(0.000000000000000001), FixedPoint::ZERO, fp!(0.000000000000000001)); (fp!(-0.1), fp!(0.000000000000000001), fp!(-0.000000000000000001), FixedPoint::ZERO); (fp!(0.000000000000000001), fp!(0.000000000000000001), FixedPoint::ZERO, fp!(0.000000000000000001)); (fp!(-0.000000000000000001), fp!(0.000000000000000001), fp!(-0.000000000000000001), FixedPoint::ZERO); }, }; Ok(()) } #[test] fn rmul_overflow() -> Result<()> { test_fixed_point! 
{ case (a | FixedPoint, b | FixedPoint) => { assert_eq!(a.rmul(b, Ceil), Err(ArithmeticError::Overflow)); }, all { (FixedPoint::MAX, fp!(1.000000001)); }, fp64 { (fp!(96038.388349945), fp!(96038.388349945)); (fp!(-97000), fp!(96100)) }, fp128 { (FixedPoint::MAX, fp!(1.000000000000000001)); (fp!(13043817825.332783), fp!(13043817825.332783)); (fp!(-13043817826), fp!(13043817826)) }, }; Ok(()) } #[test] fn rdiv_exact() -> Result<()> { test_fixed_point! { case (numerator | FixedPoint, denominator | FixedPoint, expected | FixedPoint) => { assert_eq!(numerator.rdiv(denominator, Ceil)?, expected); assert_eq!(numerator.rdiv(denominator, Floor)?, expected); }, all { (FixedPoint::MAX, FixedPoint::MAX, FixedPoint::ONE); (fp!(5), fp!(2), fp!(2.5)); (fp!(-5), fp!(2), fp!(-2.5)); (fp!(5), fp!(-2), fp!(-2.5)); (fp!(-5), fp!(-2), fp!(2.5)); (fp!(5), fp!(0.2), fp!(25)); (fp!(0.00000001), fp!(10), fp!(0.000000001)); (fp!(0.000000001), fp!(0.1), fp!(0.00000001)); }, fp128 { (fp!(0.00000000000000001), fp!(10), fp!(0.000000000000000001)); (fp!(0.000000000000000001), fp!(0.1), fp!(0.00000000000000001)); }, }; Ok(()) } #[test] fn rdiv_by_layout() -> Result<()> { test_fixed_point! { case ( a | FixedPoint, b | Layout, expected_floor | FixedPoint, expected_ceil | FixedPoint, ) => { assert_eq!(a.rdiv(b, Floor)?, expected_floor); assert_eq!(a.rdiv(b, Ceil)?, expected_ceil); }, all { (fp!(2.4), 2, fp!(1.2), fp!(1.2)); (fp!(0), 5, FixedPoint::ZERO, FixedPoint::ZERO); }, fp64 { (fp!(7), 3, fp!(2.333333333), fp!(2.333333334)); (fp!(-7), 3, fp!(-2.333333334), fp!(-2.333333333)); (fp!(-7), -3, fp!(2.333333333), fp!(2.333333334)); (fp!(7), -3, fp!(-2.333333334), fp!(-2.333333333)); (fp!(0.000000003), 2, fp!(0.000000001), fp!(0.000000002)); (fp!(0.000000003), 7, fp!(0), fp!(0.000000001)); (fp!(0.000000001), 7, fp!(0), fp!(0.000000001)); }, fp128 { (fp!(7), 3, fp!(2.333333333333333333), fp!(2.333333333333333334)); (fp!(-7), 3, fp!(-2.333333333333333334), fp!(-2.333333333333333333)); (fp!(-7), -3, fp!(2.333333333333333333), fp!(2.333333333333333334)); (fp!(7), -3, fp!(-2.333333333333333334), fp!(-2.333333333333333333)); (fp!(0.000000000000000003), 2, fp!(0.000000000000000001), fp!(0.000000000000000002)); (fp!(0.000000000000000003), 7, fp!(0), fp!(0.000000000000000001)); (fp!(0.000000000000000001), 7, fp!(0), fp!(0.000000000000000001)); }, }; Ok(()) } #[test] fn rdiv_round() -> Result<()> { test_fixed_point! { case ( numerator | FixedPoint, denominator | FixedPoint, expected_ceil | FixedPoint, expected_floor | FixedPoint, ) => { assert_eq!(numerator.rdiv(denominator, Ceil)?, expected_ceil); assert_eq!(numerator.rdiv(denominator, Floor)?, expected_floor); }, fp64 { (fp!(100), fp!(3), fp!(33.333333334), fp!(33.333333333)); (fp!(-100), fp!(-3), fp!(33.333333334), fp!(33.333333333)); (fp!(-100), fp!(3), fp!(-33.333333333), fp!(-33.333333334)); (fp!(100), fp!(-3), fp!(-33.333333333), fp!(-33.333333334)); }, fp128 { (fp!(100), fp!(3), fp!(33.333333333333333334), fp!(33.333333333333333333)); (fp!(-100), fp!(-3), fp!(33.333333333333333334), fp!(33.333333333333333333)); (fp!(-100), fp!(3), fp!(-33.333333333333333333), fp!(-33.333333333333333334)); (fp!(100), fp!(-3), fp!(-33.333333333333333333), fp!(-33.333333333333333334)); }, }; Ok(()) } #[test] fn rdiv_layout() -> Result<()> { test_fixed_point! 
{ case ( a | Layout, b | Layout, expected_floor | Layout, expected_ceil | Layout, ) => { assert_eq!(a.rdiv(b, Floor)?, expected_floor); assert_eq!(a.rdiv(b, Ceil)?, expected_ceil); assert_eq!(a.rdiv(-b, Floor)?, -expected_ceil); assert_eq!((-a).rdiv(b, Floor)?, -expected_ceil); assert_eq!(a.rdiv(-b, Ceil)?, -expected_floor); assert_eq!((-a).rdiv(b, Ceil)?, -expected_floor); assert_eq!((-a).rdiv(-b, Floor)?, expected_floor); assert_eq!((-a).rdiv(-b, Ceil)?, expected_ceil); }, all { (5, 2, 2, 3); (0, 5, 0, 0); }, }; Ok(()) } #[test] fn rdiv_division_by_zero() -> Result<()> { test_fixed_point! { case (x | FixedPoint) => { let expected = Err(ArithmeticError::DivisionByZero); assert_eq!(x.rdiv(FixedPoint::ZERO, Floor), expected); assert_eq!(x.rdiv(FixedPoint::ZERO, Ceil), expected); }, all { (fp!(0)); (fp!(1)); (fp!(-1)); (FixedPoint::MAX); (FixedPoint::MIN); }, }; Ok(()) } #[test] fn rdiv_overflow() -> Result<()> { test_fixed_point! { case (denominator | FixedPoint) => { assert_eq!( FixedPoint::MAX.rdiv(denominator, Ceil), Err(ArithmeticError::Overflow) ); }, all { (fp!(0.999999999)); }, fp128 { (fp!(0.999999999999999999)); }, }; Ok(()) } #[test] fn float_mul() -> Result<()> { test_fixed_point! { case (a | FixedPoint, b | FixedPoint, expected | FixedPoint) => { assert_eq!(a.rmul(b, Ceil)?, expected); }, all { (fp!(525), fp!(10), fp!(5250)); (fp!(525), fp!(0.0001), fp!(0.0525)); (FixedPoint::MAX, FixedPoint::ONE, FixedPoint::MAX); ( FixedPoint::from_bits(Layout::MAX / 10 * 10), fp!(0.1), FixedPoint::from_bits(Layout::MAX / 10), ); }, }; Ok(()) } #[test] fn float_mul_overflow() -> Result<()> { test_fixed_point! { case (a | FixedPoint, b | FixedPoint) => { assert!(a.rmul(b, Ceil).is_err()); }, fp64 { (fp!(140000), fp!(140000)); (fp!(-140000), fp!(140000)); }, fp128 { (fp!(13043817826), fp!(13043817825)); (fp!(-13043817826), fp!(13043817825)); }, }; Ok(()) } #[test] fn half_sum_exact() -> Result<()> { test_fixed_point! { case (expected | FixedPoint) => { assert_eq!(FixedPoint::half_sum(expected, expected, Floor), expected); assert_eq!(FixedPoint::half_sum(expected, expected, Ceil), expected); }, all { (fp!(0)); (fp!(1)); (fp!(-1)); (FixedPoint::MAX); (FixedPoint::MIN); }, }; test_fixed_point! { case (a | FixedPoint, b | FixedPoint, expected | FixedPoint) => { assert_eq!(FixedPoint::half_sum(a, b, Floor), expected); assert_eq!(FixedPoint::half_sum(b, a, Floor), expected); assert_eq!(FixedPoint::half_sum(a, b, Ceil), expected); assert_eq!(FixedPoint::half_sum(b, a, Ceil), expected); }, all { (fp!(1), fp!(3), fp!(2)); (fp!(1), fp!(2), fp!(1.5)); (fp!(7.123456789), fp!(7.123456783), fp!(7.123456786)); (fp!(9000), fp!(9050), fp!(9025)); (fp!(9000), fp!(-9000), fp!(0)); (fp!(9000000000), fp!(9000000002), fp!(9000000001)); ( fp!(9000000000.000000001), fp!(-9000000000.000000005), fp!(-0.000000002), ); (FixedPoint::MAX, FixedPoint::MIN.cadd(FixedPoint::EPSILON)?, fp!(0)); }, fp128 { (fp!(7.123456789123456789), fp!(7.123456789123456783), fp!(7.123456789123456786)); }, }; Ok(()) } #[test] fn half_sum_rounded() -> Result<()> { test_fixed_point! 
{ case (a | FixedPoint, b | FixedPoint, expected_floor | FixedPoint, expected_ceil | FixedPoint) => { assert_eq!(FixedPoint::half_sum(a, b, Floor), expected_floor); assert_eq!(FixedPoint::half_sum(b, a, Floor), expected_floor); assert_eq!(FixedPoint::half_sum(a, b, Ceil), expected_ceil); assert_eq!(FixedPoint::half_sum(b, a, Ceil), expected_ceil); }, all { (FixedPoint::MIN, FixedPoint::MAX, FixedPoint::EPSILON.cneg()?, fp!(0)); }, fp64 { (fp!(9000000000.000000394), fp!(9000000001.000000397), fp!(9000000000.500000395), fp!(9000000000.500000396)); ( fp!(9000000000.000000001), fp!(-9000000000.000000006), fp!(-0.000000003), fp!(-0.000000002), ); (fp!(7.123456789), fp!(7.123456788), fp!(7.123456788), fp!(7.123456789)); }, fp128 { (fp!(7.123456789123456789), fp!(7.123456789123456788), fp!(7.123456789123456788), fp!(7.123456789123456789)); }, }; Ok(()) } #[test] fn integral() -> Result<()> { test_fixed_point! { case (a | FixedPoint, expected_floor | Layout, expected_ceil | Layout) => { assert_eq!(a.integral(Floor), expected_floor); assert_eq!(a.integral(Ceil), expected_ceil); }, all { (FixedPoint::ZERO, 0, 0); (fp!(0.0001), 0, 1); (fp!(-0.0001), -1, 0); (fp!(2.0001), 2, 3); (fp!(-2.0001), -3, -2); }, }; Ok(()) } #[test] fn round_towards_zero_by() -> Result<()> { test_fixed_point! { case (x | FixedPoint, rounder | FixedPoint, expected | FixedPoint) => { assert_eq!(x.round_towards_zero_by(rounder), expected); assert_eq!(x.cneg()?.round_towards_zero_by(rounder), expected.cneg()?); }, all { (fp!(1234.56789), fp!(100), fp!(1200)); (fp!(1234.56789), fp!(10), fp!(1230)); (fp!(1234.56789), fp!(1), fp!(1234)); (fp!(1234.56789), fp!(0.1), fp!(1234.5)); (fp!(1234.56789), fp!(0.01), fp!(1234.56)); (fp!(1234.56789), fp!(0.001), fp!(1234.567)); (fp!(1234.56789), fp!(0.0001), fp!(1234.5678)); (fp!(1234.56789), fp!(0.00001), fp!(1234.56789)); }, fp128 { (fp!(1234.56789123456789), fp!(0.0000000000001), fp!(1234.5678912345678)); (fp!(1234.56789123456789), fp!(0.00000000000001), fp!(1234.56789123456789)); }, }; Ok(()) } #[test] #[allow(clippy::cognitive_complexity)] fn next_power_of_ten() -> Result<()> { test_fixed_point! 
{ case (x | FixedPoint, expected | FixedPoint) => { assert_eq!(x.next_power_of_ten()?, expected); assert_eq!(x.cneg()?.next_power_of_ten()?, expected.cneg()?); }, all { (fp!(0.000000001), fp!(0.000000001)); (fp!(0.000000002), fp!(0.00000001)); (fp!(0.000000009), fp!(0.00000001)); (fp!(0.0000001), fp!(0.0000001)); (fp!(0.0000002), fp!(0.000001)); (fp!(0.1), fp!(0.1)); (fp!(0.100000001), fp!(1)); (fp!(1), fp!(1)); (fp!(2), fp!(10)); (fp!(1234567), fp!(10000000)); (fp!(923372036.654775807), fp!(1000000000)); (fp!(-0.000000001), fp!(-0.000000001)); (fp!(-0.000000002), fp!(-0.00000001)); (fp!(-0.000000009), fp!(-0.00000001)); (fp!(-0.00000001), fp!(-0.00000001)); (fp!(-0.00000002), fp!(-0.0000001)); (fp!(-0.100000001), fp!(-1)); (fp!(-923372021.854775808), fp!(-1000000000)); }, fp128 { (fp!(0.000000000000000001), fp!(0.000000000000000001)); (fp!(0.000000000000000002), fp!(0.00000000000000001)); (fp!(0.000000000000000009), fp!(0.00000000000000001)); (fp!(0.00000000000000001), fp!(0.00000000000000001)); (fp!(0.00000000000000002), fp!(0.0000000000000001)); (fp!(0.100000000000000001), fp!(1)); (fp!(1234567891234567), fp!(10000000000000000)); (fp!(923372036987654321.854775807), fp!(1000000000000000000)); (fp!(-0.000000000000000001), fp!(-0.000000000000000001)); (fp!(-0.000000000000000002), fp!(-0.00000000000000001)); (fp!(-0.000000000000000009), fp!(-0.00000000000000001)); (fp!(-0.00000000000000001), fp!(-0.00000000000000001)); (fp!(-0.00000000000000002), fp!(-0.0000000000000001)); (fp!(-0.100000000000000001), fp!(-1)); (fp!(-923372036987654321.854775808), fp!(-1000000000000000000)); }, }; test_fixed_point! { case (x | FixedPoint, expected | FixedPoint) => { assert_eq!(x.next_power_of_ten()?, expected); }, fp64 { (fp!(0), fp!(0.000000001)); }, fp128 { (fp!(0), fp!(0.000000000000000001)); }, }; test_fixed_point! { case (x | FixedPoint) => { assert_eq!(x.next_power_of_ten(), Err(ArithmeticError::Overflow)); }, all { (FixedPoint::MAX); (FixedPoint::MIN); }, fp64 { (fp!(9223372036.654775807)); (fp!(-9223372036.654775807)); }, fp128 { (fp!(150000000000000000000.0)); (fp!(-150000000000000000000.854775807)); }, }; Ok(()) } #[test] fn rounding_to_i64() -> Result<()> { test_fixed_point! { case (x | FixedPoint, expected | i64) => { assert_eq!(x.rounding_to_i64(), expected); }, all { (fp!(0), 0); (fp!(42), 42); (fp!(1.4), 1); (fp!(1.6), 2); (fp!(-1.4), -1); (fp!(-1.6), -2); (fp!(0.4999), 0); (fp!(0.5), 1); (fp!(0.5001), 1); }, }; Ok(()) } #[test] #[cfg(feature = "std")] #[allow(clippy::float_cmp)] fn to_f64() -> Result<()> { test_fixed_point! 
    {
        case (x | FixedPoint, expected | f64) => {
            assert_eq!(f64::from(x), expected);
        },
        all {
            (fp!(0), 0.0);
            (fp!(0.1), 0.1);
            (fp!(1), 1.0);
            (fp!(1.5), 1.5);
            (fp!(-5), -5.);
            (fp!(-14.14), -14.14);
            (fp!(42.123456789), 42.123456789);
            (fp!(-42.123456789), -42.123456789);
            (fp!(8003332421.536753168), 8003332421.536753);
            (fp!(-8003332421.536753168), -8003332421.536753);
            (fp!(9223372036.854775807), 9223372036.854776);
            (fp!(-9223372036.854775807), -9223372036.854776);
            (fp!(922337203.685477581), 922337203.6854776);
            (fp!(-922337203.685477581), -922337203.6854776);
            (fp!(92233720.36854775), 92233720.36854775);
            (fp!(-92233720.36854775), -92233720.36854775);
        },
        fp128 {
            (fp!(0.0000000000025), 25e-13);
            (fp!(1000000.0000000000025), 1e6);
            (fp!(-1000000.0000000000025), -1e6);
            (fp!(0.000000000000000025), 25e-18);
            (fp!(-0.000000000000000025), -25e-18);
            (fp!(2.1234567890123457), 2.1234567890123457);
            (fp!(-2.1234567890123457), -2.1234567890123457);
        },
    };
    Ok(())
}

#[test]
#[cfg(feature = "std")]
#[allow(clippy::float_cmp)]
fn from_f64() -> Result<()> {
    test_fixed_point! {
        case (x | FixedPoint, expected | f64) => {
            assert_eq!(Ok(x), expected.try_into());
            assert_eq!(Ok(x.cneg().unwrap()), (-expected).try_into());
        },
        all {
            (fp!(0), 0.0);
            (fp!(0.5), 0.5);
            (fp!(1), 1.0);
            (fp!(1.5), 1.5);
            (fp!(42.123456789), 42.123456789);
            (fp!(803332.421536753), 803332.421536753);
            (fp!(8033324.21536753), 8033324.21536753);
            (fp!(80333242.1536753), 80333242.1536753);
            (fp!(803332421.536753), 803332421.536753);
            (fp!(8033324215.36753), 8033324215.36753);
            (fp!(9223372036.85477), 9223372036.85477);
        },
        fp128 {
            (fp!(0.803332421536753), 0.803332421536753);
            (fp!(8.03332421536753), 8.03332421536753);
            (fp!(8.03332421536753), 8.03332421536753);
            (fp!(80.3332421536753), 80.3332421536753);
            (fp!(803.332421536753), 803.332421536753);
            (fp!(8033.32421536753), 8033.32421536753);
            (fp!(80333.2421536753), 80333.2421536753);
            // <see part of cases in `all` sections>
            (fp!(80333242153.6753), 80333242153.6753);
            (fp!(803332421536.753), 803332421536.753);
            (fp!(8033324215367.53), 8033324215367.53);
            (fp!(80333242153675.3), 80333242153675.3);
            (fp!(803332421536753), 803332421536753.);
            (fp!(8033324215367530), 8033324215367530.);
            (fp!(8033324215367533), 8033324215367533.);
        },
    };
    Ok(())
}

#[test]
fn saturating_add() -> Result<()> {
    test_fixed_point! {
        case (a | FixedPoint, b | FixedPoint, expected | FixedPoint) => {
            assert_eq!(a.saturating_add(b), expected);
            assert_eq!(b.saturating_add(a), expected);
            assert_eq!(a.cneg()?.saturating_add(b.cneg()?), expected.cneg()?);
        },
        all {
            (fp!(0), fp!(0), fp!(0));
            (fp!(0), fp!(3000.0000006), fp!(3000.0000006));
            (fp!(-1000.0000002), fp!(0), fp!(-1000.0000002));
            (fp!(-1000.0000002), fp!(3000.0000006), fp!(2000.0000004));
            (fp!(-1000.0000002), fp!(-3000.0000006), fp!(-4000.0000008));
            (fp!(4611686018.427387903), fp!(4611686018.427387903), fp!(9223372036.854775806));
        },
        fp128 {
            (fp!(0), fp!(3000000000000.0000000000000006), fp!(3000000000000.0000000000000006));
            (fp!(-1000000000000.0000000000000002), fp!(0), fp!(-1000000000000.0000000000000002));
            (fp!(-1000000000000.0000000000000002), fp!(3000000000000.0000000000000006), fp!(2000000000000.0000000000000004));
            (fp!(-1000000000000.0000000000000002), fp!(-3000000000000.0000000000000006), fp!(-4000000000000.0000000000000008));
            (fp!(4611686018000000000.000000000427387903), fp!(4611686018000000000.000000000427387903), fp!(9223372036000000000.000000000854775806));
        },
    };
    test_fixed_point! {
        case (a | FixedPoint, b | FixedPoint, expected | FixedPoint) => {
            assert_eq!(a.saturating_add(b), expected);
        },
        fp64 {
            (fp!(9222222222), fp!(9222222222), FixedPoint::MAX);
            (fp!(4611686019), fp!(4611686018.427387903), FixedPoint::MAX);
            (fp!(-9222222222), fp!(-9222222222), FixedPoint::MIN);
            (fp!(-4611686019), fp!(-4611686018.427387903), FixedPoint::MIN);
        },
        fp128 {
            (fp!(85550005550005550005), fp!(85550005550005550005), FixedPoint::MAX);
            (fp!(85550005550005550005), fp!(85550005550005550005.000000000427387), FixedPoint::MAX);
            (fp!(-85550005550005550005), fp!(-85550005550005550005), FixedPoint::MIN);
            (fp!(-85550005550005550005), fp!(-85550005550005550005.000000000427387), FixedPoint::MIN);
        },
    };
    Ok(())
}

#[test]
fn saturating_mul() -> Result<()> {
    test_fixed_point! {
        case (a | FixedPoint, b | Layout, expected | FixedPoint) => {
            assert_eq!(a.saturating_mul(b), expected);
            assert_eq!(CheckedMul::saturating_mul(b, a), expected);
            assert_eq!(a.cneg()?.saturating_mul(b), expected.cneg()?);
            assert_eq!(a.saturating_mul(-b), expected.cneg()?);
            assert_eq!(a.cneg()?.saturating_mul(-b), expected);
        },
        all {
            (fp!(0), 0, fp!(0));
            (fp!(3000.0000006), 0, fp!(0));
            (fp!(3000.0000006), 1, fp!(3000.0000006));
            (fp!(-1000.0000002), 0, fp!(0));
            (fp!(-1000.0000002), 3, fp!(-3000.0000006));
            (fp!(-1000.0000002), -4, fp!(4000.0000008));
            (fp!(68601.48179), -468, fp!(-32105493.47772));
        },
        fp128 {
            (fp!(3000000000000.0000000000000006), 0, FixedPoint::ZERO);
            (fp!(3000000000000.0000000000000006), 1, fp!(3000000000000.0000000000000006));
            (fp!(-1000000000000.0000000000000002), 0, FixedPoint::ZERO);
            (fp!(-1000000000000.0000000000000002), 3, fp!(-3000000000000.0000000000000006));
            (fp!(-1000000000000.0000000000000002), -4, fp!(4000000000000.0000000000000008));
            (fp!(68603957391461.48475635294179), -85204, fp!(-5845331585582084347.18029605227516));
        },
    };
    test_fixed_point! {
        case (a | FixedPoint, b | i128, expected | FixedPoint) => {
            let b = b as Layout;
            assert_eq!(a.saturating_mul(b), expected);
        },
        fp64 {
            (fp!(9222222222), 9222222222, FixedPoint::MAX);
            (fp!(4611686019.427387903), 4611686019, FixedPoint::MAX);
            (fp!(-9222222222), 9222222222, FixedPoint::MIN);
            (fp!(4611686019.427387903), -4611686019, FixedPoint::MIN);
        },
        fp128 {
            (fp!(85550005550005550005), 85550005550005550005, FixedPoint::MAX);
            (fp!(14000444000.427387), 14000444000, FixedPoint::MAX);
            (fp!(-85550005550005550005), 85550005550005550005, FixedPoint::MIN);
            (fp!(14000444000.427387), -14000444000, FixedPoint::MIN);
        },
    };
    Ok(())
}

#[test]
fn saturating_rmul() -> Result<()> {
    test_fixed_point! {
        case (a | FixedPoint, b | FixedPoint, expected | FixedPoint) => {
            assert_eq!(a.saturating_rmul(b, Floor), expected);
            assert_eq!(b.saturating_rmul(a, Floor), expected);
            assert_eq!(a.cneg()?.saturating_rmul(b, Floor), expected.cneg()?);
            assert_eq!(a.saturating_rmul(b.cneg()?, Floor), expected.cneg()?);
            assert_eq!(a.cneg()?.saturating_rmul(b.cneg()?, Floor), expected);
        },
        all {
            (fp!(0), fp!(0), fp!(0));
            (fp!(0), fp!(3000.0000006), fp!(0));
            (fp!(1), fp!(3000.0000006), fp!(3000.0000006));
            (fp!(-1000.0000002), fp!(0), fp!(0));
            (fp!(-1000.0000002), fp!(3), fp!(-3000.0000006));
            (fp!(-1000.0000002), fp!(-4), fp!(4000.0000008));
            (fp!(68601.48179), fp!(-468.28), fp!(-32124701.8926212));
        },
        fp128 {
            (fp!(0), fp!(3000000000000.0000000000000006), fp!(0));
            (fp!(1), fp!(3000000000000.0000000000000006), fp!(3000000000000.0000000000000006));
            (fp!(-1000000000000.0000000000000002), fp!(0), fp!(0));
            (fp!(-1000000000000.0000000000000002), fp!(3), fp!(-3000000000000.0000000000000006));
            (fp!(-1000000000000.0000000000000002), fp!(-4), fp!(4000000000000.0000000000000008));
        },
    };
    test_fixed_point! {
        case (a | FixedPoint, b | FixedPoint, mode | RoundMode, expected | FixedPoint) => {
            assert_eq!(a.saturating_rmul(b, mode), expected);
        },
        fp64 {
            (fp!(0.000000001), fp!(-0.1), Floor, fp!(-0.000000001));
            (fp!(0.000000001), fp!(0.1), Ceil, fp!(0.000000001));
            (fp!(0.000000001), fp!(0.1), Floor, fp!(0));
            (fp!(-0.000000001), fp!(0.1), Ceil, fp!(0));
            (fp!(9222222222), fp!(9222222222), Floor, FixedPoint::MAX);
            (fp!(4611686019), fp!(4611686018.427387903), Floor, FixedPoint::MAX);
            (fp!(-9222222222), fp!(9222222222), Floor, FixedPoint::MIN);
            (fp!(4611686019), fp!(-4611686018.427387903), Floor, FixedPoint::MIN);
        },
        fp128 {
            (fp!(0.000000000000000001), fp!(0.1), Floor, fp!(0));
            (fp!(0.000000000000000001), fp!(-0.1), Floor, fp!(-0.000000000000000001));
            (fp!(0.000000000000000001), fp!(0.1), Ceil, fp!(0.000000000000000001));
            (fp!(-0.000000000000000001), fp!(0.1), Ceil, fp!(0));
            (fp!(85550005550005550005), fp!(85550005550005550005), Floor, FixedPoint::MAX);
            (fp!(4611686019), fp!(4611686018000000000.000000000427387903), Floor, FixedPoint::MAX);
            (fp!(-85550005550005550005), fp!(85550005550005550005), Floor, FixedPoint::MIN);
            (fp!(4611686019), fp!(-4611686018000000000.000000000427387903), Floor, FixedPoint::MIN);
        },
    };
    Ok(())
}

#[test]
fn saturating_sub() -> Result<()> {
    test_fixed_point! {
        case (a | FixedPoint, b | FixedPoint, expected | FixedPoint) => {
            assert_eq!(a.saturating_sub(b), expected);
            assert_eq!(b.saturating_sub(a), expected.cneg()?);
            assert_eq!(a.cneg()?.saturating_sub(b.cneg()?), expected.cneg()?);
        },
        all {
            (fp!(0), fp!(0), fp!(0));
            (fp!(0), fp!(3000.0000006), fp!(-3000.0000006));
            (fp!(-1000.0000002), fp!(0), fp!(-1000.0000002));
            (fp!(-1000.0000002), fp!(3000.0000006), fp!(-4000.0000008));
            (fp!(-1000.0000002), fp!(-3000.0000006), fp!(2000.0000004));
            (fp!(4611686018.427387903), fp!(-4611686018.427387903), fp!(9223372036.854775806));
        },
        fp128 {
            (fp!(0), fp!(3000000000000.0000000000000006), fp!(-3000000000000.0000000000000006));
            (fp!(-1000000000000.0000000000000002), fp!(0), fp!(-1000000000000.0000000000000002));
            (fp!(-1000000000000.0000000000000002), fp!(3000000000000.0000000000000006), fp!(-4000000000000.0000000000000008));
            (fp!(-1000000000000.0000000000000002), fp!(-3000000000000.0000000000000006), fp!(2000000000000.0000000000000004));
            (fp!(4611686018000000000.000000000427387903), fp!(-4611686018000000000.000000000427387903), fp!(9223372036000000000.000000000854775806));
        },
    };
    test_fixed_point! {
        case (a | FixedPoint, b | FixedPoint, expected | FixedPoint) => {
            assert_eq!(a.saturating_sub(b), expected);
        },
        fp64 {
            (fp!(9222222222), fp!(-9222222222), FixedPoint::MAX);
            (fp!(4611686019), fp!(-4611686018.27387903), FixedPoint::MAX);
            (fp!(-9222222222), fp!(9222222222), FixedPoint::MIN);
            (fp!(-4611686019), fp!(4611686018.47387903), FixedPoint::MIN);
        },
        fp128 {
            (fp!(85550005550005550005), fp!(-85550005550005550005), FixedPoint::MAX);
            (fp!(85550005550005550005), fp!(-85550005550005550005.000000000427387903), FixedPoint::MAX);
            (fp!(-85550005550005550005), fp!(85550005550005550005), FixedPoint::MIN);
            (fp!(-85550005550005550005), fp!(85550005550005550005.000000000427387903), FixedPoint::MIN);
        },
    };
    Ok(())
}

#[test]
fn sqrt_exact() -> Result<()> {
    test_fixed_point! {
        case (expected | FixedPoint) => {
            let square = expected.rmul(expected, Floor)?;
            assert_eq!(expected.rmul(expected, Ceil)?, square);
            assert_eq!(square.rsqrt(Floor)?, expected);
            assert_eq!(square.rsqrt(Ceil)?, expected);
        },
        all {
            (fp!(0));
            (fp!(1));
            (fp!(2));
            (fp!(3));
            (fp!(1000));
            (fp!(96038));
            (FixedPoint::MAX.rsqrt(Floor)?.integral(Floor).try_into()?);
        },
        fp128 {
            (fp!(10431725));
            (fp!(13043817825));
        },
    };
    Ok(())
}

#[test]
fn sqrt_approx() -> Result<()> {
    test_fixed_point! {
        case (x | FixedPoint, expected_floor | FixedPoint) => {
            assert_eq!(x.rsqrt(Floor)?, expected_floor);
            assert_eq!(x.rsqrt(Ceil)?.inner, expected_floor.inner + 1);
        },
        fp64 {
            (fp!(2), fp!(1.414213562));
            (FixedPoint::MAX, fp!(96038.388349944));
        },
        fp128 {
            (fp!(2), fp!(1.414213562373095048));
            (fp!(3.14159265358979323), fp!(1.772453850905516024));
            (fp!(5), fp!(2.236067977499789696));
            (FixedPoint::MAX, fp!(13043817825.332782212349571806));
        },
    };
    Ok(())
}

#[test]
fn sqrt_negative() -> Result<()> {
    test_fixed_point! {
        case (x | FixedPoint) => {
            let expected = Err(ArithmeticError::DomainViolation);
            assert_eq!(x.rsqrt(Floor), expected);
            assert_eq!(x.rsqrt(Ceil), expected);
        },
        all {
            (fp!(-1));
            (FixedPoint::EPSILON.cneg()?);
            (FixedPoint::MIN);
        },
    };
    Ok(())
}

#[test]
fn const_fn() {
    let test_cases = trybuild::TestCases::new();
    test_cases.compile_fail(
        "src/tests/const_fn/01_fixnum_const_bad_str_with_too_long_fractional_part.rs",
    );
}
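// Editor's sketch (not part of the test suite above): a minimal standalone
// illustration of the saturating semantics these tests verify, modeling a
// hypothetical fixed-point value as an i64 mantissa with 9 implied decimal
// places. All names here are ours, not the crate's.
#[cfg(test)]
mod saturating_semantics_sketch {
    /// 1.5 is stored as 1_500_000_000 (scale = 10^9).
    const SCALE: i64 = 1_000_000_000;

    /// Both mantissas share one scale, so plain integer saturation models
    /// the clamp-to-MIN/MAX behavior exercised by `saturating_add` above.
    fn saturating_add_fp(a: i64, b: i64) -> i64 {
        a.saturating_add(b)
    }

    #[test]
    fn clamps_at_the_representable_bounds() {
        assert_eq!(saturating_add_fp(SCALE, SCALE / 2), 3 * SCALE / 2); // 1.0 + 0.5 == 1.5
        assert_eq!(saturating_add_fp(i64::MAX, SCALE), i64::MAX); // saturates at MAX
        assert_eq!(saturating_add_fp(i64::MIN, -SCALE), i64::MIN); // saturates at MIN
    }
}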
37.035543
149
0.514188
f40f6ebd209b01506f9be5f7a9be451528d366ea
591
// Note: this requires the `cargo` feature

use clap::{arg, command};

fn main() {
    let matches = cmd().get_matches();

    // Note, it's safe to call unwrap() because the arg is required
    let port: usize = matches
        .value_of_t("PORT")
        .expect("'PORT' is required and parsing will fail if it's missing");
    println!("PORT = {}", port);
}

fn cmd() -> clap::Command<'static> {
    command!().arg(
        arg!(<PORT>)
            .help("Network port to use")
            .validator(|s| s.parse::<usize>()),
    )
}

#[test]
fn verify_app() {
    cmd().debug_assert();
}
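// Editor's sketch (not from the upstream example): exercising the validator
// above in unit tests via clap 3.x's `Command::try_get_matches_from`, which
// returns a Result instead of exiting the process on bad input.
#[test]
fn validates_port_argument() {
    // "abc" fails the `s.parse::<usize>()` validator, so matching errors out.
    assert!(cmd().try_get_matches_from(["prog", "abc"]).is_err());
    // A numeric port passes validation.
    assert!(cmd().try_get_matches_from(["prog", "8080"]).is_ok());
}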
21.888889
75
0.563452
ed9ea1d1b685082e52a1e0496dd80cb5284d93eb
400,633
#![doc = "generated by AutoRust 0.1.0"] #![allow(unused_mut)] #![allow(unused_variables)] #![allow(unused_imports)] use super::{models, API_VERSION}; #[non_exhaustive] #[derive(Debug, thiserror :: Error)] #[allow(non_camel_case_types)] pub enum Error { #[error(transparent)] DatabaseAccounts_Get(#[from] database_accounts::get::Error), #[error(transparent)] DatabaseAccounts_CreateOrUpdate(#[from] database_accounts::create_or_update::Error), #[error(transparent)] DatabaseAccounts_Update(#[from] database_accounts::update::Error), #[error(transparent)] DatabaseAccounts_Delete(#[from] database_accounts::delete::Error), #[error(transparent)] DatabaseAccounts_FailoverPriorityChange(#[from] database_accounts::failover_priority_change::Error), #[error(transparent)] DatabaseAccounts_List(#[from] database_accounts::list::Error), #[error(transparent)] DatabaseAccounts_ListByResourceGroup(#[from] database_accounts::list_by_resource_group::Error), #[error(transparent)] DatabaseAccounts_ListKeys(#[from] database_accounts::list_keys::Error), #[error(transparent)] DatabaseAccounts_ListConnectionStrings(#[from] database_accounts::list_connection_strings::Error), #[error(transparent)] DatabaseAccounts_OfflineRegion(#[from] database_accounts::offline_region::Error), #[error(transparent)] DatabaseAccounts_OnlineRegion(#[from] database_accounts::online_region::Error), #[error(transparent)] DatabaseAccounts_GetReadOnlyKeys(#[from] database_accounts::get_read_only_keys::Error), #[error(transparent)] DatabaseAccounts_ListReadOnlyKeys(#[from] database_accounts::list_read_only_keys::Error), #[error(transparent)] DatabaseAccounts_RegenerateKey(#[from] database_accounts::regenerate_key::Error), #[error(transparent)] DatabaseAccounts_CheckNameExists(#[from] database_accounts::check_name_exists::Error), #[error(transparent)] Operations_List(#[from] operations::list::Error), #[error(transparent)] DatabaseAccounts_ListMetrics(#[from] database_accounts::list_metrics::Error), #[error(transparent)] Database_ListMetrics(#[from] database::list_metrics::Error), #[error(transparent)] Collection_ListMetrics(#[from] collection::list_metrics::Error), #[error(transparent)] CollectionRegion_ListMetrics(#[from] collection_region::list_metrics::Error), #[error(transparent)] DatabaseAccountRegion_ListMetrics(#[from] database_account_region::list_metrics::Error), #[error(transparent)] PercentileSourceTarget_ListMetrics(#[from] percentile_source_target::list_metrics::Error), #[error(transparent)] PercentileTarget_ListMetrics(#[from] percentile_target::list_metrics::Error), #[error(transparent)] Percentile_ListMetrics(#[from] percentile::list_metrics::Error), #[error(transparent)] CollectionPartitionRegion_ListMetrics(#[from] collection_partition_region::list_metrics::Error), #[error(transparent)] CollectionPartition_ListMetrics(#[from] collection_partition::list_metrics::Error), #[error(transparent)] PartitionKeyRangeId_ListMetrics(#[from] partition_key_range_id::list_metrics::Error), #[error(transparent)] PartitionKeyRangeIdRegion_ListMetrics(#[from] partition_key_range_id_region::list_metrics::Error), #[error(transparent)] DatabaseAccounts_ListUsages(#[from] database_accounts::list_usages::Error), #[error(transparent)] Database_ListUsages(#[from] database::list_usages::Error), #[error(transparent)] Collection_ListUsages(#[from] collection::list_usages::Error), #[error(transparent)] CollectionPartition_ListUsages(#[from] collection_partition::list_usages::Error), #[error(transparent)] Database_ListMetricDefinitions(#[from] 
database::list_metric_definitions::Error), #[error(transparent)] Collection_ListMetricDefinitions(#[from] collection::list_metric_definitions::Error), #[error(transparent)] DatabaseAccounts_ListMetricDefinitions(#[from] database_accounts::list_metric_definitions::Error), #[error(transparent)] SqlResources_ListSqlDatabases(#[from] sql_resources::list_sql_databases::Error), #[error(transparent)] SqlResources_GetSqlDatabase(#[from] sql_resources::get_sql_database::Error), #[error(transparent)] SqlResources_CreateUpdateSqlDatabase(#[from] sql_resources::create_update_sql_database::Error), #[error(transparent)] SqlResources_DeleteSqlDatabase(#[from] sql_resources::delete_sql_database::Error), #[error(transparent)] SqlResources_GetSqlDatabaseThroughput(#[from] sql_resources::get_sql_database_throughput::Error), #[error(transparent)] SqlResources_UpdateSqlDatabaseThroughput(#[from] sql_resources::update_sql_database_throughput::Error), #[error(transparent)] SqlResources_ListSqlContainers(#[from] sql_resources::list_sql_containers::Error), #[error(transparent)] SqlResources_GetSqlContainer(#[from] sql_resources::get_sql_container::Error), #[error(transparent)] SqlResources_CreateUpdateSqlContainer(#[from] sql_resources::create_update_sql_container::Error), #[error(transparent)] SqlResources_DeleteSqlContainer(#[from] sql_resources::delete_sql_container::Error), #[error(transparent)] SqlResources_GetSqlContainerThroughput(#[from] sql_resources::get_sql_container_throughput::Error), #[error(transparent)] SqlResources_UpdateSqlContainerThroughput(#[from] sql_resources::update_sql_container_throughput::Error), #[error(transparent)] SqlResources_ListSqlStoredProcedures(#[from] sql_resources::list_sql_stored_procedures::Error), #[error(transparent)] SqlResources_GetSqlStoredProcedure(#[from] sql_resources::get_sql_stored_procedure::Error), #[error(transparent)] SqlResources_CreateUpdateSqlStoredProcedure(#[from] sql_resources::create_update_sql_stored_procedure::Error), #[error(transparent)] SqlResources_DeleteSqlStoredProcedure(#[from] sql_resources::delete_sql_stored_procedure::Error), #[error(transparent)] SqlResources_ListSqlUserDefinedFunctions(#[from] sql_resources::list_sql_user_defined_functions::Error), #[error(transparent)] SqlResources_GetSqlUserDefinedFunction(#[from] sql_resources::get_sql_user_defined_function::Error), #[error(transparent)] SqlResources_CreateUpdateSqlUserDefinedFunction(#[from] sql_resources::create_update_sql_user_defined_function::Error), #[error(transparent)] SqlResources_DeleteSqlUserDefinedFunction(#[from] sql_resources::delete_sql_user_defined_function::Error), #[error(transparent)] SqlResources_ListSqlTriggers(#[from] sql_resources::list_sql_triggers::Error), #[error(transparent)] SqlResources_GetSqlTrigger(#[from] sql_resources::get_sql_trigger::Error), #[error(transparent)] SqlResources_CreateUpdateSqlTrigger(#[from] sql_resources::create_update_sql_trigger::Error), #[error(transparent)] SqlResources_DeleteSqlTrigger(#[from] sql_resources::delete_sql_trigger::Error), #[error(transparent)] MongoDbResources_ListMongoDbDatabases(#[from] mongo_db_resources::list_mongo_db_databases::Error), #[error(transparent)] MongoDbResources_GetMongoDbDatabase(#[from] mongo_db_resources::get_mongo_db_database::Error), #[error(transparent)] MongoDbResources_CreateUpdateMongoDbDatabase(#[from] mongo_db_resources::create_update_mongo_db_database::Error), #[error(transparent)] MongoDbResources_DeleteMongoDbDatabase(#[from] mongo_db_resources::delete_mongo_db_database::Error), 
#[error(transparent)] MongoDbResources_GetMongoDbDatabaseThroughput(#[from] mongo_db_resources::get_mongo_db_database_throughput::Error), #[error(transparent)] MongoDbResources_UpdateMongoDbDatabaseThroughput(#[from] mongo_db_resources::update_mongo_db_database_throughput::Error), #[error(transparent)] MongoDbResources_ListMongoDbCollections(#[from] mongo_db_resources::list_mongo_db_collections::Error), #[error(transparent)] MongoDbResources_GetMongoDbCollection(#[from] mongo_db_resources::get_mongo_db_collection::Error), #[error(transparent)] MongoDbResources_CreateUpdateMongoDbCollection(#[from] mongo_db_resources::create_update_mongo_db_collection::Error), #[error(transparent)] MongoDbResources_DeleteMongoDbCollection(#[from] mongo_db_resources::delete_mongo_db_collection::Error), #[error(transparent)] MongoDbResources_GetMongoDbCollectionThroughput(#[from] mongo_db_resources::get_mongo_db_collection_throughput::Error), #[error(transparent)] MongoDbResources_UpdateMongoDbCollectionThroughput(#[from] mongo_db_resources::update_mongo_db_collection_throughput::Error), #[error(transparent)] TableResources_ListTables(#[from] table_resources::list_tables::Error), #[error(transparent)] TableResources_GetTable(#[from] table_resources::get_table::Error), #[error(transparent)] TableResources_CreateUpdateTable(#[from] table_resources::create_update_table::Error), #[error(transparent)] TableResources_DeleteTable(#[from] table_resources::delete_table::Error), #[error(transparent)] TableResources_GetTableThroughput(#[from] table_resources::get_table_throughput::Error), #[error(transparent)] TableResources_UpdateTableThroughput(#[from] table_resources::update_table_throughput::Error), #[error(transparent)] CassandraResources_ListCassandraKeyspaces(#[from] cassandra_resources::list_cassandra_keyspaces::Error), #[error(transparent)] CassandraResources_GetCassandraKeyspace(#[from] cassandra_resources::get_cassandra_keyspace::Error), #[error(transparent)] CassandraResources_CreateUpdateCassandraKeyspace(#[from] cassandra_resources::create_update_cassandra_keyspace::Error), #[error(transparent)] CassandraResources_DeleteCassandraKeyspace(#[from] cassandra_resources::delete_cassandra_keyspace::Error), #[error(transparent)] CassandraResources_GetCassandraKeyspaceThroughput(#[from] cassandra_resources::get_cassandra_keyspace_throughput::Error), #[error(transparent)] CassandraResources_UpdateCassandraKeyspaceThroughput(#[from] cassandra_resources::update_cassandra_keyspace_throughput::Error), #[error(transparent)] CassandraResources_ListCassandraTables(#[from] cassandra_resources::list_cassandra_tables::Error), #[error(transparent)] CassandraResources_GetCassandraTable(#[from] cassandra_resources::get_cassandra_table::Error), #[error(transparent)] CassandraResources_CreateUpdateCassandraTable(#[from] cassandra_resources::create_update_cassandra_table::Error), #[error(transparent)] CassandraResources_DeleteCassandraTable(#[from] cassandra_resources::delete_cassandra_table::Error), #[error(transparent)] CassandraResources_GetCassandraTableThroughput(#[from] cassandra_resources::get_cassandra_table_throughput::Error), #[error(transparent)] CassandraResources_UpdateCassandraTableThroughput(#[from] cassandra_resources::update_cassandra_table_throughput::Error), #[error(transparent)] GremlinResources_ListGremlinDatabases(#[from] gremlin_resources::list_gremlin_databases::Error), #[error(transparent)] GremlinResources_GetGremlinDatabase(#[from] gremlin_resources::get_gremlin_database::Error), #[error(transparent)] 
GremlinResources_CreateUpdateGremlinDatabase(#[from] gremlin_resources::create_update_gremlin_database::Error), #[error(transparent)] GremlinResources_DeleteGremlinDatabase(#[from] gremlin_resources::delete_gremlin_database::Error), #[error(transparent)] GremlinResources_GetGremlinDatabaseThroughput(#[from] gremlin_resources::get_gremlin_database_throughput::Error), #[error(transparent)] GremlinResources_UpdateGremlinDatabaseThroughput(#[from] gremlin_resources::update_gremlin_database_throughput::Error), #[error(transparent)] GremlinResources_ListGremlinGraphs(#[from] gremlin_resources::list_gremlin_graphs::Error), #[error(transparent)] GremlinResources_GetGremlinGraph(#[from] gremlin_resources::get_gremlin_graph::Error), #[error(transparent)] GremlinResources_CreateUpdateGremlinGraph(#[from] gremlin_resources::create_update_gremlin_graph::Error), #[error(transparent)] GremlinResources_DeleteGremlinGraph(#[from] gremlin_resources::delete_gremlin_graph::Error), #[error(transparent)] GremlinResources_GetGremlinGraphThroughput(#[from] gremlin_resources::get_gremlin_graph_throughput::Error), #[error(transparent)] GremlinResources_UpdateGremlinGraphThroughput(#[from] gremlin_resources::update_gremlin_graph_throughput::Error), #[error(transparent)] NotebookWorkspaces_ListByDatabaseAccount(#[from] notebook_workspaces::list_by_database_account::Error), #[error(transparent)] NotebookWorkspaces_Get(#[from] notebook_workspaces::get::Error), #[error(transparent)] NotebookWorkspaces_CreateOrUpdate(#[from] notebook_workspaces::create_or_update::Error), #[error(transparent)] NotebookWorkspaces_Delete(#[from] notebook_workspaces::delete::Error), #[error(transparent)] NotebookWorkspaces_ListConnectionInfo(#[from] notebook_workspaces::list_connection_info::Error), #[error(transparent)] NotebookWorkspaces_RegenerateAuthToken(#[from] notebook_workspaces::regenerate_auth_token::Error), #[error(transparent)] NotebookWorkspaces_Start(#[from] notebook_workspaces::start::Error), } pub mod database_accounts { use super::{models, API_VERSION}; pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<models::DatabaseAccountGetResults, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::DatabaseAccountGetResults = serde_json::from_slice(rsp_body).map_err(|source| 
get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(get::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, create_update_parameters: &models::DatabaseAccountCreateUpdateParameters, ) -> std::result::Result<models::DatabaseAccountGetResults, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(create_update_parameters).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::DatabaseAccountGetResults = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(create_or_update::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod create_or_update { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, 
body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, update_parameters: &models::DatabaseAccountUpdateParameters, ) -> std::result::Result<models::DatabaseAccountGetResults, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(update_parameters).map_err(update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::DatabaseAccountGetResults = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(update::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod update { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential 
.get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); Err(delete::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn failover_priority_change( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, failover_parameters: &models::FailoverPolicies, ) -> std::result::Result<failover_priority_change::Response, failover_priority_change::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/failoverPriorityChange", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(failover_priority_change::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(failover_priority_change::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(failover_parameters).map_err(failover_priority_change::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(failover_priority_change::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(failover_priority_change::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(failover_priority_change::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(failover_priority_change::Response::NoContent204), status_code => { let rsp_body = rsp.body(); 
Err(failover_priority_change::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod failover_priority_change { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<models::DatabaseAccountsListResult, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.DocumentDB/databaseAccounts", operation_config.base_path(), subscription_id ); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::DatabaseAccountsListResult = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, resource_group_name: &str, subscription_id: &str, ) -> std::result::Result<models::DatabaseAccountsListResult, list_by_resource_group::Error> { let http_client = 
operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts", operation_config.base_path(), subscription_id, resource_group_name ); let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_resource_group::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_resource_group::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_resource_group::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::DatabaseAccountsListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_by_resource_group::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_by_resource_group { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_keys( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<models::DatabaseAccountListKeysResult, list_keys::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/listKeys", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(list_keys::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_keys::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); 
req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_keys::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_keys::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::DatabaseAccountListKeysResult = serde_json::from_slice(rsp_body).map_err(|source| list_keys::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_keys::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_keys { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_connection_strings( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<models::DatabaseAccountListConnectionStringsResult, list_connection_strings::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/listConnectionStrings", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(list_connection_strings::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_connection_strings::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_connection_strings::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_connection_strings::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::DatabaseAccountListConnectionStringsResult = serde_json::from_slice(rsp_body) .map_err(|source| list_connection_strings::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_connection_strings::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_connection_strings { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse 
{ status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn offline_region( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, region_parameter_for_offline: &models::RegionForOnlineOffline, ) -> std::result::Result<offline_region::Response, offline_region::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/offlineRegion", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(offline_region::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(offline_region::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(region_parameter_for_offline).map_err(offline_region::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(offline_region::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(offline_region::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(offline_region::Response::Ok200), http::StatusCode::ACCEPTED => Ok(offline_region::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| offline_region::Error::DeserializeError(source, rsp_body.clone()))?; Err(offline_region::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod offline_region { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn online_region( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, 
region_parameter_for_online: &models::RegionForOnlineOffline, ) -> std::result::Result<online_region::Response, online_region::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/onlineRegion", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(online_region::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(online_region::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(region_parameter_for_online).map_err(online_region::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(online_region::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(online_region::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(online_region::Response::Ok200), http::StatusCode::ACCEPTED => Ok(online_region::Response::Accepted202), status_code => { let rsp_body = rsp.body(); let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| online_region::Error::DeserializeError(source, rsp_body.clone()))?; Err(online_region::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod online_region { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get_read_only_keys( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<models::DatabaseAccountListReadOnlyKeysResult, get_read_only_keys::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/readonlykeys", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(get_read_only_keys::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await 
.map_err(get_read_only_keys::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get_read_only_keys::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_read_only_keys::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::DatabaseAccountListReadOnlyKeysResult = serde_json::from_slice(rsp_body) .map_err(|source| get_read_only_keys::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(get_read_only_keys::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod get_read_only_keys { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_read_only_keys( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<models::DatabaseAccountListReadOnlyKeysResult, list_read_only_keys::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/readonlykeys", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(list_read_only_keys::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_read_only_keys::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_read_only_keys::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_read_only_keys::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::DatabaseAccountListReadOnlyKeysResult = serde_json::from_slice(rsp_body) .map_err(|source| list_read_only_keys::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = 
rsp.body(); Err(list_read_only_keys::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_read_only_keys { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn regenerate_key( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, key_to_regenerate: &models::DatabaseAccountRegenerateKeyParameters, ) -> std::result::Result<regenerate_key::Response, regenerate_key::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/regenerateKey", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(regenerate_key::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(regenerate_key::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(key_to_regenerate).map_err(regenerate_key::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(regenerate_key::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(regenerate_key::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(regenerate_key::Response::Ok200), http::StatusCode::ACCEPTED => Ok(regenerate_key::Response::Accepted202), status_code => { let rsp_body = rsp.body(); Err(regenerate_key::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod regenerate_key { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } 
pub async fn check_name_exists( operation_config: &crate::OperationConfig, account_name: &str, ) -> std::result::Result<(), check_name_exists::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/providers/Microsoft.DocumentDB/databaseAccountNames/{}", operation_config.base_path(), account_name ); let mut url = url::Url::parse(url_str).map_err(check_name_exists::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::HEAD); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(check_name_exists::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(check_name_exists::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(check_name_exists::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(()), http::StatusCode::NOT_FOUND => Err(check_name_exists::Error::NotFound404 {}), status_code => { let rsp_body = rsp.body(); Err(check_name_exists::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod check_name_exists { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Error response #response_type")] NotFound404 {}, #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_metrics( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, filter: &str, ) -> std::result::Result<models::MetricListResult, list_metrics::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/metrics", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(list_metrics::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_metrics::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); url.query_pairs_mut().append_pair("$filter", filter); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); 
req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_metrics::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_metrics::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::MetricListResult = serde_json::from_slice(rsp_body).map_err(|source| list_metrics::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_metrics::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_metrics { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_usages( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, filter: Option<&str>, ) -> std::result::Result<models::UsagesResult, list_usages::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/usages", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(list_usages::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_usages::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_usages::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_usages::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::UsagesResult = serde_json::from_slice(rsp_body).map_err(|source| list_usages::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_usages::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_usages { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), 
#[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_metric_definitions( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<models::MetricDefinitionsListResult, list_metric_definitions::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/metricDefinitions", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(list_metric_definitions::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_metric_definitions::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_metric_definitions::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_metric_definitions::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::MetricDefinitionsListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_metric_definitions::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_metric_definitions::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_metric_definitions { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod operations { use super::{models, API_VERSION}; pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<models::OperationListResult, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!("{}/providers/Microsoft.DocumentDB/operations", operation_config.base_path(),); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); 
req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::OperationListResult = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod database { use super::{models, API_VERSION}; pub async fn list_metrics( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_rid: &str, filter: &str, ) -> std::result::Result<models::MetricListResult, list_metrics::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/databases/{}/metrics", operation_config.base_path(), subscription_id, resource_group_name, account_name, database_rid ); let mut url = url::Url::parse(url_str).map_err(list_metrics::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_metrics::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); url.query_pairs_mut().append_pair("$filter", filter); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_metrics::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_metrics::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::MetricListResult = 
serde_json::from_slice(rsp_body).map_err(|source| list_metrics::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_metrics::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_metrics { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_usages( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_rid: &str, filter: Option<&str>, ) -> std::result::Result<models::UsagesResult, list_usages::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/databases/{}/usages", operation_config.base_path(), subscription_id, resource_group_name, account_name, database_rid ); let mut url = url::Url::parse(url_str).map_err(list_usages::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_usages::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_usages::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_usages::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::UsagesResult = serde_json::from_slice(rsp_body).map_err(|source| list_usages::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_usages::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_usages { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] 
DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_metric_definitions( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_rid: &str, ) -> std::result::Result<models::MetricDefinitionsListResult, list_metric_definitions::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/databases/{}/metricDefinitions", operation_config.base_path(), subscription_id, resource_group_name, account_name, database_rid ); let mut url = url::Url::parse(url_str).map_err(list_metric_definitions::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_metric_definitions::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_metric_definitions::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_metric_definitions::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::MetricDefinitionsListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_metric_definitions::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_metric_definitions::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_metric_definitions { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod collection { use super::{models, API_VERSION}; pub async fn list_metrics( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_rid: &str, collection_rid: &str, filter: &str, ) -> std::result::Result<models::MetricListResult, list_metrics::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/databases/{}/collections/{}/metrics", operation_config.base_path(), subscription_id, resource_group_name, account_name, database_rid, collection_rid ); let mut url = 
url::Url::parse(url_str).map_err(list_metrics::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_metrics::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); url.query_pairs_mut().append_pair("$filter", filter); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_metrics::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_metrics::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::MetricListResult = serde_json::from_slice(rsp_body).map_err(|source| list_metrics::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_metrics::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_metrics { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_usages( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_rid: &str, collection_rid: &str, filter: Option<&str>, ) -> std::result::Result<models::UsagesResult, list_usages::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/databases/{}/collections/{}/usages", operation_config.base_path(), subscription_id, resource_group_name, account_name, database_rid, collection_rid ); let mut url = url::Url::parse(url_str).map_err(list_usages::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_usages::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_usages::Error::BuildRequestError)?; let 
rsp = http_client .execute_request(req) .await .map_err(list_usages::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::UsagesResult = serde_json::from_slice(rsp_body).map_err(|source| list_usages::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_usages::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_usages { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_metric_definitions( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_rid: &str, collection_rid: &str, ) -> std::result::Result<models::MetricDefinitionsListResult, list_metric_definitions::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/databases/{}/collections/{}/metricDefinitions", operation_config.base_path(), subscription_id, resource_group_name, account_name, database_rid, collection_rid ); let mut url = url::Url::parse(url_str).map_err(list_metric_definitions::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_metric_definitions::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_metric_definitions::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_metric_definitions::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::MetricDefinitionsListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_metric_definitions::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_metric_definitions::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_metric_definitions { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod collection_region { use super::{models, API_VERSION}; pub async fn list_metrics( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, region: &str, database_rid: &str, collection_rid: &str, filter: &str, ) -> std::result::Result<models::MetricListResult, list_metrics::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/region/{}/databases/{}/collections/{}/metrics" , operation_config . base_path () , subscription_id , resource_group_name , account_name , region , database_rid , collection_rid) ; let mut url = url::Url::parse(url_str).map_err(list_metrics::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_metrics::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); url.query_pairs_mut().append_pair("$filter", filter); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_metrics::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_metrics::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::MetricListResult = serde_json::from_slice(rsp_body).map_err(|source| list_metrics::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_metrics::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_metrics { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod database_account_region { use super::{models, API_VERSION}; pub async fn list_metrics( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, region: &str, filter: &str, ) -> std::result::Result<models::MetricListResult, list_metrics::Error> { let 
http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/region/{}/metrics", operation_config.base_path(), subscription_id, resource_group_name, account_name, region ); let mut url = url::Url::parse(url_str).map_err(list_metrics::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_metrics::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); url.query_pairs_mut().append_pair("$filter", filter); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_metrics::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_metrics::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::MetricListResult = serde_json::from_slice(rsp_body).map_err(|source| list_metrics::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_metrics::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_metrics { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod percentile_source_target { use super::{models, API_VERSION}; pub async fn list_metrics( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, source_region: &str, target_region: &str, filter: &str, ) -> std::result::Result<models::PercentileMetricListResult, list_metrics::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sourceRegion/{}/targetRegion/{}/percentile/metrics" , operation_config . 
base_path () , subscription_id , resource_group_name , account_name , source_region , target_region) ; let mut url = url::Url::parse(url_str).map_err(list_metrics::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_metrics::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); url.query_pairs_mut().append_pair("$filter", filter); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_metrics::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_metrics::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::PercentileMetricListResult = serde_json::from_slice(rsp_body).map_err(|source| list_metrics::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_metrics::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_metrics { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod percentile_target { use super::{models, API_VERSION}; pub async fn list_metrics( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, target_region: &str, filter: &str, ) -> std::result::Result<models::PercentileMetricListResult, list_metrics::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/targetRegion/{}/percentile/metrics", operation_config.base_path(), subscription_id, resource_group_name, account_name, target_region ); let mut url = url::Url::parse(url_str).map_err(list_metrics::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_metrics::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); url.query_pairs_mut().append_pair("$filter", filter); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); 
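// The three percentile variants (source/target region, target region only, and the
// account-wide module below) differ only in the URL segment and all deserialize into
// `PercentileMetricListResult`; the request shape is identical to the plain metrics calls.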
req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_metrics::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_metrics::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::PercentileMetricListResult = serde_json::from_slice(rsp_body).map_err(|source| list_metrics::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_metrics::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_metrics { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod percentile { use super::{models, API_VERSION}; pub async fn list_metrics( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, filter: &str, ) -> std::result::Result<models::PercentileMetricListResult, list_metrics::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/percentile/metrics", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(list_metrics::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_metrics::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); url.query_pairs_mut().append_pair("$filter", filter); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_metrics::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_metrics::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::PercentileMetricListResult = serde_json::from_slice(rsp_body).map_err(|source| list_metrics::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_metrics::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_metrics { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, 
#[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod collection_partition_region { use super::{models, API_VERSION}; pub async fn list_metrics( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, region: &str, database_rid: &str, collection_rid: &str, filter: &str, ) -> std::result::Result<models::PartitionMetricListResult, list_metrics::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/region/{}/databases/{}/collections/{}/partitions/metrics" , operation_config . base_path () , subscription_id , resource_group_name , account_name , region , database_rid , collection_rid) ; let mut url = url::Url::parse(url_str).map_err(list_metrics::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_metrics::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); url.query_pairs_mut().append_pair("$filter", filter); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_metrics::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_metrics::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::PartitionMetricListResult = serde_json::from_slice(rsp_body).map_err(|source| list_metrics::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_metrics::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_metrics { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod collection_partition { use super::{models, API_VERSION}; pub async fn list_metrics( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, 
database_rid: &str, collection_rid: &str, filter: &str, ) -> std::result::Result<models::PartitionMetricListResult, list_metrics::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/databases/{}/collections/{}/partitions/metrics", operation_config.base_path(), subscription_id, resource_group_name, account_name, database_rid, collection_rid ); let mut url = url::Url::parse(url_str).map_err(list_metrics::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_metrics::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); url.query_pairs_mut().append_pair("$filter", filter); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_metrics::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_metrics::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::PartitionMetricListResult = serde_json::from_slice(rsp_body).map_err(|source| list_metrics::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_metrics::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_metrics { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_usages( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_rid: &str, collection_rid: &str, filter: Option<&str>, ) -> std::result::Result<models::PartitionUsagesResult, list_usages::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/databases/{}/collections/{}/partitions/usages" , operation_config .
base_path () , subscription_id , resource_group_name , account_name , database_rid , collection_rid) ; let mut url = url::Url::parse(url_str).map_err(list_usages::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_usages::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_usages::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_usages::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::PartitionUsagesResult = serde_json::from_slice(rsp_body).map_err(|source| list_usages::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_usages::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_usages { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod partition_key_range_id { use super::{models, API_VERSION}; pub async fn list_metrics( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_rid: &str, collection_rid: &str, partition_key_range_id: &str, filter: &str, ) -> std::result::Result<models::PartitionMetricListResult, list_metrics::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/databases/{}/collections/{}/partitionKeyRangeId/{}/metrics" , operation_config . 
base_path () , subscription_id , resource_group_name , account_name , database_rid , collection_rid , partition_key_range_id) ; let mut url = url::Url::parse(url_str).map_err(list_metrics::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_metrics::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); url.query_pairs_mut().append_pair("$filter", filter); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_metrics::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_metrics::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::PartitionMetricListResult = serde_json::from_slice(rsp_body).map_err(|source| list_metrics::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_metrics::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_metrics { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod partition_key_range_id_region { use super::{models, API_VERSION}; pub async fn list_metrics( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, region: &str, database_rid: &str, collection_rid: &str, partition_key_range_id: &str, filter: &str, ) -> std::result::Result<models::PartitionMetricListResult, list_metrics::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/region/{}/databases/{}/collections/{}/partitionKeyRangeId/{}/metrics" , operation_config . 
base_path () , subscription_id , resource_group_name , account_name , region , database_rid , collection_rid , partition_key_range_id) ; let mut url = url::Url::parse(url_str).map_err(list_metrics::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_metrics::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); url.query_pairs_mut().append_pair("$filter", filter); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_metrics::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_metrics::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::PartitionMetricListResult = serde_json::from_slice(rsp_body).map_err(|source| list_metrics::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_metrics::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_metrics { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod sql_resources { use super::{models, API_VERSION}; pub async fn list_sql_databases( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<models::SqlDatabaseListResult, list_sql_databases::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(list_sql_databases::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_sql_databases::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = 
req_builder.body(req_body).map_err(list_sql_databases::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_sql_databases::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::SqlDatabaseListResult = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_sql_databases::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(list_sql_databases::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    pub mod list_sql_databases {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    pub async fn get_sql_database(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        database_name: &str,
    ) -> std::result::Result<models::SqlDatabaseGetResults, get_sql_database::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            database_name
        );
        let mut url = url::Url::parse(url_str).map_err(get_sql_database::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get_sql_database::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get_sql_database::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(get_sql_database::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::SqlDatabaseGetResults = serde_json::from_slice(rsp_body)
                    .map_err(|source| get_sql_database::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(get_sql_database::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    pub mod get_sql_database {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    pub async fn create_update_sql_database(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        database_name: &str,
        create_update_sql_database_parameters: &models::SqlDatabaseCreateUpdateParameters,
    ) -> std::result::Result<create_update_sql_database::Response, create_update_sql_database::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            database_name
        );
        let mut url = url::Url::parse(url_str).map_err(create_update_sql_database::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(create_update_sql_database::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(create_update_sql_database_parameters).map_err(create_update_sql_database::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(create_update_sql_database::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(create_update_sql_database::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::ACCEPTED => Ok(create_update_sql_database::Response::Accepted202),
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::SqlDatabaseGetResults = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_update_sql_database::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create_update_sql_database::Response::Ok200(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(create_update_sql_database::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    pub mod create_update_sql_database {
        use super::{models, API_VERSION};
        #[derive(Debug)]
        pub enum Response {
            Accepted202,
            Ok200(models::SqlDatabaseGetResults),
        }
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error), } } pub async fn delete_sql_database( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, ) -> std::result::Result<delete_sql_database::Response, delete_sql_database::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, database_name ); let mut url = url::Url::parse(url_str).map_err(delete_sql_database::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete_sql_database::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete_sql_database::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(delete_sql_database::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(delete_sql_database::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete_sql_database::Response::NoContent204), status_code => { let rsp_body = rsp.body(); Err(delete_sql_database::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod delete_sql_database { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get_sql_database_throughput( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, ) -> std::result::Result<models::ThroughputSettingsGetResults, get_sql_database_throughput::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases/{}/throughputSettings/default" , operation_config . 
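        // Database-level throughput is a singleton child resource at ".../throughputSettings/default".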
base_path () , subscription_id , resource_group_name , account_name , database_name) ; let mut url = url::Url::parse(url_str).map_err(get_sql_database_throughput::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get_sql_database_throughput::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(get_sql_database_throughput::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_sql_database_throughput::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::ThroughputSettingsGetResults = serde_json::from_slice(rsp_body) .map_err(|source| get_sql_database_throughput::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(get_sql_database_throughput::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod get_sql_database_throughput { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update_sql_database_throughput( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, update_throughput_parameters: &models::ThroughputSettingsUpdateParameters, ) -> std::result::Result<update_sql_database_throughput::Response, update_sql_database_throughput::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases/{}/throughputSettings/default" , operation_config . 
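        // The update PUTs ThroughputSettingsUpdateParameters back to the same "default" settings endpoint.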
base_path () , subscription_id , resource_group_name , account_name , database_name) ; let mut url = url::Url::parse(url_str).map_err(update_sql_database_throughput::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update_sql_database_throughput::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(update_throughput_parameters).map_err(update_sql_database_throughput::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(update_sql_database_throughput::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(update_sql_database_throughput::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(update_sql_database_throughput::Response::Accepted202), http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::ThroughputSettingsGetResults = serde_json::from_slice(rsp_body) .map_err(|source| update_sql_database_throughput::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update_sql_database_throughput::Response::Ok200(rsp_value)) } status_code => { let rsp_body = rsp.body(); Err(update_sql_database_throughput::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod update_sql_database_throughput { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, Ok200(models::ThroughputSettingsGetResults), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_sql_containers( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, ) -> std::result::Result<models::SqlContainerListResult, list_sql_containers::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases/{}/containers", operation_config.base_path(), subscription_id, resource_group_name, account_name, database_name ); let mut url = url::Url::parse(url_str).map_err(list_sql_containers::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) 
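            // Token acquisition is awaited per call; credential implementations may cache tokens internally.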
.await .map_err(list_sql_containers::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_sql_containers::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_sql_containers::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::SqlContainerListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_sql_containers::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_sql_containers::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_sql_containers { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get_sql_container( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, container_name: &str, ) -> std::result::Result<models::SqlContainerGetResults, get_sql_container::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases/{}/containers/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, database_name, container_name ); let mut url = url::Url::parse(url_str).map_err(get_sql_container::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get_sql_container::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get_sql_container::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_sql_container::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::SqlContainerGetResults = serde_json::from_slice(rsp_body) .map_err(|source| get_sql_container::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); 
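                // Non-200 responses are not deserialized; the raw body is kept for diagnostics.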
Err(get_sql_container::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod get_sql_container { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_update_sql_container( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, container_name: &str, create_update_sql_container_parameters: &models::SqlContainerCreateUpdateParameters, ) -> std::result::Result<create_update_sql_container::Response, create_update_sql_container::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases/{}/containers/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, database_name, container_name ); let mut url = url::Url::parse(url_str).map_err(create_update_sql_container::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_update_sql_container::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(create_update_sql_container_parameters).map_err(create_update_sql_container::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(create_update_sql_container::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_update_sql_container::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(create_update_sql_container::Response::Accepted202), http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::SqlContainerGetResults = serde_json::from_slice(rsp_body) .map_err(|source| create_update_sql_container::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_update_sql_container::Response::Ok200(rsp_value)) } status_code => { let rsp_body = rsp.body(); Err(create_update_sql_container::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod create_update_sql_container { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, Ok200(models::SqlContainerGetResults), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: 
bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete_sql_container( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, container_name: &str, ) -> std::result::Result<delete_sql_container::Response, delete_sql_container::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases/{}/containers/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, database_name, container_name ); let mut url = url::Url::parse(url_str).map_err(delete_sql_container::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete_sql_container::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete_sql_container::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(delete_sql_container::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(delete_sql_container::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete_sql_container::Response::NoContent204), status_code => { let rsp_body = rsp.body(); Err(delete_sql_container::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod delete_sql_container { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get_sql_container_throughput( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, container_name: &str, ) -> std::result::Result<models::ThroughputSettingsGetResults, get_sql_container_throughput::Error> { let http_client = 
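        // Container throughput is read from the container's "throughputSettings/default" child resource.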
operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases/{}/containers/{}/throughputSettings/default" , operation_config . base_path () , subscription_id , resource_group_name , account_name , database_name , container_name) ; let mut url = url::Url::parse(url_str).map_err(get_sql_container_throughput::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get_sql_container_throughput::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(get_sql_container_throughput::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_sql_container_throughput::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::ThroughputSettingsGetResults = serde_json::from_slice(rsp_body) .map_err(|source| get_sql_container_throughput::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(get_sql_container_throughput::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod get_sql_container_throughput { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update_sql_container_throughput( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, container_name: &str, update_throughput_parameters: &models::ThroughputSettingsUpdateParameters, ) -> std::result::Result<update_sql_container_throughput::Response, update_sql_container_throughput::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases/{}/containers/{}/throughputSettings/default" , operation_config . 
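        // Container throughput updates PUT ThroughputSettingsUpdateParameters to the same fixed "default" path.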
base_path () , subscription_id , resource_group_name , account_name , database_name , container_name) ; let mut url = url::Url::parse(url_str).map_err(update_sql_container_throughput::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update_sql_container_throughput::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(update_throughput_parameters).map_err(update_sql_container_throughput::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(update_sql_container_throughput::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(update_sql_container_throughput::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(update_sql_container_throughput::Response::Accepted202), http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::ThroughputSettingsGetResults = serde_json::from_slice(rsp_body) .map_err(|source| update_sql_container_throughput::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update_sql_container_throughput::Response::Ok200(rsp_value)) } status_code => { let rsp_body = rsp.body(); Err(update_sql_container_throughput::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod update_sql_container_throughput { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, Ok200(models::ThroughputSettingsGetResults), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_sql_stored_procedures( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, container_name: &str, ) -> std::result::Result<models::SqlStoredProcedureListResult, list_sql_stored_procedures::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases/{}/containers/{}/storedProcedures" , operation_config . 
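        // Stored procedures are scoped to a single container within a SQL database.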
base_path () , subscription_id , resource_group_name , account_name , database_name , container_name) ; let mut url = url::Url::parse(url_str).map_err(list_sql_stored_procedures::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_sql_stored_procedures::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_sql_stored_procedures::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_sql_stored_procedures::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::SqlStoredProcedureListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_sql_stored_procedures::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_sql_stored_procedures::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_sql_stored_procedures { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get_sql_stored_procedure( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, container_name: &str, stored_procedure_name: &str, ) -> std::result::Result<models::SqlStoredProcedureGetResults, get_sql_stored_procedure::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases/{}/containers/{}/storedProcedures/{}" , operation_config . 
base_path () , subscription_id , resource_group_name , account_name , database_name , container_name , stored_procedure_name) ; let mut url = url::Url::parse(url_str).map_err(get_sql_stored_procedure::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get_sql_stored_procedure::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(get_sql_stored_procedure::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_sql_stored_procedure::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::SqlStoredProcedureGetResults = serde_json::from_slice(rsp_body) .map_err(|source| get_sql_stored_procedure::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(get_sql_stored_procedure::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod get_sql_stored_procedure { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_update_sql_stored_procedure( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, container_name: &str, stored_procedure_name: &str, create_update_sql_stored_procedure_parameters: &models::SqlStoredProcedureCreateUpdateParameters, ) -> std::result::Result<create_update_sql_stored_procedure::Response, create_update_sql_stored_procedure::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases/{}/containers/{}/storedProcedures/{}" , operation_config . 
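        // PUT both creates and updates: the service upserts the stored procedure addressed by name.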
base_path () , subscription_id , resource_group_name , account_name , database_name , container_name , stored_procedure_name) ; let mut url = url::Url::parse(url_str).map_err(create_update_sql_stored_procedure::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_update_sql_stored_procedure::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(create_update_sql_stored_procedure_parameters) .map_err(create_update_sql_stored_procedure::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(create_update_sql_stored_procedure::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_update_sql_stored_procedure::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(create_update_sql_stored_procedure::Response::Accepted202), http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::SqlStoredProcedureGetResults = serde_json::from_slice(rsp_body) .map_err(|source| create_update_sql_stored_procedure::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_update_sql_stored_procedure::Response::Ok200(rsp_value)) } status_code => { let rsp_body = rsp.body(); Err(create_update_sql_stored_procedure::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod create_update_sql_stored_procedure { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, Ok200(models::SqlStoredProcedureGetResults), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete_sql_stored_procedure( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, container_name: &str, stored_procedure_name: &str, ) -> std::result::Result<delete_sql_stored_procedure::Response, delete_sql_stored_procedure::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases/{}/containers/{}/storedProcedures/{}" , operation_config . 
base_path () , subscription_id , resource_group_name , account_name , database_name , container_name , stored_procedure_name) ; let mut url = url::Url::parse(url_str).map_err(delete_sql_stored_procedure::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete_sql_stored_procedure::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(delete_sql_stored_procedure::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(delete_sql_stored_procedure::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(delete_sql_stored_procedure::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete_sql_stored_procedure::Response::NoContent204), status_code => { let rsp_body = rsp.body(); Err(delete_sql_stored_procedure::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod delete_sql_stored_procedure { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_sql_user_defined_functions( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, container_name: &str, ) -> std::result::Result<models::SqlUserDefinedFunctionListResult, list_sql_user_defined_functions::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases/{}/containers/{}/userDefinedFunctions" , operation_config . 
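        // User-defined functions follow the same per-container listing pattern as stored procedures.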
base_path () , subscription_id , resource_group_name , account_name , database_name , container_name) ; let mut url = url::Url::parse(url_str).map_err(list_sql_user_defined_functions::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_sql_user_defined_functions::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_sql_user_defined_functions::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_sql_user_defined_functions::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::SqlUserDefinedFunctionListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_sql_user_defined_functions::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_sql_user_defined_functions::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_sql_user_defined_functions { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get_sql_user_defined_function( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, container_name: &str, user_defined_function_name: &str, ) -> std::result::Result<models::SqlUserDefinedFunctionGetResults, get_sql_user_defined_function::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases/{}/containers/{}/userDefinedFunctions/{}" , operation_config . 
base_path () , subscription_id , resource_group_name , account_name , database_name , container_name , user_defined_function_name) ; let mut url = url::Url::parse(url_str).map_err(get_sql_user_defined_function::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get_sql_user_defined_function::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(get_sql_user_defined_function::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_sql_user_defined_function::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::SqlUserDefinedFunctionGetResults = serde_json::from_slice(rsp_body) .map_err(|source| get_sql_user_defined_function::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(get_sql_user_defined_function::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod get_sql_user_defined_function { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_update_sql_user_defined_function( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, container_name: &str, user_defined_function_name: &str, create_update_sql_user_defined_function_parameters: &models::SqlUserDefinedFunctionCreateUpdateParameters, ) -> std::result::Result<create_update_sql_user_defined_function::Response, create_update_sql_user_defined_function::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases/{}/containers/{}/userDefinedFunctions/{}" , operation_config . 
base_path () , subscription_id , resource_group_name , account_name , database_name , container_name , user_defined_function_name) ; let mut url = url::Url::parse(url_str).map_err(create_update_sql_user_defined_function::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_update_sql_user_defined_function::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(create_update_sql_user_defined_function_parameters) .map_err(create_update_sql_user_defined_function::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(create_update_sql_user_defined_function::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_update_sql_user_defined_function::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(create_update_sql_user_defined_function::Response::Accepted202), http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::SqlUserDefinedFunctionGetResults = serde_json::from_slice(rsp_body) .map_err(|source| create_update_sql_user_defined_function::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_update_sql_user_defined_function::Response::Ok200(rsp_value)) } status_code => { let rsp_body = rsp.body(); Err(create_update_sql_user_defined_function::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod create_update_sql_user_defined_function { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, Ok200(models::SqlUserDefinedFunctionGetResults), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete_sql_user_defined_function( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, container_name: &str, user_defined_function_name: &str, ) -> std::result::Result<delete_sql_user_defined_function::Response, delete_sql_user_defined_function::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases/{}/containers/{}/userDefinedFunctions/{}" , operation_config . 
base_path () , subscription_id , resource_group_name , account_name , database_name , container_name , user_defined_function_name) ; let mut url = url::Url::parse(url_str).map_err(delete_sql_user_defined_function::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete_sql_user_defined_function::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(delete_sql_user_defined_function::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(delete_sql_user_defined_function::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(delete_sql_user_defined_function::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete_sql_user_defined_function::Response::NoContent204), status_code => { let rsp_body = rsp.body(); Err(delete_sql_user_defined_function::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod delete_sql_user_defined_function { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_sql_triggers( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, container_name: &str, ) -> std::result::Result<models::SqlTriggerListResult, list_sql_triggers::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases/{}/containers/{}/triggers" , operation_config . 
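        // Triggers, like stored procedures and UDFs, are server-side resources listed per container.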
base_path () , subscription_id , resource_group_name , account_name , database_name , container_name) ; let mut url = url::Url::parse(url_str).map_err(list_sql_triggers::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_sql_triggers::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_sql_triggers::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_sql_triggers::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::SqlTriggerListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_sql_triggers::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_sql_triggers::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_sql_triggers { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get_sql_trigger( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, container_name: &str, trigger_name: &str, ) -> std::result::Result<models::SqlTriggerGetResults, get_sql_trigger::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases/{}/containers/{}/triggers/{}" , operation_config . 
base_path () , subscription_id , resource_group_name , account_name , database_name , container_name , trigger_name) ; let mut url = url::Url::parse(url_str).map_err(get_sql_trigger::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get_sql_trigger::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get_sql_trigger::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_sql_trigger::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::SqlTriggerGetResults = serde_json::from_slice(rsp_body) .map_err(|source| get_sql_trigger::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(get_sql_trigger::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod get_sql_trigger { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_update_sql_trigger( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, container_name: &str, trigger_name: &str, create_update_sql_trigger_parameters: &models::SqlTriggerCreateUpdateParameters, ) -> std::result::Result<create_update_sql_trigger::Response, create_update_sql_trigger::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases/{}/containers/{}/triggers/{}" , operation_config . 
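        // The create/update payload carries the trigger body along with its type and operation metadata.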
base_path () , subscription_id , resource_group_name , account_name , database_name , container_name , trigger_name) ; let mut url = url::Url::parse(url_str).map_err(create_update_sql_trigger::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_update_sql_trigger::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(create_update_sql_trigger_parameters).map_err(create_update_sql_trigger::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(create_update_sql_trigger::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_update_sql_trigger::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(create_update_sql_trigger::Response::Accepted202), http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::SqlTriggerGetResults = serde_json::from_slice(rsp_body) .map_err(|source| create_update_sql_trigger::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_update_sql_trigger::Response::Ok200(rsp_value)) } status_code => { let rsp_body = rsp.body(); Err(create_update_sql_trigger::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod create_update_sql_trigger { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, Ok200(models::SqlTriggerGetResults), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete_sql_trigger( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, container_name: &str, trigger_name: &str, ) -> std::result::Result<delete_sql_trigger::Response, delete_sql_trigger::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/sqlDatabases/{}/containers/{}/triggers/{}" , operation_config . 
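// Usage sketch (illustrative only): deleting a trigger and distinguishing the
// two success statuses the service can return. `config` is an assumed,
// pre-built `crate::OperationConfig`; the names are placeholders.
//
//     match delete_sql_trigger(&config, "sub-id", "rg", "account", "db", "container", "trigger1").await? {
//         delete_sql_trigger::Response::Accepted202 => { /* deletion accepted, completes asynchronously */ }
//         delete_sql_trigger::Response::NoContent204 => { /* completed with no content */ }
//     }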
base_path () , subscription_id , resource_group_name , account_name , database_name , container_name , trigger_name) ; let mut url = url::Url::parse(url_str).map_err(delete_sql_trigger::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete_sql_trigger::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete_sql_trigger::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(delete_sql_trigger::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(delete_sql_trigger::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete_sql_trigger::Response::NoContent204), status_code => { let rsp_body = rsp.body(); Err(delete_sql_trigger::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod delete_sql_trigger { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod mongo_db_resources { use super::{models, API_VERSION}; pub async fn list_mongo_db_databases( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<models::MongoDbDatabaseListResult, list_mongo_db_databases::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/mongodbDatabases", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(list_mongo_db_databases::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_mongo_db_databases::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) 
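// Usage sketch (illustrative only): enumerating the MongoDB databases of an
// account. `config` is an assumed, pre-built `crate::OperationConfig`; the
// call resolves to a `models::MongoDbDatabaseListResult` on HTTP 200.
//
//     let dbs = mongo_db_resources::list_mongo_db_databases(&config, "sub-id", "rg", "account").await?;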
.map_err(list_mongo_db_databases::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_mongo_db_databases::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::MongoDbDatabaseListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_mongo_db_databases::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_mongo_db_databases::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_mongo_db_databases { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get_mongo_db_database( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, ) -> std::result::Result<models::MongoDbDatabaseGetResults, get_mongo_db_database::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/mongodbDatabases/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, database_name ); let mut url = url::Url::parse(url_str).map_err(get_mongo_db_database::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get_mongo_db_database::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(get_mongo_db_database::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_mongo_db_database::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::MongoDbDatabaseGetResults = serde_json::from_slice(rsp_body) .map_err(|source| get_mongo_db_database::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(get_mongo_db_database::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod get_mongo_db_database { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] 
ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_update_mongo_db_database( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, create_update_mongo_db_database_parameters: &models::MongoDbDatabaseCreateUpdateParameters, ) -> std::result::Result<create_update_mongo_db_database::Response, create_update_mongo_db_database::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/mongodbDatabases/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, database_name ); let mut url = url::Url::parse(url_str).map_err(create_update_mongo_db_database::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_update_mongo_db_database::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(create_update_mongo_db_database_parameters) .map_err(create_update_mongo_db_database::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(create_update_mongo_db_database::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_update_mongo_db_database::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(create_update_mongo_db_database::Response::Accepted202), http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::MongoDbDatabaseGetResults = serde_json::from_slice(rsp_body) .map_err(|source| create_update_mongo_db_database::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_update_mongo_db_database::Response::Ok200(rsp_value)) } status_code => { let rsp_body = rsp.body(); Err(create_update_mongo_db_database::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod create_update_mongo_db_database { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, Ok200(models::MongoDbDatabaseGetResults), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize 
response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete_mongo_db_database( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, ) -> std::result::Result<delete_mongo_db_database::Response, delete_mongo_db_database::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/mongodbDatabases/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, database_name ); let mut url = url::Url::parse(url_str).map_err(delete_mongo_db_database::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete_mongo_db_database::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(delete_mongo_db_database::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(delete_mongo_db_database::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(delete_mongo_db_database::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete_mongo_db_database::Response::NoContent204), status_code => { let rsp_body = rsp.body(); Err(delete_mongo_db_database::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod delete_mongo_db_database { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get_mongo_db_database_throughput( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, ) -> std::result::Result<models::ThroughputSettingsGetResults, get_mongo_db_database_throughput::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/mongodbDatabases/{}/throughputSettings/default" , operation_config . 
base_path () , subscription_id , resource_group_name , account_name , database_name) ; let mut url = url::Url::parse(url_str).map_err(get_mongo_db_database_throughput::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get_mongo_db_database_throughput::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(get_mongo_db_database_throughput::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_mongo_db_database_throughput::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::ThroughputSettingsGetResults = serde_json::from_slice(rsp_body) .map_err(|source| get_mongo_db_database_throughput::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(get_mongo_db_database_throughput::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod get_mongo_db_database_throughput { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update_mongo_db_database_throughput( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, update_throughput_parameters: &models::ThroughputSettingsUpdateParameters, ) -> std::result::Result<update_mongo_db_database_throughput::Response, update_mongo_db_database_throughput::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/mongodbDatabases/{}/throughputSettings/default" , operation_config . 
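// Usage sketch (illustrative only): updating database-level throughput.
// `params` is an assumed, pre-populated
// `models::ThroughputSettingsUpdateParameters`; paths are shown relative to
// this module. A 202 means the update was accepted and finishes asynchronously.
//
//     match update_mongo_db_database_throughput(
//         &config, "sub-id", "rg", "account", "db", &params,
//     )
//     .await?
//     {
//         update_mongo_db_database_throughput::Response::Ok200(settings) => { /* applied */ }
//         update_mongo_db_database_throughput::Response::Accepted202 => { /* in progress */ }
//     }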
base_path () , subscription_id , resource_group_name , account_name , database_name) ; let mut url = url::Url::parse(url_str).map_err(update_mongo_db_database_throughput::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update_mongo_db_database_throughput::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(update_throughput_parameters).map_err(update_mongo_db_database_throughput::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(update_mongo_db_database_throughput::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(update_mongo_db_database_throughput::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(update_mongo_db_database_throughput::Response::Accepted202), http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::ThroughputSettingsGetResults = serde_json::from_slice(rsp_body) .map_err(|source| update_mongo_db_database_throughput::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update_mongo_db_database_throughput::Response::Ok200(rsp_value)) } status_code => { let rsp_body = rsp.body(); Err(update_mongo_db_database_throughput::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod update_mongo_db_database_throughput { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, Ok200(models::ThroughputSettingsGetResults), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_mongo_db_collections( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, ) -> std::result::Result<models::MongoDbCollectionListResult, list_mongo_db_collections::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/mongodbDatabases/{}/collections", operation_config.base_path(), subscription_id, resource_group_name, account_name, database_name ); let mut url = url::Url::parse(url_str).map_err(list_mongo_db_collections::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = 
token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_mongo_db_collections::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_mongo_db_collections::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_mongo_db_collections::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::MongoDbCollectionListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_mongo_db_collections::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_mongo_db_collections::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_mongo_db_collections { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get_mongo_db_collection( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, collection_name: &str, ) -> std::result::Result<models::MongoDbCollectionGetResults, get_mongo_db_collection::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/mongodbDatabases/{}/collections/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, database_name, collection_name ); let mut url = url::Url::parse(url_str).map_err(get_mongo_db_collection::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get_mongo_db_collection::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(get_mongo_db_collection::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_mongo_db_collection::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::MongoDbCollectionGetResults = 
serde_json::from_slice(rsp_body) .map_err(|source| get_mongo_db_collection::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(get_mongo_db_collection::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod get_mongo_db_collection { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_update_mongo_db_collection( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, collection_name: &str, create_update_mongo_db_collection_parameters: &models::MongoDbCollectionCreateUpdateParameters, ) -> std::result::Result<create_update_mongo_db_collection::Response, create_update_mongo_db_collection::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/mongodbDatabases/{}/collections/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, database_name, collection_name ); let mut url = url::Url::parse(url_str).map_err(create_update_mongo_db_collection::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_update_mongo_db_collection::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(create_update_mongo_db_collection_parameters) .map_err(create_update_mongo_db_collection::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(create_update_mongo_db_collection::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_update_mongo_db_collection::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(create_update_mongo_db_collection::Response::Accepted202), http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::MongoDbCollectionGetResults = serde_json::from_slice(rsp_body) .map_err(|source| create_update_mongo_db_collection::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_update_mongo_db_collection::Response::Ok200(rsp_value)) } status_code => { let rsp_body = rsp.body(); Err(create_update_mongo_db_collection::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod 
create_update_mongo_db_collection { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, Ok200(models::MongoDbCollectionGetResults), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete_mongo_db_collection( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, collection_name: &str, ) -> std::result::Result<delete_mongo_db_collection::Response, delete_mongo_db_collection::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/mongodbDatabases/{}/collections/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, database_name, collection_name ); let mut url = url::Url::parse(url_str).map_err(delete_mongo_db_collection::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete_mongo_db_collection::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(delete_mongo_db_collection::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(delete_mongo_db_collection::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(delete_mongo_db_collection::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete_mongo_db_collection::Response::NoContent204), status_code => { let rsp_body = rsp.body(); Err(delete_mongo_db_collection::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod delete_mongo_db_collection { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), 
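// Usage sketch (illustrative only): dropping a collection. Both 202 and 204
// are success cases for DELETE, as the `Response` enum above encodes; the
// resource names and `config` value are assumed placeholders.
//
//     match delete_mongo_db_collection(&config, "sub-id", "rg", "account", "db", "col").await? {
//         delete_mongo_db_collection::Response::Accepted202 => { /* accepted, in progress */ }
//         delete_mongo_db_collection::Response::NoContent204 => { /* completed with no content */ }
//     }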
#[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get_mongo_db_collection_throughput( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, collection_name: &str, ) -> std::result::Result<models::ThroughputSettingsGetResults, get_mongo_db_collection_throughput::Error> { let http_client = operation_config.http_client(); let url_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/mongodbDatabases/{}/collections/{}/throughputSettings/default" , operation_config . base_path () , subscription_id , resource_group_name , account_name , database_name , collection_name) ; let mut url = url::Url::parse(url_str).map_err(get_mongo_db_collection_throughput::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get_mongo_db_collection_throughput::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(get_mongo_db_collection_throughput::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_mongo_db_collection_throughput::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::ThroughputSettingsGetResults = serde_json::from_slice(rsp_body) .map_err(|source| get_mongo_db_collection_throughput::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(get_mongo_db_collection_throughput::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod get_mongo_db_collection_throughput { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update_mongo_db_collection_throughput( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, collection_name: &str, update_throughput_parameters: &models::ThroughputSettingsUpdateParameters, ) -> std::result::Result<update_mongo_db_collection_throughput::Response, update_mongo_db_collection_throughput::Error> { let http_client = operation_config.http_client(); let url_str = & format ! 
("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/mongodbDatabases/{}/collections/{}/throughputSettings/default" , operation_config . base_path () , subscription_id , resource_group_name , account_name , database_name , collection_name) ; let mut url = url::Url::parse(url_str).map_err(update_mongo_db_collection_throughput::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update_mongo_db_collection_throughput::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(update_throughput_parameters).map_err(update_mongo_db_collection_throughput::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(update_mongo_db_collection_throughput::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(update_mongo_db_collection_throughput::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(update_mongo_db_collection_throughput::Response::Accepted202), http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::ThroughputSettingsGetResults = serde_json::from_slice(rsp_body) .map_err(|source| update_mongo_db_collection_throughput::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update_mongo_db_collection_throughput::Response::Ok200(rsp_value)) } status_code => { let rsp_body = rsp.body(); Err(update_mongo_db_collection_throughput::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod update_mongo_db_collection_throughput { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, Ok200(models::ThroughputSettingsGetResults), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod table_resources { use super::{models, API_VERSION}; pub async fn list_tables( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<models::TableListResult, list_tables::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/tables", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(list_tables::Error::ParseUrlError)?; let mut req_builder = 
http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_tables::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_tables::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_tables::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::TableListResult = serde_json::from_slice(rsp_body).map_err(|source| list_tables::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_tables::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_tables { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get_table( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, table_name: &str, ) -> std::result::Result<models::TableGetResults, get_table::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/tables/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, table_name ); let mut url = url::Url::parse(url_str).map_err(get_table::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get_table::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get_table::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_table::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::TableGetResults = serde_json::from_slice(rsp_body).map_err(|source| get_table::Error::DeserializeError(source, rsp_body.clone()))?; 
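// Usage sketch (illustrative only): fetching a single Table API table; a 200
// deserializes into `models::TableGetResults` exactly as in the arm below.
// `config` and the resource names are assumed placeholders.
//
//     let table = table_resources::get_table(&config, "sub-id", "rg", "account", "table1").await?;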
Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(get_table::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod get_table { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_update_table( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, table_name: &str, create_update_table_parameters: &models::TableCreateUpdateParameters, ) -> std::result::Result<create_update_table::Response, create_update_table::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/tables/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, table_name ); let mut url = url::Url::parse(url_str).map_err(create_update_table::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_update_table::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(create_update_table_parameters).map_err(create_update_table::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_update_table::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_update_table::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(create_update_table::Response::Accepted202), http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::TableGetResults = serde_json::from_slice(rsp_body) .map_err(|source| create_update_table::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_update_table::Response::Ok200(rsp_value)) } status_code => { let rsp_body = rsp.body(); Err(create_update_table::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod create_update_table { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, Ok200(models::TableGetResults), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), 
#[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete_table( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, table_name: &str, ) -> std::result::Result<delete_table::Response, delete_table::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/tables/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, table_name ); let mut url = url::Url::parse(url_str).map_err(delete_table::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete_table::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete_table::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(delete_table::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(delete_table::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete_table::Response::NoContent204), status_code => { let rsp_body = rsp.body(); Err(delete_table::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod delete_table { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get_table_throughput( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, table_name: &str, ) -> std::result::Result<models::ThroughputSettingsGetResults, get_table_throughput::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/tables/{}/throughputSettings/default", operation_config.base_path(), subscription_id, resource_group_name, account_name, table_name ); let mut url = url::Url::parse(url_str).map_err(get_table_throughput::Error::ParseUrlError)?; let 
mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get_table_throughput::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get_table_throughput::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_table_throughput::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::ThroughputSettingsGetResults = serde_json::from_slice(rsp_body) .map_err(|source| get_table_throughput::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(get_table_throughput::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod get_table_throughput { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update_table_throughput( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, table_name: &str, update_throughput_parameters: &models::ThroughputSettingsUpdateParameters, ) -> std::result::Result<update_table_throughput::Response, update_table_throughput::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/tables/{}/throughputSettings/default", operation_config.base_path(), subscription_id, resource_group_name, account_name, table_name ); let mut url = url::Url::parse(url_str).map_err(update_table_throughput::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update_table_throughput::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(update_throughput_parameters).map_err(update_table_throughput::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) 
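// Usage sketch (illustrative only): updating table throughput and handling the
// synchronous (200) versus asynchronous (202) outcomes; `params` is an
// assumed `models::ThroughputSettingsUpdateParameters`, paths relative to
// this module.
//
//     match update_table_throughput(&config, "sub-id", "rg", "account", "table1", &params).await? {
//         update_table_throughput::Response::Ok200(settings) => { /* applied */ }
//         update_table_throughput::Response::Accepted202 => { /* in progress */ }
//     }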
.map_err(update_table_throughput::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(update_table_throughput::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(update_table_throughput::Response::Accepted202), http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::ThroughputSettingsGetResults = serde_json::from_slice(rsp_body) .map_err(|source| update_table_throughput::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update_table_throughput::Response::Ok200(rsp_value)) } status_code => { let rsp_body = rsp.body(); Err(update_table_throughput::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod update_table_throughput { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, Ok200(models::ThroughputSettingsGetResults), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod cassandra_resources { use super::{models, API_VERSION}; pub async fn list_cassandra_keyspaces( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<models::CassandraKeyspaceListResult, list_cassandra_keyspaces::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/cassandraKeyspaces", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(list_cassandra_keyspaces::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_cassandra_keyspaces::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_cassandra_keyspaces::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_cassandra_keyspaces::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::CassandraKeyspaceListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_cassandra_keyspaces::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_cassandra_keyspaces::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod 
list_cassandra_keyspaces { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get_cassandra_keyspace( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, keyspace_name: &str, ) -> std::result::Result<models::CassandraKeyspaceGetResults, get_cassandra_keyspace::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/cassandraKeyspaces/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, keyspace_name ); let mut url = url::Url::parse(url_str).map_err(get_cassandra_keyspace::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get_cassandra_keyspace::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(get_cassandra_keyspace::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_cassandra_keyspace::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::CassandraKeyspaceGetResults = serde_json::from_slice(rsp_body) .map_err(|source| get_cassandra_keyspace::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(get_cassandra_keyspace::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod get_cassandra_keyspace { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_update_cassandra_keyspace( operation_config: &crate::OperationConfig, subscription_id: &str, 
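        // The remaining parameters identify the target resource path; they are
        // interpolated verbatim into the URL template in the body below (no
        // percent-encoding is applied by the `format!` call itself).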
        resource_group_name: &str,
        account_name: &str,
        keyspace_name: &str,
        create_update_cassandra_keyspace_parameters: &models::CassandraKeyspaceCreateUpdateParameters,
    ) -> std::result::Result<create_update_cassandra_keyspace::Response, create_update_cassandra_keyspace::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/cassandraKeyspaces/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            keyspace_name
        );
        let mut url = url::Url::parse(url_str).map_err(create_update_cassandra_keyspace::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(create_update_cassandra_keyspace::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(create_update_cassandra_keyspace_parameters)
            .map_err(create_update_cassandra_keyspace::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(create_update_cassandra_keyspace::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(create_update_cassandra_keyspace::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::ACCEPTED => Ok(create_update_cassandra_keyspace::Response::Accepted202),
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::CassandraKeyspaceGetResults = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_update_cassandra_keyspace::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create_update_cassandra_keyspace::Response::Ok200(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(create_update_cassandra_keyspace::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    pub mod create_update_cassandra_keyspace {
        use super::{models, API_VERSION};
        #[derive(Debug)]
        pub enum Response {
            Accepted202,
            Ok200(models::CassandraKeyspaceGetResults),
        }
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    pub async fn delete_cassandra_keyspace(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        keyspace_name: &str,
    ) -> std::result::Result<delete_cassandra_keyspace::Response, delete_cassandra_keyspace::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/cassandraKeyspaces/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            keyspace_name
        );
        let mut url = url::Url::parse(url_str).map_err(delete_cassandra_keyspace::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(delete_cassandra_keyspace::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(delete_cassandra_keyspace::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(delete_cassandra_keyspace::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::ACCEPTED => Ok(delete_cassandra_keyspace::Response::Accepted202),
            http::StatusCode::NO_CONTENT => Ok(delete_cassandra_keyspace::Response::NoContent204),
            status_code => {
                let rsp_body = rsp.body();
                Err(delete_cassandra_keyspace::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    pub mod delete_cassandra_keyspace {
        use super::{models, API_VERSION};
        #[derive(Debug)]
        pub enum Response {
            Accepted202,
            NoContent204,
        }
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    pub async fn get_cassandra_keyspace_throughput(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        keyspace_name: &str,
    ) -> std::result::Result<models::ThroughputSettingsGetResults, get_cassandra_keyspace_throughput::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/cassandraKeyspaces/{}/throughputSettings/default",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            keyspace_name
        );
        let mut url = url::Url::parse(url_str).map_err(get_cassandra_keyspace_throughput::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get_cassandra_keyspace_throughput::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(get_cassandra_keyspace_throughput::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(get_cassandra_keyspace_throughput::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::ThroughputSettingsGetResults = serde_json::from_slice(rsp_body)
                    .map_err(|source| get_cassandra_keyspace_throughput::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(get_cassandra_keyspace_throughput::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    pub mod get_cassandra_keyspace_throughput {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    pub async fn update_cassandra_keyspace_throughput(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        keyspace_name: &str,
        update_throughput_parameters: &models::ThroughputSettingsUpdateParameters,
    ) -> std::result::Result<update_cassandra_keyspace_throughput::Response, update_cassandra_keyspace_throughput::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/cassandraKeyspaces/{}/throughputSettings/default",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            keyspace_name
        );
        let mut url = url::Url::parse(url_str).map_err(update_cassandra_keyspace_throughput::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(update_cassandra_keyspace_throughput::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(update_throughput_parameters).map_err(update_cassandra_keyspace_throughput::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(update_cassandra_keyspace_throughput::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(update_cassandra_keyspace_throughput::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::ACCEPTED => Ok(update_cassandra_keyspace_throughput::Response::Accepted202),
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::ThroughputSettingsGetResults = serde_json::from_slice(rsp_body)
                    .map_err(|source| update_cassandra_keyspace_throughput::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(update_cassandra_keyspace_throughput::Response::Ok200(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(update_cassandra_keyspace_throughput::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    pub mod update_cassandra_keyspace_throughput {
        use super::{models, API_VERSION};
        #[derive(Debug)]
        pub enum Response {
            Accepted202,
            Ok200(models::ThroughputSettingsGetResults),
        }
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    pub async fn list_cassandra_tables(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        keyspace_name: &str,
    ) -> std::result::Result<models::CassandraTableListResult, list_cassandra_tables::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/cassandraKeyspaces/{}/tables",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            keyspace_name
        );
        let mut url = url::Url::parse(url_str).map_err(list_cassandra_tables::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response =
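            // Bearer-token flow shared by every operation: when the
            // `OperationConfig` carries a token credential, a token is requested
            // for the configured resource and attached as an
            // `Authorization: Bearer ...` header; configurations without a
            // credential skip the header entirely.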
token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_cassandra_tables::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_cassandra_tables::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_cassandra_tables::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::CassandraTableListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_cassandra_tables::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_cassandra_tables::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_cassandra_tables { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get_cassandra_table( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, keyspace_name: &str, table_name: &str, ) -> std::result::Result<models::CassandraTableGetResults, get_cassandra_table::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/cassandraKeyspaces/{}/tables/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, keyspace_name, table_name ); let mut url = url::Url::parse(url_str).map_err(get_cassandra_table::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get_cassandra_table::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get_cassandra_table::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_cassandra_table::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::CassandraTableGetResults = serde_json::from_slice(rsp_body) .map_err(|source| 
get_cassandra_table::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(get_cassandra_table::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod get_cassandra_table { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_update_cassandra_table( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, keyspace_name: &str, table_name: &str, create_update_cassandra_table_parameters: &models::CassandraTableCreateUpdateParameters, ) -> std::result::Result<create_update_cassandra_table::Response, create_update_cassandra_table::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/cassandraKeyspaces/{}/tables/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, keyspace_name, table_name ); let mut url = url::Url::parse(url_str).map_err(create_update_cassandra_table::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_update_cassandra_table::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(create_update_cassandra_table_parameters).map_err(create_update_cassandra_table::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(create_update_cassandra_table::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_update_cassandra_table::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(create_update_cassandra_table::Response::Accepted202), http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::CassandraTableGetResults = serde_json::from_slice(rsp_body) .map_err(|source| create_update_cassandra_table::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create_update_cassandra_table::Response::Ok200(rsp_value)) } status_code => { let rsp_body = rsp.body(); Err(create_update_cassandra_table::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod create_update_cassandra_table { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, Ok200(models::CassandraTableGetResults), } 
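        // `Accepted202` means the management backend accepted the create/update
        // and is provisioning the table asynchronously; `Ok200` carries the
        // materialized resource. A minimal caller sketch (the `config` value and
        // the polling step are illustrative assumptions, not part of this
        // generated module):
        //
        //     match create_update_cassandra_table(&config, sub, rg, account, ks, tbl, &params).await? {
        //         create_update_cassandra_table::Response::Ok200(table) => {
        //             // resource is already available
        //         }
        //         create_update_cassandra_table::Response::Accepted202 => {
        //             // hypothetical: poll `get_cassandra_table` until the resource appears
        //         }
        //     }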
#[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete_cassandra_table( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, keyspace_name: &str, table_name: &str, ) -> std::result::Result<delete_cassandra_table::Response, delete_cassandra_table::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/cassandraKeyspaces/{}/tables/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, keyspace_name, table_name ); let mut url = url::Url::parse(url_str).map_err(delete_cassandra_table::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete_cassandra_table::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(delete_cassandra_table::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(delete_cassandra_table::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::ACCEPTED => Ok(delete_cassandra_table::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete_cassandra_table::Response::NoContent204), status_code => { let rsp_body = rsp.body(); Err(delete_cassandra_table::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod delete_cassandra_table { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get_cassandra_table_throughput( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: 
        &str,
        keyspace_name: &str,
        table_name: &str,
    ) -> std::result::Result<models::ThroughputSettingsGetResults, get_cassandra_table_throughput::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/cassandraKeyspaces/{}/tables/{}/throughputSettings/default",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            keyspace_name,
            table_name
        );
        let mut url = url::Url::parse(url_str).map_err(get_cassandra_table_throughput::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get_cassandra_table_throughput::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(get_cassandra_table_throughput::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(get_cassandra_table_throughput::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::ThroughputSettingsGetResults = serde_json::from_slice(rsp_body)
                    .map_err(|source| get_cassandra_table_throughput::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(get_cassandra_table_throughput::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    pub mod get_cassandra_table_throughput {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    pub async fn update_cassandra_table_throughput(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        keyspace_name: &str,
        table_name: &str,
        update_throughput_parameters: &models::ThroughputSettingsUpdateParameters,
    ) -> std::result::Result<update_cassandra_table_throughput::Response, update_cassandra_table_throughput::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/cassandraKeyspaces/{}/tables/{}/throughputSettings/default",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            keyspace_name,
            table_name
        );
        let mut url = url::Url::parse(url_str).map_err(update_cassandra_table_throughput::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(update_cassandra_table_throughput::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(update_throughput_parameters).map_err(update_cassandra_table_throughput::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(update_cassandra_table_throughput::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(update_cassandra_table_throughput::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::ACCEPTED => Ok(update_cassandra_table_throughput::Response::Accepted202),
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::ThroughputSettingsGetResults = serde_json::from_slice(rsp_body)
                    .map_err(|source| update_cassandra_table_throughput::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(update_cassandra_table_throughput::Response::Ok200(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(update_cassandra_table_throughput::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    pub mod update_cassandra_table_throughput {
        use super::{models, API_VERSION};
        #[derive(Debug)]
        pub enum Response {
            Accepted202,
            Ok200(models::ThroughputSettingsGetResults),
        }
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod gremlin_resources {
    use super::{models, API_VERSION};
    pub async fn list_gremlin_databases(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
    ) -> std::result::Result<models::GremlinDatabaseListResult, list_gremlin_databases::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/gremlinDatabases",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name
        );
        let mut url = url::Url::parse(url_str).map_err(list_gremlin_databases::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response =
token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_gremlin_databases::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_gremlin_databases::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_gremlin_databases::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::GremlinDatabaseListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_gremlin_databases::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_gremlin_databases::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_gremlin_databases { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get_gremlin_database( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, ) -> std::result::Result<models::GremlinDatabaseGetResults, get_gremlin_database::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/gremlinDatabases/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, database_name ); let mut url = url::Url::parse(url_str).map_err(get_gremlin_database::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get_gremlin_database::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get_gremlin_database::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_gremlin_database::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::GremlinDatabaseGetResults = serde_json::from_slice(rsp_body) .map_err(|source| get_gremlin_database::Error::DeserializeError(source, 
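                // The undecodable payload is cloned into the error alongside the
                // serde failure, so the raw body survives for diagnostics.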
                    rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(get_gremlin_database::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    pub mod get_gremlin_database {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    pub async fn create_update_gremlin_database(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        database_name: &str,
        create_update_gremlin_database_parameters: &models::GremlinDatabaseCreateUpdateParameters,
    ) -> std::result::Result<create_update_gremlin_database::Response, create_update_gremlin_database::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/gremlinDatabases/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            database_name
        );
        let mut url = url::Url::parse(url_str).map_err(create_update_gremlin_database::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(create_update_gremlin_database::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(create_update_gremlin_database_parameters)
            .map_err(create_update_gremlin_database::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(create_update_gremlin_database::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(create_update_gremlin_database::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::ACCEPTED => Ok(create_update_gremlin_database::Response::Accepted202),
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::GremlinDatabaseGetResults = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_update_gremlin_database::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create_update_gremlin_database::Response::Ok200(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(create_update_gremlin_database::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    pub mod create_update_gremlin_database {
        use super::{models, API_VERSION};
        #[derive(Debug)]
        pub enum Response {
            Accepted202,
            Ok200(models::GremlinDatabaseGetResults),
        }
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    pub async fn delete_gremlin_database(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        database_name: &str,
    ) -> std::result::Result<delete_gremlin_database::Response, delete_gremlin_database::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/gremlinDatabases/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            database_name
        );
        let mut url = url::Url::parse(url_str).map_err(delete_gremlin_database::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(delete_gremlin_database::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(delete_gremlin_database::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(delete_gremlin_database::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::ACCEPTED => Ok(delete_gremlin_database::Response::Accepted202),
            http::StatusCode::NO_CONTENT => Ok(delete_gremlin_database::Response::NoContent204),
            status_code => {
                let rsp_body = rsp.body();
                Err(delete_gremlin_database::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    pub mod delete_gremlin_database {
        use super::{models, API_VERSION};
        #[derive(Debug)]
        pub enum Response {
            Accepted202,
            NoContent204,
        }
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    pub async fn get_gremlin_database_throughput(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        database_name: &str,
    ) -> std::result::Result<models::ThroughputSettingsGetResults,
        get_gremlin_database_throughput::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/gremlinDatabases/{}/throughputSettings/default",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            database_name
        );
        let mut url = url::Url::parse(url_str).map_err(get_gremlin_database_throughput::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get_gremlin_database_throughput::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(get_gremlin_database_throughput::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(get_gremlin_database_throughput::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::ThroughputSettingsGetResults = serde_json::from_slice(rsp_body)
                    .map_err(|source| get_gremlin_database_throughput::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(get_gremlin_database_throughput::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    pub mod get_gremlin_database_throughput {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    pub async fn update_gremlin_database_throughput(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        database_name: &str,
        update_throughput_parameters: &models::ThroughputSettingsUpdateParameters,
    ) -> std::result::Result<update_gremlin_database_throughput::Response, update_gremlin_database_throughput::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/gremlinDatabases/{}/throughputSettings/default",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            database_name
        );
        let mut url = url::Url::parse(url_str).map_err(update_gremlin_database_throughput::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(update_gremlin_database_throughput::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(update_throughput_parameters).map_err(update_gremlin_database_throughput::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(update_gremlin_database_throughput::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(update_gremlin_database_throughput::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::ACCEPTED => Ok(update_gremlin_database_throughput::Response::Accepted202),
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::ThroughputSettingsGetResults = serde_json::from_slice(rsp_body)
                    .map_err(|source| update_gremlin_database_throughput::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(update_gremlin_database_throughput::Response::Ok200(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(update_gremlin_database_throughput::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    pub mod update_gremlin_database_throughput {
        use super::{models, API_VERSION};
        #[derive(Debug)]
        pub enum Response {
            Accepted202,
            Ok200(models::ThroughputSettingsGetResults),
        }
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    pub async fn list_gremlin_graphs(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        database_name: &str,
    ) -> std::result::Result<models::GremlinGraphListResult, list_gremlin_graphs::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/gremlinDatabases/{}/graphs",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            database_name
        );
        let mut url = url::Url::parse(url_str).map_err(list_gremlin_graphs::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
.get_token(operation_config.token_credential_resource()) .await .map_err(list_gremlin_graphs::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_gremlin_graphs::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_gremlin_graphs::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::GremlinGraphListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_gremlin_graphs::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_gremlin_graphs::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_gremlin_graphs { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get_gremlin_graph( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, database_name: &str, graph_name: &str, ) -> std::result::Result<models::GremlinGraphGetResults, get_gremlin_graph::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/gremlinDatabases/{}/graphs/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, database_name, graph_name ); let mut url = url::Url::parse(url_str).map_err(get_gremlin_graph::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get_gremlin_graph::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get_gremlin_graph::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_gremlin_graph::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: models::GremlinGraphGetResults = serde_json::from_slice(rsp_body) .map_err(|source| get_gremlin_graph::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } 
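            // Any status other than 200 falls through to the catch-all arm below,
            // which preserves both the status code and the raw body so ARM error
            // payloads can be inspected by the caller.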
            status_code => {
                let rsp_body = rsp.body();
                Err(get_gremlin_graph::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    pub mod get_gremlin_graph {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    pub async fn create_update_gremlin_graph(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        database_name: &str,
        graph_name: &str,
        create_update_gremlin_graph_parameters: &models::GremlinGraphCreateUpdateParameters,
    ) -> std::result::Result<create_update_gremlin_graph::Response, create_update_gremlin_graph::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/gremlinDatabases/{}/graphs/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            database_name,
            graph_name
        );
        let mut url = url::Url::parse(url_str).map_err(create_update_gremlin_graph::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(create_update_gremlin_graph::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        req_builder = req_builder.header("content-type", "application/json");
        let req_body =
            azure_core::to_json(create_update_gremlin_graph_parameters).map_err(create_update_gremlin_graph::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(create_update_gremlin_graph::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(create_update_gremlin_graph::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::ACCEPTED => Ok(create_update_gremlin_graph::Response::Accepted202),
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::GremlinGraphGetResults = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_update_gremlin_graph::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create_update_gremlin_graph::Response::Ok200(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(create_update_gremlin_graph::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    pub mod create_update_gremlin_graph {
        use super::{models, API_VERSION};
        #[derive(Debug)]
        pub enum Response {
            Accepted202,
            Ok200(models::GremlinGraphGetResults),
        }
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    pub async fn delete_gremlin_graph(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        database_name: &str,
        graph_name: &str,
    ) -> std::result::Result<delete_gremlin_graph::Response, delete_gremlin_graph::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/gremlinDatabases/{}/graphs/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            database_name,
            graph_name
        );
        let mut url = url::Url::parse(url_str).map_err(delete_gremlin_graph::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(delete_gremlin_graph::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(delete_gremlin_graph::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(delete_gremlin_graph::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::ACCEPTED => Ok(delete_gremlin_graph::Response::Accepted202),
            http::StatusCode::NO_CONTENT => Ok(delete_gremlin_graph::Response::NoContent204),
            status_code => {
                let rsp_body = rsp.body();
                Err(delete_gremlin_graph::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    pub mod delete_gremlin_graph {
        use super::{models, API_VERSION};
        #[derive(Debug)]
        pub enum Response {
            Accepted202,
            NoContent204,
        }
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    pub async fn get_gremlin_graph_throughput(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        database_name: &str,
        graph_name: &str,
    ) -> std::result::Result<models::ThroughputSettingsGetResults, get_gremlin_graph_throughput::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/gremlinDatabases/{}/graphs/{}/throughputSettings/default",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            database_name,
            graph_name
        );
        let mut url = url::Url::parse(url_str).map_err(get_gremlin_graph_throughput::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get_gremlin_graph_throughput::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(get_gremlin_graph_throughput::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(get_gremlin_graph_throughput::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::ThroughputSettingsGetResults = serde_json::from_slice(rsp_body)
                    .map_err(|source| get_gremlin_graph_throughput::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(get_gremlin_graph_throughput::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    pub mod get_gremlin_graph_throughput {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    pub async fn update_gremlin_graph_throughput(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        database_name: &str,
        graph_name: &str,
        update_throughput_parameters: &models::ThroughputSettingsUpdateParameters,
    ) -> std::result::Result<update_gremlin_graph_throughput::Response, update_gremlin_graph_throughput::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/gremlinDatabases/{}/graphs/{}/throughputSettings/default",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            database_name,
            graph_name
        );
        let mut url = url::Url::parse(url_str).map_err(update_gremlin_graph_throughput::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(update_gremlin_graph_throughput::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(update_throughput_parameters).map_err(update_gremlin_graph_throughput::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(update_gremlin_graph_throughput::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(update_gremlin_graph_throughput::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::ACCEPTED => Ok(update_gremlin_graph_throughput::Response::Accepted202),
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::ThroughputSettingsGetResults = serde_json::from_slice(rsp_body)
                    .map_err(|source| update_gremlin_graph_throughput::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(update_gremlin_graph_throughput::Response::Ok200(rsp_value))
            }
            status_code => {
                let rsp_body = rsp.body();
                Err(update_gremlin_graph_throughput::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    pub mod update_gremlin_graph_throughput {
        use super::{models, API_VERSION};
        #[derive(Debug)]
        pub enum Response {
            Accepted202,
            Ok200(models::ThroughputSettingsGetResults),
        }
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod notebook_workspaces {
    use super::{models, API_VERSION};
    pub async fn list_by_database_account(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
    ) -> std::result::Result<models::NotebookWorkspaceListResult, list_by_database_account::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/notebookWorkspaces",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name
        );
        let mut url = url::Url::parse(url_str).map_err(list_by_database_account::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_by_database_account::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(list_by_database_account::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_by_database_account::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::NotebookWorkspaceListResult = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_database_account::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_database_account::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list_by_database_account::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    pub mod list_by_database_account {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        notebook_workspace_name: &str,
    ) -> std::result::Result<models::NotebookWorkspace, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/notebookWorkspaces/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            notebook_workspace_name
        );
        let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::NotebookWorkspace =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(get::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    pub mod get {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        notebook_workspace_name: &str,
        notebook_create_update_parameters: &models::NotebookWorkspaceCreateUpdateParameters,
    ) -> std::result::Result<models::NotebookWorkspace, create_or_update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/notebookWorkspaces/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            notebook_workspace_name
        );
        let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(create_or_update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(notebook_create_update_parameters).map_err(create_or_update::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(create_or_update::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::NotebookWorkspace = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(create_or_update::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    pub mod create_or_update {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    pub async fn delete(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        notebook_workspace_name: &str,
    ) -> std::result::Result<delete::Response, delete::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/notebookWorkspaces/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            notebook_workspace_name
        );
        let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::DELETE);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(delete::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202),
            http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(delete::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    pub mod delete {
        use super::{models, API_VERSION};
        #[derive(Debug)]
        pub enum Response {
            Accepted202,
            NoContent204,
        }
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    pub async fn list_connection_info(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        notebook_workspace_name: &str,
    ) -> std::result::Result<models::NotebookWorkspaceConnectionInfoResult, list_connection_info::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/notebookWorkspaces/{}/listConnectionInfo",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            notebook_workspace_name
        );
        let mut url = url::Url::parse(url_str).map_err(list_connection_info::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_connection_info::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list_connection_info::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_connection_info::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: models::NotebookWorkspaceConnectionInfoResult = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_connection_info::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_connection_info::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list_connection_info::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    pub mod list_connection_info {
        use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    pub async fn regenerate_auth_token(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        notebook_workspace_name: &str,
    ) -> std::result::Result<regenerate_auth_token::Response, regenerate_auth_token::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/notebookWorkspaces/{}/regenerateAuthToken",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            notebook_workspace_name
        );
        let mut url = url::Url::parse(url_str).map_err(regenerate_auth_token::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(regenerate_auth_token::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(regenerate_auth_token::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(regenerate_auth_token::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(regenerate_auth_token::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(regenerate_auth_token::Response::Accepted202),
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse = serde_json::from_slice(rsp_body)
                    .map_err(|source| regenerate_auth_token::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(regenerate_auth_token::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    pub mod regenerate_auth_token {
        use super::{models, API_VERSION};
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
        }
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    pub async fn start(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        account_name: &str,
        notebook_workspace_name: &str,
    ) -> std::result::Result<start::Response, start::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DocumentDB/databaseAccounts/{}/notebookWorkspaces/{}/start",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            account_name,
            notebook_workspace_name
        );
        let mut url = url::Url::parse(url_str).map_err(start::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::POST);
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(start::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(start::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(start::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => Ok(start::Response::Ok200),
            http::StatusCode::ACCEPTED => Ok(start::Response::Accepted202),
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: models::ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| start::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(start::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    pub mod start {
        use super::{models, API_VERSION};
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            Accepted202,
        }
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
50.280246
351
0.61125
ff1051421e1ea0c44936e254a3896ae24aa32b25
1,103
#[doc = "Reader of register SYNC"] pub type R = crate::R<u32, super::SYNC>; #[doc = "Writer for register SYNC"] pub type W = crate::W<u32, super::SYNC>; #[doc = "Register SYNC `reset()`'s with value 0"] impl crate::ResetValue for super::SYNC { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Reader of field `SYNCTRIG`"] pub type SYNCTRIG_R = crate::R<u8, u8>; #[doc = "Write proxy for field `SYNCTRIG`"] pub struct SYNCTRIG_W<'a> { w: &'a mut W, } impl<'a> SYNCTRIG_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0xff) | ((value as u32) & 0xff); self.w } } impl R { #[doc = "Bits 0:7 - Synchronization Trigger"] #[inline(always)] pub fn synctrig(&self) -> SYNCTRIG_R { SYNCTRIG_R::new((self.bits & 0xff) as u8) } } impl W { #[doc = "Bits 0:7 - Synchronization Trigger"] #[inline(always)] pub fn synctrig(&mut self) -> SYNCTRIG_W { SYNCTRIG_W { w: self } } }
26.902439
70
0.576609
d9e25cdac09f39ba83988cb30ae30f97f225cbda
7,941
//! Wrapper for frontiered trace.
//!
//! Wraps a trace with a frontier so that all exposed timestamps are first advanced by the frontier.
//! This ensures that even for traces that have been advanced, all views provided through cursors
//! present deterministic times, independent of the compaction strategy.

use timely::progress::Timestamp;

use trace::{TraceReader, BatchReader, Description};
use trace::cursor::Cursor;
use crate::lattice::Lattice;

/// Wrapper to provide trace to nested scope.
pub struct TraceFrontier<Tr>
where
    Tr: TraceReader,
{
    trace: Tr,
    frontier: Vec<Tr::Time>,
}

impl<Tr> Clone for TraceFrontier<Tr>
where
    Tr: TraceReader + Clone,
    Tr::Time: Clone,
{
    fn clone(&self) -> Self {
        TraceFrontier {
            trace: self.trace.clone(),
            frontier: self.frontier.clone(),
        }
    }
}

impl<Tr> TraceReader for TraceFrontier<Tr>
where
    Tr: TraceReader,
    Tr::Batch: Clone,
    Tr::Key: 'static,
    Tr::Val: 'static,
    Tr::Time: Timestamp + Lattice,
    Tr::R: 'static,
{
    type Key = Tr::Key;
    type Val = Tr::Val;
    type Time = Tr::Time;
    type R = Tr::R;

    type Batch = BatchFrontier<Tr::Key, Tr::Val, Tr::Time, Tr::R, Tr::Batch>;
    type Cursor = CursorFrontier<Tr::Key, Tr::Val, Tr::Time, Tr::R, Tr::Cursor>;

    fn map_batches<F: FnMut(&Self::Batch)>(&mut self, mut f: F) {
        let frontier = &self.frontier[..];
        self.trace.map_batches(|batch| f(&Self::Batch::make_from(batch.clone(), frontier)))
    }

    fn advance_by(&mut self, frontier: &[Tr::Time]) { self.trace.advance_by(frontier) }
    fn advance_frontier(&mut self) -> &[Tr::Time] { self.trace.advance_frontier() }

    fn distinguish_since(&mut self, frontier: &[Tr::Time]) { self.trace.distinguish_since(frontier) }
    fn distinguish_frontier(&mut self) -> &[Tr::Time] { self.trace.distinguish_frontier() }

    fn cursor_through(&mut self, upper: &[Tr::Time]) -> Option<(Self::Cursor, <Self::Cursor as Cursor<Tr::Key, Tr::Val, Tr::Time, Tr::R>>::Storage)> {
        let frontier = &self.frontier[..];
        self.trace.cursor_through(upper).map(|(x, y)| (CursorFrontier::new(x, frontier), y))
    }
}

impl<Tr> TraceFrontier<Tr>
where
    Tr: TraceReader,
    Tr::Time: Timestamp,
{
    /// Makes a new trace wrapper
    pub fn make_from(trace: Tr, frontier: &[Tr::Time]) -> Self {
        TraceFrontier {
            trace,
            frontier: frontier.to_vec(),
        }
    }
}

/// Wrapper to provide batch to nested scope.
pub struct BatchFrontier<K, V, T, R, B> {
    phantom: ::std::marker::PhantomData<(K, V, T, R)>,
    batch: B,
    frontier: Vec<T>,
}

impl<K, V, T: Clone, R, B: Clone> Clone for BatchFrontier<K, V, T, R, B> {
    fn clone(&self) -> Self {
        BatchFrontier {
            phantom: ::std::marker::PhantomData,
            batch: self.batch.clone(),
            frontier: self.frontier.clone(),
        }
    }
}

impl<K, V, T, R, B> BatchReader<K, V, T, R> for BatchFrontier<K, V, T, R, B>
where
    B: BatchReader<K, V, T, R>,
    T: Timestamp + Lattice,
{
    type Cursor = BatchCursorFrontier<K, V, T, R, B>;

    fn cursor(&self) -> Self::Cursor {
        BatchCursorFrontier::new(self.batch.cursor(), &self.frontier[..])
    }
    fn len(&self) -> usize { self.batch.len() }
    fn description(&self) -> &Description<T> { &self.batch.description() }
}

impl<K, V, T, R, B> BatchFrontier<K, V, T, R, B>
where
    B: BatchReader<K, V, T, R>,
    T: Timestamp + Lattice,
{
    /// Makes a new batch wrapper
    pub fn make_from(batch: B, frontier: &[T]) -> Self {
        BatchFrontier {
            phantom: ::std::marker::PhantomData,
            batch,
            frontier: frontier.to_vec(),
        }
    }
}

/// Wrapper to provide cursor to nested scope.
pub struct CursorFrontier<K, V, T, R, C: Cursor<K, V, T, R>> {
    phantom: ::std::marker::PhantomData<(K, V, T, R)>,
    cursor: C,
    frontier: Vec<T>,
}

impl<K, V, T: Clone, R, C: Cursor<K, V, T, R>> CursorFrontier<K, V, T, R, C> {
    fn new(cursor: C, frontier: &[T]) -> Self {
        CursorFrontier {
            phantom: ::std::marker::PhantomData,
            cursor,
            frontier: frontier.to_vec(),
        }
    }
}

impl<K, V, T, R, C> Cursor<K, V, T, R> for CursorFrontier<K, V, T, R, C>
where
    C: Cursor<K, V, T, R>,
    T: Timestamp + Lattice,
{
    type Storage = C::Storage;

    #[inline] fn key_valid(&self, storage: &Self::Storage) -> bool { self.cursor.key_valid(storage) }
    #[inline] fn val_valid(&self, storage: &Self::Storage) -> bool { self.cursor.val_valid(storage) }

    #[inline] fn key<'a>(&self, storage: &'a Self::Storage) -> &'a K { self.cursor.key(storage) }
    #[inline] fn val<'a>(&self, storage: &'a Self::Storage) -> &'a V { self.cursor.val(storage) }

    #[inline]
    fn map_times<L: FnMut(&T, &R)>(&mut self, storage: &Self::Storage, mut logic: L) {
        let frontier = &self.frontier[..];
        let mut temp: T = Default::default();
        self.cursor.map_times(storage, |time, diff| {
            temp.clone_from(time);
            temp.advance_by(frontier);
            logic(&temp, diff);
        })
    }

    #[inline] fn step_key(&mut self, storage: &Self::Storage) { self.cursor.step_key(storage) }
    #[inline] fn seek_key(&mut self, storage: &Self::Storage, key: &K) { self.cursor.seek_key(storage, key) }

    #[inline] fn step_val(&mut self, storage: &Self::Storage) { self.cursor.step_val(storage) }
    #[inline] fn seek_val(&mut self, storage: &Self::Storage, val: &V) { self.cursor.seek_val(storage, val) }

    #[inline] fn rewind_keys(&mut self, storage: &Self::Storage) { self.cursor.rewind_keys(storage) }
    #[inline] fn rewind_vals(&mut self, storage: &Self::Storage) { self.cursor.rewind_vals(storage) }
}

/// Wrapper to provide cursor to nested scope.
pub struct BatchCursorFrontier<K, V, T, R, B: BatchReader<K, V, T, R>> {
    phantom: ::std::marker::PhantomData<(K, V, R)>,
    cursor: B::Cursor,
    frontier: Vec<T>,
}

impl<K, V, T: Clone, R, B: BatchReader<K, V, T, R>> BatchCursorFrontier<K, V, T, R, B> {
    fn new(cursor: B::Cursor, frontier: &[T]) -> Self {
        BatchCursorFrontier {
            phantom: ::std::marker::PhantomData,
            cursor,
            frontier: frontier.to_vec(),
        }
    }
}

impl<K, V, T, R, B: BatchReader<K, V, T, R>> Cursor<K, V, T, R> for BatchCursorFrontier<K, V, T, R, B>
where
    T: Timestamp + Lattice,
{
    type Storage = BatchFrontier<K, V, T, R, B>;

    #[inline] fn key_valid(&self, storage: &Self::Storage) -> bool { self.cursor.key_valid(&storage.batch) }
    #[inline] fn val_valid(&self, storage: &Self::Storage) -> bool { self.cursor.val_valid(&storage.batch) }

    #[inline] fn key<'a>(&self, storage: &'a Self::Storage) -> &'a K { self.cursor.key(&storage.batch) }
    #[inline] fn val<'a>(&self, storage: &'a Self::Storage) -> &'a V { self.cursor.val(&storage.batch) }

    #[inline]
    fn map_times<L: FnMut(&T, &R)>(&mut self, storage: &Self::Storage, mut logic: L) {
        let frontier = &self.frontier[..];
        let mut temp: T = Default::default();
        self.cursor.map_times(&storage.batch, |time, diff| {
            temp.clone_from(time);
            temp.advance_by(frontier);
            logic(&temp, diff);
        })
    }

    #[inline] fn step_key(&mut self, storage: &Self::Storage) { self.cursor.step_key(&storage.batch) }
    #[inline] fn seek_key(&mut self, storage: &Self::Storage, key: &K) { self.cursor.seek_key(&storage.batch, key) }

    #[inline] fn step_val(&mut self, storage: &Self::Storage) { self.cursor.step_val(&storage.batch) }
    #[inline] fn seek_val(&mut self, storage: &Self::Storage, val: &V) { self.cursor.seek_val(&storage.batch, val) }

    #[inline] fn rewind_keys(&mut self, storage: &Self::Storage) { self.cursor.rewind_keys(&storage.batch) }
    #[inline] fn rewind_vals(&mut self, storage: &Self::Storage) { self.cursor.rewind_vals(&storage.batch) }
}
34.228448
150
0.606095
22f6f60d567aa0f6a9495b35ce2b14673d096380
3,306
// Copyright 2014 Johannes Köster.
// Licensed under the MIT license (http://opensource.org/licenses/MIT)
// This file may not be copied, modified, or distributed
// except according to those terms.

extern crate bindgen;
extern crate cc;
extern crate fs_utils;

use fs_utils::copy::copy_directory;

use std::env;
use std::fs;
use std::path::PathBuf;
use std::process::Command;

/// Applies `sed` patterns to the vendored htslib Makefile, stripping
/// support for an optional compression feature that is not enabled.
fn sed_htslib_makefile(out: &PathBuf, patterns: &Vec<&str>, feature: &str) {
    for pattern in patterns {
        if !Command::new("sed")
            .current_dir(out.join("htslib"))
            .arg("-i")
            .arg("-e")
            .arg(pattern)
            .arg("Makefile")
            .status()
            .unwrap()
            .success()
        {
            panic!("failed to strip {} support", feature);
        }
    }
}

fn main() {
    let out = PathBuf::from(env::var("OUT_DIR").unwrap());
    let mut cfg = cc::Build::new();
    cfg.warnings(false).static_flag(true).pic(true);

    // Use the zlib headers provided by the libz-sys build, if available.
    if let Ok(z_inc) = env::var("DEP_Z_INCLUDE") {
        cfg.include(z_inc);
    }

    // Build htslib out-of-tree so the source checkout stays pristine.
    if !out.join("htslib").exists() {
        copy_directory("htslib", &out).unwrap();
    }

    let use_bzip2 = env::var("CARGO_FEATURE_BZIP2").is_ok();
    if !use_bzip2 {
        let bzip2_patterns = vec!["s/ -lbz2//", "/#define HAVE_LIBBZ2/d"];
        sed_htslib_makefile(&out, &bzip2_patterns, "bzip2");
    } else if let Ok(inc) = env::var("DEP_BZIP2_ROOT")
        .map(PathBuf::from)
        .map(|path| path.join("include"))
    {
        cfg.include(inc);
    }

    let use_lzma = env::var("CARGO_FEATURE_LZMA").is_ok();
    if !use_lzma {
        let lzma_patterns = vec!["s/ -llzma//", "/#define HAVE_LIBLZMA/d"];
        sed_htslib_makefile(&out, &lzma_patterns, "lzma");
    } else if let Ok(inc) = env::var("DEP_LZMA_INCLUDE").map(PathBuf::from) {
        cfg.include(inc);
    }

    // Forward the cc-selected compiler and flags to htslib's Makefile,
    // dropping -O0 so the static library is built with optimizations.
    let tool = cfg.get_compiler();
    let (cc_path, cflags_env) = (tool.path(), tool.cflags_env());
    let cc_cflags = cflags_env.to_string_lossy().replace("-O0", "");
    if !Command::new("make")
        .current_dir(out.join("htslib"))
        .arg(format!("CC={}", cc_path.display()))
        .arg(format!("CFLAGS={}", cc_cflags))
        .arg("lib-static")
        .arg("-B")
        .status()
        .unwrap()
        .success()
    {
        panic!("failed to build htslib");
    }

    cfg.file("wrapper.c").compile("wrapper");

    bindgen::Builder::default()
        .header("wrapper.h")
        .generate_comments(false)
        .blacklist_type("max_align_t")
        .generate()
        .expect("Unable to generate bindings.")
        .write_to_file(out.join("bindings.rs"))
        .expect("Could not write bindings.");

    // Export the headers and the static library for downstream crates.
    let include = out.join("include");
    fs::create_dir_all(&include).unwrap();
    if include.join("htslib").exists() {
        fs::remove_dir_all(include.join("htslib")).expect("remove exist include dir");
    }
    copy_directory(out.join("htslib").join("htslib"), &include).unwrap();
    fs::copy(out.join("htslib").join("libhts.a"), out.join("libhts.a")).unwrap();

    println!("cargo:root={}", out.display());
    println!("cargo:include={}", include.display());
    println!("cargo:libdir={}", out.display());
    println!("cargo:rustc-link-lib=static=hts");
}
30.611111
86
0.58046
38e17bce6c710580a59da06681964dc77621f21a
1,214
#[cfg(not(target_arch = "wasm32"))]
use std::io::{
    self,
    Write,
};

#[cfg(target_arch = "wasm32")]
use wasm_bindgen::prelude::*;

#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
extern "C" {
    #[wasm_bindgen(js_namespace = console, js_name = "log")]
    pub fn js_log(s: &str);
}

/// Logs to js console when compiled with debug
#[cfg(target_arch = "wasm32")]
#[cfg(debug_assertions)]
pub fn debug(s: &str) {
    js_log(&["[DEBUG]", s].join(" "));
}

#[cfg(not(debug_assertions))]
pub fn debug(_: &str) {
    // ignore
}

/// Logs to js console
#[cfg(target_arch = "wasm32")]
pub fn log(s: &str) {
    js_log(&["[INFO]", s].join(" "));
}

#[macro_export]
macro_rules! log {
    ($($arg:tt)*) => ($crate::logging::log(&format!($($arg)*)));
}

// Not WASM

/// Logs when compiled with debug
#[cfg(debug_assertions)]
#[cfg(not(target_arch = "wasm32"))]
pub fn debug(s: &str) {
    io::stdout().write_all(["[DEBUG]", s, "\n"].join(" ").as_bytes()).unwrap();
}

#[macro_export]
macro_rules! debug {
    ($($arg:tt)*) => ($crate::logging::debug(&format!($($arg)*)));
}

/// Logs info to std out
#[cfg(not(target_arch = "wasm32"))]
pub fn log(s: &str) {
    io::stdout().write_all(["[INFO]", s, "\n"].join(" ").as_bytes()).unwrap();
}
22.481481
77
0.594728