hexsha
stringlengths
40
40
size
int64
4
1.05M
content
stringlengths
4
1.05M
avg_line_length
float64
1.33
100
max_line_length
int64
1
1k
alphanum_fraction
float64
0.25
1
e85275d356c204a9a56ec800198abce68dadc84e
14,307
// Generated from definition io.k8s.api.authorization.v1.SelfSubjectAccessReview /// SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means "in all namespaces". Self is a special case, because users should always be able to check whether they can perform an action #[derive(Clone, Debug, Default, PartialEq)] pub struct SelfSubjectAccessReview { /// Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata pub metadata: crate::apimachinery::pkg::apis::meta::v1::ObjectMeta, /// Spec holds information about the request being evaluated. user and groups must be empty pub spec: crate::api::authorization::v1::SelfSubjectAccessReviewSpec, /// Status is filled in by the server and indicates whether the request is allowed or not pub status: Option<crate::api::authorization::v1::SubjectAccessReviewStatus>, } // Begin authorization.k8s.io/v1/SelfSubjectAccessReview // Generated from operation createAuthorizationV1SelfSubjectAccessReview impl SelfSubjectAccessReview { /// create a SelfSubjectAccessReview /// /// Use the returned [`crate::ResponseBody`]`<`[`crate::CreateResponse`]`<Self>>` constructor, or [`crate::CreateResponse`]`<Self>` directly, to parse the HTTP response. /// /// # Arguments /// /// * `body` /// /// * `optional` /// /// Optional parameters. Use `Default::default()` to not pass any. 
#[cfg(feature = "api")] pub fn create_self_subject_access_review( body: &crate::api::authorization::v1::SelfSubjectAccessReview, optional: crate::CreateOptional<'_>, ) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::CreateResponse<Self>>), crate::RequestError> { let __url = "/apis/authorization.k8s.io/v1/selfsubjectaccessreviews?".to_owned(); let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url); optional.__serialize(&mut __query_pairs); let __url = __query_pairs.finish(); let __request = crate::http::Request::post(__url); let __body = crate::serde_json::to_vec(body).map_err(crate::RequestError::Json)?; let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static("application/json")); match __request.body(__body) { Ok(request) => Ok((request, crate::ResponseBody::new)), Err(err) => Err(crate::RequestError::Http(err)), } } } // End authorization.k8s.io/v1/SelfSubjectAccessReview impl crate::Resource for SelfSubjectAccessReview { const API_VERSION: &'static str = "authorization.k8s.io/v1"; const GROUP: &'static str = "authorization.k8s.io"; const KIND: &'static str = "SelfSubjectAccessReview"; const VERSION: &'static str = "v1"; const URL_PATH_SEGMENT: &'static str = "selfsubjectaccessreviews"; type Scope = crate::ClusterResourceScope; } impl crate::Metadata for SelfSubjectAccessReview { type Ty = crate::apimachinery::pkg::apis::meta::v1::ObjectMeta; fn metadata(&self) -> &<Self as crate::Metadata>::Ty { &self.metadata } fn metadata_mut(&mut self) -> &mut<Self as crate::Metadata>::Ty { &mut self.metadata } } impl<'de> crate::serde::Deserialize<'de> for SelfSubjectAccessReview { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> { #[allow(non_camel_case_types)] enum Field { Key_api_version, Key_kind, Key_metadata, Key_spec, Key_status, Other, } impl<'de> crate::serde::Deserialize<'de> for 
Field { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> { struct Visitor; impl<'de> crate::serde::de::Visitor<'de> for Visitor { type Value = Field; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str("field identifier") } fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: crate::serde::de::Error { Ok(match v { "apiVersion" => Field::Key_api_version, "kind" => Field::Key_kind, "metadata" => Field::Key_metadata, "spec" => Field::Key_spec, "status" => Field::Key_status, _ => Field::Other, }) } } deserializer.deserialize_identifier(Visitor) } } struct Visitor; impl<'de> crate::serde::de::Visitor<'de> for Visitor { type Value = SelfSubjectAccessReview; fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.write_str(<Self::Value as crate::Resource>::KIND) } fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: crate::serde::de::MapAccess<'de> { let mut value_metadata: Option<crate::apimachinery::pkg::apis::meta::v1::ObjectMeta> = None; let mut value_spec: Option<crate::api::authorization::v1::SelfSubjectAccessReviewSpec> = None; let mut value_status: Option<crate::api::authorization::v1::SubjectAccessReviewStatus> = None; while let Some(key) = crate::serde::de::MapAccess::next_key::<Field>(&mut map)? 
{ match key { Field::Key_api_version => { let value_api_version: String = crate::serde::de::MapAccess::next_value(&mut map)?; if value_api_version != <Self::Value as crate::Resource>::API_VERSION { return Err(crate::serde::de::Error::invalid_value(crate::serde::de::Unexpected::Str(&value_api_version), &<Self::Value as crate::Resource>::API_VERSION)); } }, Field::Key_kind => { let value_kind: String = crate::serde::de::MapAccess::next_value(&mut map)?; if value_kind != <Self::Value as crate::Resource>::KIND { return Err(crate::serde::de::Error::invalid_value(crate::serde::de::Unexpected::Str(&value_kind), &<Self::Value as crate::Resource>::KIND)); } }, Field::Key_metadata => value_metadata = crate::serde::de::MapAccess::next_value(&mut map)?, Field::Key_spec => value_spec = crate::serde::de::MapAccess::next_value(&mut map)?, Field::Key_status => value_status = crate::serde::de::MapAccess::next_value(&mut map)?, Field::Other => { let _: crate::serde::de::IgnoredAny = crate::serde::de::MapAccess::next_value(&mut map)?; }, } } Ok(SelfSubjectAccessReview { metadata: value_metadata.unwrap_or_default(), spec: value_spec.unwrap_or_default(), status: value_status, }) } } deserializer.deserialize_struct( <Self as crate::Resource>::KIND, &[ "apiVersion", "kind", "metadata", "spec", "status", ], Visitor, ) } } impl crate::serde::Serialize for SelfSubjectAccessReview { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: crate::serde::Serializer { let mut state = serializer.serialize_struct( <Self as crate::Resource>::KIND, 4 + self.status.as_ref().map_or(0, |_| 1), )?; crate::serde::ser::SerializeStruct::serialize_field(&mut state, "apiVersion", <Self as crate::Resource>::API_VERSION)?; crate::serde::ser::SerializeStruct::serialize_field(&mut state, "kind", <Self as crate::Resource>::KIND)?; crate::serde::ser::SerializeStruct::serialize_field(&mut state, "metadata", &self.metadata)?; crate::serde::ser::SerializeStruct::serialize_field(&mut state, "spec", 
&self.spec)?; if let Some(value) = &self.status { crate::serde::ser::SerializeStruct::serialize_field(&mut state, "status", value)?; } crate::serde::ser::SerializeStruct::end(state) } } #[cfg(feature = "schemars")] impl crate::schemars::JsonSchema for SelfSubjectAccessReview { fn schema_name() -> String { "io.k8s.api.authorization.v1.SelfSubjectAccessReview".to_owned() } fn json_schema(__gen: &mut crate::schemars::gen::SchemaGenerator) -> crate::schemars::schema::Schema { crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject { metadata: Some(Box::new(crate::schemars::schema::Metadata { description: Some("SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action".to_owned()), ..Default::default() })), instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::Object))), object: Some(Box::new(crate::schemars::schema::ObjectValidation { properties: IntoIterator::into_iter([ ( "apiVersion".to_owned(), crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject { metadata: Some(Box::new(crate::schemars::schema::Metadata { description: Some("APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources".to_owned()), ..Default::default() })), instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))), ..Default::default() }), ), ( "kind".to_owned(), crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject { metadata: Some(Box::new(crate::schemars::schema::Metadata { description: Some("Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds".to_owned()), ..Default::default() })), instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))), ..Default::default() }), ), ( "metadata".to_owned(), { let mut schema_obj = __gen.subschema_for::<crate::apimachinery::pkg::apis::meta::v1::ObjectMeta>().into_object(); schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata { description: Some("Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata".to_owned()), ..Default::default() })); crate::schemars::schema::Schema::Object(schema_obj) }, ), ( "spec".to_owned(), { let mut schema_obj = __gen.subschema_for::<crate::api::authorization::v1::SelfSubjectAccessReviewSpec>().into_object(); schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata { description: Some("Spec holds information about the request being evaluated. 
user and groups must be empty".to_owned()), ..Default::default() })); crate::schemars::schema::Schema::Object(schema_obj) }, ), ( "status".to_owned(), { let mut schema_obj = __gen.subschema_for::<crate::api::authorization::v1::SubjectAccessReviewStatus>().into_object(); schema_obj.metadata = Some(Box::new(crate::schemars::schema::Metadata { description: Some("Status is filled in by the server and indicates whether the request is allowed or not".to_owned()), ..Default::default() })); crate::schemars::schema::Schema::Object(schema_obj) }, ), ]).collect(), required: IntoIterator::into_iter([ "metadata", "spec", ]).map(std::borrow::ToOwned::to_owned).collect(), ..Default::default() })), ..Default::default() }) } }
52.988889
355
0.557
f7ade7cb5bd90bbc348c87bc306da684e0de2251
2,498
use crate::pack; use std::{io, path::PathBuf, sync::Arc}; use tempfile::NamedTempFile; /// Configuration for [write_stream_to_directory][pack::Bundle::write_stream_to_directory()] or /// [write_to_directory_eagerly][pack::Bundle::write_to_directory_eagerly()] #[derive(PartialEq, Eq, Debug, Hash, Ord, PartialOrd, Clone)] #[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))] pub struct Options { /// The amount of threads to use at most when resolving the pack. If `None`, all logical cores are used. pub thread_limit: Option<usize>, /// Determine how much processing to spend on protecting against corruption or recovering from errors. pub iteration_mode: pack::data::iter::Mode, /// The version of pack index to write, should be [`pack::index::Version::default()`] pub index_kind: pack::index::Version, } /// Returned by [write_stream_to_directory][pack::Bundle::write_stream_to_directory()] or /// [write_to_directory_eagerly][pack::Bundle::write_to_directory_eagerly()] #[derive(PartialEq, Eq, Debug, Hash, Ord, PartialOrd, Clone)] #[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))] pub struct Outcome { /// The successful result of the index write operation pub index: pack::index::write::Outcome, /// The version of the pack pub pack_kind: pack::data::Version, /// The path to the pack index file pub index_path: Option<PathBuf>, /// The path to the pack data file pub data_path: Option<PathBuf>, } impl Outcome { /// Instantiate a bundle from the newly written index and data file that are represented by this `Outcome` pub fn to_bundle(&self) -> Option<Result<pack::Bundle, pack::bundle::Error>> { self.index_path.as_ref().map(pack::Bundle::at) } } pub(crate) struct PassThrough<R> { pub reader: R, pub writer: Option<Arc<parking_lot::Mutex<NamedTempFile>>>, } impl<R> io::Read for PassThrough<R> where R: io::Read, { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { let bytes_read = self.reader.read(buf)?; if let Some(writer) = 
self.writer.as_mut() { use io::Write; writer.lock().write_all(&buf[..bytes_read])?; } Ok(bytes_read) } } impl<R> io::BufRead for PassThrough<R> where R: io::BufRead, { fn fill_buf(&mut self) -> io::Result<&[u8]> { self.reader.fill_buf() } fn consume(&mut self, amt: usize) { self.reader.consume(amt) } }
35.183099
110
0.674139
1ed2e439a658877f23888424ff4621cd00303a1b
6,561
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use invalid_mutations::bounds::{ ApplyCodeUnitBoundsContext, ApplyOutOfBoundsContext, CodeUnitBoundsMutation, OutOfBoundsMutation, }; use libra_types::{account_address::AccountAddress, byte_array::ByteArray, vm_error::StatusCode}; use move_core_types::identifier::Identifier; use proptest::{collection::vec, prelude::*}; use vm::{check_bounds::BoundsChecker, file_format::*, proptest_types::CompiledModuleStrategyGen}; #[test] fn empty_module_no_errors() { basic_test_module().freeze().unwrap(); } #[test] fn invalid_type_param_in_fn_return_types() { use SignatureToken::*; let mut m = basic_test_module(); m.function_signatures[0].return_types = vec![TypeParameter(0)]; m.freeze().unwrap_err(); } #[test] fn invalid_type_param_in_fn_arg_types() { use SignatureToken::*; let mut m = basic_test_module(); m.function_signatures[0].arg_types = vec![TypeParameter(0)]; m.freeze().unwrap_err(); } #[test] fn invalid_struct_in_fn_return_types() { use SignatureToken::*; let mut m = basic_test_module(); m.function_signatures[0].return_types = vec![Struct(StructHandleIndex::new(1), vec![])]; m.freeze().unwrap_err(); } #[test] fn invalid_type_param_in_field() { use SignatureToken::*; let mut m = basic_test_module(); m.type_signatures[0].0 = TypeParameter(0); m.freeze().unwrap_err(); } #[test] fn invalid_struct_in_field() { use SignatureToken::*; let mut m = basic_test_module(); m.type_signatures[0].0 = Struct(StructHandleIndex::new(3), vec![]); m.freeze().unwrap_err(); } #[test] fn invalid_struct_with_actuals_in_field() { use SignatureToken::*; let mut m = basic_test_module(); m.type_signatures[0].0 = Struct(StructHandleIndex::new(0), vec![TypeParameter(0)]); m.freeze().unwrap_err(); } #[test] fn invalid_locals_id_in_call() { use Bytecode::*; let mut m = basic_test_module(); m.function_defs[0].code.code = vec![Call( FunctionHandleIndex::new(0), LocalsSignatureIndex::new(1), )]; m.freeze().unwrap_err(); } 
#[test] fn invalid_type_param_in_call() { use Bytecode::*; use SignatureToken::*; let mut m = basic_test_module(); m.locals_signatures .push(LocalsSignature(vec![TypeParameter(0)])); m.function_defs[0].code.code = vec![Call( FunctionHandleIndex::new(0), LocalsSignatureIndex::new(1), )]; m.freeze().unwrap_err(); } #[test] fn invalid_struct_as_type_actual_in_exists() { use Bytecode::*; use SignatureToken::*; let mut m = basic_test_module(); m.locals_signatures.push(LocalsSignature(vec![Struct( StructHandleIndex::new(3), vec![], )])); m.function_defs[0].code.code = vec![Call( FunctionHandleIndex::new(0), LocalsSignatureIndex::new(1), )]; m.freeze().unwrap_err(); } proptest! { #[test] fn valid_bounds(_module in CompiledModule::valid_strategy(20)) { // valid_strategy will panic if there are any bounds check issues. } } /// Ensure that valid modules that don't have any members (e.g. function args, struct fields) pass /// bounds checks. /// /// There are some potentially tricky edge cases around ranges that are captured here. #[test] fn valid_bounds_no_members() { let mut gen = CompiledModuleStrategyGen::new(20); gen.member_count(0); proptest!(|(_module in gen.generate())| { // gen.generate() will panic if there are any bounds check issues. }); } proptest! 
{ #[test] fn invalid_out_of_bounds( module in CompiledModule::valid_strategy(20), oob_mutations in vec(OutOfBoundsMutation::strategy(), 0..40), ) { let (module, mut expected_violations) = { let oob_context = ApplyOutOfBoundsContext::new(module, oob_mutations); oob_context.apply() }; expected_violations.sort(); let bounds_checker = BoundsChecker::new(&module); let mut actual_violations = bounds_checker.verify(); actual_violations.sort(); for violation in actual_violations.iter_mut() { violation.set_message("".to_string()) } for violation in expected_violations.iter_mut() { violation.set_message("".to_string()) } prop_assert_eq!(expected_violations, actual_violations); } #[test] fn code_unit_out_of_bounds( module in CompiledModule::valid_strategy(20), mutations in vec(CodeUnitBoundsMutation::strategy(), 0..40), ) { let mut module = module.into_inner(); let mut expected_violations = { let context = ApplyCodeUnitBoundsContext::new(&mut module, mutations); context.apply() }; expected_violations.sort(); let bounds_checker = BoundsChecker::new(&module); let mut actual_violations = bounds_checker.verify(); actual_violations.sort(); for violation in actual_violations.iter_mut() { violation.set_message("".to_string()) } for violation in expected_violations.iter_mut() { violation.set_message("".to_string()) } prop_assert_eq!(expected_violations, actual_violations); } #[test] fn no_module_handles( identifiers in vec(any::<Identifier>(), 0..20), address_pool in vec(any::<AccountAddress>(), 0..20), byte_array_pool in vec(any::<ByteArray>(), 0..20), ) { // If there are no module handles, the only other things that can be stored are intrinsic // data. 
let mut module = CompiledModuleMut::default(); module.identifiers = identifiers; module.address_pool = address_pool; module.byte_array_pool = byte_array_pool; let bounds_checker = BoundsChecker::new(&module); let actual_violations: Vec<StatusCode> = bounds_checker.verify().into_iter().map(|status| status.major_status).collect(); prop_assert_eq!( actual_violations, vec![StatusCode::NO_MODULE_HANDLES] ); } } proptest! { // Generating arbitrary compiled modules is really slow, possibly because of // https://github.com/AltSysrq/proptest/issues/143. #![proptest_config(ProptestConfig::with_cases(16))] /// Make sure that garbage inputs don't crash the bounds checker. #[test] fn garbage_inputs(module in any_with::<CompiledModuleMut>(16)) { let _ = module.freeze(); } }
30.235023
129
0.658741
56db546a3f3c7712e15e528aa17344fc50707492
80
trait visitor { u8 myInt; fn myfunc(&self) -> u8 { return self.myInt; } }
10
25
0.6
2919de28d016216fdaecd30e24da78f9b5209617
1,001
trait Max<T> { fn max(&self) -> T; } struct ThreeTuple<T> { first: T, second: T, third: T, } impl<T: PartialOrd + Copy> Max<T> for ThreeTuple<T> { fn max(&self) -> T { if self.first >= self.second && self.first >= self.third { self.first } else if self.second >= self.first && self.second >= self.third { self.second } else { self.third } } } struct TwoTuple<T> { first: T, second: T, } impl<T: PartialOrd + Copy> Max<T> for TwoTuple<T> { fn max(&self) -> T { if self.first >= self.second { self.first } else { self.second } } } fn main() { let two_tuple: TwoTuple<u32> = TwoTuple { first: 4u32, second: 2u32, }; let three_tuple: ThreeTuple<u64> = ThreeTuple { first: 6u64, second: 5u64, third: 10u64, }; println!("{}", two_tuple.max()); println!("{}", three_tuple.max()); }
19.25
74
0.495504
21b72f812f095116d2f98fbac7691623711b8a2c
953
extern crate chttp; extern crate env_logger; extern crate rouille; use std::env; use std::time::Duration; use std::thread; /// Issue #3 #[test] fn request_errors_if_read_timeout_is_reached() { setup(); // Spawn a slow server. thread::spawn(|| { rouille::start_server("localhost:18080", |_| { thread::sleep(Duration::from_secs(3)); rouille::Response::text("hello world") }); }); // Create an impatient client. let mut options = chttp::Options::default(); options.timeout = Some(Duration::from_secs(2)); let client = chttp::Client::with_options(options); // Send a request. let result = client.post("http://localhost:18080", "hello world"); // Client should time-out. assert!(match result { Err(chttp::Error::Timeout) => true, _ => false, }); } fn setup() { env::set_var("RUST_LOG", "chttp=trace,curl=trace"); env_logger::init(); }
23.243902
70
0.612802
fb5b0e9e1e485ecd1c0da2f13cfc841f0d94f437
2,412
use super::auto_release::*; use cubeb_backend::ffi; use std::os::raw::{c_long, c_uint, c_void}; use std::ptr; #[derive(Debug)] pub struct Resampler(AutoRelease<ffi::cubeb_resampler>); impl Resampler { #[allow(clippy::too_many_arguments)] pub fn new( stream: *mut ffi::cubeb_stream, mut input_params: Option<ffi::cubeb_stream_params>, mut output_params: Option<ffi::cubeb_stream_params>, target_rate: c_uint, data_callback: ffi::cubeb_data_callback, user_ptr: *mut c_void, quality: ffi::cubeb_resampler_quality, reclock: ffi::cubeb_resampler_reclock, ) -> Self { let raw_resampler = unsafe { let in_params = if input_params.is_some() { input_params.as_mut().unwrap() as *mut ffi::cubeb_stream_params } else { ptr::null_mut() }; let out_params = if output_params.is_some() { output_params.as_mut().unwrap() as *mut ffi::cubeb_stream_params } else { ptr::null_mut() }; ffi::cubeb_resampler_create( stream, in_params, out_params, target_rate, data_callback, user_ptr, quality, reclock, ) }; assert!(!raw_resampler.is_null(), "Failed to create resampler"); let resampler = AutoRelease::new(raw_resampler, ffi::cubeb_resampler_destroy); Self(resampler) } pub fn fill( &mut self, input_buffer: *mut c_void, input_frame_count: *mut c_long, output_buffer: *mut c_void, output_frames_needed: c_long, ) -> c_long { unsafe { ffi::cubeb_resampler_fill( self.0.as_mut(), input_buffer, input_frame_count, output_buffer, output_frames_needed, ) } } pub fn destroy(&mut self) { if !self.0.as_ptr().is_null() { self.0.reset(ptr::null_mut()); } } } impl Drop for Resampler { fn drop(&mut self) { self.destroy(); } } impl Default for Resampler { fn default() -> Self { Self(AutoRelease::new( ptr::null_mut(), ffi::cubeb_resampler_destroy, )) } }
27.724138
86
0.536899
f46bc5dab5d0acdccf3699d5e0a4cb736288d1d5
3,068
extern crate termion; extern crate tui; use std::io; use termion::event; use termion::input::TermRead; use tui::Terminal; use tui::backend::MouseBackend; use tui::widgets::{Block, Borders, Row, Table, Widget}; use tui::layout::{Direction, Group, Rect, Size}; use tui::style::{Color, Modifier, Style}; struct App<'a> { size: Rect, items: Vec<Vec<&'a str>>, selected: usize, } impl<'a> App<'a> { fn new() -> App<'a> { App { size: Rect::default(), items: vec![ vec!["Row12", "Row12", "Row13"], vec!["Row21", "Row22", "Row23"], vec!["Row31", "Row32", "Row33"], vec!["Row41", "Row42", "Row43"], vec!["Row51", "Row52", "Row53"], vec!["Row61", "Row62", "Row63"], ], selected: 0, } } } fn main() { // Terminal initialization let backend = MouseBackend::new().unwrap(); let mut terminal = Terminal::new(backend).unwrap(); // App let mut app = App::new(); // First draw call terminal.clear().unwrap(); terminal.hide_cursor().unwrap(); app.size = terminal.size().unwrap(); draw(&mut terminal, &app); // Input let stdin = io::stdin(); for c in stdin.keys() { let size = terminal.size().unwrap(); if size != app.size { terminal.resize(size).unwrap(); app.size = size; } let evt = c.unwrap(); match evt { event::Key::Char('q') => { break; } event::Key::Down => { app.selected += 1; if app.selected > app.items.len() - 1 { app.selected = 0; } } event::Key::Up => if app.selected > 0 { app.selected -= 1; } else { app.selected = app.items.len() - 1; }, _ => {} }; draw(&mut terminal, &app); } terminal.show_cursor().unwrap(); terminal.clear().unwrap(); } fn draw(t: &mut Terminal<MouseBackend>, app: &App) { Group::default() .direction(Direction::Horizontal) .sizes(&[Size::Percent(100)]) .margin(5) .render(t, &app.size, |t, chunks| { let selected_style = Style::default().fg(Color::Yellow).modifier(Modifier::Bold); let normal_style = Style::default().fg(Color::White); Table::new( ["Header1", "Header2", "Header3"].into_iter(), app.items.iter().enumerate().map(|(i, item)| { if i == app.selected { 
Row::StyledData(item.into_iter(), &selected_style) } else { Row::StyledData(item.into_iter(), &normal_style) } }), ).block(Block::default().borders(Borders::ALL).title("Table")) .widths(&[10, 10, 10]) .render(t, &chunks[0]); }); t.draw().unwrap(); }
27.890909
93
0.470339
f7952349efe7d0479999277f502f91f6aaa07e39
2,541
pub mod api_variable_config; use std::collections::HashMap; use crate::config::api_config::api_variable_config::ApiVariableConfig; use crate::config::fetch_value::FetchValue; #[derive(Debug, Clone)] pub struct ApiConfig { pub api_name: String, pub method: String, pub content_type: String, pub enable: bool, pub variable: HashMap<String, ApiVariableConfig>, } impl FetchValue for ApiConfig {} impl ApiConfig { pub fn load(config: &yaml_rust::yaml::Yaml) -> Result<Vec<Self>, i32> { let mut api_settings: Vec<Self> = Vec::new(); let api_vec = match config["api"].as_vec() { Some(result) => result, None => { println!("Not found api array"); return Err(1); } }; for config_element in api_vec { let api = ApiConfig::new( &ApiConfig::fetch_value(&config_element, &vec!["api_name"])?, &ApiConfig::fetch_value(&config_element, &vec!["method"])?, &ApiConfig::fetch_value(&config_element, &vec!["content_type"])?, match &ApiConfig::fetch_value_as_bool(&config_element, &vec!["enable"]) { Ok(result) => result, Err(_) => &true }, config_element, ); let api_config = match api { Ok(result) => result, Err(_) => { println!("Not found api element"); return Err(1); } }; api_settings.push(api_config); } Ok(api_settings) } pub fn new( api_name: &str, method: &str, content_type: &str, enable: &bool, config: &yaml_rust::yaml::Yaml, ) -> Result<Self, i32> { let api_variable_configs = match ApiVariableConfig::load(config) { Ok(result) => result, Err(_) => { println!("Not found api element"); return Err(1); } }; let mut api_variable_config_hash = HashMap::new(); for api_variable in api_variable_configs { api_variable_config_hash.insert(api_variable.name.to_string(), api_variable); } Ok(ApiConfig { api_name: api_name.to_string(), method: method.to_string(), content_type: content_type.to_string(), enable: enable.clone(), variable: api_variable_config_hash, }) } }
30.987805
89
0.532468
261285782557769a6cedef15022d57ac1659ac07
3,054
use std::path::Path; use std::fs; use std::io::Read; use crate::{Magna, SystemCall}; use crate::Instruction; use magnum_common::*; #[test] /// For lack of a better test name fn i_know_what_le_means() { let num = 0x100u16; let bytes = num.to_le_bytes(); assert_eq!(bytes, [0, 1]); } #[test] fn one_plus_one() { let path = "one-plus-one.magna"; // Required if test fails, then the file won't be deleted at the end. if Path::new(path).exists() { fs::remove_file(path).unwrap(); } let mut magna = Magna::new(); let insts = [ Instruction::LoadIB(1), Instruction::LoadIB(1), Instruction::Sys(SystemCall::PutB), Instruction::LoadIB('\n' as u8), Instruction::Sys(SystemCall::PutC), Instruction::Hlt ]; for i in insts { magna.add_inst(i); } magna.write_file(path).unwrap(); let mut file = fs::File::open(path).unwrap(); let mut file_sig = [0u8; 3]; file.read(&mut file_sig).unwrap(); assert_eq!(file_sig, "MVM".as_bytes()); let mut version = [0u8]; file.read(&mut version).unwrap(); assert_eq!(version, [0u8]); let mut text_loc = [0u8; 8]; file.read(&mut text_loc).unwrap(); assert_eq!(usize::from_le_bytes(text_loc), 0x35); let mut text_size = [0u8; 8]; file.read(&mut text_size).unwrap(); assert_eq!(usize::from_le_bytes(text_size), insts.len() * 4); // Note that there is not any global data in this example. 
let mut readonly_loc = [0u8; 8]; file.read(&mut readonly_loc).unwrap(); assert_eq!(usize::from_le_bytes(readonly_loc), 0x35 + insts.len()); let mut readonly_size = [0u8; 8]; file.read(&mut readonly_size).unwrap(); assert_eq!(usize::from_le_bytes(readonly_size), 0); let mut init_writable_loc = [0u8; 8]; file.read(&mut init_writable_loc).unwrap(); assert_eq!(usize::from_le_bytes(init_writable_loc), 0x35 + insts.len()); let mut init_writable_size = [0u8; 8]; file.read(&mut init_writable_size).unwrap(); assert_eq!(usize::from_le_bytes(init_writable_size), 0); let mut uninit_writable_size = [0u8; 8]; file.read(&mut uninit_writable_size).unwrap(); assert_eq!(usize::from_le_bytes(uninit_writable_size), 0); // Our instructions let mut loadi_b_1 = [0u8; 4]; file.read(&mut loadi_b_1).unwrap(); assert_eq!(u32::from_le_bytes(loadi_b_1), ((OPCODE_LOADI_B as u32) << (8 * 3)) + 1); let mut loadi_b_2 = [0u8; 4]; file.read(&mut loadi_b_2).unwrap(); assert_eq!(u32::from_le_bytes(loadi_b_2), ((OPCODE_LOADI_B as u32) << (8 * 3)) + 1); let mut sys1 = [0u8; 4]; file.read(&mut sys1).unwrap(); assert_eq!(u32::from_le_bytes(sys1), ((OPCODE_SYS as u32) << (8 * 3)) + OPCODE_PUT_B as u32); let mut loadi_b_3 = [0u8; 4]; file.read(&mut loadi_b_3).unwrap(); assert_eq!(u32::from_le_bytes(loadi_b_3), ((OPCODE_LOADI_B as u32) << (8 * 3)) + '\n' as u32); let mut sys2 = [0u8; 4]; file.read(&mut sys2).unwrap(); assert_eq!(u32::from_le_bytes(sys2), ((OPCODE_SYS as u32) << (8 * 3)) + OPCODE_PUT_C as u32); let mut hlt = [0u8; 4]; file.read(&mut hlt).unwrap(); assert_eq!(u32::from_le_bytes(hlt), (OPCODE_HLT as u32) << (8 * 3)); fs::remove_file(path).unwrap(); }
28.277778
95
0.676162
d55d1873841d44b205d034be2ccdc8045ff37b3c
3,810
// Copyright 2018 Amagicom AB. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Low level FFI bindings to [`libnftnl`], a userspace library providing a low-level netlink //! programming interface (API) to the in-kernel nf_tables subsystem. //! //! See [`nftnl`] for a higher level safe abstraction. //! //! # Linking to libmnl and libnftnl //! //! By default this crate uses pkg-config to find and link to its C dependencies, [`libmnl`] and //! [`libnftnl`]. To manually configure where to look for these libraries, set the environment //! variables `LIBMNL_LIB_DIR` and `LIBNFTNL_LIB_DIR` to point to the directories where `libmnl.so` //! (or `libmnl.a`) and `libnftnl.so` (or `libnftnl.a`) reside. //! //! # Selecting version of `libnftnl` //! //! This crate has bindings for most versions of [`libnftnl`]. All bindings are generated by //! [`bindgen`] via the `generate_bindings.sh` script in this repository. //! //! Only one version of `libnftnl` can be exposed via this crate. By default the crate exports the //! bindings for the oldest supported version (`libnftnl-1.0.7`). To get newer versions activate the //! corresponding features. See `Cargo.toml` for available features/versions. //! //! So for example, to get bindings to `libnftnl-1.0.9` depend on this crate like this: //! ```toml //! [dependencies] //! nftnl-sys = { version = "0.1", features = ["nftnl-1-0-9"] } //! ``` //! //! [`libnftnl`]: https://netfilter.org/projects/libnftnl/ //! [`libmnl`]: https://netfilter.org/projects/libmnl/ //! [`nftnl`]: https://crates.io/crates/nftnl //! [`bindgen`]: https://crates.io/crates/bindgen #![no_std] #![cfg(target_os = "linux")] #![allow(non_camel_case_types)] pub use libc; cfg_if::cfg_if! 
{ if #[cfg(feature = "nftnl-1-1-2")] { mod nftnl_1_1_2; pub use self::nftnl_1_1_2::*; } else if #[cfg(feature = "nftnl-1-1-1")] { mod nftnl_1_1_1; pub use self::nftnl_1_1_1::*; } else if #[cfg(feature = "nftnl-1-1-0")] { mod nftnl_1_1_0; pub use self::nftnl_1_1_0::*; } else if #[cfg(feature = "nftnl-1-0-9")] { mod nftnl_1_0_9; pub use self::nftnl_1_0_9::*; } else if #[cfg(feature = "nftnl-1-0-8")] { mod nftnl_1_0_8; pub use self::nftnl_1_0_8::*; } else { mod nftnl_1_0_7; pub use self::nftnl_1_0_7::*; } } // TODO export "<netfilter/nf_tables.h>" pub const NFT_OBJECT_UNSPEC: libc::c_int = 0; pub const NFT_OBJECT_COUNTER: libc::c_int = 1; pub const NFT_OBJECT_QUOTA: libc::c_int = 2; pub const NFT_OBJECT_CT_HELPER: libc::c_int = 3; pub const NFT_OBJECT_LIMIT: libc::c_int = 4; pub const NFT_OBJECT_CONNLIMIT: libc::c_int = 5; pub const NFT_OBJECT_TUNNEL: libc::c_int = 6; pub const NFT_OBJECT_CT_TIMEOUT: libc::c_int = 7; pub const NFT_OBJECT_SECMARK: libc::c_int = 8; pub const NFT_OBJECT_CT_EXPECT: libc::c_int = 9; pub const NFT_OBJECT_SYNPROXY: libc::c_int = 10; pub const __NFT_OBJECT_MAX: libc::c_int = 11; pub const NFT_OBJECT_MAX: libc::c_int = __NFT_OBJECT_MAX - 1; pub const NFT_LOGLEVEL_EMERG: libc::c_int = 0; pub const NFT_LOGLEVEL_ALERT: libc::c_int = 1; pub const NFT_LOGLEVEL_CRIT: libc::c_int = 2; pub const NFT_LOGLEVEL_ERR: libc::c_int = 3; pub const NFT_LOGLEVEL_WARNING: libc::c_int = 4; pub const NFT_LOGLEVEL_NOTICE: libc::c_int = 5; pub const NFT_LOGLEVEL_INFO: libc::c_int = 6; pub const NFT_LOGLEVEL_DEBUG: libc::c_int = 7; pub const NFT_LOGLEVEL_AUDIT: libc::c_int = 8; pub const __NFT_LOGLEVEL_MAX: libc::c_int = 9; pub const NFT_LOGLEVEL_MAX: libc::c_int = __NFT_LOGLEVEL_MAX -1;
39.6875
100
0.691076
ed2f7e25dae6ce13b58da32e7a7f0fa23ef24f6d
506
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. pub fn pat_id_4() { let _x = 4i; }
36.142857
68
0.727273
e42429b59db5acbc7bb52eb1476d089d6a4355a9
4,916
#[cfg(feature = "postgres")] extern crate url; use support::{database, project}; #[test] fn reset_drops_the_database() { let p = project("reset_drops_the_database") .folder("migrations") .build(); let db = database(&p.database_url()).create(); db.execute("CREATE TABLE posts ( id INTEGER )"); assert!(db.table_exists("posts")); let result = p.command("database").arg("reset").run(); assert!(result.is_success(), "Result was unsuccessful {:?}", result); assert!(!db.table_exists("posts")); } #[test] fn reset_runs_database_setup() { let p = project("reset_runs_database_setup") .folder("migrations") .build(); let db = database(&p.database_url()).create(); db.execute("CREATE TABLE posts ( id INTEGER )"); db.execute("CREATE TABLE users ( id INTEGER )"); p.create_migration( "12345_create_users_table", "CREATE TABLE users ( id INTEGER )", "DROP TABLE users", ); assert!(db.table_exists("posts")); assert!(db.table_exists("users")); let result = p.command("database").arg("reset").run(); assert!(result.is_success(), "Result was unsuccessful {:?}", result); assert!(!db.table_exists("posts")); assert!(db.table_exists("users")); assert!(db.table_exists("__diesel_schema_migrations")); } #[test] #[cfg(feature = "postgres")] fn reset_handles_postgres_urls_with_username_and_password() { let p = project("handles_postgres_urls") .folder("migrations") .build(); let db = database(&p.database_url()).create(); db.execute("DROP ROLE IF EXISTS foo"); db.execute("CREATE ROLE foo WITH LOGIN SUPERUSER PASSWORD 'password'"); let database_url = { let mut new_url = url::Url::parse(&p.database_url()).expect("invalid url"); new_url.set_username("foo").expect("could not set username"); new_url .set_password(Some("password")) .expect("could not set password"); new_url.to_string() }; let result = p.command("database") .arg("reset") .env("DATABASE_URL", &database_url) .run(); assert!( result.is_success(), "Result was unsuccessful {:?}", result.stdout() ); assert!( result.stdout().contains("Dropping 
database:"), "Unexpected stdout {}", result.stdout() ); assert!( result.stdout().contains("Creating database:"), "Unexpected stdout {}", result.stdout() ); } #[test] fn reset_works_with_migration_dir_by_arg() { let p = project("reset_works_with_migration_dir_by_arg") .folder("foo") .build(); let db = database(&p.database_url()).create(); db.execute("CREATE TABLE posts ( id INTEGER )"); db.execute("CREATE TABLE users ( id INTEGER )"); p.create_migration_in_directory( "foo", "12345_create_users_table", "CREATE TABLE users ( id INTEGER )", "DROP TABLE users", ); assert!(db.table_exists("posts")); assert!(db.table_exists("users")); let result = p.command("database") .arg("reset") .arg("--migration-dir=foo") .run(); assert!(result.is_success(), "Result was unsuccessful {:?}", result); assert!(!db.table_exists("posts")); assert!(db.table_exists("users")); assert!(db.table_exists("__diesel_schema_migrations")); } #[test] fn reset_works_with_migration_dir_by_env() { let p = project("reset_works_with_migration_dir_by_env") .folder("bar") .build(); let db = database(&p.database_url()).create(); db.execute("CREATE TABLE posts ( id INTEGER )"); db.execute("CREATE TABLE users ( id INTEGER )"); p.create_migration_in_directory( "bar", "12345_create_users_table", "CREATE TABLE users ( id INTEGER )", "DROP TABLE users", ); assert!(db.table_exists("posts")); assert!(db.table_exists("users")); let result = p.command("database") .arg("reset") .env("MIGRATION_DIRECTORY", "bar") .run(); assert!(result.is_success(), "Result was unsuccessful {:?}", result); assert!(!db.table_exists("posts")); assert!(db.table_exists("users")); assert!(db.table_exists("__diesel_schema_migrations")); } #[test] fn reset_sanitize_database_name() { let p = project("name-with-dashes").folder("migrations").build(); let _db = database(&p.database_url()).create(); let result = p.command("database").arg("reset").run(); assert!( result.is_success(), "Result was unsuccessful {:?}", result.stdout() ); assert!( 
result.stdout().contains("Dropping database:"), "Unexpected stdout {}", result.stdout() ); assert!( result.stdout().contains("Creating database:"), "Unexpected stdout {}", result.stdout() ); }
28.416185
83
0.607404
091a8635aa084722bc0b64b907dc1bb18e19e5de
41,058
use crate::ast_types::{GenericsArgs, ImplHeader, Pat, TraitBounds, Ty, TypeParameter}; use crate::codecleaner; use crate::codeiter::StmtIndicesIter; use crate::matchers::ImportInfo; use crate::project_model::ProjectModelProvider; use rls_span; use std::cell::RefCell; use std::cmp::Ordering; use std::collections::HashMap; use std::fs::File; use std::io; use std::io::Read; use std::iter::{Fuse, Iterator}; use std::ops::{Deref, Range}; use std::rc::Rc; use std::{fmt, vec}; use std::{path, str}; use syntax::source_map; use crate::ast; use crate::fileres; use crate::nameres; use crate::primitive::PrimKind; use crate::scopes; use crate::util; /// Within a [`Match`], specifies what was matched /// /// [`Match`]: struct.Match.html #[derive(Clone, Debug, PartialEq)] pub enum MatchType { Struct(Box<GenericsArgs>), Module, MatchArm, Function, Method(Option<Box<GenericsArgs>>), Crate, Let(BytePos), IfLet(BytePos), WhileLet(BytePos), For(BytePos), StructField, Enum(Box<GenericsArgs>), /// EnumVariant needs to have Enum type to complete methods EnumVariant(Option<Box<Match>>), UseAlias(Box<Match>), AssocType, Type, FnArg(Box<(Pat, Option<Ty>)>), Trait, Const, Static, Macro, Builtin(PrimKind), /// fn f<T: Clone> or fn f(a: impl Clone) with its trait bounds TypeParameter(Box<TraitBounds>), } impl MatchType { pub fn is_function(&self) -> bool { match self { MatchType::Function | MatchType::Method(_) => true, _ => false, } } pub fn is_enum(&self) -> bool { match self { MatchType::Enum(_) => true, _ => false, } } pub fn is_struct(&self) -> bool { match self { MatchType::Struct(_) => true, _ => false, } } } impl fmt::Display for MatchType { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { MatchType::Struct(_) => write!(f, "Struct"), MatchType::Method(_) => write!(f, "Method"), MatchType::IfLet(_) => write!(f, "IfLet"), MatchType::Let(_) => write!(f, "Let"), MatchType::WhileLet(_) => write!(f, "WhileLet"), MatchType::For(_) => write!(f, "For"), MatchType::Enum(_) 
=> write!(f, "Enum"), MatchType::EnumVariant(_) => write!(f, "EnumVariant"), MatchType::TypeParameter(_) => write!(f, "TypeParameter"), MatchType::FnArg(_) => write!(f, "FnArg"), MatchType::Type => write!(f, "Type"), MatchType::UseAlias(_) => write!(f, "UseAlias"), _ => fmt::Debug::fmt(self, f), } } } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum SearchType { ExactMatch, StartsWith, } mod declare_namespace { // (kngwyu) I reserved Crate, Mod or other names for future usage(like for #830) // but, currently they're not used and... I'm not sure they're useful:) #![allow(non_upper_case_globals, unused)] bitflags! { /// Type context pub struct Namespace: u32 { const Crate = 0b0000000000001; const Mod = 0b0000000000010; const Space = 0b0000000000011; const Enum = 0b0000000000100; const Struct = 0b0000000001000; const Union = 0b0000000010000; const Trait = 0b0000000100000; const TypeDef = 0b0000001000000; const HasField = 0b0000001011100; const Type = 0b0000001111100; const PathParen = 0b0000001111111; const Const = 0b0000010000000; const Static = 0b0000100000000; const Func = 0b0001000000000; // for use_extern_macros const Macro = 0b0010000000000; const Impl = 0b0001110000000; const PathChild = 0b0011110000000; const Path = 0b0011111111111; const Primitive = 0b0100000000000; const StdMacro = 0b1000000000000; const Global = 0b1100000000000; } } } pub use self::declare_namespace::Namespace; #[derive(Debug, Clone, Copy)] pub enum CompletionType { Field, Path, } /// 0-based byte offset in a file. 
#[derive( Clone, Copy, Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash, Index, From, Add, Sub, AddAssign, SubAssign, )] pub struct BytePos(pub usize); impl From<u32> for BytePos { fn from(u: u32) -> Self { BytePos(u as usize) } } impl BytePos { pub const ZERO: BytePos = BytePos(0); /// returns self - 1 pub fn decrement(&self) -> Self { BytePos(self.0 - 1) } pub fn checked_sub(&self, sub: impl Into<Self>) -> Option<Self> { self.0.checked_sub(sub.into().0).map(BytePos) } pub fn try_decrement(&self) -> Option<Self> { self.0.checked_sub(1).map(BytePos) } /// returns self + 1 pub fn increment(&self) -> Self { BytePos(self.0 + 1) } } impl fmt::Display for BytePos { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.0) } } /// 0-based byte range in a file. #[derive(Clone, Copy, Default, Eq, PartialEq, Hash)] pub struct ByteRange { /// start of byte position in codes(inclusive) pub start: BytePos, /// end of byte position in codes(exclusive) pub end: BytePos, } impl ByteRange { /// returns new ByteRange from start and end pub fn new<P: Into<BytePos>>(start: P, end: P) -> Self { ByteRange { start: start.into(), end: end.into(), } } /// returns the length of the range #[inline] pub fn len(&self) -> usize { (self.end - self.start).0 } /// returns if the range contains `point` or not #[inline] pub fn contains(&self, point: BytePos) -> bool { self.start <= point && point < self.end } /// returns if the range contains `point` (except its start point) #[inline] pub fn contains_exclusive(&self, point: BytePos) -> bool { self.start < point && point < self.end } /// returns the new range with which its start is `self.start + shift`, /// its end is `self.end + shift` #[inline] pub fn shift<P: Into<BytePos>>(&self, shift: P) -> Self { let shift = shift.into(); ByteRange { start: self.start + shift, end: self.end + shift, } } /// convert the range to `std::ops::Range` #[inline] pub fn to_range(&self) -> Range<usize> { self.start.0..self.end.0 } } impl 
PartialEq<BytePos> for ByteRange { fn eq(&self, other: &BytePos) -> bool { self.contains(*other) } } impl PartialOrd<BytePos> for ByteRange { fn partial_cmp(&self, other: &BytePos) -> Option<Ordering> { if *other < self.start { Some(Ordering::Greater) } else if *other >= self.end { Some(Ordering::Less) } else { Some(Ordering::Equal) } } } impl From<source_map::Span> for ByteRange { fn from(span: source_map::Span) -> Self { let (lo, hi) = ast::destruct_span(span); ByteRange::new(lo, hi) } } impl fmt::Debug for ByteRange { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "ByteRange({}..{})", self.start.0, self.end.0) } } /// Row and Column position in a file // for backward compatibility, we use 1-index row and 0-indexed column here #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] pub struct Coordinate { pub row: rls_span::Row<rls_span::OneIndexed>, pub col: rls_span::Column<rls_span::ZeroIndexed>, } impl Coordinate { /// construct new Coordinate pub fn new(row: u32, col: u32) -> Self { Coordinate { row: rls_span::Row::<rls_span::OneIndexed>::new_one_indexed(row), col: rls_span::Column::<rls_span::ZeroIndexed>::new_zero_indexed(col), } } /// start point of the file pub fn start() -> Self { Coordinate::new(1, 0) } } /// Context, source, and etc. for detected completion or definition #[derive(Clone, PartialEq)] pub struct Match { pub matchstr: String, pub filepath: path::PathBuf, pub point: BytePos, pub coords: Option<Coordinate>, pub local: bool, pub mtype: MatchType, pub contextstr: String, pub docs: String, } impl Match { /// Checks if two matches can be considered the same for deduplication purposes. /// /// This could be the basis for a `PartialEq` implementation in the future, /// but in the interest of minimizing the crate's public API surface it's exposed /// as a private method for now. 
fn is_same_as(&self, other: &Match) -> bool { self.point == other.point && self.matchstr == other.matchstr && self.filepath == other.filepath } pub(crate) fn to_generics(&self) -> Option<&GenericsArgs> { match &self.mtype { MatchType::Struct(gen_arg) | MatchType::Enum(gen_arg) => Some(gen_arg.as_ref()), MatchType::Method(gen_arg) => gen_arg.as_ref().map(AsRef::as_ref), _ => None, } } pub(crate) fn into_generics(self) -> Option<GenericsArgs> { match self.mtype { MatchType::Struct(gen_arg) | MatchType::Enum(gen_arg) => Some(*gen_arg), MatchType::Method(gen_arg) => gen_arg.map(|x| *x), _ => None, } } pub(crate) fn generics(&self) -> impl Iterator<Item = &TypeParameter> { let opt = match self.mtype { MatchType::Struct(ref gen_arg) | MatchType::Enum(ref gen_arg) => Some(gen_arg), MatchType::Method(ref gen_arg) => gen_arg.as_ref(), _ => None, }; opt.into_iter().flat_map(|gen_arg| gen_arg.args()) } pub(crate) fn resolved_generics(&self) -> impl Iterator<Item = &Ty> { let opt = match self.mtype { MatchType::Struct(ref gen_arg) | MatchType::Enum(ref gen_arg) => Some(gen_arg), MatchType::Method(ref gen_arg) => gen_arg.as_ref(), _ => None, }; opt.into_iter() .flat_map(|gen_arg| gen_arg.args()) .filter_map(|ty_param| ty_param.resolved.as_ref()) } pub(crate) fn resolve_generics(&mut self, types: &[Ty]) { match self.mtype { MatchType::Struct(ref mut gen_arg) | MatchType::Enum(ref mut gen_arg) => { gen_arg.apply_types(types); } _ => {} }; } // currently we can't resolve method's type parameter pub(crate) fn generics_mut(&mut self) -> impl Iterator<Item = &mut TypeParameter> { let opt = match &mut self.mtype { MatchType::Struct(gen_arg) | MatchType::Enum(gen_arg) => Some(&mut **gen_arg), _ => None, }; opt.into_iter().flat_map(|gen_arg| gen_arg.args_mut()) } } /// The cursor position used by public search methods #[derive(Debug, Clone, Copy)] pub enum Location { /// A byte offset in the file Point(BytePos), /// 1-based line and column indices. 
Coords(Coordinate), } impl From<BytePos> for Location { fn from(val: BytePos) -> Location { Location::Point(val) } } impl From<usize> for Location { fn from(val: usize) -> Location { Location::Point(BytePos(val)) } } impl From<Coordinate> for Location { fn from(val: Coordinate) -> Location { Location::Coords(val) } } /// Internal cursor methods pub trait LocationExt { fn to_point(&self, src: &RawSource) -> Option<BytePos>; fn to_coords(&self, src: &RawSource) -> Option<Coordinate>; } impl LocationExt for Location { fn to_point(&self, src: &RawSource) -> Option<BytePos> { match *self { Location::Point(val) => Some(val), Location::Coords(ref coords) => src.coords_to_point(coords), } } fn to_coords(&self, src: &RawSource) -> Option<Coordinate> { match *self { Location::Coords(val) => Some(val), Location::Point(point) => src.point_to_coords(point), } } } impl fmt::Debug for Match { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "Match [{:?}, {:?}, {:?}, {:?}, {:?}, |{}|]", self.matchstr, self.filepath.display(), self.point, self.local, self.mtype, self.contextstr ) } } #[derive(Clone, PartialEq)] pub struct Scope { pub filepath: path::PathBuf, pub point: BytePos, } impl Scope { pub fn new(path: path::PathBuf, pos: BytePos) -> Self { Scope { filepath: path, point: pos, } } pub fn from_match(m: &Match) -> Scope { Scope { filepath: m.filepath.clone(), point: m.point, } } } impl fmt::Debug for Scope { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Scope [{:?}, {:?}]", self.filepath.display(), self.point) } } #[derive(Clone, Debug)] pub struct RawSource { pub code: String, pub lines: RefCell<Vec<ByteRange>>, } impl RawSource { pub fn new(s: String) -> Self { RawSource { code: s, lines: Default::default(), } } fn cache_lineoffsets(&self) { if self.lines.borrow().len() != 0 { return; } let mut before = 0; *self.lines.borrow_mut() = self .code .split('\n') .map(|line| { let len = line.len() + 1; let res = ByteRange::new(before, 
before + len); before += len; res }) .collect(); } pub fn coords_to_point(&self, coords: &Coordinate) -> Option<BytePos> { self.cache_lineoffsets(); self.lines .borrow() .get(coords.row.zero_indexed().0 as usize) .and_then(|&range| { let col = coords.col.0 as usize; if col < range.len() { Some(range.start + col.into()) } else { None } }) } pub fn point_to_coords(&self, point: BytePos) -> Option<Coordinate> { self.cache_lineoffsets(); let lines = self.lines.borrow(); lines .binary_search_by(|range| range.partial_cmp(&point).unwrap()) .ok() .map(|idx| Coordinate::new(idx as u32 + 1, (point - lines[idx].start).0 as u32)) } } #[derive(Clone, Debug)] pub struct MaskedSource { pub code: String, } #[derive(Clone, Copy, Debug)] pub struct Src<'c> { pub src: &'c MaskedSource, pub range: ByteRange, } impl MaskedSource { pub fn new(src: &str) -> MaskedSource { let idx: Vec<_> = codecleaner::code_chunks(&src).collect(); let code = scopes::mask_comments(src, &idx); MaskedSource { code } } pub fn as_src(&self) -> Src<'_> { self.get_src_from_start(BytePos::ZERO) } pub fn get_src_from_start(&self, new_start: BytePos) -> Src<'_> { Src { src: self, range: ByteRange::new(new_start, self.len().into()), } } } pub struct MatchIter<'c> { session: &'c Session<'c>, matches: vec::IntoIter<Match>, } impl<'c> Iterator for MatchIter<'c> { type Item = Match; fn next(&mut self) -> Option<Match> { self.matches.next().map(|mut m| { if m.coords.is_none() { let point = m.point; let src = self.session.load_raw_file(m.filepath.as_path()); m.coords = src.point_to_coords(point); } m }) } } #[test] fn coords_to_point_works() { let src = " fn myfn() { let a = 3; print(a); }"; let src = RawSource::new(src.into()); assert_eq!( src.coords_to_point(&Coordinate::new(3, 5)), Some(BytePos(18)) ); } #[test] fn coords_to_point_lf_newline() { let src = "\n\ fn myfn() {\n\ let a = 3;\n\ print(a);\n\ }\n"; let src = RawSource::new(src.into()); assert_eq!( src.coords_to_point(&Coordinate::new(3, 5)), Some(BytePos(18)) 
); } #[test] fn coords_to_point_crlf_newline() { let src = "\r\n\ fn myfn() {\r\n\ let a = 3;\r\n\ print(a);\r\n\ }\r\n"; let src = RawSource::new(src.into()); assert_eq!( src.coords_to_point(&Coordinate::new(3, 5)), Some(BytePos(20)) ); } #[test] fn test_point_to_coords() { let src = " fn myfn(b:usize) { let a = 3; if b == 12 { let a = 24; do_something_with(a); } do_something_with(a); } "; fn round_trip_point_and_coords(src: &str, lineno: usize, charno: usize) { let raw_src = RawSource::new(src.to_owned()); let point = raw_src .coords_to_point(&Coordinate::new(lineno as u32, charno as u32)) .unwrap(); let coords = raw_src.point_to_coords(point).unwrap(); assert_eq!(coords, Coordinate::new(lineno as u32, charno as u32)); } round_trip_point_and_coords(src, 4, 5); } impl<'c> Src<'c> { pub fn iter_stmts(&self) -> Fuse<StmtIndicesIter<'_>> { StmtIndicesIter::from_parts(self) } pub fn shift_start(&self, shift: BytePos) -> Src<'c> { Src { src: self.src, range: ByteRange::new(self.range.start + shift, self.range.end), } } pub fn change_length(&self, new_length: BytePos) -> Src<'c> { Src { src: self.src, range: ByteRange::new(self.range.start, self.range.start + new_length), } } pub fn shift_range(&self, new_range: ByteRange) -> Src<'c> { Src { src: self.src, range: new_range.shift(self.range.start), } } } pub struct RangedRawSrc { inner: Rc<RawSource>, range: ByteRange, } impl Deref for RangedRawSrc { type Target = str; fn deref(&self) -> &str { &self.inner.code[self.range.to_range()] } } impl Deref for RawSource { type Target = str; fn deref(&self) -> &str { &self.code } } impl Deref for MaskedSource { type Target = str; fn deref(&self) -> &str { &self.code } } impl<'c> Deref for Src<'c> { type Target = str; fn deref(&self) -> &str { &self.src.code[self.range.to_range()] } } /// Caches file contents for re-use between sessions. /// /// The file cache is an opaque blob outside of racer which contains maps of loaded and masked /// files. 
pub struct FileCache { /// raw source for cached files raw_map: RefCell<HashMap<path::PathBuf, Rc<RawSource>>>, /// masked source for cached files /// /// a version with comments and strings replaced by spaces, so that they /// aren't found when scanning the source for signatures. masked_map: RefCell<HashMap<path::PathBuf, Rc<MaskedSource>>>, /// The file loader pub(crate) loader: Box<dyn FileLoader>, } /// Used by the FileCache for loading files /// /// Implement one of these and pass it to `FileCache::new()` to override Racer's /// file loading behavior. pub trait FileLoader { /// Load a single file fn load_file(&self, path: &path::Path) -> io::Result<String>; } /// Provide a blanket impl for Arc<T> since Rls uses that impl<T: FileLoader> FileLoader for ::std::sync::Arc<T> { fn load_file(&self, path: &path::Path) -> io::Result<String> { (&self as &T).load_file(path) } } /// The default file loader /// /// Private since this shouldn't be needed outside of racer struct DefaultFileLoader; impl FileLoader for DefaultFileLoader { fn load_file(&self, path: &path::Path) -> io::Result<String> { let mut rawbytes = Vec::new(); let mut f = File::open(path)?; f.read_to_end(&mut rawbytes)?; // skip BOM bytes, if present if rawbytes.len() > 2 && rawbytes[0..3] == [0xEF, 0xBB, 0xBF] { str::from_utf8(&rawbytes[3..]) .map(|s| s.to_owned()) .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) } else { String::from_utf8(rawbytes).map_err(|err| io::Error::new(io::ErrorKind::Other, err)) } } } impl Default for FileCache { fn default() -> FileCache { FileCache::new(DefaultFileLoader) } } impl FileCache { /// Create a new file cache /// /// In order to load files into the cache, please see /// [`Session::cache_file_contents()`] /// /// [`Session::cache_file_contents()`]: struct.Session.html#method.cache_file_contents pub fn new<L: FileLoader + 'static>(loader: L) -> FileCache { FileCache { raw_map: RefCell::new(HashMap::new()), masked_map: RefCell::new(HashMap::new()), loader: 
Box::new(loader), } } /// Remove specific files from the cache /// /// Returns true if a file was removed pub fn remove_file<P: AsRef<path::Path>>(&self, path: &P) -> bool { let path = path.as_ref(); let mut raw = self.raw_map.borrow_mut(); let mut masked = self.masked_map.borrow_mut(); raw.remove(path).is_some() || masked.remove(path).is_some() } /// Add/Replace a file in both versions. fn cache_file_contents<P, T>(&self, filepath: P, buf: T) where T: Into<String>, P: Into<path::PathBuf>, { let pathbuf = filepath.into(); let src = buf.into(); let masked_src = MaskedSource::new(&src); self.raw_map .borrow_mut() .insert(pathbuf.clone(), Rc::new(RawSource::new(src))); self.masked_map .borrow_mut() .insert(pathbuf, Rc::new(masked_src)); } fn load_file(&self, filepath: &path::Path) -> Rc<RawSource> { if let Some(src) = self.raw_map.borrow().get(filepath) { return src.clone(); } // nothing found, insert into cache // Ugh, really need handle results on all these methods :( let source = self .loader .load_file(filepath) .expect(&format!("Failed load file {:?}", filepath)); let source = Rc::new(RawSource::new(source)); self.raw_map .borrow_mut() .insert(filepath.to_path_buf(), Rc::clone(&source)); source } fn load_file_and_mask_comments(&self, filepath: &path::Path) -> Rc<MaskedSource> { if let Some(src) = self.masked_map.borrow().get(filepath) { return src.clone(); } // nothing found, insert into cache let src = self.load_file(filepath); let msrc = Rc::new(MaskedSource::new(&src.code)); self.masked_map .borrow_mut() .insert(filepath.to_path_buf(), msrc.clone()); msrc } } /// Private methods for the Session type pub trait SessionExt { /// Request that a file is loaded into the cache /// /// This API is unstable and should not be used outside of Racer fn load_raw_file(&self, _: &path::Path) -> Rc<RawSource>; /// ranged version of load_raw_file fn load_raw_src_ranged(&self, src: &Src<'_>, _: &path::Path) -> RangedRawSrc; /// Request that a file is loaded into the cache with 
comments masked /// /// This API is unstable and should not be used outside of Racer fn load_source_file(&self, _: &path::Path) -> Rc<MaskedSource>; } /// Context for a Racer operation pub struct Session<'c> { /// Cache for files /// /// The file cache is used within a session to prevent multiple reads. It is /// borrowed here in order to support reuse across Racer operations. cache: &'c FileCache, /// Cache for generic impls pub generic_impls: RefCell<HashMap<(path::PathBuf, BytePos), Vec<Rc<ImplHeader>>>>, pub project_model: Box<dyn ProjectModelProvider + 'c>, } impl<'c> fmt::Debug for Session<'c> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Session {{ .. }}") } } impl<'c> Session<'c> { /// Create a Session for use in Racer operations /// /// * `cache` is a reference to a `FileCache`. It's take by reference for /// use across racer operations. /// /// # Examples /// /// ``` /// extern crate racer; /// /// let cache = racer::FileCache::default(); /// let session = racer::Session::new(&cache, None); /// ``` /// /// [`FileCache`]: struct.FileCache.html #[cfg(feature = "metadata")] pub fn new(cache: &'c FileCache, project_path: Option<&path::Path>) -> Session<'c> { let project_model = crate::metadata::project_model(project_path); Session::with_project_model(cache, project_model) } pub fn with_project_model( cache: &'c FileCache, project_model: Box<dyn ProjectModelProvider + 'c>, ) -> Session<'c> { Session { cache, generic_impls: Default::default(), project_model, } } /// Specify the contents of a file to be used in completion operations /// /// The path to the file and the file's contents must both be specified. 
/// /// # Examples /// /// ``` /// extern crate racer; /// /// let cache = racer::FileCache::default(); /// let session = racer::Session::new(&cache, None); /// /// session.cache_file_contents("foo.rs", "pub struct Foo;\\n"); /// ``` pub fn cache_file_contents<T, P>(&self, filepath: P, buf: T) where T: Into<String>, P: Into<path::PathBuf>, { self.cache.cache_file_contents(filepath, buf); } pub fn contains_file<P: AsRef<path::Path>>(&self, path: P) -> bool { let path = path.as_ref(); let raw = self.cache.raw_map.borrow(); let masked = self.cache.masked_map.borrow(); raw.contains_key(path) && masked.contains_key(path) } } impl<'c> SessionExt for Session<'c> { fn load_raw_file(&self, filepath: &path::Path) -> Rc<RawSource> { self.cache.load_file(filepath) } fn load_raw_src_ranged(&self, src: &Src<'_>, filepath: &path::Path) -> RangedRawSrc { let inner = self.cache.load_file(filepath); RangedRawSrc { inner, range: src.range, } } fn load_source_file(&self, filepath: &path::Path) -> Rc<MaskedSource> { self.cache.load_file_and_mask_comments(filepath) } } /// Get the racer point of a line/character number pair for a file. pub fn to_point<P>(coords: Coordinate, path: P, session: &Session<'_>) -> Option<BytePos> where P: AsRef<path::Path>, { Location::from(coords).to_point(&session.load_raw_file(path.as_ref())) } /// Get the racer point of a line/character number pair for a file. pub fn to_coords<P>(point: BytePos, path: P, session: &Session<'_>) -> Option<Coordinate> where P: AsRef<path::Path>, { Location::from(point).to_coords(&session.load_raw_file(path.as_ref())) } /// Find completions for a fully qualified name like `std::io::` /// /// Searchs are started relative to `path`. 
/// /// * `query` - is the fqn to search for /// * `path` - the directory to start searching in /// * `session` - reference to a racer::Session /// /// ```no_run /// extern crate racer; /// /// let path = std::path::Path::new("."); /// let cache = racer::FileCache::default(); /// let session = racer::Session::new(&cache, Some(path)); /// /// let m = racer::complete_fully_qualified_name( /// "std::fs::canon", /// &path, /// &session /// ).next().unwrap(); /// /// assert_eq!(&m.matchstr[..], "canonicalize"); /// assert_eq!(m.mtype, racer::MatchType::Function); /// ``` #[inline] pub fn complete_fully_qualified_name<'c, S, P>( query: S, path: P, session: &'c Session<'_>, ) -> MatchIter<'c> where S: AsRef<str>, P: AsRef<path::Path>, { let mut matches = complete_fully_qualified_name_(query.as_ref(), path.as_ref(), session); matches.dedup_by(|a, b| a.is_same_as(b)); MatchIter { matches: matches.into_iter(), session, } } /// Actual implementation without generic bounds fn complete_fully_qualified_name_( query: &str, path: &path::Path, session: &Session<'_>, ) -> Vec<Match> { let p: Vec<&str> = query.split("::").collect(); let mut matches = Vec::new(); for m in nameres::do_file_search(p[0], path, session) { if p.len() == 1 { matches.push(m); } else { let external_search_matches = nameres::do_external_search( &p[1..], &m.filepath, m.point, SearchType::StartsWith, Namespace::Path, &session, ); for m in external_search_matches { matches.push(m); } } } matches } /// Search for completion at position in a file /// /// * `src` - the file contents to search in /// * `filepath` - path to file containing `src` /// * `pos` - byte offset in file with path/expr to complete /// * `session` - a racer::Session /// /// # Examples /// /// ``` /// extern crate racer; /// /// # fn main() { /// let src = " /// fn apple() { /// } /// /// fn main() { /// let b = ap /// }"; /// /// println!("{:?}", src); /// /// let cache = racer::FileCache::default(); /// let session = 
racer::Session::new(&cache, None); /// /// session.cache_file_contents("lib.rs", src); /// /// let got = racer::complete_from_file("lib.rs", racer::Location::from(43), &session) /// .nth(0).unwrap(); /// assert_eq!("apple", got.matchstr); /// assert_eq!(got.mtype, racer::MatchType::Function); /// /// # } /// ``` pub fn complete_from_file<'c, P, C>( filepath: P, cursor: C, session: &'c Session<'_>, ) -> MatchIter<'c> where P: AsRef<path::Path>, C: Into<Location>, { let mut matches = complete_from_file_(filepath.as_ref(), cursor.into(), session); matches.sort_by(|a, b| a.matchstr.cmp(&b.matchstr).then(a.point.cmp(&b.point))); matches.dedup_by(|a, b| a.is_same_as(b)); MatchIter { matches: matches.into_iter(), session, } } fn complete_from_file_( filepath: &path::Path, cursor: Location, session: &Session<'_>, ) -> Vec<Match> { let src = session.load_source_file(filepath); let raw_src = session.load_raw_file(filepath); let src_text = &src.as_src()[..]; // TODO return result let pos = match cursor.to_point(&raw_src) { Some(pos) => pos, None => { debug!("Failed to convert cursor to point"); return Vec::new(); } }; let start = scopes::get_start_of_search_expr(src_text, pos); let expr = &src_text[start.0..pos.0]; let (contextstr, searchstr, completetype) = scopes::split_into_context_and_completion(expr); debug!( "{:?}: contextstr is |{}|, searchstr is |{}|", completetype, contextstr, searchstr ); let mut out = Vec::new(); match completetype { CompletionType::Path => { let (stmtstart, stmt) = &scopes::get_current_stmt(src.as_src(), pos); debug!("Complete path with stmt: {:?}", stmt); // when in the function ident position, only look for methods // from a trait to complete. 
if util::in_fn_name(stmt) { trace!("Path is in fn declaration: `{}`", expr); return nameres::resolve_method( pos, src.as_src(), expr, filepath, SearchType::StartsWith, session, &ImportInfo::default(), ); } let (path, namespace) = if let Some(use_start) = scopes::use_stmt_start(stmt) { let path = scopes::construct_path_from_use_tree(&stmt[use_start.0..]); (path, Namespace::Path) } else if scopes::is_extern_crate(stmt) { return fileres::search_crate_names( searchstr, SearchType::StartsWith, filepath, false, session, ); } else if let Some(str_path) = scopes::is_in_struct_ctor(src.as_src(), *stmtstart, pos) { let path = scopes::expr_to_path(&src[str_path.to_range()]).0; return nameres::get_struct_fields( &path, searchstr, filepath, pos, SearchType::StartsWith, session, ); } else { scopes::expr_to_path(expr) }; debug!("path: {:?}, prefix: {:?}", path, path.prefix); out.extend(nameres::resolve_path( &path, filepath, pos, SearchType::StartsWith, namespace, session, &ImportInfo::default(), )); } CompletionType::Field => { let context = ast::get_type_of(contextstr.to_owned(), filepath, pos, session); debug!("complete_from_file context is {:?}", context); if let Some(ty) = context { out.extend(nameres::get_field_matches_from_ty( ty, searchstr, SearchType::StartsWith, session, )); } } } out } /// Finds if the statement where cursor lies is a `use` statement. /// /// # Examples /// /// ``` /// extern crate racer; /// extern crate env_logger; /// /// /// # fn main() { /// let _ = env_logger::init(); /// let cache = racer::FileCache::default(); /// let session = racer::Session::new(&cache, None); /// /// // This is the file where we request completion from /// let src = stringify! 
{ /// use sub::foo; /// use sub::{ /// bar /// }; /// pub(crate) use sub::baz; /// }; /// /// // Load files into cache to prevent trying to read from disk /// session.cache_file_contents("lib.rs", src); /// /// assert_eq!(racer::is_use_stmt("lib.rs", racer::Location::from(9), &session), true); /// assert_eq!(racer::is_use_stmt("lib.rs", racer::Location::from(28), &session), true); /// assert_eq!(racer::is_use_stmt("lib.rs", racer::Location::from(5000), &session), false); /// # } /// ``` pub fn is_use_stmt<P, C>(file_path: P, cursor: C, session: &Session<'_>) -> bool where P: AsRef<path::Path>, C: Into<Location>, { let file_path = file_path.as_ref(); let src = session.load_source_file(file_path); let raw_src = session.load_raw_file(file_path); let pos = match cursor.into().to_point(&raw_src) { Some(pos) => pos, None => return false, }; if src.bytes().len() <= pos.0 { return false; } let line = &scopes::get_current_stmt(src.as_src(), pos).1; scopes::use_stmt_start(line).is_some() } /// Find the definition for item at given a file, source, and cursor index /// /// # Examples /// /// ``` /// extern crate racer; /// extern crate env_logger; /// /// use std::path::Path; /// /// # fn main() { /// let _ = env_logger::init(); /// let cache = racer::FileCache::default(); /// let session = racer::Session::new(&cache, None); /// /// // This is the file where we request completion from /// let src = r" /// mod sub; /// use sub::foo; /// fn main() { /// foo(); /// }; /// "; /// /// // This is the submodule where the definition is found /// let sub = r"pub fn foo() {}"; /// /// // Load files into cache to prevent trying to read from disk /// session.cache_file_contents("sub.rs", sub); /// session.cache_file_contents("lib.rs", src); /// /// // Search for the definition. 52 is the byte offset in `src`. /// // Specifically, this asks for the definition of `foo()`. 
/// let m = racer::find_definition("lib.rs", racer::Location::from(52), &session) /// .expect("find definition returns a match"); /// /// // Should have found definition in the "sub.rs" file /// assert_eq!(m.filepath, Path::new("sub.rs")); /// // The definition should be for foo /// assert_eq!(&m.matchstr[..], "foo"); /// // The definition should be a function /// assert_eq!(m.mtype, racer::MatchType::Function); /// # } /// ``` pub fn find_definition<P, C>(filepath: P, cursor: C, session: &Session<'_>) -> Option<Match> where P: AsRef<path::Path>, C: Into<Location>, { find_definition_(filepath.as_ref(), cursor.into(), session).map(|mut m| { if m.coords.is_none() { let point = m.point; let src = session.load_raw_file(m.filepath.as_path()); m.coords = src.point_to_coords(point); } m }) } pub fn find_definition_( filepath: &path::Path, cursor: Location, session: &Session<'_>, ) -> Option<Match> { let src = session.load_source_file(filepath); let src_txt = &src[..]; // TODO return result let pos = match cursor.to_point(&session.load_raw_file(filepath)) { Some(pos) => pos, None => { debug!("Failed to convert cursor to point"); return None; } }; // Make sure `src` is in the cache let range = scopes::expand_search_expr(src_txt, pos); let expr = &src[range.to_range()]; let (contextstr, searchstr, completetype) = scopes::split_into_context_and_completion(expr); debug!( "find_definition_ for |{:?}| |{:?}| {:?}", contextstr, searchstr, completetype ); match completetype { CompletionType::Path => { let (stmtstart, stmt) = &scopes::get_current_stmt(src.as_src(), range.end); let (path, namespace) = if let Some(use_start) = scopes::use_stmt_start(stmt) { let path = scopes::construct_path_from_use_tree(&stmt[use_start.0..]); (path, Namespace::Path) } else if let Some(str_path) = scopes::is_in_struct_ctor(src.as_src(), *stmtstart, pos) { let path = scopes::expr_to_path(&src[str_path.to_range()]).0; return nameres::get_struct_fields( &path, searchstr, filepath, pos, 
SearchType::StartsWith, session, ) .into_iter() .next(); } else { scopes::expr_to_path(expr) }; debug!("[find_definition_] Path: {:?}", path); nameres::resolve_path( &path, filepath, pos, SearchType::ExactMatch, namespace, session, &ImportInfo::default(), ) .into_iter() .nth(0) } CompletionType::Field => { let context = ast::get_type_of(contextstr.to_owned(), filepath, pos, session); debug!("context is {:?}", context); let only_method = src[range.end.0..].starts_with('('); context.and_then(|ty| { nameres::get_field_matches_from_ty(ty, searchstr, SearchType::ExactMatch, session) .into_iter() .filter(|m| !only_method || m.mtype.is_function()) .next() }) } } } #[cfg(test)] mod tests { use super::FileCache; use super::{Session, SessionExt}; use std::path::Path; #[test] fn overwriting_cached_files() { let src1 = "src1"; let src2 = "src2"; let src3 = "src3"; let src4 = "src4"; // Need session and path to cache files let path = Path::new("not_on_disk"); let cache = FileCache::default(); // Cache contents for a file and assert that load_file and load_file_and_mask_comments return // the newly cached contents. macro_rules! cache_and_assert { ($src: ident) => {{ let session = Session::new(&cache, Some(path)); session.cache_file_contents(path, $src); assert_eq!($src, &session.load_raw_file(path)[..]); assert_eq!($src, &session.load_source_file(path).code[..]); }}; } // Check for all srcN cache_and_assert!(src1); cache_and_assert!(src2); cache_and_assert!(src3); cache_and_assert!(src4); } }
29.057325
101
0.557699
0af71817b3633ac87a47c35a1ad0e599abddfd10
347
// svd2rust-style generated register-reader accessors for GPIO_IN.
#[doc = "Reader of register GPIO_IN"]
pub type R = crate::R<u32, super::GPIO_IN>;
#[doc = "Reader of field `GPIO_IN_DATA`"]
pub type GPIO_IN_DATA_R = crate::R<u32, u32>;
impl R {
    // Field occupies bits 0..=25 (26 bits); 0x03ff_ffff masks exactly those bits.
    #[doc = "Bits 0:25"]
    #[inline(always)]
    pub fn gpio_in_data(&self) -> GPIO_IN_DATA_R {
        GPIO_IN_DATA_R::new((self.bits & 0x03ff_ffff) as u32)
    }
}
28.916667
61
0.631124
e97ae70d6ce05b011de7cc7dd006e3e455da7e6d
25,823
// //! Copyright 2020 Alibaba Group Holding Limited. //! //! Licensed under the Apache License, Version 2.0 (the "License"); //! you may not use this file except in compliance with the License. //! You may obtain a copy of the License at //! //! http://www.apache.org/licenses/LICENSE-2.0 //! //! Unless required by applicable law or agreed to in writing, software //! distributed under the License is distributed on an "AS IS" BASIS, //! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //! See the License for the specific language governing permissions and //! limitations under the License. use std::convert::{TryFrom, TryInto}; use std::io; use dyn_type::{BorrowObject, Object, Primitives}; use pegasus_common::codec::{Decode, Encode, ReadExt, WriteExt}; use prost::Message; use crate::error::{ParsePbError, ParsePbResult}; use crate::generated::algebra as pb; use crate::generated::common as common_pb; use crate::generated::results as result_pb; pub mod error; pub mod expr_parse; #[macro_use] extern crate serde; #[cfg(feature = "proto_inplace")] pub mod generated { #[path = "algebra.rs"] pub mod algebra; #[path = "common.rs"] pub mod common; #[path = "results.rs"] pub mod results; #[path = "schema.rs"] pub mod schema; } #[cfg(not(feature = "proto_inplace"))] pub mod generated { pub mod common { tonic::include_proto!("common"); } pub mod algebra { tonic::include_proto!("algebra"); } pub mod results { tonic::include_proto!("results"); } pub mod schema { tonic::include_proto!("schema"); } } pub type KeyId = i32; pub const SPLITTER: &'static str = "."; pub const VAR_PREFIX: &'static str = "@"; /// Refer to a key of a relation or a graph element, by either a string-type name or an identifier #[derive(Debug, PartialEq, Eq, Hash, Clone, PartialOrd, Ord)] pub enum NameOrId { Str(String), Id(KeyId), } impl Default for NameOrId { fn default() -> Self { Self::Str("".to_string()) } } impl NameOrId { pub fn as_object(&self) -> Object { match self { 
NameOrId::Str(s) => s.to_string().into(), NameOrId::Id(id) => (*id as i32).into(), } } pub fn as_borrow_object(&self) -> BorrowObject { match self { NameOrId::Str(s) => BorrowObject::String(s.as_str()), NameOrId::Id(id) => (*id as i32).into(), } } } impl From<KeyId> for NameOrId { fn from(id: KeyId) -> Self { Self::Id(id) } } impl From<String> for NameOrId { fn from(str: String) -> Self { Self::Str(str) } } impl From<&str> for NameOrId { fn from(str: &str) -> Self { Self::Str(str.to_string()) } } impl Encode for NameOrId { fn write_to<W: WriteExt>(&self, writer: &mut W) -> io::Result<()> { match self { NameOrId::Id(id) => { writer.write_u8(0)?; writer.write_i32(*id)?; } NameOrId::Str(str) => { writer.write_u8(1)?; str.write_to(writer)?; } } Ok(()) } } impl Decode for NameOrId { fn read_from<R: ReadExt>(reader: &mut R) -> io::Result<Self> { let e = reader.read_u8()?; match e { 0 => { let id = reader.read_i32()?; Ok(NameOrId::Id(id)) } 1 => { let str = <String>::read_from(reader)?; Ok(NameOrId::Str(str)) } _ => Err(io::Error::new(io::ErrorKind::Other, "unreachable")), } } } impl TryFrom<common_pb::NameOrId> for NameOrId { type Error = ParsePbError; fn try_from(t: common_pb::NameOrId) -> ParsePbResult<Self> where Self: Sized, { use common_pb::name_or_id::Item; if let Some(item) = t.item { match item { Item::Name(name) => Ok(NameOrId::Str(name)), Item::Id(id) => { if id < 0 { Err(ParsePbError::from("key id must be positive number")) } else { Ok(NameOrId::Id(id as KeyId)) } } } } else { Err(ParsePbError::from("empty content provided")) } } } impl TryFrom<common_pb::NameOrId> for KeyId { type Error = ParsePbError; fn try_from(t: common_pb::NameOrId) -> ParsePbResult<Self> where Self: Sized, { use common_pb::name_or_id::Item; if let Some(item) = t.item { match item { Item::Name(_) => Err(ParsePbError::from("key must be a number")), Item::Id(id) => { if id < 0 { Err(ParsePbError::from("key id must be positive number")) } else { Ok(id as KeyId) } } } } else { 
Err(ParsePbError::from("empty content provided")) } } } impl From<NameOrId> for common_pb::NameOrId { fn from(tag: NameOrId) -> Self { let name_or_id = match tag { NameOrId::Str(name) => common_pb::name_or_id::Item::Name(name), NameOrId::Id(id) => common_pb::name_or_id::Item::Id(id), }; common_pb::NameOrId { item: Some(name_or_id) } } } impl From<NameOrId> for Object { fn from(tag: NameOrId) -> Self { match tag { NameOrId::Str(name) => Object::from(name), NameOrId::Id(id) => Object::from(id), } } } impl From<common_pb::Arithmetic> for common_pb::ExprOpr { fn from(arith: common_pb::Arithmetic) -> Self { common_pb::ExprOpr { item: Some(common_pb::expr_opr::Item::Arith(unsafe { std::mem::transmute::<common_pb::Arithmetic, i32>(arith) })), } } } impl From<common_pb::Logical> for common_pb::ExprOpr { fn from(logical: common_pb::Logical) -> Self { common_pb::ExprOpr { item: Some(common_pb::expr_opr::Item::Logical(unsafe { std::mem::transmute::<common_pb::Logical, i32>(logical) })), } } } impl From<common_pb::Value> for common_pb::ExprOpr { fn from(const_val: common_pb::Value) -> Self { common_pb::ExprOpr { item: Some(common_pb::expr_opr::Item::Const(const_val)) } } } impl From<common_pb::Variable> for common_pb::ExprOpr { fn from(var: common_pb::Variable) -> Self { common_pb::ExprOpr { item: Some(common_pb::expr_opr::Item::Var(var)) } } } /// An indicator for whether it is a map impl From<(common_pb::VariableKeys, bool)> for common_pb::ExprOpr { fn from(vars: (common_pb::VariableKeys, bool)) -> Self { if !vars.1 { // not a map common_pb::ExprOpr { item: Some(common_pb::expr_opr::Item::Vars(vars.0)) } } else { // is a map common_pb::ExprOpr { item: Some(common_pb::expr_opr::Item::VarMap(vars.0)) } } } } impl From<bool> for common_pb::Value { fn from(b: bool) -> Self { common_pb::Value { item: Some(common_pb::value::Item::Boolean(b)) } } } impl From<f64> for common_pb::Value { fn from(f: f64) -> Self { common_pb::Value { item: Some(common_pb::value::Item::F64(f)) } } } 
impl From<i32> for common_pb::Value { fn from(i: i32) -> Self { common_pb::Value { item: Some(common_pb::value::Item::I32(i)) } } } impl From<i64> for common_pb::Value { fn from(i: i64) -> Self { common_pb::Value { item: Some(common_pb::value::Item::I64(i)) } } } impl From<String> for common_pb::Value { fn from(s: String) -> Self { common_pb::Value { item: Some(common_pb::value::Item::Str(s)) } } } impl From<Vec<i64>> for common_pb::Value { fn from(item: Vec<i64>) -> Self { common_pb::Value { item: Some(common_pb::value::Item::I64Array(common_pb::I64Array { item })) } } } impl From<Vec<f64>> for common_pb::Value { fn from(item: Vec<f64>) -> Self { common_pb::Value { item: Some(common_pb::value::Item::F64Array(common_pb::DoubleArray { item })) } } } impl From<Vec<String>> for common_pb::Value { fn from(item: Vec<String>) -> Self { common_pb::Value { item: Some(common_pb::value::Item::StrArray(common_pb::StringArray { item })) } } } impl From<i32> for common_pb::NameOrId { fn from(i: i32) -> Self { common_pb::NameOrId { item: Some(common_pb::name_or_id::Item::Id(i)) } } } impl From<&str> for common_pb::NameOrId { fn from(str: &str) -> Self { common_pb::NameOrId { item: Some(common_pb::name_or_id::Item::Name(str.to_string())) } } } impl From<String> for common_pb::NameOrId { fn from(str: String) -> Self { common_pb::NameOrId { item: Some(common_pb::name_or_id::Item::Name(str)) } } } pub const ID_KEY: &'static str = "~id"; pub const LABEL_KEY: &'static str = "~label"; pub const LENGTH_KEY: &'static str = "~len"; pub const ALL_KEY: &'static str = "~all"; impl From<String> for common_pb::Property { fn from(str: String) -> Self { if str == ID_KEY { common_pb::Property { item: Some(common_pb::property::Item::Id(common_pb::IdKey {})) } } else if str == LABEL_KEY { common_pb::Property { item: Some(common_pb::property::Item::Label(common_pb::LabelKey {})) } } else if str == LENGTH_KEY { common_pb::Property { item: Some(common_pb::property::Item::Len(common_pb::LengthKey {})) 
} } else if str == ALL_KEY { common_pb::Property { item: Some(common_pb::property::Item::All(common_pb::AllKey {})) } } else { common_pb::Property { item: Some(common_pb::property::Item::Key(str.into())) } } } } fn str_as_tag(str: String) -> Option<common_pb::NameOrId> { if !str.is_empty() { Some(if let Ok(str_int) = str.parse::<i32>() { str_int.into() } else { str.into() }) } else { None } } impl From<String> for common_pb::Variable { fn from(str: String) -> Self { assert!(str.starts_with(VAR_PREFIX)); // skip the var variable let str: String = str.chars().skip(1).collect(); if !str.contains(SPLITTER) { common_pb::Variable { // If the tag is represented as an integer tag: str_as_tag(str), property: None, } } else { let mut splitter = str.split(SPLITTER); let tag: Option<common_pb::NameOrId> = if let Some(first) = splitter.next() { str_as_tag(first.to_string()) } else { None }; let property: Option<common_pb::Property> = if let Some(second) = splitter.next() { Some(second.to_string().into()) } else { None }; common_pb::Variable { tag, property } } } } impl From<i64> for pb::index_predicate::AndPredicate { fn from(id: i64) -> Self { pb::index_predicate::AndPredicate { predicates: vec![pb::index_predicate::Triplet { key: Some(common_pb::Property { item: Some(common_pb::property::Item::Id(common_pb::IdKey {})), }), value: Some(id.into()), cmp: None, }], } } } impl From<Vec<i64>> for pb::IndexPredicate { fn from(ids: Vec<i64>) -> Self { let or_predicates: Vec<pb::index_predicate::AndPredicate> = ids.into_iter().map(|id| id.into()).collect(); pb::IndexPredicate { or_predicates } } } impl From<String> for pb::index_predicate::AndPredicate { fn from(label: String) -> Self { pb::index_predicate::AndPredicate { predicates: vec![pb::index_predicate::Triplet { key: Some(common_pb::Property { item: Some(common_pb::property::Item::Label(common_pb::LabelKey {})), }), value: Some(label.into()), cmp: None, }], } } } impl From<Vec<String>> for pb::IndexPredicate { fn from(names: 
Vec<String>) -> Self { let or_predicates: Vec<pb::index_predicate::AndPredicate> = names .into_iter() .map(|name| name.into()) .collect(); pb::IndexPredicate { or_predicates } } } impl TryFrom<common_pb::Value> for Object { type Error = ParsePbError; fn try_from(value: common_pb::Value) -> Result<Self, Self::Error> { use common_pb::value::Item::*; if let Some(item) = value.item.as_ref() { return match item { Boolean(b) => Ok((*b).into()), I32(i) => Ok((*i).into()), I64(i) => Ok((*i).into()), F64(f) => Ok((*f).into()), Str(s) => Ok(s.clone().into()), Blob(blob) => Ok(blob.clone().into()), None(_) => Ok(Object::None), I32Array(v) => Ok(v.item.clone().into()), I64Array(v) => Ok(v.item.clone().into()), F64Array(v) => Ok(v.item.clone().into()), StrArray(v) => Ok(v.item.clone().into()), PairArray(pairs) => { let mut vec = Vec::<(Object, Object)>::with_capacity(pairs.item.len()); for item in pairs.item.clone().into_iter() { let (key_obj, val_obj) = (Object::try_from(item.key.unwrap())?, Object::try_from(item.val.unwrap())?); vec.push((key_obj, val_obj)); } Ok(vec.into()) } }; } Err(ParsePbError::from("empty value provided")) } } impl TryFrom<pb::IndexPredicate> for Vec<i64> { type Error = ParsePbError; fn try_from(value: pb::IndexPredicate) -> Result<Self, Self::Error> { let mut global_ids = vec![]; for and_predicate in value.or_predicates { let predicate = and_predicate .predicates .get(0) .ok_or(ParsePbError::EmptyFieldError("`AndCondition` is emtpy".to_string()))?; let (key, value) = (predicate.key.as_ref(), predicate.value.as_ref()); let key = key.ok_or("key is empty in kv_pair in indexed_scan")?; if let Some(common_pb::property::Item::Id(_id_key)) = key.item.as_ref() { let value = value.ok_or("value is empty in kv_pair in indexed_scan")?; match &value.item { Some(common_pb::value::Item::I64(v)) => { global_ids.push(*v); } Some(common_pb::value::Item::I64Array(arr)) => { global_ids.extend(arr.item.iter().cloned()) } Some(common_pb::value::Item::I32(v)) => { 
global_ids.push(*v as i64); } Some(common_pb::value::Item::I32Array(arr)) => { global_ids.extend(arr.item.iter().map(|i| *i as i64)); } _ => Err(ParsePbError::Unsupported( "indexed value other than integer (I32, I64) and integer array".to_string(), ))?, } } } Ok(global_ids) } } impl TryFrom<pb::IndexPredicate> for Vec<(NameOrId, Object)> { type Error = ParsePbError; fn try_from(value: pb::IndexPredicate) -> Result<Self, Self::Error> { let mut primary_key_values = vec![]; // for pk values, which should be a set of and_conditions. let and_predicates = value .or_predicates .get(0) .ok_or(ParsePbError::EmptyFieldError("`OrCondition` is emtpy".to_string()))?; for predicate in &and_predicates.predicates { let key_pb = predicate .key .clone() .ok_or("key is empty in kv_pair in indexed_scan")?; let value = predicate .value .clone() .ok_or("value is empty in kv_pair in indexed_scan")?; let key = match key_pb.item { Some(common_pb::property::Item::Key(prop_key)) => prop_key.try_into()?, _ => Err(ParsePbError::Unsupported( "Other keys rather than property key in kv_pair in indexed_scan".to_string(), ))?, }; let obj_val = Object::try_from(value)?; primary_key_values.push((key, obj_val)); } Ok(primary_key_values) } } impl From<pb::Project> for pb::logical_plan::Operator { fn from(opr: pb::Project) -> Self { pb::logical_plan::Operator { opr: Some(pb::logical_plan::operator::Opr::Project(opr)) } } } impl From<pb::Select> for pb::logical_plan::Operator { fn from(opr: pb::Select) -> Self { pb::logical_plan::Operator { opr: Some(pb::logical_plan::operator::Opr::Select(opr)) } } } impl From<pb::Join> for pb::logical_plan::Operator { fn from(opr: pb::Join) -> Self { pb::logical_plan::Operator { opr: Some(pb::logical_plan::operator::Opr::Join(opr)) } } } impl From<pb::Union> for pb::logical_plan::Operator { fn from(opr: pb::Union) -> Self { pb::logical_plan::Operator { opr: Some(pb::logical_plan::operator::Opr::Union(opr)) } } } impl From<pb::GroupBy> for pb::logical_plan::Operator { 
fn from(opr: pb::GroupBy) -> Self { pb::logical_plan::Operator { opr: Some(pb::logical_plan::operator::Opr::GroupBy(opr)) } } } impl From<pb::OrderBy> for pb::logical_plan::Operator { fn from(opr: pb::OrderBy) -> Self { pb::logical_plan::Operator { opr: Some(pb::logical_plan::operator::Opr::OrderBy(opr)) } } } impl From<pb::Dedup> for pb::logical_plan::Operator { fn from(opr: pb::Dedup) -> Self { pb::logical_plan::Operator { opr: Some(pb::logical_plan::operator::Opr::Dedup(opr)) } } } impl From<pb::Unfold> for pb::logical_plan::Operator { fn from(opr: pb::Unfold) -> Self { pb::logical_plan::Operator { opr: Some(pb::logical_plan::operator::Opr::Unfold(opr)) } } } impl From<pb::Apply> for pb::logical_plan::Operator { fn from(opr: pb::Apply) -> Self { pb::logical_plan::Operator { opr: Some(pb::logical_plan::operator::Opr::Apply(opr)) } } } impl From<pb::SegmentApply> for pb::logical_plan::Operator { fn from(opr: pb::SegmentApply) -> Self { pb::logical_plan::Operator { opr: Some(pb::logical_plan::operator::Opr::SegApply(opr)) } } } impl From<pb::Scan> for pb::logical_plan::Operator { fn from(opr: pb::Scan) -> Self { pb::logical_plan::Operator { opr: Some(pb::logical_plan::operator::Opr::Scan(opr)) } } } impl From<pb::Limit> for pb::logical_plan::Operator { fn from(opr: pb::Limit) -> Self { pb::logical_plan::Operator { opr: Some(pb::logical_plan::operator::Opr::Limit(opr)) } } } impl From<pb::Auxilia> for pb::logical_plan::Operator { fn from(opr: pb::Auxilia) -> Self { pb::logical_plan::Operator { opr: Some(pb::logical_plan::operator::Opr::Auxilia(opr)) } } } impl From<pb::As> for pb::logical_plan::Operator { fn from(opr: pb::As) -> Self { pb::logical_plan::Operator { opr: Some(pb::logical_plan::operator::Opr::As(opr)) } } } impl From<pb::EdgeExpand> for pb::logical_plan::Operator { fn from(opr: pb::EdgeExpand) -> Self { pb::logical_plan::Operator { opr: Some(pb::logical_plan::operator::Opr::Edge(opr)) } } } impl From<pb::PathExpand> for pb::logical_plan::Operator { fn 
from(opr: pb::PathExpand) -> Self { pb::logical_plan::Operator { opr: Some(pb::logical_plan::operator::Opr::Path(opr)) } } } impl From<pb::PathStart> for pb::logical_plan::Operator { fn from(opr: pb::PathStart) -> Self { pb::logical_plan::Operator { opr: Some(pb::logical_plan::operator::Opr::PathStart(opr)) } } } impl From<pb::PathEnd> for pb::logical_plan::Operator { fn from(opr: pb::PathEnd) -> Self { pb::logical_plan::Operator { opr: Some(pb::logical_plan::operator::Opr::PathEnd(opr)) } } } /* impl From<pb::ShortestPathExpand> for pb::logical_plan::Operator { fn from(opr: pb::ShortestPathExpand) -> Self { pb::logical_plan::Operator { opr: Some(pb::logical_plan::operator::Opr::ShortestPath(opr)) } } } */ impl From<pb::GetV> for pb::logical_plan::Operator { fn from(opr: pb::GetV) -> Self { pb::logical_plan::Operator { opr: Some(pb::logical_plan::operator::Opr::Vertex(opr)) } } } impl From<pb::Pattern> for pb::logical_plan::Operator { fn from(opr: pb::Pattern) -> Self { pb::logical_plan::Operator { opr: Some(pb::logical_plan::operator::Opr::Pattern(opr)) } } } impl From<pb::Sink> for pb::logical_plan::Operator { fn from(opr: pb::Sink) -> Self { pb::logical_plan::Operator { opr: Some(pb::logical_plan::operator::Opr::Sink(opr)) } } } impl From<Object> for common_pb::Value { fn from(value: Object) -> Self { let item = match value { Object::Primitive(v) => match v { // TODO: It seems that Byte is only used for bool for now Primitives::Byte(v) => common_pb::value::Item::Boolean(!(v == 0)), Primitives::Integer(v) => common_pb::value::Item::I32(v), Primitives::Long(v) => common_pb::value::Item::I64(v), Primitives::ULLong(v) => common_pb::value::Item::Str(v.to_string()), Primitives::Float(v) => common_pb::value::Item::F64(v), }, Object::String(s) => common_pb::value::Item::Str(s), Object::Blob(b) => common_pb::value::Item::Blob(b.to_vec()), Object::Vector(v) => common_pb::value::Item::StrArray(common_pb::StringArray { item: v .into_iter() .map(|obj| obj.to_string()) 
.collect(), }), Object::KV(kv) => { let mut pairs: Vec<common_pb::Pair> = Vec::with_capacity(kv.len()); for (key, val) in kv { let key_pb: common_pb::Value = key.into(); let val_pb: common_pb::Value = val.into(); pairs.push(common_pb::Pair { key: Some(key_pb), val: Some(val_pb) }) } common_pb::value::Item::PairArray(common_pb::PairArray { item: pairs }) } Object::None => common_pb::value::Item::None(common_pb::None {}), _ => unimplemented!(), }; common_pb::Value { item: Some(item) } } } impl Encode for result_pb::Results { fn write_to<W: WriteExt>(&self, writer: &mut W) -> io::Result<()> { let mut bytes = vec![]; self.encode_raw(&mut bytes); writer.write_u32(bytes.len() as u32)?; writer.write_all(bytes.as_slice())?; Ok(()) } } impl Decode for result_pb::Results { fn read_from<R: ReadExt>(reader: &mut R) -> io::Result<Self> { let len = reader.read_u32()? as usize; let mut buffer = Vec::with_capacity(len); reader.read_exact(&mut buffer)?; result_pb::Results::decode(buffer.as_slice()) .map_err(|_e| std::io::Error::new(std::io::ErrorKind::Other, "decoding result_pb failed!")) } } #[cfg(test)] mod test { use super::*; #[test] fn test_str_to_variable() { let case1 = "@1"; assert_eq!( common_pb::Variable { tag: Some(common_pb::NameOrId::from(1)), property: None }, common_pb::Variable::from(case1.to_string()) ); let case2 = "@a"; assert_eq!( common_pb::Variable { tag: Some(common_pb::NameOrId::from("a".to_string())), property: None }, common_pb::Variable::from(case2.to_string()) ); let case3 = "@1.~id"; assert_eq!( common_pb::Variable { tag: Some(common_pb::NameOrId::from(1)), property: Some(common_pb::Property { item: Some(common_pb::property::Item::Id(common_pb::IdKey {})) }) }, common_pb::Variable::from(case3.to_string()) ); let case4 = "@1.~label"; assert_eq!( common_pb::Variable { tag: Some(common_pb::NameOrId::from(1)), property: Some(common_pb::Property { item: Some(common_pb::property::Item::Label(common_pb::LabelKey {})) }) }, 
common_pb::Variable::from(case4.to_string()) ); let case5 = "@1.name"; assert_eq!( common_pb::Variable { tag: Some(common_pb::NameOrId::from(1)), property: Some(common_pb::Property { item: Some(common_pb::property::Item::Key("name".to_string().into())) }) }, common_pb::Variable::from(case5.to_string()) ); let case6 = "@.name"; assert_eq!( common_pb::Variable { tag: None, property: Some(common_pb::Property { item: Some(common_pb::property::Item::Key("name".to_string().into())) }) }, common_pb::Variable::from(case6.to_string()) ); let case7 = "@"; assert_eq!( common_pb::Variable { tag: None, property: None }, common_pb::Variable::from(case7.to_string()) ); } }
32.646018
106
0.548736
1a94c8bc6ad19d97893891434faeec3df72e06d1
9,496
//! Common types for room directory endpoints. use std::fmt; use js_int::UInt; use ruma_identifiers::{MxcUri, RoomAliasId, RoomId}; use ruma_serde::Outgoing; use serde::{ de::{Error, MapAccess, Visitor}, ser::SerializeStruct, Deserialize, Deserializer, Serialize, Serializer, }; use serde_json::Value as JsonValue; /// A chunk of a room list response, describing one room. /// /// To create an instance of this type, first create a `PublicRoomsChunkInit` and convert it via /// `PublicRoomsChunk::from` / `.into()`. #[derive(Clone, Debug, Deserialize, Serialize)] #[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)] pub struct PublicRoomsChunk { /// Aliases of the room. #[serde(default, skip_serializing_if = "Vec::is_empty")] pub aliases: Vec<RoomAliasId>, /// The canonical alias of the room, if any. #[serde(skip_serializing_if = "Option::is_none")] pub canonical_alias: Option<RoomAliasId>, /// The name of the room, if any. #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, /// The number of members joined to the room. pub num_joined_members: UInt, /// The ID of the room. pub room_id: RoomId, /// The topic of the room, if any. #[serde(skip_serializing_if = "Option::is_none")] pub topic: Option<String>, /// Whether the room may be viewed by guest users without joining. pub world_readable: bool, /// Whether guest users may join the room and participate in it. /// /// If they can, they will be subject to ordinary power level rules like any other user. pub guest_can_join: bool, /// The URL for the room's avatar, if one is set. /// /// If you activate the `compat` feature, this field being an empty string in JSON will give /// you `None` here. #[serde(skip_serializing_if = "Option::is_none")] #[cfg_attr( feature = "compat", serde(default, deserialize_with = "ruma_serde::empty_string_as_none") )] pub avatar_url: Option<MxcUri>, } /// Initial set of mandatory fields of `PublicRoomsChunk`. 
/// /// This struct will not be updated even if additional fields are added to `PublicRoomsChunk` in a /// new (non-breaking) release of the Matrix specification. #[derive(Debug)] #[allow(clippy::exhaustive_structs)] pub struct PublicRoomsChunkInit { /// The number of members joined to the room. pub num_joined_members: UInt, /// The ID of the room. pub room_id: RoomId, /// Whether the room may be viewed by guest users without joining. pub world_readable: bool, /// Whether guest users may join the room and participate in it. /// /// If they can, they will be subject to ordinary power level rules like any other user. pub guest_can_join: bool, } impl From<PublicRoomsChunkInit> for PublicRoomsChunk { fn from(init: PublicRoomsChunkInit) -> Self { let PublicRoomsChunkInit { num_joined_members, room_id, world_readable, guest_can_join } = init; Self { aliases: Vec::new(), canonical_alias: None, name: None, num_joined_members, room_id, topic: None, world_readable, guest_can_join, avatar_url: None, } } } /// A filter for public rooms lists #[derive(Clone, Debug, Default, Outgoing, Serialize)] #[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)] #[incoming_derive(Default)] pub struct Filter<'a> { /// A string to search for in the room metadata, e.g. name, topic, canonical alias etc. #[serde(skip_serializing_if = "Option::is_none")] pub generic_search_term: Option<&'a str>, } impl Filter<'_> { /// Creates an empty `Filter`. pub fn new() -> Self { Default::default() } /// Returns `true` if the filter is empty. pub fn is_empty(&self) -> bool { self.generic_search_term.is_none() } } /// Information about which networks/protocols from application services on the /// homeserver from which to request rooms. #[derive(Clone, Debug, PartialEq, Eq, Outgoing)] #[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)] #[incoming_derive(Clone, PartialEq, Eq, !Deserialize)] pub enum RoomNetwork<'a> { /// Return rooms from the Matrix network. 
Matrix, /// Return rooms from all the networks/protocols the homeserver knows about. All, /// Return rooms from a specific third party network/protocol. ThirdParty(&'a str), } impl<'a> Default for RoomNetwork<'a> { fn default() -> Self { RoomNetwork::Matrix } } impl<'a> Serialize for RoomNetwork<'a> { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut state; match self { Self::Matrix => { state = serializer.serialize_struct("RoomNetwork", 0)?; } Self::All => { state = serializer.serialize_struct("RoomNetwork", 1)?; state.serialize_field("include_all_networks", &true)?; } Self::ThirdParty(network) => { state = serializer.serialize_struct("RoomNetwork", 1)?; state.serialize_field("third_party_instance_id", network)?; } } state.end() } } impl<'de> Deserialize<'de> for IncomingRoomNetwork { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { deserializer.deserialize_map(RoomNetworkVisitor) } } struct RoomNetworkVisitor; impl<'de> Visitor<'de> for RoomNetworkVisitor { type Value = IncomingRoomNetwork; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("Network selection") } fn visit_map<M>(self, mut access: M) -> Result<Self::Value, M::Error> where M: MapAccess<'de>, { let mut include_all_networks = false; let mut third_party_instance_id = None; while let Some((key, value)) = access.next_entry::<String, JsonValue>()? 
{ match key.as_str() { "include_all_networks" => { include_all_networks = match value.as_bool() { Some(b) => b, _ => false, } } "third_party_instance_id" => { third_party_instance_id = value.as_str().map(|v| v.to_owned()) } _ => {} }; } if include_all_networks { if third_party_instance_id.is_none() { Ok(IncomingRoomNetwork::All) } else { Err(M::Error::custom( "`include_all_networks = true` and `third_party_instance_id` are mutually exclusive.", )) } } else { Ok(match third_party_instance_id { Some(network) => IncomingRoomNetwork::ThirdParty(network), None => IncomingRoomNetwork::Matrix, }) } } } #[cfg(test)] mod tests { use serde_json::{from_value as from_json_value, json, to_value as to_json_value}; use super::{IncomingRoomNetwork, RoomNetwork}; #[test] fn serialize_matrix_network_only() { let json = json!({}); assert_eq!(to_json_value(RoomNetwork::Matrix).unwrap(), json); } #[test] fn deserialize_matrix_network_only() { let json = json!({ "include_all_networks": false }); assert_eq!( from_json_value::<IncomingRoomNetwork>(json).unwrap(), IncomingRoomNetwork::Matrix ); } #[test] fn serialize_default_network_is_empty() { let json = json!({}); assert_eq!(to_json_value(RoomNetwork::default()).unwrap(), json); } #[test] fn deserialize_empty_network_is_default() { let json = json!({}); assert_eq!( from_json_value::<IncomingRoomNetwork>(json).unwrap(), IncomingRoomNetwork::Matrix ); } #[test] fn serialize_include_all_networks() { let json = json!({ "include_all_networks": true }); assert_eq!(to_json_value(RoomNetwork::All).unwrap(), json); } #[test] fn deserialize_include_all_networks() { let json = json!({ "include_all_networks": true }); assert_eq!(from_json_value::<IncomingRoomNetwork>(json).unwrap(), IncomingRoomNetwork::All); } #[test] fn serialize_third_party_network() { let json = json!({ "third_party_instance_id": "freenode" }); assert_eq!(to_json_value(RoomNetwork::ThirdParty("freenode")).unwrap(), json); } #[test] fn deserialize_third_party_network() { let 
json = json!({ "third_party_instance_id": "freenode" }); assert_eq!( from_json_value::<IncomingRoomNetwork>(json).unwrap(), IncomingRoomNetwork::ThirdParty("freenode".into()) ); } #[test] fn deserialize_include_all_networks_and_third_party_exclusivity() { let json = json!({ "include_all_networks": true, "third_party_instance_id": "freenode" }); assert_eq!( from_json_value::<IncomingRoomNetwork>(json).unwrap_err().to_string().as_str(), "`include_all_networks = true` and `third_party_instance_id` are mutually exclusive." ); } }
31.759197
106
0.616891
284bfb6ccdb68d45fa8d410e56334bf8ebb3bdbe
5,215
// Copyright 2019-2022 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT mod error; mod util; use cid::Cid; pub use error::*; use futures::{AsyncRead, AsyncWrite, Stream, StreamExt}; use fvm_shared::blockstore::Blockstore; use fvm_shared::encoding::{from_slice, to_vec}; use serde::{Deserialize, Serialize}; use util::{ld_read, ld_write, read_node}; /// CAR file header #[derive(Debug, Default, Serialize, Deserialize, PartialEq)] pub struct CarHeader { pub roots: Vec<Cid>, pub version: u64, } impl CarHeader { /// Creates a new CAR file header pub fn new(roots: Vec<Cid>, version: u64) -> Self { Self { roots, version } } /// Writes header and stream of data to writer in Car format. pub async fn write_stream_async<W, S>( &self, writer: &mut W, stream: &mut S, ) -> Result<(), Error> where W: AsyncWrite + Send + Unpin, S: Stream<Item = (Cid, Vec<u8>)> + Unpin, { // Write header bytes let header_bytes = to_vec(self)?; ld_write(writer, &header_bytes).await?; // Write all key values from the stream while let Some((cid, bytes)) = stream.next().await { ld_write(writer, &[cid.to_bytes(), bytes].concat()).await?; } Ok(()) } } impl From<Vec<Cid>> for CarHeader { fn from(roots: Vec<Cid>) -> Self { Self { roots, version: 1 } } } /// Reads CAR files that are in a BufReader pub struct CarReader<R> { pub reader: R, pub header: CarHeader, } impl<R> CarReader<R> where R: AsyncRead + Send + Unpin, { /// Creates a new CarReader and parses the CarHeader pub async fn new(mut reader: R) -> Result<Self, Error> { let buf = ld_read(&mut reader) .await? 
.ok_or_else(|| Error::ParsingError("failed to parse uvarint for header".to_string()))?; let header: CarHeader = from_slice(&buf).map_err(|e| Error::ParsingError(e.to_string()))?; if header.roots.is_empty() { return Err(Error::ParsingError("empty CAR file".to_owned())); } if header.version != 1 { return Err(Error::InvalidFile("CAR file version must be 1".to_owned())); } Ok(CarReader { reader, header }) } /// Returns the next IPLD Block in the buffer pub async fn next_block(&mut self) -> Result<Option<Block>, Error> { // Read node -> cid, bytes let block = read_node(&mut self.reader) .await? .map(|(cid, data)| Block { cid, data }); Ok(block) } } /// IPLD Block #[derive(Clone, Debug)] pub struct Block { cid: Cid, data: Vec<u8>, } /// Loads a CAR buffer into a Blockstore pub async fn load_car<R, B>(s: &B, reader: R) -> Result<Vec<Cid>, Error> where B: Blockstore, R: AsyncRead + Send + Unpin, { let mut car_reader = CarReader::new(reader).await?; // Batch write key value pairs from car file // TODO: Stream the data once some of the stream APIs stabilize. let mut buf = Vec::with_capacity(100); while let Some(block) = car_reader.next_block().await? 
{ buf.push((block.cid, block.data)); if buf.len() > 1000 { s.put_many_keyed(buf.iter().map(|(k, v)| (*k, &*v))) .map_err(|e| Error::Other(e.to_string()))?; buf.clear(); } } s.put_many_keyed(buf.iter().map(|(k, v)| (*k, &*v))) .map_err(|e| Error::Other(e.to_string()))?; Ok(car_reader.header.roots) } #[cfg(test)] mod tests { use std::sync::Arc; use async_std::channel::bounded; use async_std::io::Cursor; use async_std::sync::RwLock; use cid::multihash::Code::Blake2b256; use cid::multihash::MultihashDigest; use fvm_shared::blockstore::MemoryBlockstore; use fvm_shared::encoding::DAG_CBOR; use super::*; #[test] fn symmetric_header() { let cid = Cid::new_v1(DAG_CBOR, Blake2b256.digest(b"test")); let header = CarHeader { roots: vec![cid], version: 1, }; let bytes = to_vec(&header).unwrap(); assert_eq!(from_slice::<CarHeader>(&bytes).unwrap(), header); } #[async_std::test] async fn car_write_read() { let buffer: Arc<RwLock<Vec<u8>>> = Default::default(); let cid = Cid::new_v1(DAG_CBOR, Blake2b256.digest(b"test")); let header = CarHeader { roots: vec![cid], version: 1, }; assert_eq!(to_vec(&header).unwrap().len(), 60); let (tx, mut rx) = bounded(10); let buffer_cloned = buffer.clone(); let write_task = async_std::task::spawn(async move { header .write_stream_async(&mut *buffer_cloned.write().await, &mut rx) .await .unwrap() }); tx.send((cid, b"test".to_vec())).await.unwrap(); drop(tx); write_task.await; let buffer: Vec<_> = buffer.read().await.clone(); let reader = Cursor::new(&buffer); let bs = MemoryBlockstore::default(); load_car(&bs, reader).await.unwrap(); assert_eq!(bs.get(&cid).unwrap(), Some(b"test".to_vec())); } }
28.497268
99
0.580441
6726ac04550fe5e0a5a69466aad4a62ac5228f88
11,494
// Copyright 2019-2020 Parity Technologies (UK) Ltd. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! A storage hash map that allows to associate keys with values. mod impls; mod iter; mod storage; #[cfg(test)] mod tests; #[cfg(all(test, feature = "ink-fuzz-tests"))] mod fuzz_tests; pub use self::iter::{ Iter, IterMut, Keys, Values, ValuesMut, }; use crate::{ hash::hasher::{ Blake2x256Hasher, Hasher, }, storage2::{ collections::Stash, lazy::LazyHashMap, traits::PackedLayout, }, }; use core::{ borrow::Borrow, cmp::Eq, }; use ink_prelude::borrow::ToOwned; use ink_primitives::Key; /// The index type within a hashmap. /// /// # Note /// /// Used for key indices internal to the hashmap. type KeyIndex = u32; /// A hash map operating on the contract storage. /// /// Stores a mapping between keys and values. /// /// # Note /// /// Unlike Rust's standard `HashMap` that uses the [`core::hash::Hash`] trait /// in order to hash its keys the storage hash map uses the [`scale::Encode`] /// encoding in order to hash its keys using a built-in cryptographic /// hash function provided by the chain runtime. /// /// The main difference between the lower-level `LazyHashMap` and the /// `storage::HashMap` is that the latter is aware of its associated keys and /// values and operates on those instances directly as opposed to `Option` /// instances of them. Also it provides a more high-level and user focused /// API. 
/// /// Users should generally prefer using this storage hash map over the low-level /// `LazyHashMap` for direct usage in their smart contracts. #[derive(Debug)] pub struct HashMap<K, V, H = Blake2x256Hasher> where K: Ord + Clone + PackedLayout, V: PackedLayout, H: Hasher, Key: From<<H as Hasher>::Output>, { /// The keys of the storage hash map. keys: Stash<K>, /// The values of the storage hash map. values: LazyHashMap<K, ValueEntry<V>, H>, } /// An entry within the storage hash map. /// /// Stores the value as well as the index to its associated key. #[derive(Debug, scale::Encode, scale::Decode)] #[cfg_attr(feature = "std", derive(scale_info::TypeInfo))] struct ValueEntry<V> { /// The value stored in this entry. value: V, /// The index of the key associated with this value. key_index: KeyIndex, } impl<K, V, H> HashMap<K, V, H> where K: Ord + Clone + PackedLayout, V: PackedLayout, H: Hasher, Key: From<<H as Hasher>::Output>, { /// Creates a new empty storage hash map. pub fn new() -> Self { Self { keys: Stash::new(), values: LazyHashMap::new(), } } /// Returns the number of key- value pairs stored in the hash map. pub fn len(&self) -> u32 { self.keys.len() } /// Returns `true` if the hash map is empty. pub fn is_empty(&self) -> bool { self.keys.is_empty() } /// Returns an iterator yielding shared references to all key/value pairs /// of the hash map. /// /// # Note /// /// - Avoid unbounded iteration over big storage hash maps. /// - Prefer using methods like `Iterator::take` in order to limit the number /// of yielded elements. pub fn iter(&self) -> Iter<K, V, H> { Iter::new(self) } /// Returns an iterator yielding exclusive references to all key/value pairs /// of the hash map. /// /// # Note /// /// - Avoid unbounded iteration over big storage hash maps. /// - Prefer using methods like `Iterator::take` in order to limit the number /// of yielded elements. 
pub fn iter_mut(&mut self) -> IterMut<K, V, H> { IterMut::new(self) } /// Returns an iterator yielding shared references to all values of the hash map. /// /// # Note /// /// - Avoid unbounded iteration over big storage hash maps. /// - Prefer using methods like `Iterator::take` in order to limit the number /// of yielded elements. pub fn values(&self) -> Values<K, V, H> { Values::new(self) } /// Returns an iterator yielding shared references to all values of the hash map. /// /// # Note /// /// - Avoid unbounded iteration over big storage hash maps. /// - Prefer using methods like `Iterator::take` in order to limit the number /// of yielded elements. pub fn values_mut(&mut self) -> ValuesMut<K, V, H> { ValuesMut::new(self) } /// Returns an iterator yielding shared references to all keys of the hash map. /// /// # Note /// /// - Avoid unbounded iteration over big storage hash maps. /// - Prefer using methods like `Iterator::take` in order to limit the number /// of yielded elements. pub fn keys(&self) -> Keys<K> { Keys::new(self) } } impl<K, V, H> HashMap<K, V, H> where K: Ord + Clone + PackedLayout, V: PackedLayout, H: Hasher, Key: From<<H as Hasher>::Output>, { fn clear_cells(&self) { if self.values.key().is_none() { // We won't clear any storage if we are in lazy state since there // probably has not been any state written to storage, yet. return } for key in self.keys() { // It might seem wasteful to clear all entries instead of just // the occupied ones. However this spares us from having one extra // read for every element in the storage stash to filter out vacant // entries. So this is actually a trade-off and at the time of this // implementation it is unclear which path is more efficient. // // The bet is that clearing a storage cell is cheaper than reading one. 
self.values.clear_packed_at(key); } } } impl<K, V, H> HashMap<K, V, H> where K: Ord + Eq + Clone + PackedLayout, V: PackedLayout, H: Hasher, Key: From<H::Output>, { /// Inserts a key-value pair into the map. /// /// Returns the previous value associated with the same key if any. /// If the map did not have this key present, `None` is returned. /// /// # Note /// /// - If the map did have this key present, the value is updated, /// and the old value is returned. The key is not updated, though; /// this matters for types that can be `==` without being identical. pub fn insert(&mut self, key: K, new_value: V) -> Option<V> { if let Some(occupied) = self.values.get_mut(&key) { // Update value, don't update key. let old_value = core::mem::replace(&mut occupied.value, new_value); return Some(old_value) } // At this point we know that `key` does not yet exist in the map. let key_index = self.keys.put(key.to_owned()); self.values.put( key, Some(ValueEntry { value: new_value, key_index, }), ); None } /// Removes the key/value pair from the map associated with the given key. /// /// - Returns the removed value if any. /// /// # Note /// /// The key may be any borrowed form of the map's key type, /// but `Hash` and `Eq` on the borrowed form must match those for the key type. pub fn take<Q>(&mut self, key: &Q) -> Option<V> where K: Borrow<Q>, Q: Ord + scale::Encode + ToOwned<Owned = K>, { let entry = self.values.put_get(key, None)?; self.keys .take(entry.key_index) .expect("`key_index` must point to a valid key entry"); Some(entry.value) } /// Returns a shared reference to the value corresponding to the key. /// /// The key may be any borrowed form of the map's key type, /// but `Hash` and `Eq` on the borrowed form must match those for the key type. pub fn get<Q>(&self, key: &Q) -> Option<&V> where K: Borrow<Q>, Q: Ord + scale::Encode + ToOwned<Owned = K>, { self.values.get(key).map(|entry| &entry.value) } /// Returns a mutable reference to the value corresponding to the key. 
/// /// The key may be any borrowed form of the map's key type, /// but `Hash` and `Eq` on the borrowed form must match those for the key type. pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V> where K: Borrow<Q>, Q: Ord + scale::Encode + ToOwned<Owned = K>, { self.values.get_mut(key).map(|entry| &mut entry.value) } /// Returns `true` if there is an entry corresponding to the key in the map. pub fn contains_key<Q>(&self, key: &Q) -> bool where K: Borrow<Q>, Q: Ord + PartialEq<K> + Eq + scale::Encode + ToOwned<Owned = K>, { // We do not check if the given key is equal to the queried key which is // what normally a hash map implementation does because we do not resolve // or prevent collisions in this hash map implementation at any level. // Having a collision is virtually impossible since we // are using a keyspace of 2^256 bit. self.values.get(key).is_some() } /// Defragments storage used by the storage hash map. /// /// Returns the number of storage cells freed this way. /// /// A `max_iterations` parameter of `None` means that there is no limit /// to the number of iterations performed. This is generally not advised. /// /// # Note /// /// This frees storage that is held but not necessary for the hash map to hold. /// This operation might be expensive, especially for big `max_iteration` /// parameters. The `max_iterations` parameter can be used to limit the /// expensiveness for this operation and instead free up storage incrementally. pub fn defrag(&mut self, max_iterations: Option<u32>) -> u32 { // This method just defrags the underlying `storage::Stash` used to // store the keys as it can sometimes take a lot of unused storage // if many keys have been removed at some point. Some hash map // implementations might even prefer to perform this operation with a // limit set to 1 after every successful removal. 
if let Some(0) = max_iterations { // Bail out early if the iteration limit is set to 0 anyways to // completely avoid doing work in this case.y return 0 } let len_vacant = self.keys.capacity() - self.keys.len(); let max_iterations = max_iterations.unwrap_or(len_vacant); let values = &mut self.values; let callback = |old_index, new_index, key: &K| { let value_entry = values.get_mut(key).expect("key must be valid"); debug_assert_eq!(value_entry.key_index, old_index); value_entry.key_index = new_index; }; self.keys.defrag(Some(max_iterations), callback) } }
33.706745
85
0.615974
222d8a168a54440048231d2fb49087f146bd9f71
2,344
// This file is part of olympus-xmp. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/raphaelcohn/olympus-xmp/master/COPYRIGHT. No part of olympus-xmp, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file. // Copyright © 2022 The developers of olympus-xmp. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/raphaelcohn/olympus-xmp/master/COPYRIGHT. #![allow(non_camel_case_types)] #![allow(non_snake_case)] #![allow(non_upper_case_globals)] #![deny(absolute_paths_not_starting_with_crate)] #![deny(invalid_html_tags)] #![deny(macro_use_extern_crate)] #![deny(missing_crate_level_docs)] #![deny(missing_docs)] #![deny(pointer_structural_match)] #![deny(unaligned_references)] #![deny(unconditional_recursion)] #![deny(unreachable_patterns)] #![deny(unused_import_braces)] #![deny(unused_must_use)] #![deny(unused_qualifications)] #![deny(unused_results)] #![deny(unreachable_code)] #![warn(unreachable_pub)] #![warn(unused_lifetimes)] #![warn(unused_crate_dependencies)] #![feature(adt_const_params)] #![feature(const_trait_impl)] #![feature(generic_arg_infer)] //! #iso-3166-1-country //! //! ISO country and country code domain types. 
use std::convert::TryFrom; use std::error; use std::fmt; use std::fmt::Display; use std::fmt::Debug; use std::fmt::Formatter; use std::str::FromStr; use std::str::from_utf8_unchecked; use Iso3166Dash1Alpha2CountryCode::*; use Iso3166Dash1Alpha3CountryCode::*; use Iso3166Dash1AlphaCountryCode::*; use Iso3166Dash1NumericCountryCode::*; use swiss_army_knife::a_to_z::A; use swiss_army_knife::a_to_z::Z; use swiss_army_knife::memchr::MemoryCharacter; use swiss_army_knife::get_unchecked::GetUnchecked; include!("letter_to_number_scaled.rs"); include!("letter_to_number_unchecked.rs"); include!("Iso3166Dash1Country.rs"); include!("Iso3166Dash1Alpha2CountryCode.rs"); include!("Iso3166Dash1Alpha3CountryCode.rs"); include!("Iso3166Dash1NumericCountryCode.rs"); include!("Iso3166Dash1AlphaCountryCode.rs"); include!("Iso3166Dash1AlphaCountryCodeParseError.rs"); include!("UnknownStringVariantParseError.rs"); include!("UnknownIso3166Dash1CodeError.rs");
36.061538
390
0.788396
fcc91e297f54c2024e4467c3dc370df44cccc4c7
1,341
mod util;

use std::io;
use std::path::PathBuf;
use std::sync::Arc;

use tokio_rustls::rustls;
use tracerbench_recorded_response_set::RecordedResponseSets;

use util::*;

/// Server config
pub struct Config {
    /// Value for chrome switch ignore-certificate-errors-spki-list
    /// BASE64(SHA256(cert.subjectPublicKeyInfo)))
    pub spki_digest: String,
    /// Shared config for a TLS acceptor
    pub tls_config: Arc<rustls::ServerConfig>,
    /// Recorded response sets
    pub response_sets: RecordedResponseSets,
}

impl Config {
    /// Assembles a `Config` from fully prepared parts; performs no I/O.
    pub fn new(
        spki_digest: String,
        tls_config: Arc<rustls::ServerConfig>,
        response_sets: RecordedResponseSets,
    ) -> Self {
        Config {
            spki_digest,
            tls_config,
            response_sets,
        }
    }

    /// Builds a `Config` from an in-memory certificate chain and private key.
    ///
    /// The SPKI digest is computed from the first certificate in the chain
    /// (presumably the leaf — TODO confirm the ordering callers supply).
    /// NOTE(review): `cert_chain[0]` panics if the chain is empty.
    pub fn from_parts(
        cert_chain: Vec<rustls::Certificate>,
        private_key: rustls::PrivateKey,
        response_sets: RecordedResponseSets,
    ) -> Result<Self, io::Error> {
        Ok(Self::new(
            spki_digest(cert_chain[0].as_ref())?,
            build_tls_config(cert_chain, private_key)?,
            response_sets,
        ))
    }

    /// Loads the certificate PEM, key PEM, and recorded-response CBOR from
    /// the given paths (via the `util` helpers) and delegates to
    /// [`Config::from_parts`].
    pub fn from_args(
        cert_pem: &PathBuf,
        key_pem: &PathBuf,
        response_sets_cbor: &PathBuf,
    ) -> Result<Self, io::Error> {
        Self::from_parts(
            read_cert_pem(cert_pem)?,
            read_key_pem(key_pem)?,
            read_response_set_cbor(response_sets_cbor)?,
        )
    }
}
23.12069
65
0.679344
08716253385d23d08298470e11bfd91183783269
4,672
use crate::randomness::{get_rng_with_seed, RandomNumberGenerator}; use crate::sparse::PyBinaryVector; use crate::pauli::PyPauliOperator; use bincode::{deserialize, serialize}; use ldpc::noise_model::{DepolarizingNoise, BinarySymmetricChannel, NoiseModel, Probability}; use pyo3::exceptions::PyValueError; use pyo3::prelude::*; use pyo3::types::PyBytes; use pyo3::PyObjectProtocol; use pyo3::ToPyObject; /// An implementation of a binary symmetric channel. /// /// A binary symmetric channel flips the value /// of each bits according to a given error probability. #[pyclass(name = "BinarySymmetricChannel", module="pyqec.pyqec")] pub struct PyBinarySymmetricChannel { channel: BinarySymmetricChannel, probability: f64, rng: RandomNumberGenerator, } #[pymethods] impl PyBinarySymmetricChannel { #[new] #[args(probability = "0.0", rng_seed = "None")] pub fn new(probability: f64, rng_seed: Option<u64>) -> PyResult<PyBinarySymmetricChannel> { let prob_wrapper = Probability::try_new(probability).ok_or(PyValueError::new_err( format!("{} is not a valid probability", probability,), ))?; let channel = BinarySymmetricChannel::with_probability(prob_wrapper); let rng = get_rng_with_seed(rng_seed); Ok(PyBinarySymmetricChannel { channel, probability, rng, }) } #[text_signature = "(self, length)"] fn sample_error_of_length(&mut self, length: usize) -> PyBinaryVector { self.channel .sample_error_of_length(length, &mut self.rng) .into() } #[text_signature = "(self)"] fn error_probability(&self) -> f64 { self.probability } pub fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { let (channel, probability, rng) = deserialize(s.as_bytes()).unwrap(); self.channel = channel; self.probability = probability; self.rng = rng; Ok(()) } Err(e) => Err(e), } } pub fn __getstate__(&self, py: Python) -> PyResult<PyObject> { Ok(PyBytes::new( py, &serialize(&(&self.channel, &self.probability, &self.rng)).unwrap(), ) .to_object(py)) } } 
#[pyproto] impl PyObjectProtocol for PyBinarySymmetricChannel { fn __repr__(&self) -> String { format!("BSC({})", self.error_probability()) } } /// An implementation of a depolarizing noise channel. /// /// A depolarizing noise channel apply one of {X, Y, Z} /// with probability p and identity with probability 1 - p. #[pyclass(name = "DepolarizingNoise", module="pyqec.pyqec")] pub struct PyDepolarizingNoise { channel: DepolarizingNoise, probability: f64, rng: RandomNumberGenerator, } #[pymethods] impl PyDepolarizingNoise { #[new] #[args(probability = "0.0", rng_seed = "None")] pub fn new(probability: f64, rng_seed: Option<u64>) -> PyResult<PyDepolarizingNoise> { let prob_wrapper = Probability::try_new(probability).ok_or(PyValueError::new_err( format!("{} is not a valid probability", probability,), ))?; let channel = DepolarizingNoise::with_probability(prob_wrapper); let rng = get_rng_with_seed(rng_seed); Ok(PyDepolarizingNoise { channel, probability, rng, }) } #[text_signature = "(self, length)"] fn sample_error_of_length(&mut self, length: usize) -> PyPauliOperator { self.channel .sample_error_of_length(length, &mut self.rng) .into() } #[text_signature = "(self)"] fn error_probability(&self) -> f64 { self.probability } pub fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { let (channel, probability, rng) = deserialize(s.as_bytes()).unwrap(); self.channel = channel; self.probability = probability; self.rng = rng; Ok(()) } Err(e) => Err(e), } } pub fn __getstate__(&self, py: Python) -> PyResult<PyObject> { Ok(PyBytes::new( py, &serialize(&(&self.channel, &self.probability, &self.rng)).unwrap(), ) .to_object(py)) } } #[pyproto] impl PyObjectProtocol for PyDepolarizingNoise { fn __repr__(&self) -> String { format!("Depolarizing({})", self.error_probability()) } }
31.146667
95
0.604666
db188ebcfb6fababed5b4b82c10850bdfd805c0a
384
// Debug builds: checked numeric conversion. Out-of-range values trip the
// crate's ASSERT! macro with the offending value and target type.
#[cfg(debug_assertions)]
macro_rules! cast {
    ($v: expr, $t: ident) => {{
        $t::try_from($v).unwrap_or_else(|_| ASSERT!(false, "Error casting {} to {}", $v, stringify!($t)))
    }};
}

// Release builds: a raw `as` cast with no range check — out-of-range values
// silently truncate/wrap, trading safety for speed.
#[cfg(not(debug_assertions))]
macro_rules! cast {
    ($v: expr, $t: ident) => {{
        $v as $t
    }};
}

// Re-exported so call sites of `cast!` have the logging layer (ASSERT!)
// and the half-float type in scope.
pub use super::super::logging;
pub use crate::uses::f16;

/// Value-to-value conversion, spelled as `Target::to(value)`.
pub trait Cast<T> {
    /// Converts `val` into `Self`.
    fn to(val: T) -> Self;
}
18.285714
99
0.583333
3a685916f3937bac8f10bc2b7cdb09237abd029f
436
//! Crate root: submodule declarations and the public re-export surface.

mod confidence;
pub(crate) mod detect;
mod detector;
mod filter_list;
mod info;
mod method;
mod options;
mod query;
mod text;

// Public API re-exported at the crate root so users need only one `use` path.
pub use confidence::calculate_confidence;
pub use detect::{detect, detect_lang, detect_with_options};
pub use detector::Detector;
pub use filter_list::FilterList;
pub use info::Info;
pub use method::Method;
pub use options::Options;
pub use query::{InternalQuery, Query};
pub use text::{LowercaseText, Text};
21.8
59
0.772936
6210f027f47c4ac7371208ef026fde72643fb5b3
603
use std::error::Error;
use std::process::exit;
use std::{env, fs};

use crate::models::SnailfishMathProblem;

mod models;

/// Entry point: reads the puzzle input file named as the sole command-line
/// argument, parses it into a `SnailfishMathProblem`, and prints the answers
/// for both parts.
///
/// Exits with status 1 when the argument count is wrong. A file-read failure
/// is propagated as `Err` through `main`'s `Result` (previously this
/// panicked via `expect`), which still yields a nonzero exit status.
fn main() -> Result<(), Box<dyn Error>> {
    let args: Vec<String> = env::args().collect();
    if args.len() != 2 {
        eprintln!("Invalid arguments count");
        exit(1);
    }

    // Borrow the path instead of cloning it: `read_to_string` accepts any
    // `AsRef<Path>`, so the previous `args[1].clone()` allocation was
    // unnecessary. `?` routes the error through `main`'s Result.
    let file_content = fs::read_to_string(&args[1])
        .map_err(|e| format!("Error while reading the data file: {}", e))?;

    let input = SnailfishMathProblem::parse_string(file_content);

    println!("part 1: {}", input.solve_part_1());
    println!("part 2: {}", input.solve_part_2());

    Ok(())
}
25.125
88
0.615257
1c740556fef19e7bc4f9689bd7cdc1fce347bd12
537
use handlebars::Handlebars;
use serde_json::json;

/// Verifies that `~` markers elide the whitespace surrounding an expression
/// for the three expression forms: escaped `{{~ ~}}`, triple-mustache
/// `{{{~ ~}}}`, and the raw-brace `{{~ { } ~}}` form.
#[test]
fn test_whitespaces_elision() {
    let hbs = Handlebars::new();

    // Escaped expression: the spaces on both sides are stripped.
    assert_eq!(
        "bar",
        hbs.render_template(" {{~ foo ~}} ", &json!({"foo": "bar"}))
            .unwrap()
    );

    // Triple-mustache (unescaped) form: HTML passes through verbatim.
    assert_eq!(
        "<bar/>",
        hbs.render_template(" {{{~ foo ~}}} ", &json!({"foo": "<bar/>"}))
            .unwrap()
    );

    // Raw-brace form `{foo}` inside a normal tag: also unescaped, also elided.
    assert_eq!(
        "<bar/>",
        hbs.render_template(" {{~ {foo} ~}} ", &json!({"foo": "<bar/>"}))
            .unwrap()
    );
}
21.48
75
0.439479
79641894628f247101ebd4815b5f0c02f1c567b9
1,725
/// A sheep tracks its name and whether it has already been sheared.
struct Sheep {
    naked: bool,
    name: &'static str,
}

trait Animal {
    /// Associated constructor; `Self` is the implementing type.
    fn new(name: &'static str) -> Self;

    /// The animal's name.
    fn name(&self) -> &'static str;

    /// The sound this animal makes.
    fn noise(&self) -> &'static str;

    /// Default implementation, shared by every implementor unless overridden.
    fn talk(&self) {
        println!("{} says {}", self.name(), self.noise())
    }
}

impl Sheep {
    /// Whether this sheep has already been sheared.
    fn is_naked(&self) -> bool {
        self.naked
    }

    /// Shears the sheep — unless there is no wool left to shear.
    fn shear(&mut self) {
        match self.is_naked() {
            // Inherent methods may call the implementor's trait methods.
            true => println!("{} is already naked", self.name()),
            false => {
                println!("{} gets a haircut!", self.name);
                self.naked = true;
            }
        }
    }
}

// `Animal` implementation for `Sheep`.
impl Animal for Sheep {
    fn new(name: &'static str) -> Self {
        Sheep { name, naked: false }
    }

    fn name(&self) -> &'static str {
        self.name
    }

    fn noise(&self) -> &'static str {
        match self.is_naked() {
            true => "baaaaah?",
            false => "baaaaah!",
        }
    }

    /// Overrides the default `talk` with some quiet contemplation.
    fn talk(&self) {
        println!("{} pauses briefly... {}", self.name, self.noise());
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_case1() {
        // The type annotation is required so `Animal::new` resolves an impl.
        let mut dolly: Sheep = Animal::new("Dolly");

        dolly.talk();
        dolly.shear();
        dolly.talk();
    }
}
22.115385
75
0.532754
bf3becd6bceb5d23d4dd2241bc71e8c0f4ca41a1
3,439
/*
 * Copyright 2018 Intel Corporation.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * -----------------------------------------------------------------------------
 */

use sawtooth_sdk::processor::handler::ApplyError;
use serde_json;
use std::str::from_utf8;
use validator_registry_signup_info::*;

// Maximum accepted length for the human-readable validator name.
const VALIDATOR_NAME_LEN: usize = 64;

/// Deserialized payload of a validator-registry transaction.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ValidatorRegistryPayload {
    // The action that the transaction processor will take. Currently this
    // is only "register", but could include other actions in the future
    // such as "revoke".
    pub verb: String,

    // The human readable name of the endpoint.
    pub name: String,

    // Validator's public key (currently using signer_public_key as this is
    // stored in the transaction header).
    pub id: String,

    // JSON-serialized ValidatorRegistrySignupInfo; decoded lazily by
    // `get_signup_info`.
    pub signup_info_str: String, //ValidatorRegistrySignupInfo,
}

impl ValidatorRegistryPayload {
    /// Constructs a payload from already-validated fields.
    pub fn new(verb: String, name: String, id: String, signup_info_str: String) -> Self {
        ValidatorRegistryPayload {
            verb,
            name,
            id,
            signup_info_str,
        }
    }

    /// Parses and validates raw payload bytes.
    ///
    /// Validation steps, each yielding `ApplyError::InvalidTransaction`:
    /// 1. the bytes must be valid UTF-8;
    /// 2. the string must deserialize (JSON) into this struct;
    /// 3. `name` must be non-empty and at most `VALIDATOR_NAME_LEN` bytes;
    /// 4. `id` must equal the transaction signer's `public_key`.
    pub fn parse_from(
        payload_data: &[u8],
        public_key: &str,
    ) -> Result<ValidatorRegistryPayload, ApplyError> {
        let payload: ValidatorRegistryPayload;
        let payload_string = match from_utf8(payload_data) {
            Ok(s) => s,
            Err(error) => {
                return Err(ApplyError::InvalidTransaction(format!(
                    "Invalid payload serialization {}",
                    error
                )));
            }
        };
        payload = match serde_json::from_str(payload_string) {
            Ok(s) => s,
            Err(error) => {
                // Debug aid: dump the offending string before rejecting it.
                println!("{} is the payload_string", payload_string);
                return Err(ApplyError::InvalidTransaction(format!(
                    "Invalid validator payload string {}",
                    error
                )));
            }
        };

        if payload.name.is_empty() || payload.name.len() > VALIDATOR_NAME_LEN {
            return Err(ApplyError::InvalidTransaction(format!(
                "Invalid validator name length {}",
                payload.name.len()
            )));
        }

        if payload.id != public_key {
            return Err(ApplyError::InvalidTransaction(format!(
                "Signature mismatch on validator registration with validator {} signed by {}",
                &payload.id, &public_key
            )));
        }

        Ok(payload)
    }

    /// Returns a copy of the verb ("register", etc.).
    pub fn get_verb(&self) -> String {
        self.verb.clone()
    }

    /// Returns a copy of the human-readable validator name.
    pub fn get_name(&self) -> String {
        self.name.clone()
    }

    /// Returns a copy of the validator's public key id.
    pub fn get_id(&self) -> String {
        self.id.clone()
    }

    /// Decodes `signup_info_str` into a `ValidatorRegistrySignupInfo`.
    ///
    /// NOTE(review): the `unwrap` panics if the stored JSON is malformed —
    /// `parse_from` validates the outer payload but not this inner string.
    pub fn get_signup_info(&self) -> ValidatorRegistrySignupInfo {
        serde_json::from_str(&*self.signup_info_str).unwrap()
    }
}
30.981982
94
0.587962
e51e2c4d9ce5e5acbff2437c02d9e37d26bc2a2e
1,939
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /*! The `ToStr` trait for converting to strings */ #![experimental] use fmt; use string::String; /// A generic trait for converting a value to a string pub trait ToStr { /// Converts the value of `self` to an owned string fn to_str(&self) -> String; } /// Trait for converting a type to a string, consuming it in the process. pub trait IntoStr { /// Consume and convert to a string. fn into_str(self) -> String; } impl<T: fmt::Show> ToStr for T { fn to_str(&self) -> String { format!("{}", *self) } } #[cfg(test)] mod tests { use prelude::*; use super::*; #[test] fn test_simple_types() { assert_eq!(1i.to_str(), "1".to_string()); assert_eq!((-1i).to_str(), "-1".to_string()); assert_eq!(200u.to_str(), "200".to_string()); assert_eq!(2u8.to_str(), "2".to_string()); assert_eq!(true.to_str(), "true".to_string()); assert_eq!(false.to_str(), "false".to_string()); assert_eq!(().to_str(), "()".to_string()); assert_eq!(("hi".to_string()).to_str(), "hi".to_string()); } #[test] fn test_vectors() { let x: Vec<int> = vec![]; assert_eq!(x.to_str(), "[]".to_string()); assert_eq!((vec![1i]).to_str(), "[1]".to_string()); assert_eq!((vec![1i, 2, 3]).to_str(), "[1, 2, 3]".to_string()); assert!((vec![vec![], vec![1i], vec![1i, 1]]).to_str() == "[[], [1], [1, 1]]".to_string()); } }
28.940299
73
0.593605
01e840a3fa1b9fd46a11b12d9cfbd243fd073607
6,179
use std::str::FromStr; use crate::domain::*; use crate::util::*; #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] #[serde(tag = "type")] pub enum ConverterReturned { /// This will break process for ever. LogicalError { msg: String }, /// This can quick finish the process, and retry later. EnvError { msg: String }, /// No instance would be return. None, /// Tell `Nature` the task will be processed asynchronously, Nature will wait for seconds you assigned, and converter will callback to `Nature` later while result are ready. Delay { num: u32 }, /// return instances Instances { ins: Vec<Instance> }, /// return `SelfRouteInstance` SelfRoute { ins: Vec<SelfRouteInstance> }, } impl Default for ConverterReturned { fn default() -> Self { ConverterReturned::None } } #[derive(Serialize, Deserialize, Clone)] pub struct ConverterParameter { pub from: Instance, #[serde(skip_serializing_if = "is_default")] #[serde(default)] pub last_state: Option<Instance>, /// This is used for callback pub task_id: u64, #[serde(skip_serializing_if = "is_default")] #[serde(default)] pub master: Option<Instance>, /// executor setting #[serde(skip_serializing_if = "is_default")] #[serde(default)] pub cfg: String, } #[derive(Serialize, Deserialize, Debug, Default, Clone, PartialEq)] pub struct DynamicConverter { /// Only `Dynamic` and `Null` metaType support pub to: Option<String>, /// REST api for convert to `to` pub fun: Executor, /// use upstream's id as downstream's id. #[serde(skip_serializing_if = "is_default")] #[serde(default)] pub use_upstream_id: bool, #[serde(skip_serializing_if = "is_default")] #[serde(default)] pub delay: i32, } #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Ord, PartialOrd, Eq, Hash)] #[serde(rename_all = "camelCase")] pub enum Protocol { LocalRust, Http, Https, /// Nature will automatically implement the converter. it can't be used by user. 
Auto, BuiltIn, } impl FromStr for Protocol { type Err = NatureError; fn from_str(s: &str) -> Result<Self> { let cmp = &*s.to_uppercase(); match cmp { "LOCALRUST" => Ok(Protocol::LocalRust), "HTTP" => Ok(Protocol::Http), "HTTPS" => Ok(Protocol::Https), "BUILTIN" => Ok(Protocol::BuiltIn), _ => { let msg = format!("unknown protocol : {}", s); Err(NatureError::VerifyError(msg)) } } } } impl Default for Protocol { fn default() -> Self { Protocol::LocalRust } } #[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq, Ord, PartialOrd, Eq, Hash)] pub struct Executor { pub protocol: Protocol, #[serde(skip_serializing_if = "is_default")] #[serde(default)] pub url: String, /// A json string which resolved by executor itself #[serde(skip_serializing_if = "is_default")] #[serde(default)] pub settings: String, } impl Executor { pub fn for_local(path: &str) -> Self { Executor { protocol: Protocol::LocalRust, url: path.to_string(), settings: "".to_string(), } } pub fn new_auto() -> Self { Executor { protocol: Protocol::Auto, url: "".to_string(), settings: "".to_string(), } } } #[cfg(test)] mod executor_test { use super::*; #[test] fn serde_executor() { let exe = Executor { protocol: Protocol::LocalRust, url: "".to_string(), settings: "".to_string(), }; let ewe_s = serde_json::to_string(&exe).unwrap(); assert_eq!(ewe_s, "{\"protocol\":\"localRust\"}"); let ewe_dw: Executor = serde_json::from_str(&ewe_s).unwrap(); assert_eq!(ewe_dw, exe); } } #[cfg(test)] mod converter_returned_test { use super::*; #[test] fn none_test() { // converterReturned let none = ConverterReturned::None; let result = serde_json::to_string(&none).unwrap(); println!("{}", &result); let back: ConverterReturned = serde_json::from_str(&result).unwrap(); assert_eq!(none, back) } #[test] fn logical_error_test() { // converterReturned let original = ConverterReturned::LogicalError { msg: "some error".to_string() }; let result = serde_json::to_string(&original).unwrap(); println!("{}", &result); let back: 
ConverterReturned = serde_json::from_str(&result).unwrap(); assert_eq!(original, back) } #[test] fn env_error_test() { // converterReturned let original = ConverterReturned::EnvError { msg: "some error".to_string() }; let result = serde_json::to_string(&original).unwrap(); println!("{}", &result); let back: ConverterReturned = serde_json::from_str(&result).unwrap(); assert_eq!(original, back) } #[test] fn delay_test() { // converterReturned let original = ConverterReturned::Delay { num: 10 }; let result = serde_json::to_string(&original).unwrap(); println!("{}", &result); let back: ConverterReturned = serde_json::from_str(&result).unwrap(); assert_eq!(original, back) } #[test] fn instance_test() { // converterReturned let original = ConverterReturned::Instances { ins: vec![Instance::default()] }; let result = serde_json::to_string(&original).unwrap(); println!("{}", &result); let back: ConverterReturned = serde_json::from_str(&result).unwrap(); assert_eq!(original, back) } #[test] fn self_route_test() { // converterReturned let original = ConverterReturned::SelfRoute { ins: vec![SelfRouteInstance::default()] }; let result = serde_json::to_string(&original).unwrap(); println!("{}", &result); let back: ConverterReturned = serde_json::from_str(&result).unwrap(); assert_eq!(original, back) } }
29.564593
177
0.601715
fbc30340addfb732abcb254895a726cc3b613547
175
mod common; use common::*; fn invariant_fail_covariant<'a>() { let _in: In<Lifetime<'a>> = In( Invariant::<Lifetime<'static>>::default(), ); } fn main() {}
14.583333
50
0.571429
67a7f59ef7ae00bb0c264f2ba822050e4c15474e
1,505
use crate::{ ParseRule, Parser, properties::PropertyParser, functions::FunctionParser, modules::ModuleParser, }; use lexer::tokens::{ TokenType, }; use notices::{ DiagnosticSourceBuilder, DiagnosticLevel }; pub struct StatementParser; impl ParseRule for StatementParser{ fn parse(parser: &mut Parser) -> Result<(), ()>{ let token = parser.current_token(); match token.type_ { TokenType::KwMod => ModuleParser::parse(parser)?, TokenType::KwVal => PropertyParser::parse(parser)?, TokenType::KwVar => PropertyParser::parse(parser)?, TokenType::KwFun => FunctionParser::parse(parser)?, _ => { let source = match parser.request_source_snippet(token.pos){ Ok(source) => source, Err(diag) => { parser.emit_parse_diagnostic(&[], &[diag]); return Err(()) } }; let diag_source = DiagnosticSourceBuilder::new(parser.name.clone(), token.pos.start.0) .level(DiagnosticLevel::Error) .message(format!("Unexpected token found: {:?}", token.type_)) .range(token.pos.col_range()) .source(source) .build(); parser.emit_parse_diagnostic(&[], &[diag_source]); return Err(()); } } Ok(()) } }
31.354167
102
0.515615
144f5cd131018721a42c7b7dac53aef61aba5dad
2,158
// Stuff for the /games API call. #[derive(Deserialize, Debug)] struct APIGames { data: Vec<APIGamesData> } #[derive(Deserialize, Debug)] struct APIGamesData { id: String, names: APIGamesNames, categories: Option<APICategories> } #[derive(Deserialize, Debug)] struct APIGamesNames { international: String } #[derive(Deserialize, Debug)] struct APICategories { data: Vec<APICategoryData> } #[derive(Deserialize, Debug)] struct APICategoryData { id: String, name: String, #[serde(rename="type")] type_: String, variables: APICategoryVariables } #[derive(Deserialize, Debug)] struct APICategoryVariables { data: Vec<APICategoryVariablesData> } #[derive(Deserialize, Debug)] struct APICategoryVariablesData { id: String, #[serde(rename="is-subcategory")] is_subcategory: bool, values: APICategoryVariablesValues } #[derive(Deserialize, Debug)] struct APICategoryVariablesValues { values: BTreeMap<String, APICategoryVariablesValuesValue> } #[derive(Deserialize, Debug)] struct APICategoryVariablesValuesValue { label: String } // Stuff for the /leaderboards API call. #[derive(Deserialize, Debug)] struct APILeaderboards { data: APILeaderboardsData } #[derive(Deserialize, Debug)] struct APILeaderboardsData { runs: Vec<APIRun>, players: APILeaderboardsPlayers } #[derive(Deserialize, Debug)] struct APIRun { place: u64, run: APIRunRun, category: Option<APICategory> } #[derive(Deserialize, Debug)] struct APICategory { data: APICategoryData } #[derive(Deserialize, Debug)] struct APIRunRun { times: APIRunRunTimes, values: BTreeMap<String, String> // Variable ID to value ID. 
} #[derive(Deserialize, Debug)] struct APIRunRunTimes { primary_t: f64 } #[derive(Deserialize, Debug)] struct APILeaderboardsPlayers { data: Vec<APILeaderboardsPlayersData> } #[derive(Deserialize, Debug)] struct APILeaderboardsPlayersData { names: Option<APILeaderboardsPlayersNames>, name: Option<String> } #[derive(Deserialize, Debug)] struct APILeaderboardsPlayersNames { international: String } // Stuff for the /users API call. #[derive(Deserialize, Debug)] struct APIUsers { status: Option<u64>, data: Option<Vec<APIRun>> }
18.603448
61
0.757646
fb2f532b124c4ef90e979bb0a8f12db9e0a59191
1,950
use crate::test_utils::TestRandom; use crate::*; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; /// A header of a `BeaconBlock`. /// /// Spec v0.11.1 #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct BeaconBlockHeader { pub slot: Slot, pub proposer_index: u64, pub parent_root: Hash256, pub state_root: Hash256, pub body_root: Hash256, } impl SignedRoot for BeaconBlockHeader {} impl BeaconBlockHeader { /// Returns the `tree_hash_root` of the header. /// /// Spec v0.11.1 pub fn canonical_root(&self) -> Hash256 { Hash256::from_slice(&self.tree_hash_root()[..]) } /// Given a `body`, consumes `self` and returns a complete `BeaconBlock`. /// /// Spec v0.11.1 pub fn into_block<T: EthSpec>(self, body: BeaconBlockBody<T>) -> BeaconBlock<T> { BeaconBlock { slot: self.slot, proposer_index: self.proposer_index, parent_root: self.parent_root, state_root: self.state_root, body, } } /// Signs `self`, producing a `SignedBeaconBlockHeader`. pub fn sign<E: EthSpec>( self, secret_key: &SecretKey, fork: &Fork, genesis_validators_root: Hash256, spec: &ChainSpec, ) -> SignedBeaconBlockHeader { let epoch = self.slot.epoch(E::slots_per_epoch()); let domain = spec.get_domain(epoch, Domain::BeaconProposer, fork, genesis_validators_root); let message = self.signing_root(domain); let signature = Signature::new(message.as_bytes(), secret_key); SignedBeaconBlockHeader { message: self, signature, } } } #[cfg(test)] mod tests { use super::*; ssz_and_tree_hash_tests!(BeaconBlockHeader); }
27.857143
99
0.637949
89906409b93b60fa345c4871b902f20b6d11bdf1
315
use rand; use rand::Rng; fn generate_single_program() -> String { let corpus = ['<', '>', '+', '-', '[', ']', '.']; let mut rng = rand::thread_rng(); let mut program = String::new(); for _i in 0..rng.gen_range(10, 100) { program.push(*rng.choose(&corpus).unwrap()); } program }
24.230769
53
0.526984
18519dab6e506cee9bb750df4891276462bd138b
3,927
use ckb_jsonrpc_types::{AlertMessage, ChainInfo, PeerState}; use ckb_network_alert::notifier::Notifier as AlertNotifier; use ckb_shared::shared::Shared; use ckb_sync::Synchronizer; use ckb_traits::BlockMedianTimeContext; use ckb_util::Mutex; use jsonrpc_core::Result; use jsonrpc_derive::rpc; use std::sync::Arc; /// RPC Module Stats for getting various statistic data. #[rpc(server)] pub trait StatsRpc { /// Returns statistics about the chain. /// /// ## Examples /// /// Request /// /// ```json /// { /// "id": 42, /// "jsonrpc": "2.0", /// "method": "get_blockchain_info", /// "params": [] /// } /// ``` /// /// Response /// /// ```json /// { /// "id": 42, /// "jsonrpc": "2.0", /// "result": { /// "alerts": [ /// { /// "id": "0x2a", /// "message": "An example alert message!", /// "notice_until": "0x24bcca57c00", /// "priority": "0x1" /// } /// ], /// "chain": "ckb", /// "difficulty": "0x1f4003", /// "epoch": "0x7080018000001", /// "is_initial_block_download": true, /// "median_time": "0x5cd2b105" /// } /// } /// ``` #[rpc(name = "get_blockchain_info")] fn get_blockchain_info(&self) -> Result<ChainInfo>; /// Return state info of peers /// /// ## Examples /// /// Request /// /// ```json /// { /// "id": 42, /// "jsonrpc": "2.0", /// "method": "get_peers_state", /// "params": [] /// } /// ``` /// /// Response /// /// ```json /// { /// "id": 42, /// "jsonrpc": "2.0", /// "result": [ /// { /// "blocks_in_flight": "0x56", /// "last_updated": "0x16a95af332d", /// "peer": "0x1" /// } /// ] /// } /// ``` #[deprecated( since = "0.12.0", note = "Please use RPC [`get_peers`](trait.NetRpc.html#tymethod.get_peers) instead" )] #[rpc(name = "get_peers_state")] fn get_peers_state(&self) -> Result<Vec<PeerState>>; } pub(crate) struct StatsRpcImpl { pub shared: Shared, pub synchronizer: Synchronizer, pub alert_notifier: Arc<Mutex<AlertNotifier>>, } impl StatsRpc for StatsRpcImpl { fn get_blockchain_info(&self) -> Result<ChainInfo> { let chain = 
self.synchronizer.shared.consensus().id.clone(); let (tip_header, median_time) = { let snapshot = self.shared.snapshot(); let tip_header = snapshot.tip_header().clone(); let median_time = snapshot.block_median_time(&tip_header.hash()); (tip_header, median_time) }; let epoch = tip_header.epoch(); let difficulty = tip_header.difficulty(); let is_initial_block_download = self .synchronizer .shared .active_chain() .is_initial_block_download(); let alerts: Vec<AlertMessage> = { let now = faketime::unix_time_as_millis(); let mut notifier = self.alert_notifier.lock(); notifier.clear_expired_alerts(now); notifier .noticed_alerts() .into_iter() .map(Into::into) .collect() }; Ok(ChainInfo { chain, median_time: median_time.into(), epoch: epoch.into(), difficulty, is_initial_block_download, alerts, }) } fn get_peers_state(&self) -> Result<Vec<PeerState>> { // deprecated Ok(self .synchronizer .shared() .state() .read_inflight_blocks() .blocks_iter() .map(|(peer, blocks)| PeerState::new(peer.value(), 0, blocks.len())) .collect()) } }
26.533784
91
0.496053
8798352016cd7962707561be607457973a6e65d1
21,778
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! The Green Task implementation //! //! This module contains the glue to the libstd runtime necessary to integrate //! M:N scheduling. This GreenTask structure is hidden as a trait object in all //! rust tasks and virtual calls are made in order to interface with it. //! //! Each green task contains a scheduler if it is currently running, and it also //! contains the rust task itself in order to juggle around ownership of the //! values. use std::any::Any; use std::mem; use std::raw; use std::rt::Runtime; use std::rt::local::Local; use std::rt::mutex::NativeMutex; use std::rt::rtio; use std::rt::stack; use std::rt::task::{Task, BlockedTask, TaskOpts}; use std::rt; use context::Context; use coroutine::Coroutine; use sched::{Scheduler, SchedHandle, RunOnce}; use stack::StackPool; /// The necessary fields needed to keep track of a green task (as opposed to a /// 1:1 task). pub struct GreenTask { /// Coroutine that this task is running on, otherwise known as the register /// context and the stack that this task owns. This field is optional to /// relinquish ownership back to a scheduler to recycle stacks at a later /// date. pub coroutine: Option<Coroutine>, /// Optional handle back into the home sched pool of this task. This field /// is lazily initialized. pub handle: Option<SchedHandle>, /// Slot for maintaining ownership of a scheduler. If a task is running, /// this value will be Some(sched) where the task is running on "sched". 
pub sched: Option<Box<Scheduler>>, /// Temporary ownership slot of a std::rt::task::Task object. This is used /// to squirrel that libstd task away while we're performing green task /// operations. pub task: Option<Box<Task>>, /// Dictates whether this is a sched task or a normal green task pub task_type: TaskType, /// Home pool that this task was spawned into. This field is lazily /// initialized until when the task is initially scheduled, and is used to /// make sure that tasks are always woken up in the correct pool of /// schedulers. pub pool_id: uint, // See the comments in the scheduler about why this is necessary pub nasty_deschedule_lock: NativeMutex, io: ::native::io::IoFactory, } pub enum TaskType { TypeGreen(Option<Home>), TypeSched, } pub enum Home { AnySched, HomeSched(SchedHandle), } /// Spawn a new green task, assuming the current task is a green task. /// /// # Failure /// /// This function will fail if the current task is not already a green task. pub fn spawn(f: proc(): Send) { spawn_opts(TaskOpts::new(), f) } /// See documentation for `spawn`. pub fn spawn_opts(opts: TaskOpts, f: proc(): Send) { let mut task: Box<Task> = Local::take(); let task = match task.maybe_take_runtime::<GreenTask>() { Some(mut green) => { green.put_task(task); green } None => { Local::put(task); fail!("cannot spawn a green task from a non-green task") } }; // First, set up a bomb which when it goes off will restore the local // task unless its disarmed. This will allow us to gracefully fail from // inside of `configure` which allocates a new task. struct Bomb { inner: Option<Box<GreenTask>> } impl Drop for Bomb { fn drop(&mut self) { let _ = self.inner.take().map(|task| task.put()); } } let mut bomb = Bomb { inner: Some(task) }; // Spawns a task into the current scheduler. We allocate the new task's // stack from the scheduler's stack pool, and then configure it // accordingly to `opts`. Afterwards we bootstrap it immediately by // switching to it. 
// // Upon returning, our task is back in TLS and we're good to return. let sibling = { let sched = bomb.inner.as_mut().unwrap().sched.as_mut().unwrap(); GreenTask::configure(&mut sched.stack_pool, opts, f) }; let mut me = bomb.inner.take().unwrap(); let sched = me.sched.take().unwrap(); sched.run_task(me, sibling) } /// Trampoline code for all new green tasks which are running around. This /// function is passed through to Context::new as the initial rust landing pad /// for all green tasks. This code is actually called after the initial context /// switch onto a green thread. /// /// The first argument to this function is the `Box<GreenTask>` pointer, and /// the next two arguments are the user-provided procedure for running code. /// /// The goal for having this weird-looking function is to reduce the number of /// allocations done on a green-task startup as much as possible. extern fn bootstrap_green_task(task: uint, code: *mut (), env: *mut ()) -> ! { // Acquire ownership of the `proc()` let start: proc() = unsafe { mem::transmute(raw::Procedure { code: code, env: env }) }; // Acquire ownership of the `Box<GreenTask>` let mut task: Box<GreenTask> = unsafe { mem::transmute(task) }; // First code after swap to this new context. Run our cleanup job task.pool_id = { let sched = task.sched.as_mut().unwrap(); sched.run_cleanup_job(); sched.task_state.increment(); sched.pool_id }; // Convert our green task to a libstd task and then execute the code // requested. This is the "try/catch" block for this green task and // is the wrapper for *all* code run in the task. let mut start = Some(start); let task = task.swap().run(|| start.take().unwrap()()).destroy(); // Once the function has exited, it's time to run the termination // routine. This means we need to context switch one more time but // clean ourselves up on the other end. Since we have no way of // preserving a handle to the GreenTask down to this point, this // unfortunately must call `GreenTask::convert`. 
In order to avoid // this we could add a `terminate` function to the `Runtime` trait // in libstd, but that seems less appropriate since the conversion // method exists. GreenTask::convert(task).terminate(); } impl GreenTask { /// Creates a new green task which is not homed to any particular scheduler /// and will not have any contained Task structure. pub fn new(stack_pool: &mut StackPool, stack_size: Option<uint>, start: proc():Send) -> Box<GreenTask> { GreenTask::new_homed(stack_pool, stack_size, AnySched, start) } /// Creates a new task (like `new`), but specifies the home for new task. pub fn new_homed(stack_pool: &mut StackPool, stack_size: Option<uint>, home: Home, start: proc():Send) -> Box<GreenTask> { // Allocate ourselves a GreenTask structure let mut ops = GreenTask::new_typed(None, TypeGreen(Some(home))); // Allocate a stack for us to run on let stack_size = stack_size.unwrap_or_else(|| rt::min_stack()); let mut stack = stack_pool.take_stack(stack_size); let context = Context::new(bootstrap_green_task, ops.as_uint(), start, &mut stack); // Package everything up in a coroutine and return ops.coroutine = Some(Coroutine { current_stack_segment: stack, saved_context: context, }); return ops; } /// Creates a new green task with the specified coroutine and type, this is /// useful when creating scheduler tasks. pub fn new_typed(coroutine: Option<Coroutine>, task_type: TaskType) -> Box<GreenTask> { box GreenTask { pool_id: 0, coroutine: coroutine, task_type: task_type, sched: None, handle: None, nasty_deschedule_lock: unsafe { NativeMutex::new() }, task: Some(box Task::new()), io: ::native::io::IoFactory::new(), } } /// Creates a new green task with the given configuration options for the /// contained Task object. The given stack pool is also used to allocate a /// new stack for this task. 
pub fn configure(pool: &mut StackPool, opts: TaskOpts, f: proc():Send) -> Box<GreenTask> { let TaskOpts { name, stack_size, on_exit } = opts; let mut green = GreenTask::new(pool, stack_size, f); { let task = green.task.as_mut().unwrap(); task.name = name; task.death.on_exit = on_exit; } return green; } /// Just like the `maybe_take_runtime` function, this function should *not* /// exist. Usage of this function is _strongly_ discouraged. This is an /// absolute last resort necessary for converting a libstd task to a green /// task. /// /// This function will assert that the task is indeed a green task before /// returning (and will kill the entire process if this is wrong). pub fn convert(mut task: Box<Task>) -> Box<GreenTask> { match task.maybe_take_runtime::<GreenTask>() { Some(mut green) => { green.put_task(task); green } None => rtabort!("not a green task any more?"), } } pub fn give_home(&mut self, new_home: Home) { match self.task_type { TypeGreen(ref mut home) => { *home = Some(new_home); } TypeSched => rtabort!("type error: used SchedTask as GreenTask"), } } pub fn take_unwrap_home(&mut self) -> Home { match self.task_type { TypeGreen(ref mut home) => home.take().unwrap(), TypeSched => rtabort!("type error: used SchedTask as GreenTask"), } } // New utility functions for homes. pub fn is_home_no_tls(&self, sched: &Scheduler) -> bool { match self.task_type { TypeGreen(Some(AnySched)) => { false } TypeGreen(Some(HomeSched(SchedHandle { sched_id: ref id, .. }))) => { *id == sched.sched_id() } TypeGreen(None) => { rtabort!("task without home"); } TypeSched => { // Awe yea rtabort!("type error: expected: TypeGreen, found: TaskSched"); } } } pub fn homed(&self) -> bool { match self.task_type { TypeGreen(Some(AnySched)) => { false } TypeGreen(Some(HomeSched(SchedHandle { .. 
}))) => { true } TypeGreen(None) => { rtabort!("task without home"); } TypeSched => { rtabort!("type error: expected: TypeGreen, found: TaskSched"); } } } pub fn is_sched(&self) -> bool { match self.task_type { TypeGreen(..) => false, TypeSched => true, } } // Unsafe functions for transferring ownership of this GreenTask across // context switches pub fn as_uint(&self) -> uint { self as *const GreenTask as uint } pub unsafe fn from_uint(val: uint) -> Box<GreenTask> { mem::transmute(val) } // Runtime glue functions and helpers pub fn put_with_sched(mut self: Box<GreenTask>, sched: Box<Scheduler>) { assert!(self.sched.is_none()); self.sched = Some(sched); self.put(); } pub fn put_task(&mut self, task: Box<Task>) { assert!(self.task.is_none()); self.task = Some(task); } pub fn swap(mut self: Box<GreenTask>) -> Box<Task> { let mut task = self.task.take().unwrap(); task.put_runtime(self); return task; } pub fn put(self: Box<GreenTask>) { assert!(self.sched.is_some()); Local::put(self.swap()); } fn terminate(mut self: Box<GreenTask>) -> ! { let sched = self.sched.take().unwrap(); sched.terminate_current_task(self) } // This function is used to remotely wakeup this green task back on to its // original pool of schedulers. In order to do so, each tasks arranges a // SchedHandle upon descheduling to be available for sending itself back to // the original pool. // // Note that there is an interesting transfer of ownership going on here. We // must relinquish ownership of the green task, but then also send the task // over the handle back to the original scheduler. In order to safely do // this, we leverage the already-present "nasty descheduling lock". The // reason for doing this is that each task will bounce on this lock after // resuming after a context switch. By holding the lock over the enqueueing // of the task, we're guaranteed that the SchedHandle's memory will be valid // for this entire function. 
// // An alternative would include having incredibly cheaply cloneable handles, // but right now a SchedHandle is something like 6 allocations, so it is // *not* a cheap operation to clone a handle. Until the day comes that we // need to optimize this, a lock should do just fine (it's completely // uncontended except for when the task is rescheduled). fn reawaken_remotely(mut self: Box<GreenTask>) { unsafe { let mtx = &mut self.nasty_deschedule_lock as *mut NativeMutex; let handle = self.handle.as_mut().unwrap() as *mut SchedHandle; let _guard = (*mtx).lock(); (*handle).send(RunOnce(self)); } } } impl Runtime for GreenTask { fn yield_now(mut self: Box<GreenTask>, cur_task: Box<Task>) { self.put_task(cur_task); let sched = self.sched.take().unwrap(); sched.yield_now(self); } fn maybe_yield(mut self: Box<GreenTask>, cur_task: Box<Task>) { self.put_task(cur_task); let sched = self.sched.take().unwrap(); sched.maybe_yield(self); } fn deschedule(mut self: Box<GreenTask>, times: uint, cur_task: Box<Task>, f: |BlockedTask| -> Result<(), BlockedTask>) { self.put_task(cur_task); let mut sched = self.sched.take().unwrap(); // In order for this task to be reawoken in all possible contexts, we // may need a handle back in to the current scheduler. When we're woken // up in anything other than the local scheduler pool, this handle is // used to send this task back into the scheduler pool. if self.handle.is_none() { self.handle = Some(sched.make_handle()); self.pool_id = sched.pool_id; } // This code is pretty standard, except for the usage of // `GreenTask::convert`. Right now if we use `reawaken` directly it will // expect for there to be a task in local TLS, but that is not true for // this deschedule block (because the scheduler must retain ownership of // the task while the cleanup job is running). In order to get around // this for now, we invoke the scheduler directly with the converted // Task => GreenTask structure. 
if times == 1 { sched.deschedule_running_task_and_then(self, |sched, task| { match f(task) { Ok(()) => {} Err(t) => { t.wake().map(|t| { sched.enqueue_task(GreenTask::convert(t)) }); } } }); } else { sched.deschedule_running_task_and_then(self, |sched, task| { for task in task.make_selectable(times) { match f(task) { Ok(()) => {}, Err(task) => { task.wake().map(|t| { sched.enqueue_task(GreenTask::convert(t)) }); break } } } }); } } fn reawaken(mut self: Box<GreenTask>, to_wake: Box<Task>) { self.put_task(to_wake); assert!(self.sched.is_none()); // Optimistically look for a local task, but if one's not available to // inspect (in order to see if it's in the same sched pool as we are), // then just use our remote wakeup routine and carry on! let mut running_task: Box<Task> = match Local::try_take() { Some(task) => task, None => return self.reawaken_remotely() }; // Waking up a green thread is a bit of a tricky situation. We have no // guarantee about where the current task is running. The options we // have for where this current task is running are: // // 1. Our original scheduler pool // 2. Some other scheduler pool // 3. Something that isn't a scheduler pool // // In order to figure out what case we're in, this is the reason that // the `maybe_take_runtime` function exists. Using this function we can // dynamically check to see which of these cases is the current // situation and then dispatch accordingly. // // In case 1, we just use the local scheduler to resume ourselves // immediately (if a rescheduling is possible). // // In case 2 and 3, we need to remotely reawaken ourself in order to be // transplanted back to the correct scheduler pool. 
match running_task.maybe_take_runtime::<GreenTask>() { Some(mut running_green_task) => { running_green_task.put_task(running_task); let sched = running_green_task.sched.take().unwrap(); if sched.pool_id == self.pool_id { sched.run_task(running_green_task, self); } else { self.reawaken_remotely(); // put that thing back where it came from! running_green_task.put_with_sched(sched); } } None => { self.reawaken_remotely(); Local::put(running_task); } } } fn spawn_sibling(mut self: Box<GreenTask>, cur_task: Box<Task>, opts: TaskOpts, f: proc():Send) { self.put_task(cur_task); self.put(); ::native::task::spawn_opts(opts, f) } // Local I/O is provided by the scheduler's event loop fn local_io<'a>(&'a mut self) -> Option<rtio::LocalIo<'a>> { Some(rtio::LocalIo::new(&mut self.io)) } fn stack_bounds(&self) -> (uint, uint) { let c = self.coroutine.as_ref() .expect("GreenTask.stack_bounds called without a coroutine"); // Don't return the red zone as part of the usable stack of this task, // it's essentially an implementation detail. 
(c.current_stack_segment.start() as uint + stack::RED_ZONE, c.current_stack_segment.end() as uint) } fn can_block(&self) -> bool { false } fn wrap(self: Box<GreenTask>) -> Box<Any + Send> { self as Box<Any + Send> } } #[cfg(test)] mod tests { use std::task; use std::rt::task::TaskOpts; use super::super::{PoolConfig, SchedPool}; fn spawn_opts(opts: TaskOpts, f: proc():Send) { let mut pool = SchedPool::new(PoolConfig { threads: 1, event_loop_factory: ::basic::event_loop, }); pool.spawn(opts, f); pool.shutdown(); } #[test] fn smoke() { let (tx, rx) = channel(); spawn_opts(TaskOpts::new(), proc() { tx.send(()); }); rx.recv(); } #[test] fn smoke_fail() { let (tx, rx) = channel::<int>(); spawn_opts(TaskOpts::new(), proc() { let _tx = tx; fail!() }); assert_eq!(rx.recv_opt(), Err(())); } #[test] fn smoke_opts() { let mut opts = TaskOpts::new(); opts.name = Some("test".into_maybe_owned()); opts.stack_size = Some(20 * 4096); let (tx, rx) = channel(); opts.on_exit = Some(proc(r) tx.send(r)); spawn_opts(opts, proc() {}); assert!(rx.recv().is_ok()); } #[test] fn smoke_opts_fail() { let mut opts = TaskOpts::new(); let (tx, rx) = channel(); opts.on_exit = Some(proc(r) tx.send(r)); spawn_opts(opts, proc() { fail!() }); assert!(rx.recv().is_err()); } #[test] fn yield_test() { let (tx, rx) = channel(); spawn_opts(TaskOpts::new(), proc() { for _ in range(0u, 10) { task::deschedule(); } tx.send(()); }); rx.recv(); } #[test] fn spawn_children() { let (tx1, rx) = channel(); spawn_opts(TaskOpts::new(), proc() { let (tx2, rx) = channel(); spawn(proc() { let (tx3, rx) = channel(); spawn(proc() { tx3.send(()); }); rx.recv(); tx2.send(()); }); rx.recv(); tx1.send(()); }); rx.recv(); } }
36.056291
81
0.584764
e8073d985df35f9e5af214c206103aa37081b4cb
949
use uart_16550::SerialPort; use spin::Mutex; use lazy_static::lazy_static; lazy_static! { pub static ref SERIAL1: Mutex<SerialPort> = { let mut serial_port = unsafe { SerialPort::new(0x3F8) }; serial_port.init(); Mutex::new(serial_port) }; } #[doc(hidden)] pub fn _print(args: ::core::fmt::Arguments) { use core::fmt::Write; SERIAL1.lock().write_fmt(args).expect("Printing to serial failed"); } /// Prints to the host through the serial interface. #[macro_export] macro_rules! serial_print { ($($arg:tt)*) => { $crate::serial::_print(format_args!($($arg)*)); }; } /// Prints to the host through the serial interface, appending a newline. #[macro_export] macro_rules! serial_println { () => ($crate::serial_print!("\n")); ($fmt:expr) => ($crate::serial_print!(concat!($fmt, "\n"))); ($fmt:expr, $($arg:tt)*) => ($crate::serial_print!( concat!($fmt, "\n"), $($arg)*)); }
27.114286
73
0.617492
5bdd02edd1524c861028457a435cb15cc4d15c74
6,236
use num::{NumAssignOps, NumOps, Zero}; use std::any::Any; use std::f64; use std::fmt::Debug; use std::ops::Neg; use crate::scalar::{ComplexField, Field, SubsetOf, SupersetOf}; use crate::simd::{SimdRealField, SimdValue}; /// Lane-wise generalisation of `ComplexField` for SIMD complex fields. /// /// Each lane of an SIMD complex field should contain one complex field. #[allow(missing_docs)] pub trait SimdComplexField: SubsetOf<Self> + SupersetOf<f64> + Field + Clone + Neg<Output = Self> // + MeetSemilattice // + JoinSemilattice + Send + Sync + Any + 'static + Debug + NumAssignOps + NumOps + PartialEq { /// Type of the coefficients of a complex number. type SimdRealField: SimdRealField<SimdBool = <Self as SimdValue>::SimdBool>; complex_trait_methods!(SimdRealField, simd_); /// Computes the sum of all the lanes of `self`. fn simd_horizontal_sum(self) -> Self::Element; /// Computes the product of all the lanes of `self`. fn simd_horizontal_product(self) -> Self::Element; } // Blanket impl: ComplexField => SimdComplexField impl<T: ComplexField> SimdComplexField for T { type SimdRealField = T::RealField; #[inline(always)] fn from_simd_real(re: Self::SimdRealField) -> Self { Self::from_real(re) } #[inline(always)] fn simd_real(self) -> Self::SimdRealField { self.real() } #[inline(always)] fn simd_imaginary(self) -> Self::SimdRealField { self.imaginary() } #[inline(always)] fn simd_modulus(self) -> Self::SimdRealField { self.modulus() } #[inline(always)] fn simd_modulus_squared(self) -> Self::SimdRealField { self.modulus_squared() } #[inline(always)] fn simd_argument(self) -> Self::SimdRealField { self.argument() } #[inline(always)] fn simd_norm1(self) -> Self::SimdRealField { self.norm1() } #[inline(always)] fn simd_scale(self, factor: Self::SimdRealField) -> Self { self.scale(factor) } #[inline(always)] fn simd_unscale(self, factor: Self::SimdRealField) -> Self { self.unscale(factor) } #[inline(always)] fn simd_to_polar(self) -> (Self::SimdRealField, Self::SimdRealField) 
{ self.to_polar() } #[inline(always)] fn simd_to_exp(self) -> (Self::SimdRealField, Self) { self.to_exp() } #[inline(always)] fn simd_signum(self) -> Self { self.signum() } #[inline(always)] fn simd_floor(self) -> Self { self.floor() } #[inline(always)] fn simd_ceil(self) -> Self { self.ceil() } #[inline(always)] fn simd_round(self) -> Self { self.round() } #[inline(always)] fn simd_trunc(self) -> Self { self.trunc() } #[inline(always)] fn simd_fract(self) -> Self { self.fract() } #[inline(always)] fn simd_mul_add(self, a: Self, b: Self) -> Self { self.mul_add(a, b) } #[inline(always)] fn simd_abs(self) -> Self::SimdRealField { self.abs() } #[inline(always)] fn simd_hypot(self, other: Self) -> Self::SimdRealField { self.hypot(other) } #[inline(always)] fn simd_recip(self) -> Self { self.recip() } #[inline(always)] fn simd_conjugate(self) -> Self { self.conjugate() } #[inline(always)] fn simd_sin(self) -> Self { self.sin() } #[inline(always)] fn simd_cos(self) -> Self { self.cos() } #[inline(always)] fn simd_sin_cos(self) -> (Self, Self) { self.sin_cos() } #[inline(always)] fn simd_sinh_cosh(self) -> (Self, Self) { self.sinh_cosh() } #[inline(always)] fn simd_tan(self) -> Self { self.tan() } #[inline(always)] fn simd_asin(self) -> Self { self.asin() } #[inline(always)] fn simd_acos(self) -> Self { self.acos() } #[inline(always)] fn simd_atan(self) -> Self { self.atan() } #[inline(always)] fn simd_sinh(self) -> Self { self.sinh() } #[inline(always)] fn simd_cosh(self) -> Self { self.cosh() } #[inline(always)] fn simd_tanh(self) -> Self { self.tanh() } #[inline(always)] fn simd_asinh(self) -> Self { self.asinh() } #[inline(always)] fn simd_acosh(self) -> Self { self.acosh() } #[inline(always)] fn simd_atanh(self) -> Self { self.atanh() } #[inline(always)] fn simd_sinc(self) -> Self { self.sinc() } #[inline(always)] fn simd_sinhc(self) -> Self { self.sinhc() } #[inline(always)] fn simd_cosc(self) -> Self { self.cosc() } #[inline(always)] fn simd_coshc(self) -> Self { 
self.coshc() } #[inline(always)] fn simd_log(self, base: Self::SimdRealField) -> Self { self.log(base) } #[inline(always)] fn simd_log2(self) -> Self { self.log2() } #[inline(always)] fn simd_log10(self) -> Self { self.log10() } #[inline(always)] fn simd_ln(self) -> Self { self.ln() } #[inline(always)] fn simd_ln_1p(self) -> Self { self.ln_1p() } #[inline(always)] fn simd_sqrt(self) -> Self { self.sqrt() } #[inline(always)] fn simd_exp(self) -> Self { self.exp() } #[inline(always)] fn simd_exp2(self) -> Self { self.exp2() } #[inline(always)] fn simd_exp_m1(self) -> Self { self.exp_m1() } #[inline(always)] fn simd_powi(self, n: i32) -> Self { self.powi(n) } #[inline(always)] fn simd_powf(self, n: Self::SimdRealField) -> Self { self.powf(n) } #[inline(always)] fn simd_powc(self, n: Self) -> Self { self.powc(n) } #[inline(always)] fn simd_cbrt(self) -> Self { self.cbrt() } #[inline(always)] fn simd_horizontal_sum(self) -> Self::Element { self } #[inline(always)] fn simd_horizontal_product(self) -> Self::Element { self } }
22.759124
80
0.551956
fecbfd99f98665fe40f17094fca4e8811e5c1c9e
1,362
//! # 205. 同构字符串 //! https://leetcode-cn.com/problems/isomorphic-strings/ //! 给定两个字符串 s 和 t,判断它们是否是同构的。 //! 如果 s 中的字符可以被替换得到 t ,那么这两个字符串是同构的。 //! 所有出现的字符都必须用另一个字符替换,同时保留字符的顺序。两个字符不能映射到同一个字符上,但字符可以映射自己本身。 //! # 解题思路 //! y=f(x) && x=f-1(y) pub struct Solution; impl Solution { pub fn is_isomorphic(s: String, t: String) -> bool { let mut chars_map = [None; 256]; let mut mapped = [false; 256]; for i in 0..s.len() { match chars_map[s.as_bytes()[i] as usize] { Some(v) => { if v != t.as_bytes()[i] { return false; } } None => { //不能存在相同的映射关系,比如"ab""aa" a->a b不能再映射a if mapped[t.as_bytes()[i] as usize] { return false; } chars_map[s.as_bytes()[i] as usize] = Some(t.as_bytes()[i]); mapped[t.as_bytes()[i] as usize] = true; } } } true } } #[cfg(test)] mod tests { #[test] fn it_works() { assert_eq!( super::Solution::is_isomorphic("aa".into(), "bb".into()), true ); assert_eq!( super::Solution::is_isomorphic("ab".into(), "ba".into()), true ); } }
27.795918
80
0.446402
f79f73516989f1597f4a55b425bd33009daddd66
7,266
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! The `Clone` trait for types that cannot be 'implicitly copied'. //! //! In Rust, some simple types are "implicitly copyable" and when you //! assign them or pass them as arguments, the receiver will get a copy, //! leaving the original value in place. These types do not require //! allocation to copy and do not have finalizers (i.e. they do not //! contain owned boxes or implement [`Drop`]), so the compiler considers //! them cheap and safe to copy. For other types copies must be made //! explicitly, by convention implementing the [`Clone`] trait and calling //! the [`clone`][clone] method. //! //! [`Clone`]: trait.Clone.html //! [clone]: trait.Clone.html#tymethod.clone //! [`Drop`]: ../../std/ops/trait.Drop.html //! //! Basic usage example: //! //! ``` //! let s = String::new(); // String type implements Clone //! let copy = s.clone(); // so we can clone it //! ``` //! //! To easily implement the Clone trait, you can also use //! `#[derive(Clone)]`. Example: //! //! ``` //! #[derive(Clone)] // we add the Clone trait to Morpheus struct //! struct Morpheus { //! blue_pill: f32, //! red_pill: i64, //! } //! //! fn main() { //! let f = Morpheus { blue_pill: 0.0, red_pill: 0 }; //! let copy = f.clone(); // and now we can clone it! //! } //! ``` #![stable(feature = "rust1", since = "1.0.0")] /// A common trait for the ability to explicitly duplicate an object. 
/// /// Differs from [`Copy`] in that [`Copy`] is implicit and extremely inexpensive, while /// `Clone` is always explicit and may or may not be expensive. In order to enforce /// these characteristics, Rust does not allow you to reimplement [`Copy`], but you /// may reimplement `Clone` and run arbitrary code. /// /// Since `Clone` is more general than [`Copy`], you can automatically make anything /// [`Copy`] be `Clone` as well. /// /// ## Derivable /// /// This trait can be used with `#[derive]` if all fields are `Clone`. The `derive`d /// implementation of [`clone`] calls [`clone`] on each field. /// /// ## How can I implement `Clone`? /// /// Types that are [`Copy`] should have a trivial implementation of `Clone`. More formally: /// if `T: Copy`, `x: T`, and `y: &T`, then `let x = y.clone();` is equivalent to `let x = *y;`. /// Manual implementations should be careful to uphold this invariant; however, unsafe code /// must not rely on it to ensure memory safety. /// /// An example is an array holding more than 32 elements of a type that is `Clone`; the standard /// library only implements `Clone` up until arrays of size 32. In this case, the implementation of /// `Clone` cannot be `derive`d, but can be implemented as: /// /// [`Copy`]: ../../std/marker/trait.Copy.html /// [`clone`]: trait.Clone.html#tymethod.clone /// /// ``` /// #[derive(Copy)] /// struct Stats { /// frequencies: [i32; 100], /// } /// /// impl Clone for Stats { /// fn clone(&self) -> Stats { *self } /// } /// ``` /// /// ## Additional implementors /// /// In addition to the [implementors listed below][impls], /// the following types also implement `Clone`: /// /// * Function item types (i.e. the distinct types defined for each function) /// * Function pointer types (e.g. `fn() -> i32`) /// * Array types, for all sizes, if the item type also implements `Clone` (e.g. `[i32; 123456]`) /// * Tuple types, if each component also implements `Clone` (e.g. 
`()`, `(i32, bool)`) /// * Closure types, if they capture no value from the environment /// or if all such captured values implement `Clone` themselves. /// Note that variables captured by shared reference always implement `Clone` /// (even if the referent doesn't), /// while variables captured by mutable reference never implement `Clone`. /// /// [impls]: #implementors #[stable(feature = "rust1", since = "1.0.0")] #[lang = "clone"] pub trait Clone : Sized { /// Returns a copy of the value. /// /// # Examples /// /// ``` /// let hello = "Hello"; // &str implements Clone /// /// assert_eq!("Hello", hello.clone()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[must_use = "cloning is often expensive and is not expected to have side effects"] fn clone(&self) -> Self; /// Performs copy-assignment from `source`. /// /// `a.clone_from(&b)` is equivalent to `a = b.clone()` in functionality, /// but can be overridden to reuse the resources of `a` to avoid unnecessary /// allocations. #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn clone_from(&mut self, source: &Self) { *self = source.clone() } } // FIXME(aburka): these structs are used solely by #[derive] to // assert that every component of a type implements Clone or Copy. // // These structs should never appear in user code. #[doc(hidden)] #[allow(missing_debug_implementations)] #[unstable(feature = "derive_clone_copy", reason = "deriving hack, should not be public", issue = "0")] pub struct AssertParamIsClone<T: Clone + ?Sized> { _field: ::marker::PhantomData<T> } #[doc(hidden)] #[allow(missing_debug_implementations)] #[unstable(feature = "derive_clone_copy", reason = "deriving hack, should not be public", issue = "0")] pub struct AssertParamIsCopy<T: Copy + ?Sized> { _field: ::marker::PhantomData<T> } /// Implementations of `Clone` for primitive types. /// /// Implementations that cannot be described in Rust /// are implemented in `SelectionContext::copy_clone_conditions()` in librustc. 
#[cfg(not(stage0))] mod impls { use super::Clone; macro_rules! impl_clone { ($($t:ty)*) => { $( #[stable(feature = "rust1", since = "1.0.0")] impl Clone for $t { #[inline] fn clone(&self) -> Self { *self } } )* } } impl_clone! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 bool char } #[unstable(feature = "never_type", issue = "35121")] impl Clone for ! { #[inline] fn clone(&self) -> Self { *self } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> Clone for *const T { #[inline] fn clone(&self) -> Self { *self } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: ?Sized> Clone for *mut T { #[inline] fn clone(&self) -> Self { *self } } // Shared references can be cloned, but mutable references *cannot*! #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T: ?Sized> Clone for &'a T { #[inline] fn clone(&self) -> Self { *self } } }
33.638889
99
0.606799
e6d01302e3955b04e73afcfbac6b7b38b6259633
5,171
//! Test cases for `crate::threading` use quickcheck_macros::quickcheck; use std::{ sync::{ atomic::{AtomicBool, AtomicU32, Ordering}, Arc, }, thread::{sleep, yield_now}, time::{Duration, Instant}, }; use super::threading; #[test] fn unpark_external_thread() { let parent_thread = threading::current(); let f: &'static AtomicBool = Box::leak(Box::new(AtomicBool::new(false))); threading::spawn(move || { f.store(true, Ordering::Relaxed); // `parent_thread` wasn't created by `threading::spawn`, but this // should succeed parent_thread.unpark(); }); threading::park(); assert!(f.load(Ordering::Relaxed)); } #[test] fn park_early() { let parent_thread = threading::current(); let f: &'static AtomicBool = Box::leak(Box::new(AtomicBool::new(false))); let jh = threading::spawn(move || { threading::park(); assert!(f.load(Ordering::Relaxed)); // Wake up the parent thread, signifying success parent_thread.unpark(); }); sleep(Duration::from_millis(100)); f.store(true, Ordering::Relaxed); // Wake up the sleeping child thread jh.thread().unpark(); threading::park(); } #[test] fn park_late() { let parent_thread = threading::current(); let f: &'static AtomicBool = Box::leak(Box::new(AtomicBool::new(false))); let jh = threading::spawn(move || { sleep(Duration::from_millis(100)); threading::park(); assert!(f.load(Ordering::Relaxed)); // Wake up the parent thread, signifying success parent_thread.unpark(); }); f.store(true, Ordering::Relaxed); // Wake up the child thread, which probably hasn't yet parked jh.thread().unpark(); threading::park(); } #[test] fn remote_park_properties() { let parent_thread = threading::current(); let done: &_ = Box::leak(Box::new(AtomicBool::new(false))); let exit: &_ = Box::leak(Box::new(AtomicBool::new(false))); let counter: &_ = Box::leak(Box::new(AtomicU32::new(0))); let jh = threading::spawn(move || { while !exit.load(Ordering::Relaxed) { counter.fetch_add(1, Ordering::Relaxed); } done.store(true, Ordering::Relaxed); // Wake up the parent thread, 
signifying success parent_thread.unpark(); }); sleep(Duration::from_millis(200)); // Suspend and resume the child thread in a rapid succession for _ in 0..1000 { jh.thread().park(); jh.thread().unpark(); } // Park a lot for _ in 0..1000 { jh.thread().park(); } for _ in 0..1000 { jh.thread().unpark(); } // Check that the child thread is running let i1 = counter.load(Ordering::Relaxed); yield_now(); sleep(Duration::from_millis(200)); yield_now(); let i2 = counter.load(Ordering::Relaxed); assert_ne!(i1, i2); for _ in 0..1000 { // Suspend the child thread jh.thread().park(); // Check that the child thread is not running let i1 = counter.load(Ordering::Relaxed); yield_now(); let i2 = counter.load(Ordering::Relaxed); assert_eq!(i1, i2); // Resume the child thread jh.thread().unpark(); // Check that the child thread is running let i1 = counter.load(Ordering::Relaxed); let start = Instant::now(); let i2 = loop { yield_now(); let i2 = counter.load(Ordering::Relaxed); if i1 != i2 || start.elapsed() > Duration::from_millis(20000) { break i2; } }; assert_ne!(i1, i2); // This should be no-op jh.thread().unpark(); // Make a token available jh.thread().park(); // Immediately consume that token } // Stop the child thread (this should work assuming that the child thread // is still running) exit.store(true, Ordering::Relaxed); // Wait for the child thread to exit threading::park(); assert!(done.load(Ordering::Relaxed)); } #[quickcheck] fn qc_remote_park_accumulation(ops: Vec<u8>) { let parent_thread = threading::current(); let done = Arc::new(AtomicBool::new(false)); let exit = Arc::new(AtomicBool::new(false)); let done2 = Arc::clone(&done); let exit2 = Arc::clone(&exit); let jh = threading::spawn(move || { while !exit2.load(Ordering::Relaxed) {} done2.store(true, Ordering::Relaxed); // Wake up the parent thread, signifying success parent_thread.unpark(); }); let mut park_level = 0; for op in ops { if park_level < 0 || (op & 1 == 0) { park_level += 1; jh.thread().park(); } else { 
park_level -= 1; jh.thread().unpark(); } } for _ in 0..park_level { jh.thread().unpark(); } // Stop the child thread (this should work assuming that the child thread // is still running) exit.store(true, Ordering::Relaxed); // Wait for the child thread to exit threading::park(); assert!(done.load(Ordering::Relaxed)); }
26.792746
77
0.584606
ac8c0c283ef844c91ae9bf4a1670ef7e8f32bab0
546
#![feature(const_generics)] #![feature(const_evaluatable_checked)] #![feature(const_fn)] trait MiniTypeId { const TYPE_ID: u64; } impl<T> MiniTypeId for T { const TYPE_ID: u64 = 0; } enum Lift<const V: bool> {} trait IsFalse {} impl IsFalse for Lift<false> {} const fn is_same_type<T: MiniTypeId, U: MiniTypeId>() -> bool { T::TYPE_ID == U::TYPE_ID } fn requires_distinct<A, B>(_a: A, _b: B) where A: MiniTypeId, B: MiniTypeId, Lift<{is_same_type::<A, B>()}>: IsFalse {} fn main() { requires_distinct("str", 12); }
18.827586
63
0.648352
16f852040b91cc793d46227b040c30be00ed1e7e
974
use proc_macro2::*; use syn; use util::*; pub fn derive(mut item: syn::DeriveInput) -> Result<TokenStream, Diagnostic> { let type_params = item .generics .type_params() .map(|param| param.ident.clone()) .collect::<Vec<_>>(); for type_param in type_params { let where_clause = item.generics.make_where_clause(); where_clause .predicates .push(parse_quote!(#type_param: NonAggregate)); } let (impl_generics, ty_generics, where_clause) = item.generics.split_for_impl(); let struct_name = &item.ident; let dummy_mod = format!("_impl_non_aggregate_for_{}", item.ident).to_lowercase(); Ok(wrap_in_dummy_mod( Ident::new(&dummy_mod, Span::call_site()), quote! { use diesel::expression::NonAggregate; impl #impl_generics NonAggregate for #struct_name #ty_generics #where_clause { } }, )) }
27.828571
85
0.603696
6707090e8a52a9907ec93970ac4f318282beecc7
4,723
use crate::prelude::*; use crate::headers::from_headers::*; use azure_core::headers::{ content_type_from_headers, etag_from_headers, session_token_from_headers, }; use azure_core::{collect_pinned_stream, Context, Response as HttpResponse}; use chrono::{DateTime, Utc}; #[derive(Debug, Clone)] pub struct GetCollectionBuilder { client: CollectionClient, consistency_level: Option<ConsistencyLevel>, context: Context, } impl GetCollectionBuilder { pub(crate) fn new(client: CollectionClient) -> Self { Self { client, consistency_level: None, context: Context::new(), } } setters! { consistency_level: ConsistencyLevel => Some(consistency_level), context: Context => context, } pub fn into_future(self) -> GetCollection { Box::pin(async move { let mut request = self .client .prepare_request_with_collection_name(http::Method::GET); azure_core::headers::add_optional_header2(&self.consistency_level, &mut request)?; let response = self .client .pipeline() .send( self.context.clone().insert(ResourceType::Collections), &mut request, ) .await?; GetCollectionResponse::try_from(response).await }) } } /// The future returned by calling `into_future` on the builder. 
pub type GetCollection = futures::future::BoxFuture<'static, crate::Result<GetCollectionResponse>>; #[cfg(feature = "into_future")] impl std::future::IntoFuture for GetCollectionBuilder { type Future = GetCollection; type Output = <GetCollection as std::future::Future>::Output; fn into_future(self) -> Self::Future { Self::into_future(self) } } #[derive(Debug, Clone)] pub struct GetCollectionResponse { pub collection: Collection, pub last_state_change: DateTime<Utc>, pub etag: String, pub collection_partition_index: u64, pub collection_service_index: u64, pub lsn: u64, pub schema_version: String, pub alt_content_path: String, pub content_path: String, pub global_committed_lsn: u64, pub number_of_read_regions: u32, pub item_lsn: u64, pub transport_request_id: u64, pub cosmos_llsn: u64, pub cosmos_item_llsn: u64, pub charge: f64, pub service_version: String, pub activity_id: uuid::Uuid, pub session_token: String, pub gateway_version: String, pub server: String, pub xp_role: u32, pub content_type: String, pub content_location: String, pub date: DateTime<Utc>, } impl GetCollectionResponse { pub async fn try_from(response: HttpResponse) -> crate::Result<Self> { let (_status_code, headers, pinned_stream) = response.deconstruct(); let body = collect_pinned_stream(pinned_stream).await?; Ok(Self { collection: serde_json::from_slice(&body)?, last_state_change: last_state_change_from_headers(&headers)?, etag: etag_from_headers(&headers)?, collection_partition_index: collection_partition_index_from_headers(&headers)?, collection_service_index: collection_service_index_from_headers(&headers)?, lsn: lsn_from_headers(&headers)?, schema_version: schema_version_from_headers(&headers)?.to_owned(), alt_content_path: alt_content_path_from_headers(&headers)?.to_owned(), content_path: content_path_from_headers(&headers)?.to_owned(), global_committed_lsn: global_committed_lsn_from_headers(&headers)?, number_of_read_regions: number_of_read_regions_from_headers(&headers)?, item_lsn: 
item_lsn_from_headers(&headers)?, transport_request_id: transport_request_id_from_headers(&headers)?, cosmos_llsn: cosmos_llsn_from_headers(&headers)?, cosmos_item_llsn: cosmos_item_llsn_from_headers(&headers)?, charge: request_charge_from_headers(&headers)?, service_version: service_version_from_headers(&headers)?.to_owned(), activity_id: activity_id_from_headers(&headers)?, session_token: session_token_from_headers(&headers)?, gateway_version: gateway_version_from_headers(&headers)?.to_owned(), server: server_from_headers(&headers)?.to_owned(), xp_role: role_from_headers(&headers)?, content_type: content_type_from_headers(&headers)?.to_owned(), content_location: content_location_from_headers(&headers)?.to_owned(), date: date_from_headers(&headers)?, }) } }
36.898438
99
0.66589
1cdab36d941dee0ad37fadefeb10e88e08c132ed
895
// Copyright 2016 lazy-static.rs Developers // // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. extern crate spin; use self::spin::Once; pub struct Lazy<T: Sync>(Once<T>); impl<T: Sync> Lazy<T> { #[inline(always)] pub const fn new() -> Self { Lazy(Once::new()) } #[inline(always)] pub fn get<F>(&'static self, builder: F) -> &T where F: FnOnce() -> T { self.0.call_once(builder) } } #[macro_export] // #[allow_internal_unstable] #[doc(hidden)] macro_rules! __lazy_static_create { ($NAME:ident, $T:ty) => { static $NAME: $crate::lazy::Lazy<$T> = $crate::lazy::Lazy::new(); } }
24.861111
77
0.62905
39709f3c438280608680e9756110dd60737255fc
23,614
// "asi_vulkan" - Aldaron's System Interface - Vulkan
//
// Copyright Jeron A. Lau 2018.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// https://www.boost.org/LICENSE_1_0.txt)
//
// Thin unsafe wrappers over dynamically-loaded Vulkan entry points. Almost
// every function here takes `&Gpu`, pulls the loaded function pointers out
// via `connection.get()`, and forwards to the raw Vulkan call.
//
// NOTE(review): this file relies on `std::mem::uninitialized()` for Vulkan
// out-parameters. That API is deprecated and is undefined behavior for most
// types on modern rustc; migrating to `MaybeUninit` is recommended.

#[macro_use]
extern crate dl_api;
extern crate awi;
extern crate euler;

// Modules
pub mod types;
mod memory;
mod image;
mod gpu;
mod sprite;
mod style;
pub mod fence;

use std::{ mem, u64 };
use std::ptr::{ null, null_mut };
use std::os::raw::c_void;

// Export Types
pub use self::memory::{ Memory, Buffer, BufferBuilderType };
pub use self::image::Image;
pub use self::sprite::Sprite;
pub use self::style::Style;
pub use self::fence::Fence;
pub use self::gpu::Gpu;
pub use euler::Vec3;

// Raw Vulkan FFI types (VkFlags, Vk* structs/enums, function signatures).
use self::types::*;

// Memory property bits matching VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT /
// VK_MEMORY_PROPERTY_HOST_COHERENT_BIT from the Vulkan spec.
const VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT: VkFlags = 0x00000002;
const VK_MEMORY_PROPERTY_HOST_COHERENT_BIT: VkFlags = 0x00000004;
// MSAA sample count used for all render targets in this crate.
const VK_SAMPLE_COUNT: VkSampleCount = VkSampleCount::Sc8;

/// GPU-side 4x4 transform matrix uniform (column data as 16 f32s).
// TODO
#[derive(Clone)]
#[repr(C)]
pub struct TransformUniform {
    pub mat4: [f32; 16],
}

/// GPU-side fog uniform: RGBA fog color plus (start, end) range.
// TODO
#[derive(Clone)]
#[repr(C)]
pub struct FogUniform {
    pub fogc: [f32; 4],
    pub fogr: [f32; 2],
}

/// Queues image `next` of the swapchain for presentation
/// (vkQueuePresentKHR). Waits on no semaphores.
pub unsafe fn queue_present(connection: &Gpu, next: u32) {
    let connection = connection.get();

    let present_info = VkPresentInfo {
        s_type: VkStructureType::PresentInfo,
        next: null(),
        wait_semaphore_count: 0,
        wait_semaphores: null(),
        swapchain_count: 1,
        swapchains: &connection.swapchain,
        image_indices: &next,
        results: null_mut(),
    };

    (connection.queue_present)(connection.present_queue, &present_info)
        .unwrap()
}

/// Blocks until the logical device is idle (vkDeviceWaitIdle).
pub unsafe fn wait_idle(connection: &Gpu) {
    let connection = connection.get();

    (connection.wait_idle)(connection.device).unwrap();
}

/// Queries the layout of mip 0 / layer 0 of `image`'s color aspect
/// (vkGetImageSubresourceLayout).
pub unsafe fn subres_layout(connection: &Gpu, image: &Image) -> VkSubresourceLayout {
    let connection = connection.get();
    // Out-parameter filled in by the driver below.
    let mut layout = mem::uninitialized();

    (connection.subres_layout)(
        connection.device,
        image.image().0,
        &VkImageSubresource {
            aspect_mask: VkImageAspectFlags::Color,
            mip_level: 0,
            array_layer: 0,
        },
        &mut layout
    );

    layout
}

/// Maps `size` bytes of `vb_memory` into host address space and returns the
/// pointer as `*mut T` (vkMapMemory). Caller must `unmap_memory` later.
pub unsafe fn map_memory<T>(connection: &Gpu, vb_memory: VkDeviceMemory,
    size: u64) -> *mut T where T: Clone
{
    let connection = connection.get();
    let mut mapped = mem::uninitialized();

    (connection.mapmem)(connection.device, vb_memory, 0, size, 0,
        &mut mapped as *mut *mut _ as *mut *mut c_void).unwrap();

    mapped
}

/// Unmaps a previously mapped device allocation (vkUnmapMemory).
pub unsafe fn unmap_memory(connection: &Gpu, vb_memory: VkDeviceMemory) {
    let connection = connection.get();

    (connection.unmap)(connection.device, vb_memory);
}

/// Picks a memory-type index whose bit is set in `type_bits` (from
/// vkGet*MemoryRequirements) and whose property flags contain `reqs_mask`.
/// Panics if no memory type satisfies both constraints.
pub unsafe fn get_memory_type(connection: &Gpu, mut type_bits: u32,
    reqs_mask: VkFlags) -> u32
{
    let connection = connection.get();
    let mut props = mem::uninitialized();
    // TODO; only needs to happen once
    (connection.get_memprops)(connection.gpu, &mut props);

    for i in 0..(props.memory_type_count as usize) {
        // Memory type req's matches vkGetImageMemoryRequirements()?
        if (type_bits & 1) == 1
            && (props.memory_types[i].property_flags & reqs_mask)
                == reqs_mask
        {
            return i as u32;
        }
        // Check next bit from vkGetImageMemoryRequirements().
        type_bits >>= 1;
    }

    // Nothing works, panic
    panic!("Vulkan couldn't find suitable memory type!")
}

/// Binds one descriptor set to the graphics bind point of the current
/// command buffer (vkCmdBindDescriptorSets).
pub unsafe fn cmd_bind_descsets(connection: &Gpu,
    pipeline_layout: VkPipelineLayout, desc_set: VkDescriptorSet)
{
    let connection = connection.get();

    (connection.bind_descsets)(
        connection.command_buffer,
        VkPipelineBindPoint::Graphics,
        pipeline_layout,
        0,
        1,
        [desc_set].as_ptr(),
        0,
        null(),
    );
}

/// Binds a graphics pipeline on the current command buffer
/// (vkCmdBindPipeline).
pub unsafe fn cmd_bind_pipeline(connection: &Gpu, pipeline: VkPipeline) {
    let connection = connection.get();

    (connection.bind_pipeline)(
        connection.command_buffer,
        VkPipelineBindPoint::Graphics,
        pipeline
    );
}

/// Binds 1-3 vertex buffers, all at offset 0 (vkCmdBindVertexBuffers).
/// Panics for any other buffer count.
#[inline(always)]
pub unsafe fn cmd_bind_vb(connection: &Gpu, vertex_buffers: &[VkBuffer]) {
    let connection = connection.get();

    // Offset arrays must live for the duration of the call; one per arity.
    let offsets1 : [u64; 1] = [0];
    let offsets2 : [u64; 2] = [0, 0];
    let offsets3 : [u64; 3] = [0, 0, 0];

    let length = vertex_buffers.len();

    (connection.bind_vb)(
        connection.command_buffer,
        0,
        length as u32,
        vertex_buffers.as_ptr(),
        match length {
            1 => offsets1.as_ptr(),
            2 => offsets2.as_ptr(),
            3 => offsets3.as_ptr(),
            _ => panic!("Wrong number of vertex buffers (Not 1-3)"),
        },
    );
}

/// Records a non-indexed draw (vkCmdDraw). Debug builds assert at least one
/// full triangle (more than 2 vertices).
pub unsafe fn cmd_draw(connection: &Gpu, nvertices: u32, ninstances: u32,
    firstvertex: u32, firstinstance: u32)
{
    let connection = connection.get();

    debug_assert!(nvertices > 2);

    (connection.draw)(connection.command_buffer, nvertices, ninstances,
        firstvertex, firstinstance);
}

/// Creates a binary semaphore (vkCreateSemaphore).
pub unsafe fn new_semaphore(connection: &Gpu) -> VkSemaphore {
    let connection = connection.get();
    let mut semaphore = mem::uninitialized();

    (connection.new_semaphore)(
        connection.device,
        &VkSemaphoreCreateInfo {
            s_type: VkStructureType::SemaphoreCreateInfo,
            next: null(),
            flags: 0,
        },
        null(),
        &mut semaphore,
    ).unwrap();

    semaphore
}

/// Destroys a semaphore created by `new_semaphore` (vkDestroySemaphore).
pub unsafe fn drop_semaphore(connection: &Gpu, semaphore: VkSemaphore) {
    let connection = connection.get();

    (connection.drop_semaphore)(
        connection.device,
        semaphore,
        null(),
    );
}

/// Begins recording a frame: starts the command buffer, transitions `image`
/// from PresentSrc to ColorAttachmentOptimal, begins the render pass on
/// `frame_buffer` with clear color = `connection.rgb` and depth cleared to
/// 1.0, then sets viewport/scissor via `dynamic_state`.
pub unsafe fn draw_begin(connection: &Gpu, render_pass: VkRenderPass,
    image: VkImage, frame_buffer: VkFramebuffer)
{
    let connection = connection.get();

    let begin_info = VkCommandBufferBeginInfo {
        s_type: VkStructureType::CommandBufferBeginInfo,
        p_next: null(),
        flags: VkCommandBufferUsage::OneTimeSubmitBit,
        p_inheritance_info: null(),
    };

    (connection.begin_cmdbuff)(connection.command_buffer, &begin_info)
        .unwrap();

    // Acquire the presentable image for rendering: PresentSrc ->
    // ColorAttachmentOptimal, ignoring queue-family ownership (!0 ==
    // VK_QUEUE_FAMILY_IGNORED).
    let layout_transition_barrier = VkImageMemoryBarrier {
        s_type: VkStructureType::ImageMemoryBarrier,
        p_next: null(),
        src_access_mask: VkAccess::MemoryReadBit,
        dst_access_mask: VkAccess::ColorAttachmentReadWrite,
        old_layout: VkImageLayout::PresentSrc,
        new_layout: VkImageLayout::ColorAttachmentOptimal,
        src_queue_family_index: !0,
        dst_queue_family_index: !0,
        image,
        subresource_range: VkImageSubresourceRange {
            aspect_mask: VkImageAspectFlags::Color,
            base_mip_level: 0,
            level_count: 1,
            base_array_layer: 0,
            layer_count: 1,
        },
    };

    (connection.pipeline_barrier)(
        connection.command_buffer,
        VkPipelineStage::TopOfPipe,
        VkPipelineStage::TopOfPipeAndColorAttachmentOutput,
        0, 0, null(), 0, null(), 1, &layout_transition_barrier);

    let rgb = connection.rgb;

    // activate render pass:
    let clear_value = [
        VkClearValue { color: VkClearColorValue {
            float32: [rgb.x, rgb.y, rgb.z, 1.0]
        } },
        VkClearValue { depth_stencil: VkClearDepthStencilValue {
            depth: 1.0, stencil: 0
        } },
    ];
    let render_pass_begin_info = VkRenderPassBeginInfo {
        s_type: VkStructureType::RenderPassBeginInfo,
        p_next: null(),
        render_pass: render_pass,
        framebuffer: frame_buffer,
        render_area: VkRect2D {
            offset: VkOffset2D { x: 0, y: 0 },
            extent: connection.extent,
        },
        clear_value_count: clear_value.len() as u32,
        p_clear_values: clear_value.as_ptr(),
    };

    (connection.begin_render)(
        connection.command_buffer,
        &render_pass_begin_info,
        VkSubpassContents::Inline
    );

    dynamic_state(&connection, connection.command_buffer);
}

/// Ends the current render pass (vkCmdEndRenderPass).
pub unsafe fn end_render_pass(connection: &Gpu) {
    let connection = connection.get();

    (connection.end_render_pass)(connection.command_buffer);
}

/// Update the dynamic state (resize viewport).
unsafe fn dynamic_state(connection: &gpu::GpuContext,
    command_buffer: VkCommandBuffer)
{
    // Full-extent viewport with the standard [0, 1] depth range.
    (connection.set_viewport)(command_buffer, 0, 1, &VkViewport {
        x: 0.0, y: 0.0,
        width: connection.extent.width as f32,
        height: connection.extent.height as f32,
        min_depth: 0.0,
        max_depth: 1.0,
    });
    // Scissor matches the viewport exactly (no clipping inside it).
    (connection.set_scissor)(command_buffer, 0, 1, &VkRect2D {
        offset: VkOffset2D { x: 0, y: 0 },
        extent: connection.extent,
    });
}

/// Transitions `image` to PresentSrc at end of frame so it can be handed to
/// the presentation engine (vkCmdPipelineBarrier).
pub unsafe fn pipeline_barrier(connection: &Gpu, image: VkImage) {
    let connection = connection.get();

    let barrier = VkImageMemoryBarrier {
        s_type: VkStructureType::ImageMemoryBarrier,
        p_next: null(),
        src_access_mask: VkAccess::ColorAttachmentWriteBit,
        dst_access_mask: VkAccess::MemoryReadBit,
        old_layout: VkImageLayout::Undefined, // ColorAttachmentOptimal,
        new_layout: VkImageLayout::PresentSrc,
        src_queue_family_index: !0,
        dst_queue_family_index: !0,
        image: image,
        subresource_range: VkImageSubresourceRange {
            aspect_mask: VkImageAspectFlags::Color,
            base_mip_level: 0,
            level_count: 1,
            base_array_layer: 0,
            layer_count: 1,
        },
    };

    (connection.pipeline_barrier)(
        connection.command_buffer,
        VkPipelineStage::AllCommands,
        VkPipelineStage::BottomOfPipe,
        0, 0, null(), 0, null(), 1, &barrier);
}

/// Acquires the next swapchain image index (vkAcquireNextImageKHR),
/// signaling `fence` when the image is ready. On `OutOfDate` it retries
/// recursively.
/// NOTE(review): the retry does not recreate the swapchain first, so a
/// persistently out-of-date swapchain would recurse forever — confirm the
/// caller recreates it elsewhere.
pub unsafe fn get_next_image(vulkan: &Gpu, fence: VkFence) -> u32 {
    let mut image_id = mem::uninitialized();

    match (vulkan.get().get_next_image)(
        vulkan.get().device,
        vulkan.get().swapchain,
        u64::MAX,
        0 /* no semaphore */,
        fence,
        &mut image_id,
    ) {
        VkResult::Success => { /* nothing */ }
        VkResult::OutOfDate => {
            println!("Oof");
            return get_next_image(vulkan, fence);
        }
        // Any other result is treated as fatal.
        a => {
            println!("{}", a);
            a.unwrap()
        },
    };

    image_id
}

/// Returns the surface's minimum swapchain image count, asserting it is at
/// most 2 (this crate only supports double buffering).
pub unsafe fn get_buffering(connection: &Gpu) -> u32 {
    // Set Data
    let connection = connection.get();
    let mut surface_info = mem::uninitialized();

    // Run Function
    (connection.get_surface_capabilities)(connection.gpu,
        connection.surface, &mut surface_info).unwrap();

    // Use minimum number of buffers.
    assert!(surface_info.min_image_count <= 2);
    surface_info.min_image_count
}

/// Records a full-frame color copy from `src_image` (TransferSrcOptimal) to
/// `dst_image` (TransferDstOptimal) of the given size (vkCmdCopyImage).
#[inline(always)]
pub unsafe fn copy_image(connection: &Gpu, src_image: &Image,
    dst_image: &Image, width: u16, height: u16)
{
    let connection = connection.get();

    (connection.copy_image)(
        connection.command_buffer,
        src_image.image().0,
        VkImageLayout::TransferSrcOptimal,
        dst_image.image().0,
        VkImageLayout::TransferDstOptimal,
        1,
        &VkImageCopy {
            src_subresource: VkImageSubresourceLayers {
                aspect_mask: VkImageAspectFlags::Color,
                mip_level: 0,
                base_array_layer: 0,
                layer_count: 1,
            },
            src_offset: VkOffset3D { x: 0, y: 0, z: 0 },
            dst_subresource: VkImageSubresourceLayers {
                aspect_mask: VkImageAspectFlags::Color,
                mip_level: 0,
                base_array_layer: 0,
                layer_count: 1,
            },
            dst_offset: VkOffset3D { x: 0, y: 0, z: 0 },
            extent: VkExtent3D {
                width: width as u32,
                height: height as u32,
                depth: 1
            },
        }
    );
}

/// Creates the swapchain sized to the surface's max extent
/// (vkCreateSwapchainKHR), then fetches the swapchain images into
/// `swap_images` and writes the actual image count into `image_count`
/// (vkGetSwapchainImagesKHR, called twice: count then fill).
/// Also updates `connection.extent` as a side effect.
#[inline(always)]
pub unsafe fn create_swapchain(
    connection: &Gpu,
    image_count: &mut u32,
    swap_images: *mut VkImage)
{
    let mut connection = connection.get_mut();
    let surface = connection.surface;

    let mut surface_info = mem::uninitialized();

    (connection.get_surface_capabilities)(connection.gpu,
        connection.surface, &mut surface_info).unwrap();

    // Update extent.
    connection.extent = surface_info.max_image_extent;

    (connection.new_swapchain)(
        connection.device,
        &VkSwapchainCreateInfoKHR {
            s_type: VkStructureType::SwapchainCreateInfo,
            p_next: null(),
            flags: 0,
            surface,
            min_image_count: *image_count,
            image_format: connection.format.clone(),
            image_color_space: VkColorSpaceKHR::SrgbNonlinearKhr,
            image_extent: connection.extent,
            image_array_layers: 1,
            image_usage: VkImageUsage::ColorAttachmentBit,
            image_sharing_mode: VkSharingMode::Exclusive,
            pre_transform: VkSurfaceTransformFlagBitsKHR::Identity,
            composite_alpha: VkCompositeAlphaFlagBitsKHR::Opaque,
            present_mode: VkPresentModeKHR::Fifo,
            clipped: 1/*do the clipping rendering optimization*/,
            old_swapchain: mem::zeroed(), // vulkan->swapchain,
            queue_family_index_count: 0,
            p_queue_family_indices: null(),
        },
        null(),
        &mut connection.swapchain
    ).unwrap();

    (connection.get_swapcount)(connection.device, connection.swapchain,
        image_count, null_mut()).unwrap();
    (connection.get_swapcount)(connection.device, connection.swapchain,
        image_count, swap_images).unwrap();
}

/// Creates a 2D image view for `image` (vkCreateImageView). `has_color`
/// selects RGBA swizzles + the color aspect; otherwise identity swizzles +
/// the depth aspect.
unsafe fn create_img_view(connection: &Gpu, image: VkImage,
    format: VkFormat, has_color: bool) -> VkImageView
{
    let connection = connection.get();
    let mut image_view = mem::uninitialized();

    let (components, aspect_mask) = if has_color {
        (
            VkComponentMapping {
                r: VkComponentSwizzle::R,
                g: VkComponentSwizzle::G,
                b: VkComponentSwizzle::B,
                a: VkComponentSwizzle::A,
            },
            VkImageAspectFlags::Color
        )
    } else {
        (
            VkComponentMapping {
                r: VkComponentSwizzle::Identity,
                g: VkComponentSwizzle::Identity,
                b: VkComponentSwizzle::Identity,
                a: VkComponentSwizzle::Identity,
            },
            VkImageAspectFlags::Depth
        )
    };

    (connection.create_imgview)(
        connection.device,
        &VkImageViewCreateInfo {
            s_type: VkStructureType::ImageViewCreateInfo,
            p_next: null(),
            flags: 0,
            view_type: VkImageViewType::SingleLayer2d,
            format: format.clone(),
            components,
            subresource_range: VkImageSubresourceRange {
                aspect_mask,
                base_mip_level: 0,
                level_count: 1,
                base_array_layer: 0,
                layer_count: 1,
            },
            image,
        },
        null(),
        &mut image_view
    ).unwrap();

    image_view
}

/// Finishes recording the current command buffer (vkEndCommandBuffer).
pub unsafe fn end_cmdbuff(connection: &Gpu) {
    let connection = connection.get();

    (connection.end_cmdbuff)(connection.command_buffer).unwrap();
}

/// Submits the current command buffer on the present queue
/// (vkQueueSubmit), signaling `submit_fence` and optionally `semaphore`.
pub unsafe fn queue_submit(connection: &Gpu, submit_fence: &Fence,
    pipelane_stage: VkPipelineStage, semaphore: Option<VkSemaphore>)
{
    let connection = connection.get();

    (connection.queue_submit)(
        connection.present_queue,
        1,
        &VkSubmitInfo {
            s_type: VkStructureType::SubmitInfo,
            p_next: null(),
            wait_semaphore_count: 0,
            wait_semaphores: null(),
            wait_dst_stage_mask: &pipelane_stage,
            command_buffer_count: 1,
            p_command_buffers: &connection.command_buffer,
            signal_semaphore_count: if semaphore.is_none() { 0 }
                else { 1 },
            p_signal_semaphores: if let Some(ref sem) = semaphore {
                sem
            } else {
                null()
            },
        },
        submit_fence.fence()
    ).unwrap();
}

/// Blocks until `fence` signals (vkWaitForFences via the fence module).
pub unsafe fn wait_fence(connection: &Gpu, fence: &Fence) {
    fence::wait(connection, fence.fence());
}

/// For each swapchain image: transitions Undefined -> PresentSrc with a
/// one-shot command buffer (submit + wait + reset per image), then creates
/// its color image view. Returns the reusable fence used for those submits.
#[inline(always)]
pub unsafe fn create_image_view(
    vulkan: &Gpu,
    image_count: u32,
    swap_images: &mut [VkImage; 2],
    image_views: &mut [VkImageView; 2]) -> Fence
{
    let submit_fence = Fence::new(vulkan);

    for i in 0..(image_count as usize) {
        (vulkan.get().begin_cmdbuff)(
            vulkan.get().command_buffer,
            &VkCommandBufferBeginInfo {
                s_type: VkStructureType::CommandBufferBeginInfo,
                p_next: null(),
                flags: VkCommandBufferUsage::OneTimeSubmitBit,
                p_inheritance_info: null(),
            }
        ).unwrap();

        (vulkan.get().pipeline_barrier)(
            vulkan.get().command_buffer,
            VkPipelineStage::TopOfPipe,
            VkPipelineStage::TopOfPipe,
            0, 0, null(), 0, null(), 1,
            &VkImageMemoryBarrier {
                s_type: VkStructureType::ImageMemoryBarrier,
                p_next: null(),
                src_access_mask: VkAccess::NoFlags,
                dst_access_mask: VkAccess::MemoryReadBit,
                old_layout: VkImageLayout::Undefined,
                new_layout: VkImageLayout::PresentSrc,
                src_queue_family_index: !0,
                dst_queue_family_index: !0,
                image: swap_images[i],
                subresource_range: VkImageSubresourceRange {
                    aspect_mask: VkImageAspectFlags::Color,
                    base_mip_level: 0,
                    level_count: 1,
                    base_array_layer: 0,
                    layer_count: 1,
                },
            }
        );

        end_cmdbuff(vulkan);
        queue_submit(vulkan, &submit_fence,
            VkPipelineStage::ColorAttachmentOutput, None);
        wait_fence(vulkan, &submit_fence);

        (vulkan.get().reset_fence)(vulkan.get().device, 1,
            &submit_fence.fence()).unwrap();
        (vulkan.get().reset_cmdbuff)(vulkan.get().command_buffer, 0);

        image_views[i] = create_img_view(vulkan, swap_images[i],
            vulkan.get().format.clone(), true);
    }

    submit_fence
}

/// Creates the multisampled (Sc8) color target sized to the current extent.
#[inline(always)]
pub unsafe fn create_ms_buffer(vulkan: &Gpu) -> Image {
    let extent = vulkan.get().extent;

    Image::new(vulkan, extent.width, extent.height,
        vulkan.get().format.clone(), VkImageTiling::Optimal,
        VkImageUsage::TransientColorAttachment,
        VkImageLayout::Undefined, 0, VK_SAMPLE_COUNT)
}

/// Creates the D16 depth buffer and records + submits a one-shot barrier
/// moving it Undefined -> DepthStencilAttachmentOptimal (waiting on
/// `submit_fence`, then resetting fence and command buffer).
#[inline(always)]
pub unsafe fn create_depth_buffer(
    vulkan: &Gpu,
    submit_fence: &Fence) -> Image
{
    let extent = vulkan.get().extent;

    let image = Image::new(vulkan, extent.width, extent.height,
        VkFormat::D16Unorm, VkImageTiling::Optimal,
        VkImageUsage::DepthStencilAttachmentBit,
        VkImageLayout::Undefined, 0, VK_SAMPLE_COUNT);

    // before using this depth buffer we must change it's layout:
    (vulkan.get().begin_cmdbuff)(
        vulkan.get().command_buffer,
        &VkCommandBufferBeginInfo {
            s_type: VkStructureType::CommandBufferBeginInfo,
            p_next: null(),
            flags: VkCommandBufferUsage::OneTimeSubmitBit,
            p_inheritance_info: null(),
        }
    ).unwrap();

    (vulkan.get().pipeline_barrier)(
        vulkan.get().command_buffer,
        VkPipelineStage::TopOfPipe,
        VkPipelineStage::TopOfPipeAndEarlyFragmentTests,
        0, 0, null(), 0, null(), 1,
        &VkImageMemoryBarrier {
            s_type: VkStructureType::ImageMemoryBarrier,
            p_next: null(),
            src_access_mask: VkAccess::NoFlags,
            dst_access_mask: VkAccess::DepthStencilAttachmentReadWrite,
            old_layout: VkImageLayout::Undefined,
            new_layout: VkImageLayout::DepthStencilAttachmentOptimal,
            src_queue_family_index: !0,
            dst_queue_family_index: !0,
            image: image.image().0,
            subresource_range: VkImageSubresourceRange {
                aspect_mask: VkImageAspectFlags::Depth,
                base_mip_level: 0,
                level_count: 1,
                base_array_layer: 0,
                layer_count: 1,
            },
        }
    );

    end_cmdbuff(vulkan);
    queue_submit(vulkan, &submit_fence,
        VkPipelineStage::ColorAttachmentOutput, None);
    wait_fence(vulkan, &submit_fence);

    (vulkan.get().reset_fence)(vulkan.get().device, 1,
        &submit_fence.fence()).unwrap();
    (vulkan.get().reset_cmdbuff)(vulkan.get().command_buffer, 0);

    image
}

/// Builds the single-subpass render pass with three attachments:
/// 0 = multisampled intermediary color, 1 = depth, 2 = single-sample
/// resolve target that ends in PresentSrc (vkCreateRenderPass).
#[inline(always)]
pub unsafe fn create_render_pass(connection: &Gpu) -> VkRenderPass {
    let connection = connection.get();

    let mut render_pass = mem::uninitialized();

    (connection.new_renderpass)(
        connection.device,
        &VkRenderPassCreateInfo {
            s_type: VkStructureType::RenderPassCreateInfo,
            p_next: null(),
            flags: 0,
            attachment_count: 3,
            attachments: [
                // Itermediary
                VkAttachmentDescription {
                    flags: 0,
                    format: connection.format.clone(),
                    samples: VK_SAMPLE_COUNT,
                    load_op: VkAttachmentLoadOp::Clear,
                    store_op: VkAttachmentStoreOp::DontCare,
                    stencil_load_op: VkAttachmentLoadOp::DontCare,
                    stencil_store_op: VkAttachmentStoreOp::DontCare,
                    initial_layout: VkImageLayout::Undefined,
                    final_layout: VkImageLayout::ColorAttachmentOptimal,
                },
                // Depth Buffer
                VkAttachmentDescription {
                    flags: 0,
                    format: VkFormat::D16Unorm,
                    samples: VK_SAMPLE_COUNT,
                    load_op: VkAttachmentLoadOp::Clear,
                    store_op: VkAttachmentStoreOp::DontCare,
                    stencil_load_op: VkAttachmentLoadOp::DontCare,
                    stencil_store_op: VkAttachmentStoreOp::DontCare,
                    initial_layout:
                        VkImageLayout::DepthStencilAttachmentOptimal,
                    final_layout:
                        VkImageLayout::DepthStencilAttachmentOptimal,
                },
                // Color Buffer
                VkAttachmentDescription {
                    flags: 0,
                    format: connection.format.clone(),
                    samples: VkSampleCount::Sc1,
                    load_op: VkAttachmentLoadOp::DontCare,
                    store_op: VkAttachmentStoreOp::Store,
                    stencil_load_op: VkAttachmentLoadOp::DontCare,
                    stencil_store_op: VkAttachmentStoreOp::DontCare,
                    initial_layout: VkImageLayout::Undefined,
                    final_layout: VkImageLayout::PresentSrc,
                },
            ].as_ptr(),
            subpass_count: 1,
            subpasses: &VkSubpassDescription {
                flags: 0,
                pipeline_bind_point: VkPipelineBindPoint::Graphics,
                color_attachment_count: 1,
                color_attachments: &VkAttachmentReference {
                    attachment: 0,
                    layout: VkImageLayout::ColorAttachmentOptimal,
                },
                depth_stencil_attachment: &VkAttachmentReference {
                    attachment: 1,
                    layout: VkImageLayout::DepthStencilAttachmentOptimal,
                },
                input_attachment_count: 0,
                input_attachments: null(),
                preserve_attachment_count: 0,
                preserve_attachments: null(),
                resolve_attachments: &VkAttachmentReference {
                    attachment: 2,
                    layout: VkImageLayout::PresentSrc,
                },
            },
            dependency_count: 1,
            dependencies: &VkSubpassDependency {
                src_subpass: !0,
                dst_subpass: 0,
                src_stage_mask: VkPipelineStage::ColorAttachmentOutput,
                dst_stage_mask: VkPipelineStage::ColorAttachmentOutput,
                src_access_mask: VkAccess::ColorAttachmentWriteBit,
                dst_access_mask: VkAccess::ColorAttachmentReadWrite,
                dependency_flags: 0,
            },
        },
        null(),
        &mut render_pass
    ).unwrap();

    render_pass
}

/// Creates one framebuffer per swapchain image, each combining the shared
/// multisample + depth views with that image's present view
/// (vkCreateFramebuffer).
#[inline(always)]
pub unsafe fn create_framebuffers(
    connection: &Gpu,
    image_count: u32,
    render_pass: VkRenderPass,
    present_imgviews: &[VkImageView],
    multisample_img: &Image,
    depth_img: &Image,
    fbs: &mut[VkFramebuffer])
{
    let connection = connection.get();

    // create a framebuffer per swap chain imageView:
    for i in 0..(image_count as usize) {
        (connection.create_framebuffer)(
            connection.device,
            &VkFramebufferCreateInfo {
                s_type: VkStructureType::FramebufferCreateInfo,
                p_next: null(),
                flags: 0,
                attachment_count: 3,
                attachments: [
                    multisample_img.image().2,
                    depth_img.image().2,
                    present_imgviews[i],
                ].as_ptr(),
                layers: 1,
                render_pass,
                width: connection.extent.width,
                height: connection.extent.height,
            },
            null(),
            &mut fbs[i]
        ).unwrap();
    }
}

/// Destroys framebuffers, present image views, the render pass, and the
/// swapchain, in that order.
#[inline(always)]
pub unsafe fn destroy_swapchain(
    connection: &Gpu,
    frame_buffers: &[VkFramebuffer],
    present_imgviews: &[VkImageView],
    render_pass: VkRenderPass,
    image_count: u32)
{
    let connection = connection.get();
    let device = connection.device;

    // Free framebuffers & present image views
    for i in 0..(image_count as usize) {
        (connection.drop_framebuffer)(device, frame_buffers[i], null());
        (connection.drop_imgview)(device, present_imgviews[i], null());
    }
    // Free render pass
    (connection.drop_renderpass)(device, render_pass, null());
    // Free swapchain
    (connection.drop_swapchain)(device, connection.swapchain, null());
}

/// Allocates the camera uniforms: an identity 4x4 transform plus a fog
/// uniform built from `fog_color`/`range`. Returns (camera, fog) memory.
pub unsafe fn vw_camera_new(connection: &Gpu,
    fog_color: (f32, f32, f32, f32), range: (f32, f32))
    -> (Memory<TransformUniform>, Memory<FogUniform>)
{
    let ucamera_memory = Memory::new(connection,
        TransformUniform {
            mat4: [
                1.0, 0.0, 0.0, 0.0,
                0.0, 1.0, 0.0, 0.0,
                0.0, 0.0, 1.0, 0.0,
                0.0, 0.0, 0.0, 1.0
            ],
        }
    );

    let ueffect_memory = Memory::new(connection,
        FogUniform {
            fogc: [fog_color.0, fog_color.1, fog_color.2, fog_color.3],
            fogr: [range.0, range.1],
        }
    );

    (ucamera_memory, ueffect_memory)
}

/// Convenience wrapper: uploads `vertices` into a new vertex buffer.
pub unsafe fn new_buffer(vulkan: &Gpu, vertices: &[f32]) -> Buffer {
    Buffer::new(vulkan, vertices, BufferBuilderType::Vertex)
}

/// Owned SPIR-V shader module. Holds the module handle, the device it was
/// created on, and the destroy function pointer so `Drop` can free it.
pub struct ShaderModule(
    VkShaderModule,
    // TODO: Don't
    VkDevice,
    unsafe extern "system" fn(VkDevice, VkShaderModule, *const c_void)
        -> (),
);

impl ShaderModule {
    /// Load a new shader module into memory.
    pub fn new(connection: &Gpu, spirv_shader: &[u8]) -> ShaderModule {
        let connection = connection.get();

        let mut shader = unsafe { mem::uninitialized() };

        unsafe {
            (connection.new_shademod)(
                connection.device,
                &VkShaderModuleCreateInfo {
                    s_type: VkStructureType::ShaderModuleCreateInfo,
                    next: null(),
                    flags: 0,
                    code_size: spirv_shader.len(),
                    code: spirv_shader.as_ptr(),
                },
                null(),
                &mut shader
            ).unwrap();
        }

        ShaderModule(shader, connection.device, connection.drop_shademod)
    }
}

impl Drop for ShaderModule {
    /// Destroys the module via the stored vkDestroyShaderModule pointer.
    fn drop(&mut self) -> () {
        unsafe {
            (self.2)(self.1, self.0, null());
        }
    }
}
25.723312
86
0.709537
72e5741cc113b76611e94dc8e6eea31fcc122cad
1,260
use crate::serde::SceneDeserializer; use anyhow::Result; use bevy_asset::{AssetLoader, LoadContext, LoadedAsset}; use bevy_ecs::world::{FromWorld, World}; use bevy_reflect::TypeRegistryArc; use bevy_utils::BoxedFuture; use serde::de::DeserializeSeed; #[derive(Debug)] pub struct SceneLoader { type_registry: TypeRegistryArc, } impl FromWorld for SceneLoader { fn from_world(world: &mut World) -> Self { let type_registry = world.get_resource::<TypeRegistryArc>().unwrap(); SceneLoader { type_registry: (&*type_registry).clone(), } } } impl AssetLoader for SceneLoader { fn load<'a>( &'a self, bytes: &'a [u8], load_context: &'a mut LoadContext, ) -> BoxedFuture<'a, Result<()>> { Box::pin(async move { let mut deserializer = ron::de::Deserializer::from_bytes(&bytes)?; let scene_deserializer = SceneDeserializer { type_registry: &*self.type_registry.read(), }; let scene = scene_deserializer.deserialize(&mut deserializer)?; load_context.set_default_asset(LoadedAsset::new(scene)); Ok(()) }) } fn extensions(&self) -> &[&str] { &["scn", "scn.ron"] } }
28.636364
78
0.612698
5d2256100ff67c9f9afdd386b78bae78338d8835
52,142
// Lint pass over literal/comparison type limits: `unused_comparisons`,
// `overflowing_literals`, and `variant_size_differences`.

use crate::{LateContext, LateLintPass, LintContext};
use rustc_ast as ast;
use rustc_attr as attr;
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::Applicability;
use rustc_hir as hir;
use rustc_hir::{is_range_literal, ExprKind, Node};
use rustc_middle::ty::layout::{IntegerExt, SizeSkeleton};
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{self, AdtKind, Ty, TyCtxt, TypeFoldable};
use rustc_span::source_map;
use rustc_span::symbol::sym;
use rustc_span::{Span, DUMMY_SP};
use rustc_target::abi::Abi;
use rustc_target::abi::{Integer, LayoutOf, TagEncoding, Variants};
use rustc_target::spec::abi::Abi as SpecAbi;

use std::cmp;
use std::iter;
use std::ops::ControlFlow;

use tracing::debug;

declare_lint! {
    /// The `unused_comparisons` lint detects comparisons made useless by
    /// limits of the types involved.
    ///
    /// ### Example
    ///
    /// ```rust
    /// fn foo(x: u8) {
    ///     x >= 0;
    /// }
    /// ```
    ///
    /// {{produces}}
    ///
    /// ### Explanation
    ///
    /// A useless comparison may indicate a mistake, and should be fixed or
    /// removed.
    UNUSED_COMPARISONS,
    Warn,
    "comparisons made useless by limits of the types involved"
}

declare_lint! {
    /// The `overflowing_literals` lint detects literal out of range for its
    /// type.
    ///
    /// ### Example
    ///
    /// ```rust,compile_fail
    /// let x: u8 = 1000;
    /// ```
    ///
    /// {{produces}}
    ///
    /// ### Explanation
    ///
    /// It is usually a mistake to use a literal that overflows the type where
    /// it is used. Either use a literal that is within range, or change the
    /// type to be within the range of the literal.
    OVERFLOWING_LITERALS,
    Deny,
    "literal out of range for its type"
}

declare_lint! {
    /// The `variant_size_differences` lint detects enums with widely varying
    /// variant sizes.
    ///
    /// ### Example
    ///
    /// ```rust,compile_fail
    /// #![deny(variant_size_differences)]
    /// enum En {
    ///     V0(u8),
    ///     VBig([u8; 1024]),
    /// }
    /// ```
    ///
    /// {{produces}}
    ///
    /// ### Explanation
    ///
    /// It can be a mistake to add a variant to an enum that is much larger
    /// than the other variants, bloating the overall size required for all
    /// variants. This can impact performance and memory usage. This is
    /// triggered if one variant is more than 3 times larger than the
    /// second-largest variant.
    ///
    /// Consider placing the large variant's contents on the heap (for example
    /// via [`Box`]) to keep the overall size of the enum itself down.
    ///
    /// This lint is "allow" by default because it can be noisy, and may not be
    /// an actual problem. Decisions about this should be guided with
    /// profiling and benchmarking.
    ///
    /// [`Box`]: https://doc.rust-lang.org/std/boxed/index.html
    VARIANT_SIZE_DIFFERENCES,
    Allow,
    "detects enums with widely varying variant sizes"
}

/// Lint-pass state for `UNUSED_COMPARISONS` / `OVERFLOWING_LITERALS`.
#[derive(Copy, Clone)]
pub struct TypeLimits {
    /// Id of the last visited negated expression
    negated_expr_id: Option<hir::HirId>,
}

impl_lint_pass!(TypeLimits => [UNUSED_COMPARISONS, OVERFLOWING_LITERALS]);

impl TypeLimits {
    /// Fresh state: no negated expression has been visited yet.
    pub fn new() -> TypeLimits {
        TypeLimits { negated_expr_id: None }
    }
}

/// Attempts to special-case the overflowing literal lint when it occurs as a range endpoint.
/// Returns `true` iff the lint was overridden.
fn lint_overflowing_range_endpoint<'tcx>(
    cx: &LateContext<'tcx>,
    lit: &hir::Lit,
    lit_val: u128,
    max: u128,
    expr: &'tcx hir::Expr<'tcx>,
    parent_expr: &'tcx hir::Expr<'tcx>,
    ty: &str,
) -> bool {
    // We only want to handle exclusive (`..`) ranges,
    // which are represented as `ExprKind::Struct`.
    let mut overwritten = false;
    if let ExprKind::Struct(_, eps, _) = &parent_expr.kind {
        if eps.len() != 2 {
            return false;
        }
        // We can suggest using an inclusive range
        // (`..=`) instead only if it is the `end` that is
        // overflowing and only by 1.
        if eps[1].expr.hir_id == expr.hir_id && lit_val - 1 == max {
            cx.struct_span_lint(OVERFLOWING_LITERALS, parent_expr.span, |lint| {
                let mut err = lint.build(&format!("range endpoint is out of range for `{}`", ty));
                if let Ok(start) = cx.sess().source_map().span_to_snippet(eps[0].span) {
                    use ast::{LitIntType, LitKind};
                    // We need to preserve the literal's suffix,
                    // as it may determine typing information.
                    let suffix = match lit.node {
                        LitKind::Int(_, LitIntType::Signed(s)) => s.name_str(),
                        LitKind::Int(_, LitIntType::Unsigned(s)) => s.name_str(),
                        LitKind::Int(_, LitIntType::Unsuffixed) => "",
                        _ => bug!(),
                    };
                    let suggestion = format!("{}..={}{}", start, lit_val - 1, suffix);
                    err.span_suggestion(
                        parent_expr.span,
                        &"use an inclusive range instead",
                        suggestion,
                        Applicability::MachineApplicable,
                    );
                    err.emit();
                    overwritten = true;
                }
            });
        }
    }
    overwritten
}

// For `isize` & `usize`, be conservative with the warnings, so that the
// warnings are consistent between 32- and 64-bit platforms.
/// Returns the inclusive `(min, max)` value range for a signed integer type.
fn int_ty_range(int_ty: ty::IntTy) -> (i128, i128) {
    match int_ty {
        ty::IntTy::Isize => (i64::MIN.into(), i64::MAX.into()),
        ty::IntTy::I8 => (i8::MIN.into(), i8::MAX.into()),
        ty::IntTy::I16 => (i16::MIN.into(), i16::MAX.into()),
        ty::IntTy::I32 => (i32::MIN.into(), i32::MAX.into()),
        ty::IntTy::I64 => (i64::MIN.into(), i64::MAX.into()),
        ty::IntTy::I128 => (i128::MIN, i128::MAX),
    }
}

/// Returns the inclusive `(0, max)` value range for an unsigned integer type.
fn uint_ty_range(uint_ty: ty::UintTy) -> (u128, u128) {
    let max = match uint_ty {
        ty::UintTy::Usize => u64::MAX.into(),
        ty::UintTy::U8 => u8::MAX.into(),
        ty::UintTy::U16 => u16::MAX.into(),
        ty::UintTy::U32 => u32::MAX.into(),
        ty::UintTy::U64 => u64::MAX.into(),
        ty::UintTy::U128 => u128::MAX,
    };
    (0, max)
}

/// Returns the source snippet of `lit` if it is written in binary (`0b`) or
/// hexadecimal (`0x`) notation, `None` otherwise.
fn get_bin_hex_repr(cx: &LateContext<'_>, lit: &hir::Lit) -> Option<String> {
    let src = cx.sess().source_map().span_to_snippet(lit.span).ok()?;
    let firstch = src.chars().next()?;

    if firstch == '0' {
        match src.chars().nth(1) {
            Some('x' | 'b') => return Some(src),
            _ => return None,
        }
    }

    None
}

/// Emits the `overflowing_literals` diagnostic for a bin/hex literal,
/// noting the decimal value it would wrap to and suggesting a wider type
/// (via `get_type_suggestion`) when one exists.
fn report_bin_hex_error(
    cx: &LateContext<'_>,
    expr: &hir::Expr<'_>,
    ty: attr::IntType,
    repr_str: String,
    val: u128,
    negative: bool,
) {
    let size = Integer::from_attr(&cx.tcx, ty).size();
    cx.struct_span_lint(OVERFLOWING_LITERALS, expr.span, |lint| {
        // Compute the value the literal actually wraps to in the target
        // type (sign-extended for signed, truncated for unsigned).
        let (t, actually) = match ty {
            attr::IntType::SignedInt(t) => {
                let actually = if negative {
                    -(size.sign_extend(val) as i128)
                } else {
                    size.sign_extend(val) as i128
                };
                (t.name_str(), actually.to_string())
            }
            attr::IntType::UnsignedInt(t) => {
                let actually = size.truncate(val);
                (t.name_str(), actually.to_string())
            }
        };
        let mut err = lint.build(&format!("literal out of range for `{}`", t));
        if negative {
            // If the value is negative,
            // emits a note about the value itself, apart from the literal.
            err.note(&format!(
                "the literal `{}` (decimal `{}`) does not fit into \
                 the type `{}`",
                repr_str, val, t
            ));
            err.note(&format!("and the value `-{}` will become `{}{}`", repr_str, actually, t));
        } else {
            err.note(&format!(
                "the literal `{}` (decimal `{}`) does not fit into \
                 the type `{}` and will become `{}{}`",
                repr_str, val, t, actually, t
            ));
        }
        if let Some(sugg_ty) =
            get_type_suggestion(&cx.typeck_results().node_type(expr.hir_id), val, negative)
        {
            if let Some(pos) = repr_str.chars().position(|c| c == 'i' || c == 'u') {
                let (sans_suffix, _) = repr_str.split_at(pos);
                err.span_suggestion(
                    expr.span,
                    &format!("consider using the type `{}` instead", sugg_ty),
                    format!("{}{}", sans_suffix, sugg_ty),
                    Applicability::MachineApplicable,
                );
            } else {
                err.help(&format!("consider using the type `{}` instead", sugg_ty));
            }
        }
        err.emit();
    });
}

// This function finds the next fitting type and generates a suggestion string.
// It searches for fitting types in the following way (`X < Y`):
//  - `iX`: if literal fits in `uX` => `uX`, else => `iY`
//  - `-iX` => `iY`
//  - `uX` => `uY`
//
// No suggestion for: `isize`, `usize`.
fn get_type_suggestion(t: Ty<'_>, val: u128, negative: bool) -> Option<&'static str> { use ty::IntTy::*; use ty::UintTy::*; macro_rules! find_fit { ($ty:expr, $val:expr, $negative:expr, $($type:ident => [$($utypes:expr),*] => [$($itypes:expr),*]),+) => { { let _neg = if negative { 1 } else { 0 }; match $ty { $($type => { $(if !negative && val <= uint_ty_range($utypes).1 { return Some($utypes.name_str()) })* $(if val <= int_ty_range($itypes).1 as u128 + _neg { return Some($itypes.name_str()) })* None },)+ _ => None } } } } match t.kind() { ty::Int(i) => find_fit!(i, val, negative, I8 => [U8] => [I16, I32, I64, I128], I16 => [U16] => [I32, I64, I128], I32 => [U32] => [I64, I128], I64 => [U64] => [I128], I128 => [U128] => []), ty::Uint(u) => find_fit!(u, val, negative, U8 => [U8, U16, U32, U64, U128] => [], U16 => [U16, U32, U64, U128] => [], U32 => [U32, U64, U128] => [], U64 => [U64, U128] => [], U128 => [U128] => []), _ => None, } } fn lint_int_literal<'tcx>( cx: &LateContext<'tcx>, type_limits: &TypeLimits, e: &'tcx hir::Expr<'tcx>, lit: &hir::Lit, t: ty::IntTy, v: u128, ) { let int_type = t.normalize(cx.sess().target.pointer_width); let (min, max) = int_ty_range(int_type); let max = max as u128; let negative = type_limits.negated_expr_id == Some(e.hir_id); // Detect literal value out of range [min, max] inclusive // avoiding use of -min to prevent overflow/panic if (negative && v > max + 1) || (!negative && v > max) { if let Some(repr_str) = get_bin_hex_repr(cx, lit) { report_bin_hex_error( cx, e, attr::IntType::SignedInt(ty::ast_int_ty(t)), repr_str, v, negative, ); return; } let par_id = cx.tcx.hir().get_parent_node(e.hir_id); if let Node::Expr(par_e) = cx.tcx.hir().get(par_id) { if let hir::ExprKind::Struct(..) = par_e.kind { if is_range_literal(par_e) && lint_overflowing_range_endpoint(cx, lit, v, max, e, par_e, t.name_str()) { // The overflowing literal lint was overridden. 
return; } } } cx.struct_span_lint(OVERFLOWING_LITERALS, e.span, |lint| { let mut err = lint.build(&format!("literal out of range for `{}`", t.name_str())); err.note(&format!( "the literal `{}` does not fit into the type `{}` whose range is `{}..={}`", cx.sess() .source_map() .span_to_snippet(lit.span) .expect("must get snippet from literal"), t.name_str(), min, max, )); if let Some(sugg_ty) = get_type_suggestion(&cx.typeck_results().node_type(e.hir_id), v, negative) { err.help(&format!("consider using the type `{}` instead", sugg_ty)); } err.emit(); }); } } fn lint_uint_literal<'tcx>( cx: &LateContext<'tcx>, e: &'tcx hir::Expr<'tcx>, lit: &hir::Lit, t: ty::UintTy, ) { let uint_type = t.normalize(cx.sess().target.pointer_width); let (min, max) = uint_ty_range(uint_type); let lit_val: u128 = match lit.node { // _v is u8, within range by definition ast::LitKind::Byte(_v) => return, ast::LitKind::Int(v, _) => v, _ => bug!(), }; if lit_val < min || lit_val > max { let parent_id = cx.tcx.hir().get_parent_node(e.hir_id); if let Node::Expr(par_e) = cx.tcx.hir().get(parent_id) { match par_e.kind { hir::ExprKind::Cast(..) => { if let ty::Char = cx.typeck_results().expr_ty(par_e).kind() { cx.struct_span_lint(OVERFLOWING_LITERALS, par_e.span, |lint| { lint.build("only `u8` can be cast into `char`") .span_suggestion( par_e.span, &"use a `char` literal instead", format!("'\\u{{{:X}}}'", lit_val), Applicability::MachineApplicable, ) .emit(); }); return; } } hir::ExprKind::Struct(..) if is_range_literal(par_e) => { let t = t.name_str(); if lint_overflowing_range_endpoint(cx, lit, lit_val, max, e, par_e, t) { // The overflowing literal lint was overridden. 
return; } } _ => {} } } if let Some(repr_str) = get_bin_hex_repr(cx, lit) { report_bin_hex_error( cx, e, attr::IntType::UnsignedInt(ty::ast_uint_ty(t)), repr_str, lit_val, false, ); return; } cx.struct_span_lint(OVERFLOWING_LITERALS, e.span, |lint| { lint.build(&format!("literal out of range for `{}`", t.name_str())) .note(&format!( "the literal `{}` does not fit into the type `{}` whose range is `{}..={}`", cx.sess() .source_map() .span_to_snippet(lit.span) .expect("must get snippet from literal"), t.name_str(), min, max, )) .emit() }); } } fn lint_literal<'tcx>( cx: &LateContext<'tcx>, type_limits: &TypeLimits, e: &'tcx hir::Expr<'tcx>, lit: &hir::Lit, ) { match *cx.typeck_results().node_type(e.hir_id).kind() { ty::Int(t) => { match lit.node { ast::LitKind::Int(v, ast::LitIntType::Signed(_) | ast::LitIntType::Unsuffixed) => { lint_int_literal(cx, type_limits, e, lit, t, v) } _ => bug!(), }; } ty::Uint(t) => lint_uint_literal(cx, e, lit, t), ty::Float(t) => { let is_infinite = match lit.node { ast::LitKind::Float(v, _) => match t { ty::FloatTy::F32 => v.as_str().parse().map(f32::is_infinite), ty::FloatTy::F64 => v.as_str().parse().map(f64::is_infinite), }, _ => bug!(), }; if is_infinite == Ok(true) { cx.struct_span_lint(OVERFLOWING_LITERALS, e.span, |lint| { lint.build(&format!("literal out of range for `{}`", t.name_str())) .note(&format!( "the literal `{}` does not fit into the type `{}` and will be converted to `{}::INFINITY`", cx.sess() .source_map() .span_to_snippet(lit.span) .expect("must get snippet from literal"), t.name_str(), t.name_str(), )) .emit(); }); } } _ => {} } } impl<'tcx> LateLintPass<'tcx> for TypeLimits { fn check_expr(&mut self, cx: &LateContext<'tcx>, e: &'tcx hir::Expr<'tcx>) { match e.kind { hir::ExprKind::Unary(hir::UnOp::Neg, ref expr) => { // propagate negation, if the negation itself isn't negated if self.negated_expr_id != Some(e.hir_id) { self.negated_expr_id = Some(expr.hir_id); } } hir::ExprKind::Binary(binop, ref l, ref r) => { 
if is_comparison(binop) && !check_limits(cx, binop, &l, &r) { cx.struct_span_lint(UNUSED_COMPARISONS, e.span, |lint| { lint.build("comparison is useless due to type limits").emit() }); } } hir::ExprKind::Lit(ref lit) => lint_literal(cx, self, e, lit), _ => {} }; fn is_valid<T: cmp::PartialOrd>(binop: hir::BinOp, v: T, min: T, max: T) -> bool { match binop.node { hir::BinOpKind::Lt => v > min && v <= max, hir::BinOpKind::Le => v >= min && v < max, hir::BinOpKind::Gt => v >= min && v < max, hir::BinOpKind::Ge => v > min && v <= max, hir::BinOpKind::Eq | hir::BinOpKind::Ne => v >= min && v <= max, _ => bug!(), } } fn rev_binop(binop: hir::BinOp) -> hir::BinOp { source_map::respan( binop.span, match binop.node { hir::BinOpKind::Lt => hir::BinOpKind::Gt, hir::BinOpKind::Le => hir::BinOpKind::Ge, hir::BinOpKind::Gt => hir::BinOpKind::Lt, hir::BinOpKind::Ge => hir::BinOpKind::Le, _ => return binop, }, ) } fn check_limits( cx: &LateContext<'_>, binop: hir::BinOp, l: &hir::Expr<'_>, r: &hir::Expr<'_>, ) -> bool { let (lit, expr, swap) = match (&l.kind, &r.kind) { (&hir::ExprKind::Lit(_), _) => (l, r, true), (_, &hir::ExprKind::Lit(_)) => (r, l, false), _ => return true, }; // Normalize the binop so that the literal is always on the RHS in // the comparison let norm_binop = if swap { rev_binop(binop) } else { binop }; match *cx.typeck_results().node_type(expr.hir_id).kind() { ty::Int(int_ty) => { let (min, max) = int_ty_range(int_ty); let lit_val: i128 = match lit.kind { hir::ExprKind::Lit(ref li) => match li.node { ast::LitKind::Int( v, ast::LitIntType::Signed(_) | ast::LitIntType::Unsuffixed, ) => v as i128, _ => return true, }, _ => bug!(), }; is_valid(norm_binop, lit_val, min, max) } ty::Uint(uint_ty) => { let (min, max): (u128, u128) = uint_ty_range(uint_ty); let lit_val: u128 = match lit.kind { hir::ExprKind::Lit(ref li) => match li.node { ast::LitKind::Int(v, _) => v, _ => return true, }, _ => bug!(), }; is_valid(norm_binop, lit_val, min, max) } _ => true, } } fn 
is_comparison(binop: hir::BinOp) -> bool { matches!( binop.node, hir::BinOpKind::Eq | hir::BinOpKind::Lt | hir::BinOpKind::Le | hir::BinOpKind::Ne | hir::BinOpKind::Ge | hir::BinOpKind::Gt ) } } } declare_lint! { /// The `improper_ctypes` lint detects incorrect use of types in foreign /// modules. /// /// ### Example /// /// ```rust /// extern "C" { /// static STATIC: String; /// } /// ``` /// /// {{produces}} /// /// ### Explanation /// /// The compiler has several checks to verify that types used in `extern` /// blocks are safe and follow certain rules to ensure proper /// compatibility with the foreign interfaces. This lint is issued when it /// detects a probable mistake in a definition. The lint usually should /// provide a description of the issue, along with possibly a hint on how /// to resolve it. IMPROPER_CTYPES, Warn, "proper use of libc types in foreign modules" } declare_lint_pass!(ImproperCTypesDeclarations => [IMPROPER_CTYPES]); declare_lint! { /// The `improper_ctypes_definitions` lint detects incorrect use of /// [`extern` function] definitions. /// /// [`extern` function]: https://doc.rust-lang.org/reference/items/functions.html#extern-function-qualifier /// /// ### Example /// /// ```rust /// # #![allow(unused)] /// pub extern "C" fn str_type(p: &str) { } /// ``` /// /// {{produces}} /// /// ### Explanation /// /// There are many parameter and return types that may be specified in an /// `extern` function that are not compatible with the given ABI. This /// lint is an alert that these types should not be used. The lint usually /// should provide a description of the issue, along with possibly a hint /// on how to resolve it. 
IMPROPER_CTYPES_DEFINITIONS, Warn, "proper use of libc types in foreign item definitions" } declare_lint_pass!(ImproperCTypesDefinitions => [IMPROPER_CTYPES_DEFINITIONS]); #[derive(Clone, Copy)] crate enum CItemKind { Declaration, Definition, } struct ImproperCTypesVisitor<'a, 'tcx> { cx: &'a LateContext<'tcx>, mode: CItemKind, } enum FfiResult<'tcx> { FfiSafe, FfiPhantom(Ty<'tcx>), FfiUnsafe { ty: Ty<'tcx>, reason: String, help: Option<String> }, } crate fn nonnull_optimization_guaranteed<'tcx>(tcx: TyCtxt<'tcx>, def: &ty::AdtDef) -> bool { tcx.get_attrs(def.did) .iter() .any(|a| tcx.sess.check_name(a, sym::rustc_nonnull_optimization_guaranteed)) } /// `repr(transparent)` structs can have a single non-ZST field, this function returns that /// field. pub fn transparent_newtype_field<'a, 'tcx>( tcx: TyCtxt<'tcx>, variant: &'a ty::VariantDef, ) -> Option<&'a ty::FieldDef> { let param_env = tcx.param_env(variant.def_id); for field in &variant.fields { let field_ty = tcx.type_of(field.did); let is_zst = tcx.layout_of(param_env.and(field_ty)).map_or(false, |layout| layout.is_zst()); if !is_zst { return Some(field); } } None } /// Is type known to be non-null? fn ty_is_known_nonnull<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>, mode: CItemKind) -> bool { let tcx = cx.tcx; match ty.kind() { ty::FnPtr(_) => true, ty::Ref(..) => true, ty::Adt(def, _) if def.is_box() && matches!(mode, CItemKind::Definition) => true, ty::Adt(def, substs) if def.repr.transparent() && !def.is_union() => { let marked_non_null = nonnull_optimization_guaranteed(tcx, &def); if marked_non_null { return true; } // Types with a `#[repr(no_niche)]` attribute have their niche hidden. // The attribute is used by the UnsafeCell for example (the only use so far). 
if def.repr.hide_niche() { return false; } for variant in &def.variants { if let Some(field) = transparent_newtype_field(cx.tcx, variant) { if ty_is_known_nonnull(cx, field.ty(tcx, substs), mode) { return true; } } } false } _ => false, } } /// Given a non-null scalar (or transparent) type `ty`, return the nullable version of that type. /// If the type passed in was not scalar, returns None. fn get_nullable_type<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> Option<Ty<'tcx>> { let tcx = cx.tcx; Some(match *ty.kind() { ty::Adt(field_def, field_substs) => { let inner_field_ty = { let first_non_zst_ty = field_def.variants.iter().filter_map(|v| transparent_newtype_field(cx.tcx, v)); debug_assert_eq!( first_non_zst_ty.clone().count(), 1, "Wrong number of fields for transparent type" ); first_non_zst_ty .last() .expect("No non-zst fields in transparent type.") .ty(tcx, field_substs) }; return get_nullable_type(cx, inner_field_ty); } ty::Int(ty) => tcx.mk_mach_int(ty), ty::Uint(ty) => tcx.mk_mach_uint(ty), ty::RawPtr(ty_mut) => tcx.mk_ptr(ty_mut), // As these types are always non-null, the nullable equivalent of // Option<T> of these types are their raw pointer counterparts. ty::Ref(_region, ty, mutbl) => tcx.mk_ptr(ty::TypeAndMut { ty, mutbl }), ty::FnPtr(..) => { // There is no nullable equivalent for Rust's function pointers -- you // must use an Option<fn(..) -> _> to represent it. ty } // We should only ever reach this case if ty_is_known_nonnull is extended // to other types. ref unhandled => { debug!( "get_nullable_type: Unhandled scalar kind: {:?} while checking {:?}", unhandled, ty ); return None; } }) } /// Check if this enum can be safely exported based on the "nullable pointer optimization". If it /// can, return the type that `ty` can be safely converted to, otherwise return `None`. /// Currently restricted to function pointers, boxes, references, `core::num::NonZero*`, /// `core::ptr::NonNull`, and `#[repr(transparent)]` newtypes. 
/// FIXME: This duplicates code in codegen. crate fn repr_nullable_ptr<'tcx>( cx: &LateContext<'tcx>, ty: Ty<'tcx>, ckind: CItemKind, ) -> Option<Ty<'tcx>> { debug!("is_repr_nullable_ptr(cx, ty = {:?})", ty); if let ty::Adt(ty_def, substs) = ty.kind() { let field_ty = match &ty_def.variants.raw[..] { [var_one, var_two] => match (&var_one.fields[..], &var_two.fields[..]) { ([], [field]) | ([field], []) => field.ty(cx.tcx, substs), _ => return None, }, _ => return None, }; if !ty_is_known_nonnull(cx, field_ty, ckind) { return None; } // At this point, the field's type is known to be nonnull and the parent enum is Option-like. // If the computed size for the field and the enum are different, the nonnull optimization isn't // being applied (and we've got a problem somewhere). let compute_size_skeleton = |t| SizeSkeleton::compute(t, cx.tcx, cx.param_env).unwrap(); if !compute_size_skeleton(ty).same_size(compute_size_skeleton(field_ty)) { bug!("improper_ctypes: Option nonnull optimization not applied?"); } // Return the nullable type this Option-like enum can be safely represented with. let field_ty_abi = &cx.layout_of(field_ty).unwrap().abi; if let Abi::Scalar(field_ty_scalar) = field_ty_abi { match (field_ty_scalar.valid_range.start(), field_ty_scalar.valid_range.end()) { (0, _) => unreachable!("Non-null optimisation extended to a non-zero value."), (1, _) => { return Some(get_nullable_type(cx, field_ty).unwrap()); } (start, end) => unreachable!("Unhandled start and end range: ({}, {})", start, end), }; } } None } impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { /// Check if the type is array and emit an unsafe type lint. fn check_for_array_ty(&mut self, sp: Span, ty: Ty<'tcx>) -> bool { if let ty::Array(..) = ty.kind() { self.emit_ffi_unsafe_type_lint( ty, sp, "passing raw arrays by value is not FFI-safe", Some("consider passing a pointer to the array"), ); true } else { false } } /// Checks if the given field's type is "ffi-safe". 
fn check_field_type_for_ffi( &self, cache: &mut FxHashSet<Ty<'tcx>>, field: &ty::FieldDef, substs: SubstsRef<'tcx>, ) -> FfiResult<'tcx> { let field_ty = field.ty(self.cx.tcx, substs); if field_ty.has_opaque_types() { self.check_type_for_ffi(cache, field_ty) } else { let field_ty = self.cx.tcx.normalize_erasing_regions(self.cx.param_env, field_ty); self.check_type_for_ffi(cache, field_ty) } } /// Checks if the given `VariantDef`'s field types are "ffi-safe". fn check_variant_for_ffi( &self, cache: &mut FxHashSet<Ty<'tcx>>, ty: Ty<'tcx>, def: &ty::AdtDef, variant: &ty::VariantDef, substs: SubstsRef<'tcx>, ) -> FfiResult<'tcx> { use FfiResult::*; if def.repr.transparent() { // Can assume that only one field is not a ZST, so only check // that field's type for FFI-safety. if let Some(field) = transparent_newtype_field(self.cx.tcx, variant) { self.check_field_type_for_ffi(cache, field, substs) } else { bug!("malformed transparent type"); } } else { // We can't completely trust repr(C) markings; make sure the fields are // actually safe. let mut all_phantom = !variant.fields.is_empty(); for field in &variant.fields { match self.check_field_type_for_ffi(cache, &field, substs) { FfiSafe => { all_phantom = false; } FfiPhantom(..) if def.is_enum() => { return FfiUnsafe { ty, reason: "this enum contains a PhantomData field".into(), help: None, }; } FfiPhantom(..) => {} r => return r, } } if all_phantom { FfiPhantom(ty) } else { FfiSafe } } } /// Checks if the given type is "ffi-safe" (has a stable, well-defined /// representation which can be exported to C code). fn check_type_for_ffi(&self, cache: &mut FxHashSet<Ty<'tcx>>, ty: Ty<'tcx>) -> FfiResult<'tcx> { use FfiResult::*; let tcx = self.cx.tcx; // Protect against infinite recursion, for example // `struct S(*mut S);`. // FIXME: A recursion limit is necessary as well, for irregular // recursive types. 
if !cache.insert(ty) { return FfiSafe; } match *ty.kind() { ty::Adt(def, substs) => { if def.is_box() && matches!(self.mode, CItemKind::Definition) { if ty.boxed_ty().is_sized(tcx.at(DUMMY_SP), self.cx.param_env) { return FfiSafe; } else { return FfiUnsafe { ty, reason: format!("box cannot be represented as a single pointer"), help: None, }; } } if def.is_phantom_data() { return FfiPhantom(ty); } match def.adt_kind() { AdtKind::Struct | AdtKind::Union => { let kind = if def.is_struct() { "struct" } else { "union" }; if !def.repr.c() && !def.repr.transparent() { return FfiUnsafe { ty, reason: format!("this {} has unspecified layout", kind), help: Some(format!( "consider adding a `#[repr(C)]` or \ `#[repr(transparent)]` attribute to this {}", kind )), }; } let is_non_exhaustive = def.non_enum_variant().is_field_list_non_exhaustive(); if is_non_exhaustive && !def.did.is_local() { return FfiUnsafe { ty, reason: format!("this {} is non-exhaustive", kind), help: None, }; } if def.non_enum_variant().fields.is_empty() { return FfiUnsafe { ty, reason: format!("this {} has no fields", kind), help: Some(format!("consider adding a member to this {}", kind)), }; } self.check_variant_for_ffi(cache, ty, def, def.non_enum_variant(), substs) } AdtKind::Enum => { if def.variants.is_empty() { // Empty enums are okay... although sort of useless. return FfiSafe; } // Check for a repr() attribute to specify the size of the // discriminant. if !def.repr.c() && !def.repr.transparent() && def.repr.int.is_none() { // Special-case types like `Option<extern fn()>`. 
if repr_nullable_ptr(self.cx, ty, self.mode).is_none() { return FfiUnsafe { ty, reason: "enum has no representation hint".into(), help: Some( "consider adding a `#[repr(C)]`, \ `#[repr(transparent)]`, or integer `#[repr(...)]` \ attribute to this enum" .into(), ), }; } } if def.is_variant_list_non_exhaustive() && !def.did.is_local() { return FfiUnsafe { ty, reason: "this enum is non-exhaustive".into(), help: None, }; } // Check the contained variants. for variant in &def.variants { let is_non_exhaustive = variant.is_field_list_non_exhaustive(); if is_non_exhaustive && !variant.def_id.is_local() { return FfiUnsafe { ty, reason: "this enum has non-exhaustive variants".into(), help: None, }; } match self.check_variant_for_ffi(cache, ty, def, variant, substs) { FfiSafe => (), r => return r, } } FfiSafe } } } ty::Char => FfiUnsafe { ty, reason: "the `char` type has no C equivalent".into(), help: Some("consider using `u32` or `libc::wchar_t` instead".into()), }, ty::Int(ty::IntTy::I128) | ty::Uint(ty::UintTy::U128) => FfiUnsafe { ty, reason: "128-bit integers don't currently have a known stable ABI".into(), help: None, }, // Primitive types with a stable representation. ty::Bool | ty::Int(..) | ty::Uint(..) | ty::Float(..) | ty::Never => FfiSafe, ty::Slice(_) => FfiUnsafe { ty, reason: "slices have no C equivalent".into(), help: Some("consider using a raw pointer instead".into()), }, ty::Dynamic(..) => { FfiUnsafe { ty, reason: "trait objects have no C equivalent".into(), help: None } } ty::Str => FfiUnsafe { ty, reason: "string slices have no C equivalent".into(), help: Some("consider using `*const u8` and a length instead".into()), }, ty::Tuple(..) => FfiUnsafe { ty, reason: "tuples have unspecified layout".into(), help: Some("consider using a struct instead".into()), }, ty::RawPtr(ty::TypeAndMut { ty, .. 
}) | ty::Ref(_, ty, _) if { matches!(self.mode, CItemKind::Definition) && ty.is_sized(self.cx.tcx.at(DUMMY_SP), self.cx.param_env) } => { FfiSafe } ty::RawPtr(ty::TypeAndMut { ty, .. }) | ty::Ref(_, ty, _) => { self.check_type_for_ffi(cache, ty) } ty::Array(inner_ty, _) => self.check_type_for_ffi(cache, inner_ty), ty::FnPtr(sig) => { if self.is_internal_abi(sig.abi()) { return FfiUnsafe { ty, reason: "this function pointer has Rust-specific calling convention".into(), help: Some( "consider using an `extern fn(...) -> ...` \ function pointer instead" .into(), ), }; } let sig = tcx.erase_late_bound_regions(sig); if !sig.output().is_unit() { let r = self.check_type_for_ffi(cache, sig.output()); match r { FfiSafe => {} _ => { return r; } } } for arg in sig.inputs() { let r = self.check_type_for_ffi(cache, arg); match r { FfiSafe => {} _ => { return r; } } } FfiSafe } ty::Foreign(..) => FfiSafe, // While opaque types are checked for earlier, if a projection in a struct field // normalizes to an opaque type, then it will reach this branch. ty::Opaque(..) => { FfiUnsafe { ty, reason: "opaque types have no C equivalent".into(), help: None } } // `extern "C" fn` functions can have type parameters, which may or may not be FFI-safe, // so they are currently ignored for the purposes of this lint. ty::Param(..) | ty::Projection(..) if matches!(self.mode, CItemKind::Definition) => { FfiSafe } ty::Param(..) | ty::Projection(..) | ty::Infer(..) | ty::Bound(..) | ty::Error(_) | ty::Closure(..) | ty::Generator(..) | ty::GeneratorWitness(..) | ty::Placeholder(..) | ty::FnDef(..) 
=> bug!("unexpected type in foreign function: {:?}", ty), } } fn emit_ffi_unsafe_type_lint( &mut self, ty: Ty<'tcx>, sp: Span, note: &str, help: Option<&str>, ) { let lint = match self.mode { CItemKind::Declaration => IMPROPER_CTYPES, CItemKind::Definition => IMPROPER_CTYPES_DEFINITIONS, }; self.cx.struct_span_lint(lint, sp, |lint| { let item_description = match self.mode { CItemKind::Declaration => "block", CItemKind::Definition => "fn", }; let mut diag = lint.build(&format!( "`extern` {} uses type `{}`, which is not FFI-safe", item_description, ty )); diag.span_label(sp, "not FFI-safe"); if let Some(help) = help { diag.help(help); } diag.note(note); if let ty::Adt(def, _) = ty.kind() { if let Some(sp) = self.cx.tcx.hir().span_if_local(def.did) { diag.span_note(sp, "the type is defined here"); } } diag.emit(); }); } fn check_for_opaque_ty(&mut self, sp: Span, ty: Ty<'tcx>) -> bool { struct ProhibitOpaqueTypes<'a, 'tcx> { cx: &'a LateContext<'tcx>, } impl<'a, 'tcx> ty::fold::TypeVisitor<'tcx> for ProhibitOpaqueTypes<'a, 'tcx> { type BreakTy = Ty<'tcx>; fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> { match ty.kind() { ty::Opaque(..) => ControlFlow::Break(ty), // Consider opaque types within projections FFI-safe if they do not normalize // to more opaque types. ty::Projection(..) => { let ty = self.cx.tcx.normalize_erasing_regions(self.cx.param_env, ty); // If `ty` is a opaque type directly then `super_visit_with` won't invoke // this function again. 
if ty.has_opaque_types() { self.visit_ty(ty) } else { ControlFlow::CONTINUE } } _ => ty.super_visit_with(self), } } } if let Some(ty) = ty.visit_with(&mut ProhibitOpaqueTypes { cx: self.cx }).break_value() { self.emit_ffi_unsafe_type_lint(ty, sp, "opaque types have no C equivalent", None); true } else { false } } fn check_type_for_ffi_and_report_errors( &mut self, sp: Span, ty: Ty<'tcx>, is_static: bool, is_return_type: bool, ) { // We have to check for opaque types before `normalize_erasing_regions`, // which will replace opaque types with their underlying concrete type. if self.check_for_opaque_ty(sp, ty) { // We've already emitted an error due to an opaque type. return; } // it is only OK to use this function because extern fns cannot have // any generic types right now: let ty = self.cx.tcx.normalize_erasing_regions(self.cx.param_env, ty); // C doesn't really support passing arrays by value - the only way to pass an array by value // is through a struct. So, first test that the top level isn't an array, and then // recursively check the types inside. if !is_static && self.check_for_array_ty(sp, ty) { return; } // Don't report FFI errors for unit return types. This check exists here, and not in // `check_foreign_fn` (where it would make more sense) so that normalization has definitely // happened. if is_return_type && ty.is_unit() { return; } match self.check_type_for_ffi(&mut FxHashSet::default(), ty) { FfiResult::FfiSafe => {} FfiResult::FfiPhantom(ty) => { self.emit_ffi_unsafe_type_lint(ty, sp, "composed only of `PhantomData`", None); } // If `ty` is a `repr(transparent)` newtype, and the non-zero-sized type is a generic // argument, which after substitution, is `()`, then this branch can be hit. FfiResult::FfiUnsafe { ty, .. 
} if is_return_type && ty.is_unit() => {} FfiResult::FfiUnsafe { ty, reason, help } => { self.emit_ffi_unsafe_type_lint(ty, sp, &reason, help.as_deref()); } } } fn check_foreign_fn(&mut self, id: hir::HirId, decl: &hir::FnDecl<'_>) { let def_id = self.cx.tcx.hir().local_def_id(id); let sig = self.cx.tcx.fn_sig(def_id); let sig = self.cx.tcx.erase_late_bound_regions(sig); for (input_ty, input_hir) in iter::zip(sig.inputs(), decl.inputs) { self.check_type_for_ffi_and_report_errors(input_hir.span, input_ty, false, false); } if let hir::FnRetTy::Return(ref ret_hir) = decl.output { let ret_ty = sig.output(); self.check_type_for_ffi_and_report_errors(ret_hir.span, ret_ty, false, true); } } fn check_foreign_static(&mut self, id: hir::HirId, span: Span) { let def_id = self.cx.tcx.hir().local_def_id(id); let ty = self.cx.tcx.type_of(def_id); self.check_type_for_ffi_and_report_errors(span, ty, true, false); } fn is_internal_abi(&self, abi: SpecAbi) -> bool { matches!( abi, SpecAbi::Rust | SpecAbi::RustCall | SpecAbi::RustIntrinsic | SpecAbi::PlatformIntrinsic ) } } impl<'tcx> LateLintPass<'tcx> for ImproperCTypesDeclarations { fn check_foreign_item(&mut self, cx: &LateContext<'_>, it: &hir::ForeignItem<'_>) { let mut vis = ImproperCTypesVisitor { cx, mode: CItemKind::Declaration }; let abi = cx.tcx.hir().get_foreign_abi(it.hir_id()); if !vis.is_internal_abi(abi) { match it.kind { hir::ForeignItemKind::Fn(ref decl, _, _) => { vis.check_foreign_fn(it.hir_id(), decl); } hir::ForeignItemKind::Static(ref ty, _) => { vis.check_foreign_static(it.hir_id(), ty.span); } hir::ForeignItemKind::Type => (), } } } } impl<'tcx> LateLintPass<'tcx> for ImproperCTypesDefinitions { fn check_fn( &mut self, cx: &LateContext<'tcx>, kind: hir::intravisit::FnKind<'tcx>, decl: &'tcx hir::FnDecl<'_>, _: &'tcx hir::Body<'_>, _: Span, hir_id: hir::HirId, ) { use hir::intravisit::FnKind; let abi = match kind { FnKind::ItemFn(_, _, header, ..) => header.abi, FnKind::Method(_, sig, ..) 
=> sig.header.abi, _ => return, }; let mut vis = ImproperCTypesVisitor { cx, mode: CItemKind::Definition }; if !vis.is_internal_abi(abi) { vis.check_foreign_fn(hir_id, decl); } } } declare_lint_pass!(VariantSizeDifferences => [VARIANT_SIZE_DIFFERENCES]); impl<'tcx> LateLintPass<'tcx> for VariantSizeDifferences { fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) { if let hir::ItemKind::Enum(ref enum_definition, _) = it.kind { let t = cx.tcx.type_of(it.def_id); let ty = cx.tcx.erase_regions(t); let layout = match cx.layout_of(ty) { Ok(layout) => layout, Err( ty::layout::LayoutError::Unknown(_) | ty::layout::LayoutError::SizeOverflow(_), ) => return, }; let (variants, tag) = match layout.variants { Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, ref variants, .. } => (variants, tag), _ => return, }; let tag_size = tag.value.size(&cx.tcx).bytes(); debug!( "enum `{}` is {} bytes large with layout:\n{:#?}", t, layout.size.bytes(), layout ); let (largest, slargest, largest_index) = iter::zip(enum_definition.variants, variants) .map(|(variant, variant_layout)| { // Subtract the size of the enum tag. let bytes = variant_layout.size.bytes().saturating_sub(tag_size); debug!("- variant `{}` is {} bytes large", variant.ident, bytes); bytes }) .enumerate() .fold((0, 0, 0), |(l, s, li), (idx, size)| { if size > l { (size, l, idx) } else if size > s { (l, size, li) } else { (l, s, li) } }); // We only warn if the largest variant is at least thrice as large as // the second-largest. if largest > slargest * 3 && slargest > 0 { cx.struct_span_lint( VARIANT_SIZE_DIFFERENCES, enum_definition.variants[largest_index].span, |lint| { lint.build(&format!( "enum variant is more than three times \ larger ({} bytes) than the next largest", largest )) .emit() }, ); } } } }
37.458333
119
0.474416
1a285b379813c09a69d6462932a6ff4141fad363
19,809
#[doc = "Reader of register C1"] pub type R = crate::R<u8, super::C1>; #[doc = "Writer for register C1"] pub type W = crate::W<u8, super::C1>; #[doc = "Register C1 `reset()`'s with value 0"] impl crate::ResetValue for super::C1 { type Type = u8; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Parity Type\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PT_A { #[doc = "0: Even parity."] _0, #[doc = "1: Odd parity."] _1, } impl From<PT_A> for bool { #[inline(always)] fn from(variant: PT_A) -> Self { match variant { PT_A::_0 => false, PT_A::_1 => true, } } } #[doc = "Reader of field `PT`"] pub type PT_R = crate::R<bool, PT_A>; impl PT_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> PT_A { match self.bits { false => PT_A::_0, true => PT_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == PT_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == PT_A::_1 } } #[doc = "Write proxy for field `PT`"] pub struct PT_W<'a> { w: &'a mut W, } impl<'a> PT_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: PT_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Even parity."] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(PT_A::_0) } #[doc = "Odd parity."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(PT_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | ((value as u8) & 0x01); self.w } } #[doc = "Parity Enable\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PE_A { 
#[doc = "0: Parity function disabled."] _0, #[doc = "1: Parity function enabled."] _1, } impl From<PE_A> for bool { #[inline(always)] fn from(variant: PE_A) -> Self { match variant { PE_A::_0 => false, PE_A::_1 => true, } } } #[doc = "Reader of field `PE`"] pub type PE_R = crate::R<bool, PE_A>; impl PE_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> PE_A { match self.bits { false => PE_A::_0, true => PE_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == PE_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == PE_A::_1 } } #[doc = "Write proxy for field `PE`"] pub struct PE_W<'a> { w: &'a mut W, } impl<'a> PE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: PE_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Parity function disabled."] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(PE_A::_0) } #[doc = "Parity function enabled."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(PE_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u8) & 0x01) << 1); self.w } } #[doc = "Idle Line Type Select\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum ILT_A { #[doc = "0: Idle character bit count starts after start bit."] _0, #[doc = "1: Idle character bit count starts after stop bit."] _1, } impl From<ILT_A> for bool { #[inline(always)] fn from(variant: ILT_A) -> Self { match variant { ILT_A::_0 => false, ILT_A::_1 => true, } } } #[doc = "Reader of field `ILT`"] pub type ILT_R = 
crate::R<bool, ILT_A>; impl ILT_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> ILT_A { match self.bits { false => ILT_A::_0, true => ILT_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == ILT_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == ILT_A::_1 } } #[doc = "Write proxy for field `ILT`"] pub struct ILT_W<'a> { w: &'a mut W, } impl<'a> ILT_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: ILT_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Idle character bit count starts after start bit."] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(ILT_A::_0) } #[doc = "Idle character bit count starts after stop bit."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(ILT_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u8) & 0x01) << 2); self.w } } #[doc = "Receiver Wakeup Method Select\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum WAKE_A { #[doc = "0: Idle line wakeup."] _0, #[doc = "1: Address mark wakeup."] _1, } impl From<WAKE_A> for bool { #[inline(always)] fn from(variant: WAKE_A) -> Self { match variant { WAKE_A::_0 => false, WAKE_A::_1 => true, } } } #[doc = "Reader of field `WAKE`"] pub type WAKE_R = crate::R<bool, WAKE_A>; impl WAKE_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> WAKE_A { match self.bits { false => WAKE_A::_0, true => WAKE_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] 
#[inline(always)] pub fn is_0(&self) -> bool { *self == WAKE_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == WAKE_A::_1 } } #[doc = "Write proxy for field `WAKE`"] pub struct WAKE_W<'a> { w: &'a mut W, } impl<'a> WAKE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: WAKE_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Idle line wakeup."] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(WAKE_A::_0) } #[doc = "Address mark wakeup."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(WAKE_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u8) & 0x01) << 3); self.w } } #[doc = "9-bit or 8-bit Mode Select\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum M_A { #[doc = "0: Normal-start + 8 data bits (MSB/LSB first as determined by MSBF) + stop."] _0, #[doc = "1: Use-start + 9 data bits (MSB/LSB first as determined by MSBF) + stop."] _1, } impl From<M_A> for bool { #[inline(always)] fn from(variant: M_A) -> Self { match variant { M_A::_0 => false, M_A::_1 => true, } } } #[doc = "Reader of field `M`"] pub type M_R = crate::R<bool, M_A>; impl M_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> M_A { match self.bits { false => M_A::_0, true => M_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == M_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == M_A::_1 } } #[doc = "Write proxy for field `M`"] pub struct M_W<'a> 
{ w: &'a mut W, } impl<'a> M_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: M_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Normal-start + 8 data bits (MSB/LSB first as determined by MSBF) + stop."] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(M_A::_0) } #[doc = "Use-start + 9 data bits (MSB/LSB first as determined by MSBF) + stop."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(M_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u8) & 0x01) << 4); self.w } } #[doc = "Receiver Source Select\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum RSRC_A { #[doc = "0: Selects internal loop back mode. 
The receiver input is internally connected to transmitter output."] _0, #[doc = "1: Single wire UART mode where the receiver input is connected to the transmit pin input signal."] _1, } impl From<RSRC_A> for bool { #[inline(always)] fn from(variant: RSRC_A) -> Self { match variant { RSRC_A::_0 => false, RSRC_A::_1 => true, } } } #[doc = "Reader of field `RSRC`"] pub type RSRC_R = crate::R<bool, RSRC_A>; impl RSRC_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RSRC_A { match self.bits { false => RSRC_A::_0, true => RSRC_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == RSRC_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == RSRC_A::_1 } } #[doc = "Write proxy for field `RSRC`"] pub struct RSRC_W<'a> { w: &'a mut W, } impl<'a> RSRC_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: RSRC_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Selects internal loop back mode. 
The receiver input is internally connected to transmitter output."] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(RSRC_A::_0) } #[doc = "Single wire UART mode where the receiver input is connected to the transmit pin input signal."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(RSRC_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 5)) | (((value as u8) & 0x01) << 5); self.w } } #[doc = "UART Stops in Wait Mode\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum UARTSWAI_A { #[doc = "0: UART clock continues to run in Wait mode."] _0, #[doc = "1: UART clock freezes while CPU is in Wait mode."] _1, } impl From<UARTSWAI_A> for bool { #[inline(always)] fn from(variant: UARTSWAI_A) -> Self { match variant { UARTSWAI_A::_0 => false, UARTSWAI_A::_1 => true, } } } #[doc = "Reader of field `UARTSWAI`"] pub type UARTSWAI_R = crate::R<bool, UARTSWAI_A>; impl UARTSWAI_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> UARTSWAI_A { match self.bits { false => UARTSWAI_A::_0, true => UARTSWAI_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == UARTSWAI_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == UARTSWAI_A::_1 } } #[doc = "Write proxy for field `UARTSWAI`"] pub struct UARTSWAI_W<'a> { w: &'a mut W, } impl<'a> UARTSWAI_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: UARTSWAI_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "UART clock continues to run in Wait mode."] #[inline(always)] pub fn 
_0(self) -> &'a mut W { self.variant(UARTSWAI_A::_0) } #[doc = "UART clock freezes while CPU is in Wait mode."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(UARTSWAI_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 6)) | (((value as u8) & 0x01) << 6); self.w } } #[doc = "Loop Mode Select\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum LOOPS_A { #[doc = "0: Normal operation."] _0, #[doc = "1: Loop mode where transmitter output is internally connected to receiver input. The receiver input is determined by RSRC."] _1, } impl From<LOOPS_A> for bool { #[inline(always)] fn from(variant: LOOPS_A) -> Self { match variant { LOOPS_A::_0 => false, LOOPS_A::_1 => true, } } } #[doc = "Reader of field `LOOPS`"] pub type LOOPS_R = crate::R<bool, LOOPS_A>; impl LOOPS_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> LOOPS_A { match self.bits { false => LOOPS_A::_0, true => LOOPS_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { *self == LOOPS_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { *self == LOOPS_A::_1 } } #[doc = "Write proxy for field `LOOPS`"] pub struct LOOPS_W<'a> { w: &'a mut W, } impl<'a> LOOPS_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: LOOPS_A) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Normal operation."] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(LOOPS_A::_0) } #[doc = "Loop mode where transmitter output is internally connected to receiver input. 
The receiver input is determined by RSRC."] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(LOOPS_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 7)) | (((value as u8) & 0x01) << 7); self.w } } impl R { #[doc = "Bit 0 - Parity Type"] #[inline(always)] pub fn pt(&self) -> PT_R { PT_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - Parity Enable"] #[inline(always)] pub fn pe(&self) -> PE_R { PE_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 2 - Idle Line Type Select"] #[inline(always)] pub fn ilt(&self) -> ILT_R { ILT_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 3 - Receiver Wakeup Method Select"] #[inline(always)] pub fn wake(&self) -> WAKE_R { WAKE_R::new(((self.bits >> 3) & 0x01) != 0) } #[doc = "Bit 4 - 9-bit or 8-bit Mode Select"] #[inline(always)] pub fn m(&self) -> M_R { M_R::new(((self.bits >> 4) & 0x01) != 0) } #[doc = "Bit 5 - Receiver Source Select"] #[inline(always)] pub fn rsrc(&self) -> RSRC_R { RSRC_R::new(((self.bits >> 5) & 0x01) != 0) } #[doc = "Bit 6 - UART Stops in Wait Mode"] #[inline(always)] pub fn uartswai(&self) -> UARTSWAI_R { UARTSWAI_R::new(((self.bits >> 6) & 0x01) != 0) } #[doc = "Bit 7 - Loop Mode Select"] #[inline(always)] pub fn loops(&self) -> LOOPS_R { LOOPS_R::new(((self.bits >> 7) & 0x01) != 0) } } impl W { #[doc = "Bit 0 - Parity Type"] #[inline(always)] pub fn pt(&mut self) -> PT_W { PT_W { w: self } } #[doc = "Bit 1 - Parity Enable"] #[inline(always)] pub fn pe(&mut self) -> PE_W { PE_W { w: self } } #[doc = "Bit 2 - Idle Line Type Select"] #[inline(always)] pub fn ilt(&mut self) -> ILT_W { ILT_W { w: self } } #[doc = "Bit 3 - Receiver Wakeup Method Select"] #[inline(always)] pub fn 
wake(&mut self) -> WAKE_W { WAKE_W { w: self } } #[doc = "Bit 4 - 9-bit or 8-bit Mode Select"] #[inline(always)] pub fn m(&mut self) -> M_W { M_W { w: self } } #[doc = "Bit 5 - Receiver Source Select"] #[inline(always)] pub fn rsrc(&mut self) -> RSRC_W { RSRC_W { w: self } } #[doc = "Bit 6 - UART Stops in Wait Mode"] #[inline(always)] pub fn uartswai(&mut self) -> UARTSWAI_W { UARTSWAI_W { w: self } } #[doc = "Bit 7 - Loop Mode Select"] #[inline(always)] pub fn loops(&mut self) -> LOOPS_W { LOOPS_W { w: self } } }
27.474341
137
0.523752
edfc5d3738eb3d43845b88b0c4aaf36923e0cbb1
4,590
extern crate backtrace; pub extern crate bigdecimal; extern crate diesel; pub extern crate ethabi; extern crate futures; extern crate graphql_parser; extern crate hex; #[macro_use] extern crate lazy_static; extern crate num_bigint; extern crate serde; #[macro_use] extern crate serde_derive; extern crate serde_yaml; pub extern crate slog; #[macro_use] pub extern crate failure; extern crate ipfs_api; extern crate parity_wasm; extern crate rand; extern crate semver; pub extern crate serde_json; pub extern crate slog_async; extern crate slog_envlogger; extern crate slog_term; extern crate tiny_keccak; pub extern crate tokio; pub extern crate tokio_executor; extern crate tokio_retry; pub extern crate tokio_timer; pub extern crate web3; /// Traits and types for all system components. pub mod components; /// Common data types used throughout The Graph. pub mod data; /// Utilities. pub mod util; /// Extension traits for external types. pub mod ext; /// A prelude that makes all system component traits and data types available. /// /// Add the following code to import all traits and data types listed below at once. 
/// /// ``` /// use graph::prelude::*; /// ``` pub mod prelude { pub use failure::{self, bail, err_msg, format_err, Error, Fail, SyncFailure}; pub use serde_derive::{Deserialize, Serialize}; pub use slog::{self, crit, debug, error, info, o, trace, warn, Logger}; pub use std::fmt::Debug; pub use std::iter::FromIterator; pub use std::sync::Arc; pub use tokio; pub use tokio::prelude::*; pub use crate::components::ethereum::{ BlockStream, BlockStreamBuilder, ChainHeadUpdate, ChainHeadUpdateListener, ChainHeadUpdateStream, EthereumAdapter, EthereumAdapterError, EthereumBlock, EthereumBlockData, EthereumBlockFilter, EthereumBlockPointer, EthereumBlockTriggerType, EthereumBlockWithCalls, EthereumBlockWithTriggers, EthereumCall, EthereumCallData, EthereumCallFilter, EthereumContractCall, EthereumContractCallError, EthereumEventData, EthereumLogFilter, EthereumNetworkIdentifier, EthereumTransactionData, EthereumTrigger, }; pub use crate::components::graphql::{ GraphQlRunner, QueryResultFuture, SubscriptionResultFuture, }; pub use crate::components::link_resolver::LinkResolver; pub use crate::components::server::admin::JsonRpcServer; pub use crate::components::server::query::GraphQLServer; pub use crate::components::server::subscription::SubscriptionServer; pub use crate::components::store::{ AttributeIndexDefinition, ChainStore, EntityChange, EntityChangeOperation, EntityFilter, EntityKey, EntityOperation, EntityOrder, EntityQuery, EntityRange, EventSource, Store, StoreError, StoreEvent, StoreEventStream, StoreEventStreamBox, SubgraphDeploymentStore, TransactionAbortError, SUBSCRIPTION_THROTTLE_INTERVAL, }; pub use crate::components::subgraph::{ BlockState, DataSourceLoader, DataSourceTemplateInfo, RuntimeHost, RuntimeHostBuilder, SubgraphAssignmentProvider, SubgraphInstance, SubgraphInstanceManager, SubgraphRegistrar, SubgraphVersionSwitchingMode, }; pub use crate::components::{EventConsumer, EventProducer}; pub use crate::data::graphql::{SerializableValue, TryFromValue, 
ValueMap}; pub use crate::data::query::{ Query, QueryError, QueryExecutionError, QueryResult, QueryVariables, }; pub use crate::data::schema::Schema; pub use crate::data::store::scalar::{BigDecimal, BigInt, BigIntSign}; pub use crate::data::store::{ AssignmentEvent, Attribute, Entity, NodeId, SubgraphEntityPair, SubgraphVersionSummary, Value, ValueType, }; pub use crate::data::subgraph::schema::{SubgraphDeploymentEntity, TypedEntity}; pub use crate::data::subgraph::{ BlockHandlerFilter, CreateSubgraphResult, DataSource, DataSourceTemplate, Link, MappingABI, MappingBlockHandler, MappingCallHandler, MappingEventHandler, SubgraphAssignmentProviderError, SubgraphAssignmentProviderEvent, SubgraphDeploymentId, SubgraphManifest, SubgraphManifestResolveError, SubgraphManifestValidationError, SubgraphName, SubgraphRegistrarError, }; pub use crate::data::subscription::{ QueryResultStream, Subscription, SubscriptionError, SubscriptionResult, }; pub use crate::ext::futures::{ CancelGuard, CancelHandle, CancelableError, FutureExtension, SharedCancelGuard, StreamExtension, }; pub use crate::util::futures::retry; }
39.230769
99
0.749673
e9b03920c9b6cea37ba9886abbe4ac13e8fce1bf
8,561
pub mod params; use std::hash::Hash; use std::{collections::HashMap, convert::TryFrom, fmt::Display}; use serde::{Deserialize, Deserializer, Serialize}; use self::params::{BoolParam, FloatParam, StringParam}; #[derive(Deserialize, Debug, Clone)] #[serde(untagged)] pub enum SerializedStringParam { Param(StringParam), Custom(String), } pub fn deserialize_string_param<'de, D>(deserializer: D) -> Result<StringParam, D::Error> where D: Deserializer<'de>, { match SerializedStringParam::deserialize(deserializer)? { SerializedStringParam::Param(string_param) => Ok(string_param), SerializedStringParam::Custom(str) => Ok(StringParam::Custom(str)), } } #[derive(Debug, Clone, Copy)] pub enum Module { Internal, Lightroom, } #[derive(Serialize, Debug)] #[serde(untagged)] pub enum Param { Float(FloatParam), Bool(BoolParam), String(StringParam), } impl Into<Param> for FloatParam { fn into(self) -> Param { Param::Float(self) } } impl Into<Param> for BoolParam { fn into(self) -> Param { Param::Bool(self) } } impl Into<Param> for StringParam { fn into(self) -> Param { Param::String(self) } } pub fn param_module<P>(param: &P) -> Module where P: Clone + Into<Param>, { let param: Param = param.clone().into(); match param { Param::String(StringParam::Profile) => Module::Internal, Param::String(StringParam::Custom(_)) => Module::Internal, _ => Module::Lightroom, } } #[derive(Deserialize, Debug)] #[serde(untagged)] pub enum StateValue { Float { parameter: FloatParam, value: Option<f64>, }, String { parameter: StringParam, value: Option<String>, }, Bool { parameter: BoolParam, value: Option<bool>, }, } pub trait SetMapEntry { type Key; type Value; fn set(&mut self, param: Self::Key, value: Option<Self::Value>) -> (); } impl<P, V> SetMapEntry for HashMap<P, V> where P: Eq + Hash, { type Key = P; type Value = V; fn set(&mut self, param: P, value: Option<V>) { match value { Some(v) => self.insert(param, v), None => self.remove(&param), }; () } } #[derive(Deserialize, Serialize, PartialEq, 
Debug, Clone)] #[serde(untagged)] pub enum Value { String(String), Float(f64), Boolean(bool), } impl Display for Value { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Value::String(val) => write!(f, "\"{}\"", val), Value::Boolean(val) => write!(f, "{}", val), Value::Float(val) => write!(f, "{}", val), } } } #[derive(Default)] pub struct State { pub bools: HashMap<BoolParam, bool>, pub floats: HashMap<FloatParam, f64>, pub strings: HashMap<StringParam, String>, } impl State { pub fn new() -> State { Default::default() } pub fn clear(&mut self) { self.bools.clear(); self.floats.clear(); self.strings.clear(); } pub fn update(&mut self, values: Vec<StateValue>) { for value in values { match value { StateValue::Float { parameter, value } => self.floats.set(parameter, value), StateValue::String { parameter, value } => self.strings.set(parameter, value), StateValue::Bool { parameter, value } => self.bools.set(parameter, value), } } } } #[derive(Deserialize, Debug, Clone)] #[serde(try_from = "String")] pub enum GeneralComparison { Equal, NotEqual, } impl Default for GeneralComparison { fn default() -> Self { GeneralComparison::Equal } } impl TryFrom<String> for GeneralComparison { type Error = String; fn try_from(value: String) -> Result<Self, Self::Error> { match value.as_str() { "==" => Ok(GeneralComparison::Equal), "!=" => Ok(GeneralComparison::NotEqual), _ => Err(format!("Unknown comparison: {}", value)), } } } #[derive(Deserialize, PartialEq, Debug, Clone)] #[serde(try_from = "String")] pub enum NumericComparison { Equal, NotEqual, LessThan, LessThanEqual, GreaterThan, GreaterThanEqual, } impl Default for NumericComparison { fn default() -> Self { NumericComparison::Equal } } impl TryFrom<String> for NumericComparison { type Error = String; fn try_from(value: String) -> Result<Self, Self::Error> { match value.as_str() { "==" => Ok(NumericComparison::Equal), "!=" => Ok(NumericComparison::NotEqual), "<" => 
Ok(NumericComparison::LessThan), "<=" => Ok(NumericComparison::LessThanEqual), ">" => Ok(NumericComparison::GreaterThan), ">=" => Ok(NumericComparison::GreaterThanEqual), _ => Err(format!("Unknown comparison: {}", value)), } } } #[derive(Deserialize, Debug, Clone)] #[serde(untagged)] #[serde(rename = "camelCase")] pub enum Condition { Any { any: Vec<Condition>, #[serde(default)] invert: bool, }, All { all: Vec<Condition>, #[serde(default)] invert: bool, }, NumericComparison { parameter: FloatParam, #[serde(default)] comparison: NumericComparison, value: Option<f64>, }, BoolComparison { parameter: BoolParam, #[serde(default)] comparison: GeneralComparison, value: Option<bool>, }, StringComparison { #[serde(deserialize_with = "deserialize_string_param")] parameter: StringParam, #[serde(default)] comparison: GeneralComparison, value: Option<String>, }, } impl Condition { pub fn matches(&self, state: &State) -> bool { match self { Condition::Any { any, invert } => { for condition in any { if condition.matches(state) { return !invert; } } *invert } Condition::All { all, invert } => { for condition in all { if !condition.matches(state) { return *invert; } } !invert } Condition::NumericComparison { parameter, comparison, value, } => { let state_value = match state.floats.get(parameter) { Some(val) => *val, None => { if value.is_some() { return comparison == &NumericComparison::NotEqual; } else { return comparison == &NumericComparison::Equal; } } }; let value = match value { Some(val) => *val, None => return comparison == &NumericComparison::NotEqual, }; match comparison { NumericComparison::Equal => state_value == value, NumericComparison::NotEqual => state_value != value, NumericComparison::LessThan => state_value < value, NumericComparison::LessThanEqual => state_value <= value, NumericComparison::GreaterThan => state_value > value, NumericComparison::GreaterThanEqual => state_value >= value, } } Condition::BoolComparison { parameter, comparison, value, } => { let 
state_value = state.bools.get(parameter).copied(); match comparison { GeneralComparison::Equal => &state_value == value, GeneralComparison::NotEqual => &state_value != value, } } Condition::StringComparison { parameter, comparison, value, } => { let state_value = state.strings.get(parameter).cloned(); match comparison { GeneralComparison::Equal => &state_value == value, GeneralComparison::NotEqual => &state_value != value, } } } } }
25.942424
94
0.528093
b97b2b33a7409b55c34fa552cefe143843057914
576
/* automatically generated by rust-bindgen 0.55.1 */ // Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // Generated by src/lib/network/ethernet/scripts/bindgen.sh pub const ETH_FIFO_RX_OK: u32 = 1; pub const ETH_FIFO_TX_OK: u32 = 1; pub const ETH_FIFO_INVALID: u32 = 2; pub const ETH_FIFO_RX_TX: u32 = 4; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct eth_fifo_entry { pub offset: u32, pub length: u16, pub flags: u16, pub cookie: u64, }
27.428571
73
0.715278
ef78f2c6dc3947109d100c5be7c1ef0db46d6627
135,708
use { crate::{ block_error::BlockError, blockstore::Blockstore, blockstore_db::BlockstoreError, blockstore_meta::SlotMeta, leader_schedule_cache::LeaderScheduleCache, }, chrono_humanize::{Accuracy, HumanTime, Tense}, crossbeam_channel::Sender, itertools::Itertools, log::*, rand::{seq::SliceRandom, thread_rng}, rayon::{prelude::*, ThreadPool}, solana_entry::entry::{ self, create_ticks, Entry, EntrySlice, EntryType, EntryVerificationStatus, VerifyRecyclers, }, solana_measure::measure::Measure, solana_metrics::{datapoint_error, inc_new_counter_debug}, solana_program_runtime::timings::ExecuteTimings, solana_rayon_threadlimit::get_thread_count, solana_runtime::{ accounts_db::{AccountShrinkThreshold, AccountsDbConfig}, accounts_index::AccountSecondaryIndexes, accounts_update_notifier_interface::AccountsUpdateNotifier, bank::{ Bank, RentDebits, TransactionBalancesSet, TransactionExecutionResult, TransactionResults, }, bank_forks::BankForks, bank_utils, block_cost_limits::*, commitment::VOTE_THRESHOLD_SIZE, snapshot_config::SnapshotConfig, snapshot_package::{AccountsPackageSender, SnapshotType}, snapshot_utils::{self, BankFromArchiveTimings}, transaction_batch::TransactionBatch, vote_account::VoteAccount, vote_sender_types::ReplayVoteSender, }, solana_sdk::{ clock::{Slot, MAX_PROCESSING_AGE}, feature_set, genesis_config::GenesisConfig, hash::Hash, pubkey::Pubkey, signature::{Keypair, Signature}, timing, transaction::{ Result, SanitizedTransaction, TransactionError, TransactionVerificationMode, VersionedTransaction, }, }, solana_transaction_status::token_balances::{ collect_token_balances, TransactionTokenBalancesSet, }, std::{ cell::RefCell, collections::{HashMap, HashSet}, path::PathBuf, result, sync::{Arc, RwLock}, time::{Duration, Instant}, }, thiserror::Error, }; // it tracks the block cost available capacity - number of compute-units allowed // by max blockl cost limit #[derive(Debug)] pub struct BlockCostCapacityMeter { pub capacity: u64, pub accumulated_cost: u64, } 
impl Default for BlockCostCapacityMeter {
    fn default() -> Self {
        // Default capacity is the chain-wide max block compute-unit limit.
        BlockCostCapacityMeter::new(MAX_BLOCK_UNITS)
    }
}

impl BlockCostCapacityMeter {
    pub fn new(capacity_limit: u64) -> Self {
        Self {
            capacity: capacity_limit,
            accumulated_cost: 0_u64,
        }
    }

    // Add `cost` to the accumulated total and return the remaining capacity
    // (saturating at zero).
    pub fn accumulate(&mut self, cost: u64) -> u64 {
        self.accumulated_cost += cost;
        self.capacity.saturating_sub(self.accumulated_cost)
    }
}

pub type BlockstoreProcessorInner = (BankForks, LeaderScheduleCache, Option<Slot>);

pub type BlockstoreProcessorResult =
    result::Result<BlockstoreProcessorInner, BlockstoreProcessorError>;

// Dedicated rayon pool for parallel entry processing; sized by
// get_thread_count() and replaceable via ProcessOptions::override_num_threads.
thread_local!(static PAR_THREAD_POOL: RefCell<ThreadPool> = RefCell::new(rayon::ThreadPoolBuilder::new()
                    .num_threads(get_thread_count())
                    .thread_name(|ix| format!("blockstore_processor_{}", ix))
                    .build()
                    .unwrap())
);

// Returns a clone of the first Err in `results`, or Ok(()) when there is none.
fn first_err(results: &[Result<()>]) -> Result<()> {
    for r in results {
        if r.is_err() {
            return r.clone();
        }
    }
    Ok(())
}

// Returns the first fee-collection error paired with its transaction's
// signature, logging and reporting a datapoint for every error seen.
// Includes transaction signature for unit-testing
fn get_first_error(
    batch: &TransactionBatch,
    fee_collection_results: Vec<Result<()>>,
) -> Option<(Result<()>, Signature)> {
    let mut first_err = None;
    for (result, transaction) in fee_collection_results
        .iter()
        .zip(batch.sanitized_transactions())
    {
        if let Err(ref err) = result {
            if first_err.is_none() {
                first_err = Some((result.clone(), *transaction.signature()));
            }
            warn!(
                "Unexpected validator error: {:?}, transaction: {:?}",
                err, transaction
            );
            datapoint_error!(
                "validator_process_entry_error",
                (
                    "error",
                    format!("error: {:?}, transaction: {:?}", err, transaction),
                    String
                )
            );
        }
    }
    first_err
}

// Sums each program's average execution units (accumulated_units / count),
// skipping programs with a zero invocation count.
fn aggregate_total_execution_units(execute_timings: &ExecuteTimings) -> u64 {
    let mut execute_cost_units: u64 = 0;
    for (program_id, timing) in &execute_timings.details.per_program_timings {
        if timing.count < 1 {
            continue;
        }
        execute_cost_units =
            execute_cost_units.saturating_add(timing.accumulated_units / timing.count as u64);
        trace!("aggregated execution cost of {:?} {:?}", program_id, timing);
    }
    execute_cost_units
}

// Loads, executes, and commits one locked transaction batch on `bank`.
// When the gate_large_block feature is active, charges the batch's execution
// units against `cost_capacity_meter` and fails once the block cap is
// exhausted. Token balances are only collected when a status sender exists.
fn execute_batch(
    batch: &TransactionBatch,
    bank: &Arc<Bank>,
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    timings: &mut ExecuteTimings,
    cost_capacity_meter: Arc<RwLock<BlockCostCapacityMeter>>,
) -> Result<()> {
    let record_token_balances = transaction_status_sender.is_some();

    let mut mint_decimals: HashMap<Pubkey, u8> = HashMap::new();

    let pre_token_balances = if record_token_balances {
        collect_token_balances(bank, batch, &mut mint_decimals)
    } else {
        vec![]
    };

    // Snapshot the aggregate units before execution so the batch's own cost
    // can be computed as a delta afterwards.
    let pre_process_units: u64 = aggregate_total_execution_units(timings);

    let (tx_results, balances) = batch.bank().load_execute_and_commit_transactions(
        batch,
        MAX_PROCESSING_AGE,
        transaction_status_sender.is_some(),
        transaction_status_sender.is_some(),
        transaction_status_sender.is_some(),
        timings,
    );

    if bank
        .feature_set
        .is_active(&feature_set::gate_large_block::id())
    {
        let execution_cost_units = aggregate_total_execution_units(timings) - pre_process_units;
        let remaining_block_cost_cap = cost_capacity_meter
            .write()
            .unwrap()
            .accumulate(execution_cost_units);

        debug!(
            "bank {} executed a batch, number of transactions {}, total execute cu {}, remaining block cost cap {}",
            bank.slot(),
            batch.sanitized_transactions().len(),
            execution_cost_units,
            remaining_block_cost_cap,
        );

        if remaining_block_cost_cap == 0_u64 {
            return Err(TransactionError::WouldExceedMaxBlockCostLimit);
        }
    }

    bank_utils::find_and_send_votes(
        batch.sanitized_transactions(),
        &tx_results,
        replay_vote_sender,
    );

    let TransactionResults {
        fee_collection_results,
        execution_results,
        rent_debits,
        ..
} = tx_results;

    if let Some(transaction_status_sender) = transaction_status_sender {
        let transactions = batch.sanitized_transactions().to_vec();
        let post_token_balances = if record_token_balances {
            collect_token_balances(bank, batch, &mut mint_decimals)
        } else {
            vec![]
        };

        let token_balances =
            TransactionTokenBalancesSet::new(pre_token_balances, post_token_balances);

        transaction_status_sender.send_transaction_status_batch(
            bank.clone(),
            transactions,
            execution_results,
            balances,
            token_balances,
            rent_debits,
        );
    }

    let first_err = get_first_error(batch, fee_collection_results);
    first_err.map(|(result, _)| result).unwrap_or(Ok(()))
}

// Executes the given batches in parallel on PAR_THREAD_POOL, merging each
// worker's ExecuteTimings into `timings` and returning the first error, if any.
fn execute_batches(
    bank: &Arc<Bank>,
    batches: &[TransactionBatch],
    entry_callback: Option<&ProcessCallback>,
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    timings: &mut ExecuteTimings,
    cost_capacity_meter: Arc<RwLock<BlockCostCapacityMeter>>,
) -> Result<()> {
    inc_new_counter_debug!("bank-par_execute_entries-count", batches.len());

    let (results, new_timings): (Vec<Result<()>>, Vec<ExecuteTimings>) =
        PAR_THREAD_POOL.with(|thread_pool| {
            thread_pool.borrow().install(|| {
                batches
                    .into_par_iter()
                    .map(|batch| {
                        let mut timings = ExecuteTimings::default();
                        let result = execute_batch(
                            batch,
                            bank,
                            transaction_status_sender,
                            replay_vote_sender,
                            &mut timings,
                            cost_capacity_meter.clone(),
                        );
                        if let Some(entry_callback) = entry_callback {
                            entry_callback(bank);
                        }
                        (result, timings)
                    })
                    .unzip()
            })
        });

    timings.total_batches_len += batches.len();
    timings.num_execute_batches += 1;
    for timing in new_timings {
        timings.accumulate(&timing);
    }

    first_err(&results)
}

/// Process an ordered list of entries in parallel
/// 1. In order lock accounts for each entry while the lock succeeds, up to a Tick entry
/// 2. Process the locked group in parallel
/// 3. Register the `Tick` if it's available
/// 4. Update the leader scheduler, goto 1
pub fn process_entries_for_tests(
    bank: &Arc<Bank>,
    entries: Vec<Entry>,
    randomize: bool,
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
) -> Result<()> {
    // Full signature/sanitization verification for every transaction.
    let verify_transaction = {
        let bank = bank.clone();
        move |versioned_tx: VersionedTransaction| -> Result<SanitizedTransaction> {
            bank.verify_transaction(versioned_tx, TransactionVerificationMode::FullVerification)
        }
    };

    let mut timings = ExecuteTimings::default();
    let mut entries = entry::verify_transactions(entries, Arc::new(verify_transaction))?;
    let result = process_entries_with_callback(
        bank,
        &mut entries,
        randomize,
        None,
        transaction_status_sender,
        replay_vote_sender,
        &mut timings,
        Arc::new(RwLock::new(BlockCostCapacityMeter::default())),
    );

    debug!("process_entries: {:?}", timings);
    result
}

// Note: If randomize is true this will shuffle entries' transactions in-place.
fn process_entries_with_callback(
    bank: &Arc<Bank>,
    entries: &mut [EntryType],
    randomize: bool,
    entry_callback: Option<&ProcessCallback>,
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    timings: &mut ExecuteTimings,
    cost_capacity_meter: Arc<RwLock<BlockCostCapacityMeter>>,
) -> Result<()> {
    // accumulator for entries that can be processed in parallel
    let mut batches = vec![];
    let mut tick_hashes = vec![];
    let mut rng = thread_rng();

    for entry in entries {
        match entry {
            EntryType::Tick(hash) => {
                // If it's a tick, save it for later
                tick_hashes.push(hash);
                if bank.is_block_boundary(bank.tick_height() + tick_hashes.len() as u64) {
                    // If it's a tick that will cause a new blockhash to be created,
                    // execute the group and register the tick
                    execute_batches(
                        bank,
                        &batches,
                        entry_callback,
                        transaction_status_sender,
                        replay_vote_sender,
                        timings,
                        cost_capacity_meter.clone(),
                    )?;
                    batches.clear();
                    for hash in &tick_hashes {
                        bank.register_tick(hash);
                    }
                    tick_hashes.clear();
                }
            }
EntryType::Transactions(transactions) => {
                if randomize {
                    transactions.shuffle(&mut rng);
                }

                loop {
                    // try to lock the accounts
                    let batch = bank.prepare_sanitized_batch(transactions);
                    let first_lock_err = first_err(batch.lock_results());

                    // if locking worked
                    if first_lock_err.is_ok() {
                        batches.push(batch);
                        // done with this entry
                        break;
                    }
                    // else we failed to lock, 2 possible reasons
                    if batches.is_empty() {
                        // An entry has account lock conflicts with *itself*, which should not happen
                        // if generated by a properly functioning leader
                        datapoint_error!(
                            "validator_process_entry_error",
                            (
                                "error",
                                format!(
                                    "Lock accounts error, entry conflicts with itself, txs: {:?}",
                                    transactions
                                ),
                                String
                            )
                        );
                        // bail
                        first_lock_err?;
                    } else {
                        // else we have an entry that conflicts with a prior entry
                        // execute the current queue and try to process this entry again
                        execute_batches(
                            bank,
                            &batches,
                            entry_callback,
                            transaction_status_sender,
                            replay_vote_sender,
                            timings,
                            cost_capacity_meter.clone(),
                        )?;
                        batches.clear();
                    }
                }
            }
        }
    }
    // Flush any remaining accumulated batches and register trailing ticks.
    execute_batches(
        bank,
        &batches,
        entry_callback,
        transaction_status_sender,
        replay_vote_sender,
        timings,
        cost_capacity_meter,
    )?;
    for hash in tick_hashes {
        bank.register_tick(hash);
    }
    Ok(())
}

#[derive(Error, Debug)]
pub enum BlockstoreProcessorError {
    #[error("failed to load entries")]
    FailedToLoadEntries(#[from] BlockstoreError),

    #[error("failed to load meta")]
    FailedToLoadMeta,

    #[error("invalid block")]
    InvalidBlock(#[from] BlockError),

    #[error("invalid transaction")]
    InvalidTransaction(#[from] TransactionError),

    #[error("no valid forks found")]
    NoValidForksFound,

    #[error("invalid hard fork")]
    InvalidHardFork(Slot),

    #[error("root bank with mismatched capitalization at {0}")]
    RootBankWithMismatchedCapitalization(Slot),
}

/// Callback for accessing bank state while processing the blockstore
pub type ProcessCallback = Arc<dyn Fn(&Bank) + Sync + Send>;

#[derive(Default, Clone)]
pub struct ProcessOptions {
    pub bpf_jit: bool,
    pub poh_verify: bool,
    pub full_leader_cache: bool,
    pub dev_halt_at_slot: Option<Slot>,
    pub entry_callback: Option<ProcessCallback>,
    pub override_num_threads: Option<usize>,
    pub new_hard_forks: Option<Vec<Slot>>,
    pub debug_keys: Option<Arc<HashSet<Pubkey>>>,
    pub account_indexes: AccountSecondaryIndexes,
    pub accounts_db_caching_enabled: bool,
    pub limit_load_slot_count_from_snapshot: Option<usize>,
    pub allow_dead_slots: bool,
    pub accounts_db_test_hash_calculation: bool,
    pub accounts_db_skip_shrink: bool,
    pub accounts_db_config: Option<AccountsDbConfig>,
    pub verify_index: bool,
    pub shrink_ratio: AccountShrinkThreshold,
}

/// Builds bank0 from the genesis config, replays slot 0, then processes the
/// rest of the blockstore starting from that root.
pub fn process_blockstore(
    genesis_config: &GenesisConfig,
    blockstore: &Blockstore,
    account_paths: Vec<PathBuf>,
    opts: ProcessOptions,
    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
    snapshot_config: Option<&SnapshotConfig>,
    accounts_package_sender: AccountsPackageSender,
    accounts_update_notifier: Option<AccountsUpdateNotifier>,
) -> BlockstoreProcessorResult {
    if let Some(num_threads) = opts.override_num_threads {
        // Replace the default processing pool with one of the requested size.
        PAR_THREAD_POOL.with(|pool| {
            *pool.borrow_mut() = rayon::ThreadPoolBuilder::new()
                .num_threads(num_threads)
                .build()
                .unwrap()
        });
    }

    // Setup bank for slot 0
    let bank0 = Bank::new_with_paths(
        genesis_config,
        account_paths,
        opts.debug_keys.clone(),
        Some(&crate::builtins::get(opts.bpf_jit)),
        opts.account_indexes.clone(),
        opts.accounts_db_caching_enabled,
        opts.shrink_ratio,
        false,
        opts.accounts_db_config.clone(),
        accounts_update_notifier,
    );
    let bank0 = Arc::new(bank0);
    info!("processing ledger for slot 0...");
    let recyclers = VerifyRecyclers::default();
    process_bank_0(
        &bank0,
        blockstore,
        &opts,
        &recyclers,
        cache_block_meta_sender,
    );
    do_process_blockstore_from_root(
        blockstore,
        bank0,
        &opts,
        &recyclers,
        None,
        cache_block_meta_sender,
        snapshot_config,
        accounts_package_sender,
        BankFromArchiveTimings::default(),
        None,
    )
}

/// Process blockstore from a known root bank
#[allow(clippy::too_many_arguments)]
pub(crate) fn process_blockstore_from_root(
    blockstore:
&Blockstore,
    bank: Bank,
    opts: &ProcessOptions,
    recyclers: &VerifyRecyclers,
    transaction_status_sender: Option<&TransactionStatusSender>,
    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
    snapshot_config: Option<&SnapshotConfig>,
    accounts_package_sender: AccountsPackageSender,
    timings: BankFromArchiveTimings,
    last_full_snapshot_slot: Slot,
) -> BlockstoreProcessorResult {
    // Thin wrapper: wraps the bank in an Arc and delegates to the internal
    // implementation with a known last-full-snapshot slot.
    do_process_blockstore_from_root(
        blockstore,
        Arc::new(bank),
        opts,
        recyclers,
        transaction_status_sender,
        cache_block_meta_sender,
        snapshot_config,
        accounts_package_sender,
        timings,
        Some(last_full_snapshot_slot),
    )
}

#[allow(clippy::too_many_arguments)]
fn do_process_blockstore_from_root(
    blockstore: &Blockstore,
    bank: Arc<Bank>,
    opts: &ProcessOptions,
    recyclers: &VerifyRecyclers,
    transaction_status_sender: Option<&TransactionStatusSender>,
    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
    snapshot_config: Option<&SnapshotConfig>,
    accounts_package_sender: AccountsPackageSender,
    timings: BankFromArchiveTimings,
    mut last_full_snapshot_slot: Option<Slot>,
) -> BlockstoreProcessorResult {
    info!("processing ledger from slot {}...", bank.slot());

    // Starting slot must be a root, and thus has no parents
    assert!(bank.parent().is_none());
    let start_slot = bank.slot();
    let now = Instant::now();
    let mut root = start_slot;

    if let Some(ref new_hard_forks) = opts.new_hard_forks {
        let hard_forks = bank.hard_forks();

        for hard_fork_slot in new_hard_forks.iter() {
            if *hard_fork_slot > start_slot {
                hard_forks.write().unwrap().register(*hard_fork_slot);
            } else {
                warn!(
                    "Hard fork at {} ignored, --hard-fork option can be removed.",
                    hard_fork_slot
                );
            }
        }
    }

    // ensure start_slot is rooted for correct replay
    if blockstore.is_primary_access() {
        blockstore
            .set_roots(std::iter::once(&start_slot))
            .expect("Couldn't set root slot on startup");
    } else {
        assert!(blockstore.is_root(start_slot), "starting slot isn't root and can't update due to being secondary blockstore access: {}", start_slot);
    }

    if let Ok(metas) = blockstore.slot_meta_iterator(start_slot) {
        if let Some((slot, _meta)) = metas.last() {
            info!("ledger holds data through slot {}", slot);
        }
    }

    let mut timing = ExecuteTimings::default();
    // Iterate and replay slots from blockstore starting from `start_slot`
    let (initial_forks, leader_schedule_cache) = {
        if let Some(meta) = blockstore
            .meta(start_slot)
            .unwrap_or_else(|_| panic!("Failed to get meta for slot {}", start_slot))
        {
            let epoch_schedule = bank.epoch_schedule();
            let mut leader_schedule_cache = LeaderScheduleCache::new(*epoch_schedule, &bank);
            if opts.full_leader_cache {
                leader_schedule_cache.set_max_schedules(std::usize::MAX);
            }
            let mut initial_forks = load_frozen_forks(
                &bank,
                &meta,
                blockstore,
                &mut leader_schedule_cache,
                &mut root,
                opts,
                recyclers,
                transaction_status_sender,
                cache_block_meta_sender,
                snapshot_config,
                accounts_package_sender,
                &mut timing,
                &mut last_full_snapshot_slot,
            )?;
            initial_forks.sort_by_key(|bank| bank.slot());

            (initial_forks, leader_schedule_cache)
        } else {
            // If there's no meta for the input `start_slot`, then we started from a snapshot
            // and there's no point in processing the rest of blockstore and implies blockstore
            // should be empty past this point.
            let leader_schedule_cache = LeaderScheduleCache::new_from_bank(&bank);
            (vec![bank], leader_schedule_cache)
        }
    };
    if initial_forks.is_empty() {
        return Err(BlockstoreProcessorError::NoValidForksFound);
    }
    let bank_forks = BankForks::new_from_banks(&initial_forks, root);

    let processing_time = now.elapsed();

    let debug_verify = opts.accounts_db_test_hash_calculation;
    let mut time_cap = Measure::start("capitalization");
    // We might be promptly restarted after bad capitalization was detected while creating newer snapshot.
    // In that case, we're most likely restored from the last good snapshot and replayed up to this root.
    // So again check here for the bad capitalization to avoid to continue until the next snapshot creation.
    if !bank_forks
        .root_bank()
        .calculate_and_verify_capitalization(debug_verify)
    {
        return Err(BlockstoreProcessorError::RootBankWithMismatchedCapitalization(root));
    }
    time_cap.stop();

    datapoint_info!(
        "process_blockstore_from_root",
        ("total_time_us", processing_time.as_micros(), i64),
        ("frozen_banks", bank_forks.frozen_banks().len(), i64),
        ("slot", bank_forks.root(), i64),
        ("forks", initial_forks.len(), i64),
        ("calculate_capitalization_us", time_cap.as_us(), i64),
        (
            "full_snapshot_untar_us",
            timings.full_snapshot_untar_us,
            i64
        ),
        (
            "incremental_snapshot_untar_us",
            timings.incremental_snapshot_untar_us,
            i64
        ),
        (
            "rebuild_bank_from_snapshots_us",
            timings.rebuild_bank_from_snapshots_us,
            i64
        ),
        (
            "verify_snapshot_bank_us",
            timings.verify_snapshot_bank_us,
            i64
        ),
    );

    info!("ledger processing timing: {:?}", timing);
    info!(
        "ledger processed in {}. root slot is {}, {} fork{} at {}, with {} frozen bank{}",
        HumanTime::from(chrono::Duration::from_std(processing_time).unwrap())
            .to_text_en(Accuracy::Precise, Tense::Present),
        bank_forks.root(),
        initial_forks.len(),
        if initial_forks.len() > 1 { "s" } else { "" },
        initial_forks
            .iter()
            .map(|b| b.slot().to_string())
            .join(", "),
        bank_forks.frozen_banks().len(),
        if bank_forks.frozen_banks().len() > 1 {
            "s"
        } else {
            ""
        },
    );
    assert!(bank_forks.active_banks().is_empty());

    Ok((bank_forks, leader_schedule_cache, last_full_snapshot_slot))
}

/// Verify that a segment of entries has the correct number of ticks and hashes
pub fn verify_ticks(
    bank: &Arc<Bank>,
    entries: &[Entry],
    slot_full: bool,
    tick_hash_count: &mut u64,
) -> std::result::Result<(), BlockError> {
    let next_bank_tick_height = bank.tick_height() + entries.tick_count();
    let max_bank_tick_height = bank.max_tick_height();

    if next_bank_tick_height > max_bank_tick_height {
        warn!("Too many entry ticks found in slot: {}", bank.slot());
        return Err(BlockError::TooManyTicks);
    }

    if next_bank_tick_height < max_bank_tick_height && slot_full {
        info!("Too few entry ticks found in slot: {}",
bank.slot());
        return Err(BlockError::TooFewTicks);
    }

    if next_bank_tick_height == max_bank_tick_height {
        let has_trailing_entry = entries.last().map(|e| !e.is_tick()).unwrap_or_default();
        if has_trailing_entry {
            warn!("Slot: {} did not end with a tick entry", bank.slot());
            return Err(BlockError::TrailingEntry);
        }

        if !slot_full {
            warn!("Slot: {} was not marked full", bank.slot());
            return Err(BlockError::InvalidLastTick);
        }
    }

    let hashes_per_tick = bank.hashes_per_tick().unwrap_or(0);
    if !entries.verify_tick_hash_count(tick_hash_count, hashes_per_tick) {
        warn!(
            "Tick with invalid number of hashes found in slot: {}",
            bank.slot()
        );
        return Err(BlockError::InvalidTickHashCount);
    }

    Ok(())
}

// Replays a complete slot via confirm_slot (PoH verification controlled by
// opts.poh_verify) and errors if the bank is not complete afterwards.
fn confirm_full_slot(
    blockstore: &Blockstore,
    bank: &Arc<Bank>,
    opts: &ProcessOptions,
    recyclers: &VerifyRecyclers,
    progress: &mut ConfirmationProgress,
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    timing: &mut ExecuteTimings,
) -> result::Result<(), BlockstoreProcessorError> {
    let mut confirmation_timing = ConfirmationTiming::default();
    let skip_verification = !opts.poh_verify;
    confirm_slot(
        blockstore,
        bank,
        &mut confirmation_timing,
        progress,
        skip_verification,
        transaction_status_sender,
        replay_vote_sender,
        opts.entry_callback.as_ref(),
        recyclers,
        opts.allow_dead_slots,
    )?;

    timing.accumulate(&confirmation_timing.execute_timings);

    if !bank.is_complete() {
        Err(BlockstoreProcessorError::InvalidBlock(
            BlockError::Incomplete,
        ))
    } else {
        Ok(())
    }
}

// Timing breakdown (microseconds) accumulated while confirming one slot.
pub struct ConfirmationTiming {
    pub started: Instant,
    pub replay_elapsed: u64,
    pub poh_verify_elapsed: u64,
    pub transaction_verify_elapsed: u64,
    pub fetch_elapsed: u64,
    pub fetch_fail_elapsed: u64,
    pub execute_timings: ExecuteTimings,
}

impl Default for ConfirmationTiming {
    fn default() -> Self {
        Self {
            started: Instant::now(),
            replay_elapsed: 0,
            poh_verify_elapsed: 0,
            transaction_verify_elapsed: 0,
            fetch_elapsed: 0,
            fetch_fail_elapsed: 0,
            execute_timings: ExecuteTimings::default(),
        }
    }
}

// Running progress counters for a slot being confirmed; `num_shreds` is used
// as the resume offset when fetching further entries for the slot.
#[derive(Default)]
pub struct ConfirmationProgress {
    pub last_entry: Hash,
    pub tick_hash_count: u64,
    pub num_shreds: u64,
    pub num_entries: usize,
    pub num_txs: usize,
}

impl ConfirmationProgress {
    pub fn new(last_entry: Hash) -> Self {
        Self {
            last_entry,
            ..Self::default()
        }
    }
}

#[allow(clippy::too_many_arguments)]
pub fn confirm_slot(
    blockstore: &Blockstore,
    bank: &Arc<Bank>,
    timing: &mut ConfirmationTiming,
    progress: &mut ConfirmationProgress,
    skip_verification: bool,
    transaction_status_sender: Option<&TransactionStatusSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    entry_callback: Option<&ProcessCallback>,
    recyclers: &VerifyRecyclers,
    allow_dead_slots: bool,
) -> result::Result<(), BlockstoreProcessorError> {
    let slot = bank.slot();

    // Fetch the slot's entries, timing success and failure separately.
    let (entries, num_shreds, slot_full) = {
        let mut load_elapsed = Measure::start("load_elapsed");
        let load_result = blockstore
            .get_slot_entries_with_shred_info(slot, progress.num_shreds, allow_dead_slots)
            .map_err(BlockstoreProcessorError::FailedToLoadEntries);
        load_elapsed.stop();
        if load_result.is_err() {
            timing.fetch_fail_elapsed += load_elapsed.as_us();
        } else {
            timing.fetch_elapsed += load_elapsed.as_us();
        }
        load_result
    }?;

    let num_entries = entries.len();
    let num_txs = entries.iter().map(|e| e.transactions.len()).sum::<usize>();
    trace!(
        "Fetched entries for slot {}, num_entries: {}, num_shreds: {}, num_txs: {}, slot_full: {}",
        slot,
        num_entries,
        num_shreds,
        num_txs,
        slot_full,
    );

    if !skip_verification {
        let tick_hash_count = &mut progress.tick_hash_count;
        verify_ticks(bank, &entries, slot_full, tick_hash_count).map_err(|err| {
            warn!(
                "{:#?}, slot: {}, entry len: {}, tick_height: {}, last entry: {}, last_blockhash: {}, shred_index: {}, slot_full: {}",
                err,
                slot,
                num_entries,
                bank.tick_height(),
                progress.last_entry,
                bank.last_blockhash(),
                num_shreds,
                slot_full,
            );
            err
        })?;
    }

    let last_entry_hash = entries.last().map(|e| e.hash);
    let verifier = if !skip_verification {
datapoint_debug!("verify-batch-size", ("size", num_entries as i64, i64));
        // Kick off PoH hash-chain verification of the entries.
        let entry_state = entries.start_verify(&progress.last_entry, recyclers.clone());
        if entry_state.status() == EntryVerificationStatus::Failure {
            warn!("Ledger proof of history failed at slot: {}", slot);
            return Err(BlockError::InvalidEntryHash.into());
        }
        Some(entry_state)
    } else {
        None
    };

    let verify_transaction = {
        let bank = bank.clone();
        move |versioned_tx: VersionedTransaction,
              verification_mode: TransactionVerificationMode|
              -> Result<SanitizedTransaction> {
            bank.verify_transaction(versioned_tx, verification_mode)
        }
    };

    let check_start = Instant::now();
    let check_result = entry::start_verify_transactions(
        entries,
        skip_verification,
        recyclers.clone(),
        Arc::new(verify_transaction),
    );
    let transaction_cpu_duration_us = timing::duration_as_us(&check_start.elapsed());

    match check_result {
        Ok(mut check_result) => {
            let entries = check_result.entries();
            assert!(entries.is_some());

            let mut replay_elapsed = Measure::start("replay_elapsed");
            let mut execute_timings = ExecuteTimings::default();
            let cost_capacity_meter = Arc::new(RwLock::new(BlockCostCapacityMeter::default()));
            // Note: This will shuffle entries' transactions in-place.
            let process_result = process_entries_with_callback(
                bank,
                &mut entries.unwrap(),
                true, // shuffle transactions.
                entry_callback,
                transaction_status_sender,
                replay_vote_sender,
                &mut execute_timings,
                cost_capacity_meter,
            )
            .map_err(BlockstoreProcessorError::from);
            replay_elapsed.stop();
            timing.replay_elapsed += replay_elapsed.as_us();

            timing.execute_timings.accumulate(&execute_timings);

            // If running signature verification on the GPU, wait for that
            // computation to finish, and get the result of it. If we did the
            // signature verification on the CPU, this just returns the
            // already-computed result produced in start_verify_transactions.
            // Either way, check the result of the signature verification.
            if !check_result.finish_verify() {
                warn!("Ledger proof of history failed at slot: {}", bank.slot());
                return Err(TransactionError::SignatureFailure.into());
            }

            if let Some(mut verifier) = verifier {
                let verified = verifier.finish_verify();
                timing.poh_verify_elapsed += verifier.poh_duration_us();
                // The GPU Entry verification (if any) is kicked off right when the CPU-side
                // Entry verification finishes, so these times should be disjoint
                timing.transaction_verify_elapsed +=
                    transaction_cpu_duration_us + check_result.gpu_verify_duration();
                if !verified {
                    warn!("Ledger proof of history failed at slot: {}", bank.slot());
                    return Err(BlockError::InvalidEntryHash.into());
                }
            }

            process_result?;

            progress.num_shreds += num_shreds;
            progress.num_entries += num_entries;
            progress.num_txs += num_txs;
            if let Some(last_entry_hash) = last_entry_hash {
                progress.last_entry = last_entry_hash;
            }

            Ok(())
        }
        Err(err) => {
            warn!("Ledger proof of history failed at slot: {}", bank.slot());
            Err(err.into())
        }
    }
}

// Special handling required for processing the entries in slot 0
fn process_bank_0(
    bank0: &Arc<Bank>,
    blockstore: &Blockstore,
    opts: &ProcessOptions,
    recyclers: &VerifyRecyclers,
    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
) {
    assert_eq!(bank0.slot(), 0);
    let mut progress = ConfirmationProgress::new(bank0.last_blockhash());
    confirm_full_slot(
        blockstore,
        bank0,
        opts,
        recyclers,
        &mut progress,
        None,
        None,
        &mut ExecuteTimings::default(),
    )
    .expect("processing for bank 0 must succeed");
    bank0.freeze();
    blockstore.insert_bank_hash(bank0.slot(), bank0.hash(), false);
    cache_block_meta(bank0, cache_block_meta_sender);
}

// Given a bank, add its children to the pending slots queue if those children slots are
// complete
fn process_next_slots(
    bank: &Arc<Bank>,
    meta: &SlotMeta,
    blockstore: &Blockstore,
    leader_schedule_cache: &LeaderScheduleCache,
    pending_slots: &mut Vec<(SlotMeta, Arc<Bank>, Hash)>,
    initial_forks: &mut HashMap<Slot, Arc<Bank>>,
) -> result::Result<(),
BlockstoreProcessorError> { if let Some(parent) = bank.parent() { initial_forks.remove(&parent.slot()); } initial_forks.insert(bank.slot(), bank.clone()); if meta.next_slots.is_empty() { return Ok(()); } // This is a fork point if there are multiple children, create a new child bank for each fork for next_slot in &meta.next_slots { let next_meta = blockstore .meta(*next_slot) .map_err(|err| { warn!("Failed to load meta for slot {}: {:?}", next_slot, err); BlockstoreProcessorError::FailedToLoadMeta })? .unwrap(); // Only process full slots in blockstore_processor, replay_stage // handles any partials if next_meta.is_full() { let next_bank = Arc::new(Bank::new_from_parent( bank, &leader_schedule_cache .slot_leader_at(*next_slot, Some(bank)) .unwrap(), *next_slot, )); trace!( "New bank for slot {}, parent slot is {}", next_slot, bank.slot(), ); pending_slots.push((next_meta, next_bank, bank.last_blockhash())); } } // Reverse sort by slot, so the next slot to be processed can be popped pending_slots.sort_by(|a, b| b.1.slot().cmp(&a.1.slot())); Ok(()) } // Iterate through blockstore processing slots starting from the root slot pointed to by the // given `meta` and return a vector of frozen bank forks #[allow(clippy::too_many_arguments)] fn load_frozen_forks( root_bank: &Arc<Bank>, root_meta: &SlotMeta, blockstore: &Blockstore, leader_schedule_cache: &mut LeaderScheduleCache, root: &mut Slot, opts: &ProcessOptions, recyclers: &VerifyRecyclers, transaction_status_sender: Option<&TransactionStatusSender>, cache_block_meta_sender: Option<&CacheBlockMetaSender>, snapshot_config: Option<&SnapshotConfig>, accounts_package_sender: AccountsPackageSender, timing: &mut ExecuteTimings, last_full_snapshot_slot: &mut Option<Slot>, ) -> result::Result<Vec<Arc<Bank>>, BlockstoreProcessorError> { let mut initial_forks = HashMap::new(); let mut all_banks = HashMap::new(); let mut last_status_report = Instant::now(); let mut last_free = Instant::now(); let mut pending_slots = vec![]; let 
mut last_root = root_bank.slot(); let mut slots_elapsed = 0; let mut txs = 0; let blockstore_max_root = blockstore.max_root(); let max_root = std::cmp::max(root_bank.slot(), blockstore_max_root); info!( "load_frozen_forks() latest root from blockstore: {}, max_root: {}", blockstore_max_root, max_root, ); process_next_slots( root_bank, root_meta, blockstore, leader_schedule_cache, &mut pending_slots, &mut initial_forks, )?; let dev_halt_at_slot = opts.dev_halt_at_slot.unwrap_or(std::u64::MAX); if root_bank.slot() != dev_halt_at_slot { while !pending_slots.is_empty() { timing.details.per_program_timings.clear(); let (meta, bank, last_entry_hash) = pending_slots.pop().unwrap(); let slot = bank.slot(); if last_status_report.elapsed() > Duration::from_secs(2) { let secs = last_status_report.elapsed().as_secs() as f32; last_status_report = Instant::now(); info!( "processing ledger: slot={}, last root slot={} slots={} slots/s={:?} txs/s={}", slot, last_root, slots_elapsed, slots_elapsed as f32 / secs, txs as f32 / secs, ); slots_elapsed = 0; txs = 0; } let mut progress = ConfirmationProgress::new(last_entry_hash); if process_single_slot( blockstore, &bank, opts, recyclers, &mut progress, transaction_status_sender, cache_block_meta_sender, None, timing, ) .is_err() { continue; } txs += progress.num_txs; // Block must be frozen by this point, otherwise `process_single_slot` would // have errored above assert!(bank.is_frozen()); all_banks.insert(bank.slot(), bank.clone()); // If we've reached the last known root in blockstore, start looking // for newer cluster confirmed roots let new_root_bank = { if *root >= max_root { supermajority_root_from_vote_accounts( bank.slot(), bank.total_epoch_stake(), &bank.vote_accounts(), ).and_then(|supermajority_root| { if supermajority_root > *root { // If there's a cluster confirmed root greater than our last // replayed root, then because the cluster confirmed root should // be descended from our last root, it must exist in `all_banks` 
let cluster_root_bank = all_banks.get(&supermajority_root).unwrap(); // cluster root must be a descendant of our root, otherwise something // is drastically wrong assert!(cluster_root_bank.ancestors.contains_key(root)); info!("blockstore processor found new cluster confirmed root: {}, observed in bank: {}", cluster_root_bank.slot(), bank.slot()); // Ensure cluster-confirmed root and parents are set as root in blockstore let mut rooted_slots = vec![]; let mut new_root_bank = cluster_root_bank.clone(); loop { if new_root_bank.slot() == *root { break; } // Found the last root in the chain, yay! assert!(new_root_bank.slot() > *root); rooted_slots.push((new_root_bank.slot(), new_root_bank.hash())); // As noted, the cluster confirmed root should be descended from // our last root; therefore parent should be set new_root_bank = new_root_bank.parent().unwrap(); } inc_new_counter_info!("load_frozen_forks-cluster-confirmed-root", rooted_slots.len()); blockstore.set_roots(rooted_slots.iter().map(|(slot, _hash)| slot)).expect("Blockstore::set_roots should succeed"); blockstore.set_duplicate_confirmed_slots_and_hashes(rooted_slots.into_iter()).expect("Blockstore::set_duplicate_confirmed should succeed"); Some(cluster_root_bank) } else { None } }) } else if blockstore.is_root(slot) { Some(&bank) } else { None } }; if let Some(new_root_bank) = new_root_bank { *root = new_root_bank.slot(); last_root = new_root_bank.slot(); leader_schedule_cache.set_root(new_root_bank); new_root_bank.squash(); if let Some(snapshot_config) = snapshot_config { let block_height = new_root_bank.block_height(); if snapshot_utils::should_take_full_snapshot( block_height, snapshot_config.full_snapshot_archive_interval_slots, ) { info!("Taking snapshot of new root bank that has crossed the full snapshot interval! 
slot: {}", *root); *last_full_snapshot_slot = Some(*root); new_root_bank.exhaustively_free_unused_resource(*last_full_snapshot_slot); last_free = Instant::now(); new_root_bank.update_accounts_hash_with_index_option( snapshot_config.accounts_hash_use_index, snapshot_config.accounts_hash_debug_verify, Some(new_root_bank.epoch_schedule().slots_per_epoch), false, ); snapshot_utils::snapshot_bank( new_root_bank, new_root_bank.src.slot_deltas(&new_root_bank.src.roots()), &accounts_package_sender, &snapshot_config.bank_snapshots_dir, &snapshot_config.snapshot_archives_dir, snapshot_config.snapshot_version, snapshot_config.archive_format, None, Some(SnapshotType::FullSnapshot), ) .expect("Failed to snapshot bank while loading frozen banks"); trace!( "took bank snapshot for new root bank, block height: {}, slot: {}", block_height, *root ); } } if last_free.elapsed() > Duration::from_secs(10) { // Must be called after `squash()`, so that AccountsDb knows what // the roots are for the cache flushing in exhaustively_free_unused_resource(). 
// This could take few secs; so update last_free later new_root_bank.exhaustively_free_unused_resource(*last_full_snapshot_slot); last_free = Instant::now(); } // Filter out all non descendants of the new root pending_slots .retain(|(_, pending_bank, _)| pending_bank.ancestors.contains_key(root)); initial_forks.retain(|_, fork_tip_bank| fork_tip_bank.ancestors.contains_key(root)); all_banks.retain(|_, bank| bank.ancestors.contains_key(root)); } slots_elapsed += 1; trace!( "Bank for {}slot {} is complete", if last_root == slot { "root " } else { "" }, slot, ); process_next_slots( &bank, &meta, blockstore, leader_schedule_cache, &mut pending_slots, &mut initial_forks, )?; if slot >= dev_halt_at_slot { break; } } } Ok(initial_forks.values().cloned().collect::<Vec<_>>()) } // `roots` is sorted largest to smallest by root slot fn supermajority_root(roots: &[(Slot, u64)], total_epoch_stake: u64) -> Option<Slot> { if roots.is_empty() { return None; } // Find latest root let mut total = 0; let mut prev_root = roots[0].0; for (root, stake) in roots.iter() { assert!(*root <= prev_root); total += stake; if total as f64 / total_epoch_stake as f64 > VOTE_THRESHOLD_SIZE { return Some(*root); } prev_root = *root; } None } fn supermajority_root_from_vote_accounts( bank_slot: Slot, total_epoch_stake: u64, vote_accounts: &HashMap<Pubkey, (/*stake:*/ u64, VoteAccount)>, ) -> Option<Slot> { let mut roots_stakes: Vec<(Slot, u64)> = vote_accounts .iter() .filter_map(|(key, (stake, account))| { if *stake == 0 { return None; } match account.vote_state().as_ref() { Err(_) => { warn!( "Unable to get vote_state from account {} in bank: {}", key, bank_slot ); None } Ok(vote_state) => Some((vote_state.root_slot?, *stake)), } }) .collect(); // Sort from greatest to smallest slot roots_stakes.sort_unstable_by(|a, b| a.0.cmp(&b.0).reverse()); // Find latest root supermajority_root(&roots_stakes, total_epoch_stake) } // Processes and replays the contents of a single slot, returns Error // if 
// failed to play the slot
//
// On verification failure the slot is marked dead in the blockstore (when we
// have primary access) so validators don't replay it and hit
// AlreadyProcessed errors later in ReplayStage; with secondary access we can
// only assert the slot was already marked dead.
fn process_single_slot(
    blockstore: &Blockstore,
    bank: &Arc<Bank>,
    opts: &ProcessOptions,
    recyclers: &VerifyRecyclers,
    progress: &mut ConfirmationProgress,
    transaction_status_sender: Option<&TransactionStatusSender>,
    cache_block_meta_sender: Option<&CacheBlockMetaSender>,
    replay_vote_sender: Option<&ReplayVoteSender>,
    timing: &mut ExecuteTimings,
) -> result::Result<(), BlockstoreProcessorError> {
    // Mark corrupt slots as dead so validators don't replay this slot and
    // see AlreadyProcessed errors later in ReplayStage
    confirm_full_slot(blockstore, bank, opts, recyclers, progress, transaction_status_sender, replay_vote_sender, timing).map_err(|err| {
        let slot = bank.slot();
        warn!("slot {} failed to verify: {}", slot, err);
        if blockstore.is_primary_access() {
            blockstore
                .set_dead_slot(slot)
                .expect("Failed to mark slot as dead in blockstore");
        } else {
            assert!(blockstore.is_dead(slot), "Failed slot isn't dead and can't update due to being secondary blockstore access: {}", slot);
        }
        err
    })?;

    bank.freeze(); // all banks handled by this routine are created from complete slots
    // Record the frozen bank hash for this slot (not yet duplicate-confirmed)
    blockstore.insert_bank_hash(bank.slot(), bank.hash(), false);
    cache_block_meta(bank, cache_block_meta_sender);

    Ok(())
}

/// Message sent to the transaction-status service: either a batch of
/// executed transactions or a notification that a slot was frozen.
#[allow(clippy::large_enum_variant)]
pub enum TransactionStatusMessage {
    Batch(TransactionStatusBatch),
    Freeze(Slot),
}

/// One batch of executed transactions plus the data needed to persist their
/// statuses (pre/post balances, token balances, rent debits).
pub struct TransactionStatusBatch {
    pub bank: Arc<Bank>,
    pub transactions: Vec<SanitizedTransaction>,
    pub execution_results: Vec<TransactionExecutionResult>,
    pub balances: TransactionBalancesSet,
    pub token_balances: TransactionTokenBalancesSet,
    pub rent_debits: Vec<RentDebits>,
}

/// Cloneable handle for sending [`TransactionStatusMessage`]s to the
/// transaction-status service.
#[derive(Clone)]
pub struct TransactionStatusSender {
    pub sender: Sender<TransactionStatusMessage>,
    // When false, CPI inner instructions and log messages are stripped from
    // execution results before sending (see send_transaction_status_batch)
    pub enable_cpi_and_log_storage: bool,
}

impl TransactionStatusSender {
    /// Sends one executed batch to the status service; send failures are only
    /// traced, never propagated.
    pub fn send_transaction_status_batch(
        &self,
        bank: Arc<Bank>,
        transactions: Vec<SanitizedTransaction>,
        mut execution_results: Vec<TransactionExecutionResult>,
        balances: TransactionBalancesSet,
        token_balances: TransactionTokenBalancesSet,
        rent_debits: Vec<RentDebits>,
    ) {
        let slot = bank.slot();

        // Strip per-transaction logs and inner (CPI) instructions when the
        // operator did not opt in to storing them
        if !self.enable_cpi_and_log_storage {
            execution_results.iter_mut().for_each(|execution_result| {
                if let TransactionExecutionResult::Executed(details) = execution_result {
                    details.log_messages.take();
                    details.inner_instructions.take();
                }
            });
        }

        // Best effort: a closed receiver is only traced, not an error
        if let Err(e) = self
            .sender
            .send(TransactionStatusMessage::Batch(TransactionStatusBatch {
                bank,
                transactions,
                execution_results,
                balances,
                token_balances,
                rent_debits,
            }))
        {
            trace!(
                "Slot {} transaction_status send batch failed: {:?}",
                slot, e
            );
        }
    }

    /// Notifies the status service that `bank`'s slot was frozen; best effort.
    pub fn send_transaction_status_freeze_message(&self, bank: &Arc<Bank>) {
        let slot = bank.slot();
        if let Err(e) = self.sender.send(TransactionStatusMessage::Freeze(slot)) {
            trace!(
                "Slot {} transaction_status send freeze message failed: {:?}",
                slot, e
            );
        }
    }
}

/// Channel used to hand frozen banks to the block-metadata caching service.
pub type CacheBlockMetaSender = Sender<Arc<Bank>>;

/// Forwards `bank` to the cache-block-meta service if one is configured;
/// send failures are logged and ignored.
pub fn cache_block_meta(bank: &Arc<Bank>, cache_block_meta_sender: Option<&CacheBlockMetaSender>) {
    if let Some(cache_block_meta_sender) = cache_block_meta_sender {
        cache_block_meta_sender
            .send(bank.clone())
            .unwrap_or_else(|err| warn!("cache_block_meta_sender failed: {:?}", err));
    }
}

/// Writes `slot` into the blockstore filled entirely with ticks chained from
/// `last_entry_hash`, and returns the hash of the last written entry.
// used for tests only
pub fn fill_blockstore_slot_with_ticks(
    blockstore: &Blockstore,
    ticks_per_slot: u64,
    slot: u64,
    parent_slot: u64,
    last_entry_hash: Hash,
) -> Hash {
    // Only slot 0 can be equal to the parent_slot
    assert!(slot.saturating_sub(1) >= parent_slot);
    // Cover any skipped slots between parent and this slot with extra ticks
    let num_slots = (slot - parent_slot).max(1);
    let entries = create_ticks(num_slots * ticks_per_slot, 0, last_entry_hash);
    let last_entry_hash = entries.last().unwrap().hash;

    blockstore
        .write_entries(
            slot,
            0,
            0,
            ticks_per_slot,
            Some(parent_slot),
            true,
            &Arc::new(Keypair::new()),
            entries,
            0,
        )
        .unwrap();

    last_entry_hash
}

#[cfg(test)]
pub mod tests {
    use {
        super::*,
        crate::genesis_utils::{
            create_genesis_config, create_genesis_config_with_leader, GenesisConfigInfo,
        },
        crossbeam_channel::unbounded,
        matches::assert_matches,
        rand::{thread_rng, Rng},
        solana_entry::entry::{create_ticks, next_entry, next_entry_mut},
        solana_runtime::genesis_utils::{
            self, create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs,
        },
        solana_sdk::{
            account::{AccountSharedData, WritableAccount},
            epoch_schedule::EpochSchedule,
            hash::Hash,
            pubkey::Pubkey,
            signature::{Keypair, Signer},
            system_instruction::SystemError,
            system_transaction,
            transaction::{Transaction, TransactionError},
        },
        solana_vote_program::{
            self,
            vote_state::{VoteState, VoteStateVersions, MAX_LOCKOUT_HISTORY},
            vote_transaction,
        },
        std::{collections::BTreeSet, sync::RwLock},
        tempfile::TempDir,
        trees::tr,
    };

    // Test helper: runs process_blockstore with default channels/senders and
    // unwraps the result.
    fn test_process_blockstore(
        genesis_config: &GenesisConfig,
        blockstore: &Blockstore,
        opts: ProcessOptions,
    ) -> BlockstoreProcessorInner {
        let (accounts_package_sender, _) = unbounded();
        process_blockstore(
            genesis_config,
            blockstore,
            Vec::new(),
            opts,
            None,
            None,
            accounts_package_sender,
            None,
        )
        .unwrap()
    }

    // A slot whose ticks carry the wrong hash count must fail PoH
    // verification, so replay stops at slot 0.
    #[test]
    fn test_process_blockstore_with_missing_hashes() {
        solana_logger::setup();

        let hashes_per_tick = 2;
        let GenesisConfigInfo {
            mut genesis_config, ..
        } = create_genesis_config(10_000);
        genesis_config.poh_config.hashes_per_tick = Some(hashes_per_tick);
        let ticks_per_slot = genesis_config.ticks_per_slot;

        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let parent_slot = 0;
        let slot = 1;
        // One hash short per tick — deliberately invalid
        let entries = create_ticks(ticks_per_slot, hashes_per_tick - 1, blockhash);
        assert_matches!(
            blockstore.write_entries(
                slot,
                0,
                0,
                ticks_per_slot,
                Some(parent_slot),
                true,
                &Arc::new(Keypair::new()),
                entries,
                0,
            ),
            Ok(_)
        );

        let (bank_forks, ..)
        = test_process_blockstore(
            &genesis_config,
            &blockstore,
            ProcessOptions {
                poh_verify: true,
                ..ProcessOptions::default()
            },
        );
        assert_eq!(frozen_bank_slots(&bank_forks), vec![0]);
    }

    // A slot with a missing tick is invalid; a later, fully valid fork is
    // still processed.
    #[test]
    fn test_process_blockstore_with_invalid_slot_tick_count() {
        solana_logger::setup();

        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
        let ticks_per_slot = genesis_config.ticks_per_slot;

        // Create a new ledger with slot 0 full of ticks
        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Write slot 1 with one tick missing
        let parent_slot = 0;
        let slot = 1;
        let entries = create_ticks(ticks_per_slot - 1, 0, blockhash);
        assert_matches!(
            blockstore.write_entries(
                slot,
                0,
                0,
                ticks_per_slot,
                Some(parent_slot),
                true,
                &Arc::new(Keypair::new()),
                entries,
                0,
            ),
            Ok(_)
        );

        // Should return slot 0, the last slot on the fork that is valid
        let (bank_forks, ..) = test_process_blockstore(
            &genesis_config,
            &blockstore,
            ProcessOptions {
                poh_verify: true,
                ..ProcessOptions::default()
            },
        );
        assert_eq!(frozen_bank_slots(&bank_forks), vec![0]);

        // Write slot 2 fully
        let _last_slot2_entry_hash =
            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 0, blockhash);
        let (bank_forks, ..) = test_process_blockstore(
            &genesis_config,
            &blockstore,
            ProcessOptions {
                poh_verify: true,
                ..ProcessOptions::default()
            },
        );

        // One valid fork, one bad fork. process_blockstore() should only return the valid fork
        assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 2]);
        assert_eq!(bank_forks.working_bank().slot(), 2);
        assert_eq!(bank_forks.root(), 0);
    }

    // A slot containing an extra entry after the final tick is invalid.
    #[test]
    fn test_process_blockstore_with_slot_with_trailing_entry() {
        solana_logger::setup();

        let GenesisConfigInfo {
            mint_keypair,
            genesis_config,
            ..
        } = create_genesis_config(10_000);
        let ticks_per_slot = genesis_config.ticks_per_slot;

        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        let mut entries = create_ticks(ticks_per_slot, 0, blockhash);
        let trailing_entry = {
            let keypair = Keypair::new();
            let tx = system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 1, blockhash);
            next_entry(&blockhash, 1, vec![tx])
        };
        entries.push(trailing_entry);

        // Tricks blockstore into writing the trailing entry by lying that there is one more tick
        // per slot.
        let parent_slot = 0;
        let slot = 1;
        assert_matches!(
            blockstore.write_entries(
                slot,
                0,
                0,
                ticks_per_slot + 1,
                Some(parent_slot),
                true,
                &Arc::new(Keypair::new()),
                entries,
                0,
            ),
            Ok(_)
        );

        let opts = ProcessOptions {
            poh_verify: true,
            accounts_db_test_hash_calculation: true,
            ..ProcessOptions::default()
        };
        let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, opts);
        assert_eq!(frozen_bank_slots(&bank_forks), vec![0]);
    }

    // Replay must stop at the last complete slot on a fork with an
    // incomplete (not "full") slot, while other complete forks still replay.
    #[test]
    fn test_process_blockstore_with_incomplete_slot() {
        solana_logger::setup();

        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
        let ticks_per_slot = genesis_config.ticks_per_slot;

        /*
          Build a blockstore in the ledger with the following fork structure:

               slot 0 (all ticks)
                 |
               slot 1 (all ticks but one)
                 |
               slot 2 (all ticks)

           where slot 1 is incomplete (missing 1 tick at the end)
        */

        // Create a new ledger with slot 0 full of ticks
        let (ledger_path, mut blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        debug!("ledger_path: {:?}", ledger_path);

        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Write slot 1
        // slot 1, points at slot 0.
        // Missing one tick
        {
            let parent_slot = 0;
            let slot = 1;
            let mut entries = create_ticks(ticks_per_slot, 0, blockhash);
            blockhash = entries.last().unwrap().hash;

            // throw away last one
            entries.pop();

            assert_matches!(
                blockstore.write_entries(
                    slot,
                    0,
                    0,
                    ticks_per_slot,
                    Some(parent_slot),
                    false,
                    &Arc::new(Keypair::new()),
                    entries,
                    0,
                ),
                Ok(_)
            );
        }

        // slot 2, points at slot 1
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, blockhash);

        let opts = ProcessOptions {
            poh_verify: true,
            accounts_db_test_hash_calculation: true,
            ..ProcessOptions::default()
        };
        let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, opts);
        assert_eq!(frozen_bank_slots(&bank_forks), vec![0]); // slot 1 isn't "full", we stop at slot zero

        /*
          Add a complete slot such that the store looks like:

                                     slot 0 (all ticks)
                                   /                  \
                    slot 1 (all ticks but one)        slot 3 (all ticks)
                           |
                    slot 2 (all ticks)
        */
        let opts = ProcessOptions {
            poh_verify: true,
            accounts_db_test_hash_calculation: true,
            ..ProcessOptions::default()
        };
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 0, blockhash);
        // Slot 0 should not show up in the ending bank_forks_info
        let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, opts);

        // slot 1 isn't "full", we stop at slot zero
        assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 3]);
    }

    // A root set on one fork causes the other fork to be ignored and the
    // rooted bank's parents to be squashed away.
    #[test]
    fn test_process_blockstore_with_two_forks_and_squash() {
        solana_logger::setup();

        let GenesisConfigInfo { genesis_config, ..
        } = create_genesis_config(10_000);
        let ticks_per_slot = genesis_config.ticks_per_slot;

        // Create a new ledger with slot 0 full of ticks
        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        debug!("ledger_path: {:?}", ledger_path);
        let mut last_entry_hash = blockhash;

        /*
            Build a blockstore in the ledger with the following fork structure:

                 slot 0
                   |
                 slot 1
                 /   \
            slot 2   |
               /     |
            slot 3   |
                     |
                   slot 4 <-- set_root(true)
        */
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Fork 1, ending at slot 3
        let last_slot1_entry_hash =
            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, last_entry_hash);
        last_entry_hash = fill_blockstore_slot_with_ticks(
            &blockstore,
            ticks_per_slot,
            2,
            1,
            last_slot1_entry_hash,
        );
        let last_fork1_entry_hash =
            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 2, last_entry_hash);

        // Fork 2, ending at slot 4
        let last_fork2_entry_hash = fill_blockstore_slot_with_ticks(
            &blockstore,
            ticks_per_slot,
            4,
            1,
            last_slot1_entry_hash,
        );

        info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
        info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);

        blockstore.set_roots(vec![0, 1, 4].iter()).unwrap();

        let opts = ProcessOptions {
            poh_verify: true,
            accounts_db_test_hash_calculation: true,
            ..ProcessOptions::default()
        };
        let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, opts);

        // One fork, other one is ignored b/c not a descendant of the root
        assert_eq!(frozen_bank_slots(&bank_forks), vec![4]);

        // Rooted bank should have no live parents left after squash
        assert!(&bank_forks[4]
            .parents()
            .iter()
            .map(|bank| bank.slot())
            .next()
            .is_none());

        // Ensure bank_forks holds the right banks
        verify_fork_infos(&bank_forks);

        assert_eq!(bank_forks.root(), 4);
    }

    // With the root at the fork point, both descendant forks survive replay
    // and parent chains are intact.
    #[test]
    fn test_process_blockstore_with_two_forks() {
        solana_logger::setup();

        let GenesisConfigInfo { genesis_config, ..
        } = create_genesis_config(10_000);
        let ticks_per_slot = genesis_config.ticks_per_slot;

        // Create a new ledger with slot 0 full of ticks
        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        debug!("ledger_path: {:?}", ledger_path);
        let mut last_entry_hash = blockhash;

        /*
            Build a blockstore in the ledger with the following fork structure:

                 slot 0
                   |
                 slot 1  <-- set_root(true)
                 /   \
            slot 2   |
               /     |
            slot 3   |
                     |
                   slot 4
        */
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Fork 1, ending at slot 3
        let last_slot1_entry_hash =
            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, last_entry_hash);
        last_entry_hash = fill_blockstore_slot_with_ticks(
            &blockstore,
            ticks_per_slot,
            2,
            1,
            last_slot1_entry_hash,
        );
        let last_fork1_entry_hash =
            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 2, last_entry_hash);

        // Fork 2, ending at slot 4
        let last_fork2_entry_hash = fill_blockstore_slot_with_ticks(
            &blockstore,
            ticks_per_slot,
            4,
            1,
            last_slot1_entry_hash,
        );

        info!("last_fork1_entry.hash: {:?}", last_fork1_entry_hash);
        info!("last_fork2_entry.hash: {:?}", last_fork2_entry_hash);

        blockstore.set_roots(vec![0, 1].iter()).unwrap();

        let opts = ProcessOptions {
            poh_verify: true,
            accounts_db_test_hash_calculation: true,
            ..ProcessOptions::default()
        };
        let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, opts);

        assert_eq!(frozen_bank_slots(&bank_forks), vec![1, 2, 3, 4]);
        assert_eq!(bank_forks.working_bank().slot(), 4);
        assert_eq!(bank_forks.root(), 1);

        assert_eq!(
            &bank_forks[3]
                .parents()
                .iter()
                .map(|bank| bank.slot())
                .collect::<Vec<_>>(),
            &[2, 1]
        );
        assert_eq!(
            &bank_forks[4]
                .parents()
                .iter()
                .map(|bank| bank.slot())
                .collect::<Vec<_>>(),
            &[1]
        );
        assert_eq!(bank_forks.root(), 1);

        // Ensure bank_forks holds the right banks
        verify_fork_infos(&bank_forks);
    }

    // A fork ending in a dead slot is pruned; the sibling fork is kept.
    #[test]
    fn test_process_blockstore_with_dead_slot() {
        solana_logger::setup();

        let GenesisConfigInfo { genesis_config, ..
        } = create_genesis_config(10_000);
        let ticks_per_slot = genesis_config.ticks_per_slot;
        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        debug!("ledger_path: {:?}", ledger_path);

        /*
                   slot 0
                     |
                   slot 1
                  /     \
                 /       \
           slot 2 (dead)  \
                           \
                          slot 3
        */
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        let slot1_blockhash =
            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, blockhash);
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, slot1_blockhash);
        blockstore.set_dead_slot(2).unwrap();
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 1, slot1_blockhash);

        let (bank_forks, ..) =
            test_process_blockstore(&genesis_config, &blockstore, ProcessOptions::default());

        assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 1, 3]);
        assert_eq!(bank_forks.working_bank().slot(), 3);
        assert_eq!(
            &bank_forks[3]
                .parents()
                .iter()
                .map(|bank| bank.slot())
                .collect::<Vec<_>>(),
            &[1, 0]
        );
        verify_fork_infos(&bank_forks);
    }

    // A dead leaf is pruned but its (valid) parent slot remains in the forks.
    #[test]
    fn test_process_blockstore_with_dead_child() {
        solana_logger::setup();

        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
        let ticks_per_slot = genesis_config.ticks_per_slot;
        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        debug!("ledger_path: {:?}", ledger_path);

        /*
                   slot 0
                     |
                   slot 1
                  /     \
                 /       \
              slot 2      \
               /           \
           slot 4 (dead)   slot 3
        */
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        let slot1_blockhash =
            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, blockhash);
        let slot2_blockhash =
            fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 1, slot1_blockhash);
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 4, 2, slot2_blockhash);
        blockstore.set_dead_slot(4).unwrap();
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 3, 1, slot1_blockhash);
        let (bank_forks, ..)
        = test_process_blockstore(&genesis_config, &blockstore, ProcessOptions::default());

        // Should see the parent of the dead child
        assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 1, 2, 3]);
        assert_eq!(bank_forks.working_bank().slot(), 3);

        assert_eq!(
            &bank_forks[3]
                .parents()
                .iter()
                .map(|bank| bank.slot())
                .collect::<Vec<_>>(),
            &[1, 0]
        );
        assert_eq!(
            &bank_forks[2]
                .parents()
                .iter()
                .map(|bank| bank.slot())
                .collect::<Vec<_>>(),
            &[1, 0]
        );
        assert_eq!(bank_forks.working_bank().slot(), 3);
        verify_fork_infos(&bank_forks);
    }

    // If every child of the root is dead, only the root itself survives.
    #[test]
    fn test_root_with_all_dead_children() {
        solana_logger::setup();

        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
        let ticks_per_slot = genesis_config.ticks_per_slot;
        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        debug!("ledger_path: {:?}", ledger_path);

        /*
                   slot 0
                 /        \
                /          \
          slot 1 (dead)  slot 2 (dead)
        */
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 1, 0, blockhash);
        fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, 2, 0, blockhash);
        blockstore.set_dead_slot(1).unwrap();
        blockstore.set_dead_slot(2).unwrap();
        let (bank_forks, ..) =
            test_process_blockstore(&genesis_config, &blockstore, ProcessOptions::default());

        // Should see only the parent of the dead children
        assert_eq!(frozen_bank_slots(&bank_forks), vec![0]);
        verify_fork_infos(&bank_forks);
    }

    // Restarting from a root exactly on an epoch boundary must not break the
    // leader schedule computation.
    #[test]
    fn test_process_blockstore_epoch_boundary_root() {
        solana_logger::setup();

        let GenesisConfigInfo { genesis_config, ..
        } = create_genesis_config(10_000);
        let ticks_per_slot = genesis_config.ticks_per_slot;

        // Create a new ledger with slot 0 full of ticks
        let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        let mut last_entry_hash = blockhash;

        let blockstore = Blockstore::open(ledger_path.path()).unwrap();

        // Let `last_slot` be the number of slots in the first two epochs
        let epoch_schedule = get_epoch_schedule(&genesis_config, Vec::new());
        let last_slot = epoch_schedule.get_last_slot_in_epoch(1);

        // Create a single chain of slots with all indexes in the range [0, v + 1]
        for i in 1..=last_slot + 1 {
            last_entry_hash = fill_blockstore_slot_with_ticks(
                &blockstore,
                ticks_per_slot,
                i,
                i - 1,
                last_entry_hash,
            );
        }

        // Set a root on the last slot of the last confirmed epoch
        let rooted_slots: Vec<Slot> = (0..=last_slot).collect();
        blockstore.set_roots(rooted_slots.iter()).unwrap();

        // Set a root on the next slot of the confirmed epoch
        blockstore
            .set_roots(std::iter::once(&(last_slot + 1)))
            .unwrap();

        // Check that we can properly restart the ledger / leader scheduler doesn't fail
        let opts = ProcessOptions {
            poh_verify: true,
            accounts_db_test_hash_calculation: true,
            ..ProcessOptions::default()
        };
        let (bank_forks, ..)
        = test_process_blockstore(&genesis_config, &blockstore, opts);

        // There is one fork, head is last_slot + 1
        assert_eq!(frozen_bank_slots(&bank_forks), vec![last_slot + 1]);

        // The latest root should have purged all its parents
        assert!(&bank_forks[last_slot + 1]
            .parents()
            .iter()
            .map(|bank| bank.slot())
            .next()
            .is_none());
    }

    // first_err must return Ok for all-Ok input and the error of the
    // earliest failing result otherwise.
    #[test]
    fn test_first_err() {
        assert_eq!(first_err(&[Ok(())]), Ok(()));
        assert_eq!(
            first_err(&[Ok(()), Err(TransactionError::AlreadyProcessed)]),
            Err(TransactionError::AlreadyProcessed)
        );
        assert_eq!(
            first_err(&[
                Ok(()),
                Err(TransactionError::AlreadyProcessed),
                Err(TransactionError::AccountInUse)
            ]),
            Err(TransactionError::AlreadyProcessed)
        );
        assert_eq!(
            first_err(&[
                Ok(()),
                Err(TransactionError::AccountInUse),
                Err(TransactionError::AlreadyProcessed)
            ]),
            Err(TransactionError::AccountInUse)
        );
        assert_eq!(
            first_err(&[
                Err(TransactionError::AccountInUse),
                Ok(()),
                Err(TransactionError::AlreadyProcessed)
            ]),
            Err(TransactionError::AccountInUse)
        );
    }

    // Processing a tick entry registers its hash as a valid recent blockhash.
    #[test]
    fn test_process_empty_entry_is_registered() {
        solana_logger::setup();

        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(2);
        let bank = Arc::new(Bank::new_for_tests(&genesis_config));
        let keypair = Keypair::new();
        let slot_entries = create_ticks(genesis_config.ticks_per_slot, 1, genesis_config.hash());
        let tx = system_transaction::transfer(
            &mint_keypair,
            &keypair.pubkey(),
            1,
            slot_entries.last().unwrap().hash,
        );

        // First, ensure the TX is rejected because of the unregistered last ID
        assert_eq!(
            bank.process_transaction(&tx),
            Err(TransactionError::BlockhashNotFound)
        );

        // Now ensure the TX is accepted despite pointing to the ID of an empty entry.
        process_entries_for_tests(&bank, slot_entries, true, None, None).unwrap();
        assert_eq!(bank.process_transaction(&tx), Ok(()));
    }

    // End-to-end replay of a hand-built slot 1 containing both successful and
    // failing transfers; verifies balances, tick height, and last blockhash.
    #[test]
    fn test_process_ledger_simple() {
        solana_logger::setup();
        let leader_pubkey = solana_sdk::pubkey::new_rand();
        let mint = 100;
        let hashes_per_tick = 10;
        let GenesisConfigInfo {
            mut genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config_with_leader(mint, &leader_pubkey, 50);
        genesis_config.poh_config.hashes_per_tick = Some(hashes_per_tick);
        let (ledger_path, mut last_entry_hash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        debug!("ledger_path: {:?}", ledger_path);

        let deducted_from_mint = 3;
        let mut entries = vec![];
        let blockhash = genesis_config.hash();
        for _ in 0..deducted_from_mint {
            // Transfer one token from the mint to a random account
            let keypair = Keypair::new();
            let tx = system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 1, blockhash);
            let entry = next_entry_mut(&mut last_entry_hash, 1, vec![tx]);
            entries.push(entry);

            // Add a second Transaction that will produce a
            // InstructionError<0, ResultWithNegativeLamports> error when processed
            let keypair2 = Keypair::new();
            let tx = system_transaction::transfer(&mint_keypair, &keypair2.pubkey(), 101, blockhash);
            let entry = next_entry_mut(&mut last_entry_hash, 1, vec![tx]);
            entries.push(entry);
        }

        let remaining_hashes = hashes_per_tick - entries.len() as u64;
        let tick_entry = next_entry_mut(&mut last_entry_hash, remaining_hashes, vec![]);
        entries.push(tick_entry);

        // Fill up the rest of slot 1 with ticks
        entries.extend(create_ticks(
            genesis_config.ticks_per_slot - 1,
            genesis_config.poh_config.hashes_per_tick.unwrap(),
            last_entry_hash,
        ));
        let last_blockhash = entries.last().unwrap().hash;

        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        blockstore
            .write_entries(
                1,
                0,
                0,
                genesis_config.ticks_per_slot,
                None,
                true,
                &Arc::new(Keypair::new()),
                entries,
                0,
            )
            .unwrap();
        let opts = ProcessOptions {
            poh_verify: true,
            accounts_db_test_hash_calculation: true,
            ..ProcessOptions::default()
        };
        let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, opts);

        assert_eq!(frozen_bank_slots(&bank_forks), vec![0, 1]);
        assert_eq!(bank_forks.root(), 0);
        assert_eq!(bank_forks.working_bank().slot(), 1);

        let bank = bank_forks[1].clone();
        assert_eq!(
            bank.get_balance(&mint_keypair.pubkey()),
            mint - deducted_from_mint
        );
        assert_eq!(bank.tick_height(), 2 * genesis_config.ticks_per_slot);
        assert_eq!(bank.last_blockhash(), last_blockhash);
    }

    // Replay works with a degenerate one-tick-per-slot configuration.
    #[test]
    fn test_process_ledger_with_one_tick_per_slot() {
        let GenesisConfigInfo {
            mut genesis_config, ..
        } = create_genesis_config(123);
        genesis_config.ticks_per_slot = 1;
        let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);

        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        let opts = ProcessOptions {
            poh_verify: true,
            accounts_db_test_hash_calculation: true,
            ..ProcessOptions::default()
        };
        let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, opts);

        assert_eq!(frozen_bank_slots(&bank_forks), vec![0]);
        let bank = bank_forks[0].clone();
        assert_eq!(bank.tick_height(), 1);
    }

    // override_num_threads must size the replay thread pool accordingly.
    #[test]
    fn test_process_ledger_options_override_threads() {
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(123);
        let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);

        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        let opts = ProcessOptions {
            override_num_threads: Some(1),
            accounts_db_test_hash_calculation: true,
            ..ProcessOptions::default()
        };
        test_process_blockstore(&genesis_config, &blockstore, opts);
        PAR_THREAD_POOL.with(|pool| {
            assert_eq!(pool.borrow().current_num_threads(), 1);
        });
    }

    // full_leader_cache must lift the leader-schedule cache capacity limit.
    #[test]
    fn test_process_ledger_options_full_leader_cache() {
        let GenesisConfigInfo { genesis_config, ..
        } = create_genesis_config(123);
        let (ledger_path, _blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config);

        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        let opts = ProcessOptions {
            full_leader_cache: true,
            accounts_db_test_hash_calculation: true,
            ..ProcessOptions::default()
        };
        let (_bank_forks, leader_schedule, _) =
            test_process_blockstore(&genesis_config, &blockstore, opts);
        assert_eq!(leader_schedule.max_schedules(), std::usize::MAX);
    }

    // The entry callback must fire once per transaction entry, observing the
    // bank state after each entry is applied.
    #[test]
    fn test_process_ledger_options_entry_callback() {
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(100);
        let (ledger_path, last_entry_hash) = create_new_tmp_ledger_auto_delete!(&genesis_config);
        let blockstore = Blockstore::open(ledger_path.path()).unwrap();
        let blockhash = genesis_config.hash();
        let keypairs = [Keypair::new(), Keypair::new(), Keypair::new()];

        let tx = system_transaction::transfer(&mint_keypair, &keypairs[0].pubkey(), 1, blockhash);
        let entry_1 = next_entry(&last_entry_hash, 1, vec![tx]);

        let tx = system_transaction::transfer(&mint_keypair, &keypairs[1].pubkey(), 1, blockhash);
        let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]);

        let mut entries = vec![entry_1, entry_2];
        entries.extend(create_ticks(
            genesis_config.ticks_per_slot,
            0,
            last_entry_hash,
        ));
        blockstore
            .write_entries(
                1,
                0,
                0,
                genesis_config.ticks_per_slot,
                None,
                true,
                &Arc::new(Keypair::new()),
                entries,
                0,
            )
            .unwrap();

        let callback_counter: Arc<RwLock<usize>> = Arc::default();
        let entry_callback = {
            let counter = callback_counter.clone();
            let pubkeys: Vec<Pubkey> = keypairs.iter().map(|k| k.pubkey()).collect();
            Arc::new(move |bank: &Bank| {
                let mut counter = counter.write().unwrap();
                // After entry N, recipient N is funded and N+1 is not yet
                assert_eq!(bank.get_balance(&pubkeys[*counter]), 1);
                assert_eq!(bank.get_balance(&pubkeys[*counter + 1]), 0);
                *counter += 1;
            })
        };

        let opts = ProcessOptions {
            override_num_threads: Some(1),
            entry_callback: Some(entry_callback),
            accounts_db_test_hash_calculation: true,
            ..ProcessOptions::default()
        };
        test_process_blockstore(&genesis_config, &blockstore, opts);
        assert_eq!(*callback_counter.write().unwrap(), 2);
    }

    // A lone tick entry advances the bank's tick height.
    #[test]
    fn test_process_entries_tick() {
        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(1000);
        let bank = Arc::new(Bank::new_for_tests(&genesis_config));

        // ensure bank can process a tick
        assert_eq!(bank.tick_height(), 0);
        let tick = next_entry(&genesis_config.hash(), 1, vec![]);
        assert_eq!(
            process_entries_for_tests(&bank, vec![tick], true, None, None),
            Ok(())
        );
        assert_eq!(bank.tick_height(), 1);
    }

    // Two entries whose transactions share the mint account process cleanly
    // in sequence.
    #[test]
    fn test_process_entries_2_entries_collision() {
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(1000);
        let bank = Arc::new(Bank::new_for_tests(&genesis_config));
        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();

        let blockhash = bank.last_blockhash();

        // ensure bank can process 2 entries that have a common account and no tick is registered
        let tx = system_transaction::transfer(
            &mint_keypair,
            &keypair1.pubkey(),
            2,
            bank.last_blockhash(),
        );
        let entry_1 = next_entry(&blockhash, 1, vec![tx]);
        let tx = system_transaction::transfer(
            &mint_keypair,
            &keypair2.pubkey(),
            2,
            bank.last_blockhash(),
        );
        let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]);
        assert_eq!(
            process_entries_for_tests(&bank, vec![entry_1, entry_2], true, None, None),
            Ok(())
        );
        assert_eq!(bank.get_balance(&keypair1.pubkey()), 2);
        assert_eq!(bank.get_balance(&keypair2.pubkey()), 2);
        assert_eq!(bank.last_blockhash(), blockhash);
    }

    // An entry whose second transaction locks accounts used by the previous
    // entry still processes; final balances reflect both entries.
    #[test]
    fn test_process_entries_2_txes_collision() {
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(1000);
        let bank = Arc::new(Bank::new_for_tests(&genesis_config));
        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();
        let keypair3 = Keypair::new();

        // fund: put 4 in each of 1 and 2
        assert_matches!(bank.transfer(4, &mint_keypair, &keypair1.pubkey()), Ok(_));
        assert_matches!(bank.transfer(4, &mint_keypair, &keypair2.pubkey()), Ok(_));

        // construct an Entry whose 2nd transaction would cause a lock conflict with previous entry
        let entry_1_to_mint = next_entry(
            &bank.last_blockhash(),
            1,
            vec![system_transaction::transfer(
                &keypair1,
                &mint_keypair.pubkey(),
                1,
                bank.last_blockhash(),
            )],
        );

        let entry_2_to_3_mint_to_1 = next_entry(
            &entry_1_to_mint.hash,
            1,
            vec![
                system_transaction::transfer(
                    &keypair2,
                    &keypair3.pubkey(),
                    2,
                    bank.last_blockhash(),
                ), // should be fine
                system_transaction::transfer(
                    &keypair1,
                    &mint_keypair.pubkey(),
                    2,
                    bank.last_blockhash(),
                ), // will collide
            ],
        );

        assert_eq!(
            process_entries_for_tests(
                &bank,
                vec![entry_1_to_mint, entry_2_to_3_mint_to_1],
                false,
                None,
                None,
            ),
            Ok(())
        );

        assert_eq!(bank.get_balance(&keypair1.pubkey()), 1);
        assert_eq!(bank.get_balance(&keypair2.pubkey()), 2);
        assert_eq!(bank.get_balance(&keypair3.pubkey()), 2);
    }

    // A failing transaction in the first entry makes the batch error, but
    // successful transactions still apply and all account locks are released.
    #[test]
    fn test_process_entries_2_txes_collision_and_error() {
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(1000);
        let bank = Arc::new(Bank::new_for_tests(&genesis_config));
        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();
        let keypair3 = Keypair::new();
        let keypair4 = Keypair::new();

        // fund: put 4 in each of 1 and 2
        assert_matches!(bank.transfer(4, &mint_keypair, &keypair1.pubkey()), Ok(_));
        assert_matches!(bank.transfer(4, &mint_keypair, &keypair2.pubkey()), Ok(_));
        assert_matches!(bank.transfer(4, &mint_keypair, &keypair4.pubkey()), Ok(_));

        // construct an Entry whose 2nd transaction would cause a lock conflict with previous entry
        let entry_1_to_mint = next_entry(
            &bank.last_blockhash(),
            1,
            vec![
                system_transaction::transfer(
                    &keypair1,
                    &mint_keypair.pubkey(),
                    1,
                    bank.last_blockhash(),
                ),
                system_transaction::transfer(
                    &keypair4,
                    &keypair4.pubkey(),
                    1,
                    Hash::default(), // Should cause a transaction failure with BlockhashNotFound
                ),
            ],
        );

        let entry_2_to_3_mint_to_1 = next_entry(
            &entry_1_to_mint.hash,
            1,
            vec![
                system_transaction::transfer(
                    &keypair2,
                    &keypair3.pubkey(),
                    2,
                    bank.last_blockhash(),
                ), // should be fine
                system_transaction::transfer(
                    &keypair1,
                    &mint_keypair.pubkey(),
                    2,
                    bank.last_blockhash(),
                ), // will collide
            ],
        );

        assert!(process_entries_for_tests(
            &bank,
            vec![entry_1_to_mint.clone(), entry_2_to_3_mint_to_1.clone()],
            false,
            None,
            None,
        )
        .is_err());

        // First transaction in first entry succeeded, so keypair1 lost 1 lamport
        assert_eq!(bank.get_balance(&keypair1.pubkey()), 3);
        assert_eq!(bank.get_balance(&keypair2.pubkey()), 4);

        // Check all accounts are unlocked
        let txs1 = entry_1_to_mint.transactions;
        let txs2 = entry_2_to_3_mint_to_1.transactions;
        let batch1 = bank.prepare_entry_batch(txs1).unwrap();
        for result in batch1.lock_results() {
            assert!(result.is_ok());
        }
        // txs1 and txs2 have accounts that conflict, so we must drop txs1 first
        drop(batch1);
        let batch2 = bank.prepare_entry_batch(txs2).unwrap();
        for result in batch2.lock_results() {
            assert!(result.is_ok());
        }
    }

    // A later entry that both conflicts with its predecessor and with itself
    // must abort the batch after the valid entries are applied.
    #[test]
    fn
    test_process_entries_2nd_entry_collision_with_self_and_error() {
        solana_logger::setup();
        let GenesisConfigInfo {
            genesis_config,
            mint_keypair,
            ..
        } = create_genesis_config(1000);
        let bank = Arc::new(Bank::new_for_tests(&genesis_config));
        let keypair1 = Keypair::new();
        let keypair2 = Keypair::new();
        let keypair3 = Keypair::new();

        // fund: put some money in each of 1 and 2
        assert_matches!(bank.transfer(5, &mint_keypair, &keypair1.pubkey()), Ok(_));
        assert_matches!(bank.transfer(4, &mint_keypair, &keypair2.pubkey()), Ok(_));

        // 3 entries: first has a transfer, 2nd has a conflict with 1st, 3rd has a conflict with itself
        let entry_1_to_mint = next_entry(
            &bank.last_blockhash(),
            1,
            vec![system_transaction::transfer(
                &keypair1,
                &mint_keypair.pubkey(),
                1,
                bank.last_blockhash(),
            )],
        );

        // should now be:
        // keypair1=4
        // keypair2=4
        // keypair3=0
        let entry_2_to_3_and_1_to_mint = next_entry(
            &entry_1_to_mint.hash,
            1,
            vec![
                system_transaction::transfer(
                    &keypair2,
                    &keypair3.pubkey(),
                    2,
                    bank.last_blockhash(),
                ), // should be fine
                system_transaction::transfer(
                    &keypair1,
                    &mint_keypair.pubkey(),
                    2,
                    bank.last_blockhash(),
                ), // will collide with predecessor
            ],
        );

        // should now be:
        // keypair1=2
        // keypair2=2
        // keypair3=2
        let entry_conflict_itself = next_entry(
            &entry_2_to_3_and_1_to_mint.hash,
            1,
            vec![
                system_transaction::transfer(
                    &keypair1,
                    &keypair3.pubkey(),
                    1,
                    bank.last_blockhash(),
                ),
                system_transaction::transfer(
                    &keypair1,
                    &keypair2.pubkey(),
                    1,
                    bank.last_blockhash(),
                ), // should be fine
            ],
        );

        // would now be:
        // keypair1=0
        // keypair2=3
        // keypair3=3
        assert!(process_entries_for_tests(
            &bank,
            vec![
                entry_1_to_mint,
                entry_2_to_3_and_1_to_mint,
                entry_conflict_itself,
            ],
            false,
            None,
            None,
        )
        .is_err());

        // last entry should have been aborted before par_execute_entries
        assert_eq!(bank.get_balance(&keypair1.pubkey()), 2);
        assert_eq!(bank.get_balance(&keypair2.pubkey()), 2);
        assert_eq!(bank.get_balance(&keypair3.pubkey()), 2);
    }

    // NOTE(review): this test's body continues beyond this chunk
    #[test]
    fn test_process_entries_2_entries_par()
{ let GenesisConfigInfo { genesis_config, mint_keypair, .. } = create_genesis_config(1000); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); let keypair4 = Keypair::new(); //load accounts let tx = system_transaction::transfer( &mint_keypair, &keypair1.pubkey(), 1, bank.last_blockhash(), ); assert_eq!(bank.process_transaction(&tx), Ok(())); let tx = system_transaction::transfer( &mint_keypair, &keypair2.pubkey(), 1, bank.last_blockhash(), ); assert_eq!(bank.process_transaction(&tx), Ok(())); // ensure bank can process 2 entries that do not have a common account and no tick is registered let blockhash = bank.last_blockhash(); let tx = system_transaction::transfer(&keypair1, &keypair3.pubkey(), 1, bank.last_blockhash()); let entry_1 = next_entry(&blockhash, 1, vec![tx]); let tx = system_transaction::transfer(&keypair2, &keypair4.pubkey(), 1, bank.last_blockhash()); let entry_2 = next_entry(&entry_1.hash, 1, vec![tx]); assert_eq!( process_entries_for_tests(&bank, vec![entry_1, entry_2], true, None, None), Ok(()) ); assert_eq!(bank.get_balance(&keypair3.pubkey()), 1); assert_eq!(bank.get_balance(&keypair4.pubkey()), 1); assert_eq!(bank.last_blockhash(), blockhash); } #[test] fn test_process_entry_tx_random_execution_with_error() { let GenesisConfigInfo { genesis_config, mint_keypair, .. 
} = create_genesis_config(1_000_000_000); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); const NUM_TRANSFERS_PER_ENTRY: usize = 8; const NUM_TRANSFERS: usize = NUM_TRANSFERS_PER_ENTRY * 32; // large enough to scramble locks and results let keypairs: Vec<_> = (0..NUM_TRANSFERS * 2).map(|_| Keypair::new()).collect(); // give everybody one lamport for keypair in &keypairs { bank.transfer(1, &mint_keypair, &keypair.pubkey()) .expect("funding failed"); } let mut hash = bank.last_blockhash(); let present_account_key = Keypair::new(); let present_account = AccountSharedData::new(1, 10, &Pubkey::default()); bank.store_account(&present_account_key.pubkey(), &present_account); let entries: Vec<_> = (0..NUM_TRANSFERS) .step_by(NUM_TRANSFERS_PER_ENTRY) .map(|i| { let mut transactions = (0..NUM_TRANSFERS_PER_ENTRY) .map(|j| { system_transaction::transfer( &keypairs[i + j], &keypairs[i + j + NUM_TRANSFERS].pubkey(), 1, bank.last_blockhash(), ) }) .collect::<Vec<_>>(); transactions.push(system_transaction::create_account( &mint_keypair, &present_account_key, // puts a TX error in results bank.last_blockhash(), 1, 0, &solana_sdk::pubkey::new_rand(), )); next_entry_mut(&mut hash, 0, transactions) }) .collect(); assert_eq!( process_entries_for_tests(&bank, entries, true, None, None), Ok(()) ); } #[test] fn test_process_entry_tx_random_execution_no_error() { // entropy multiplier should be big enough to provide sufficient entropy // but small enough to not take too much time while executing the test. let entropy_multiplier: usize = 25; let initial_lamports = 100; // number of accounts need to be in multiple of 4 for correct // execution of the test. let num_accounts = entropy_multiplier * 4; let GenesisConfigInfo { genesis_config, mint_keypair, .. 
} = create_genesis_config((num_accounts + 1) as u64 * initial_lamports); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let mut keypairs: Vec<Keypair> = vec![]; for _ in 0..num_accounts { let keypair = Keypair::new(); let create_account_tx = system_transaction::transfer( &mint_keypair, &keypair.pubkey(), 0, bank.last_blockhash(), ); assert_eq!(bank.process_transaction(&create_account_tx), Ok(())); assert_matches!( bank.transfer(initial_lamports, &mint_keypair, &keypair.pubkey()), Ok(_) ); keypairs.push(keypair); } let mut tx_vector: Vec<Transaction> = vec![]; for i in (0..num_accounts).step_by(4) { tx_vector.append(&mut vec![ system_transaction::transfer( &keypairs[i + 1], &keypairs[i].pubkey(), initial_lamports, bank.last_blockhash(), ), system_transaction::transfer( &keypairs[i + 3], &keypairs[i + 2].pubkey(), initial_lamports, bank.last_blockhash(), ), ]); } // Transfer lamports to each other let entry = next_entry(&bank.last_blockhash(), 1, tx_vector); assert_eq!( process_entries_for_tests(&bank, vec![entry], true, None, None), Ok(()) ); bank.squash(); // Even number keypair should have balance of 2 * initial_lamports and // odd number keypair should have balance of 0, which proves // that even in case of random order of execution, overall state remains // consistent. for (i, keypair) in keypairs.iter().enumerate() { if i % 2 == 0 { assert_eq!(bank.get_balance(&keypair.pubkey()), 2 * initial_lamports); } else { assert_eq!(bank.get_balance(&keypair.pubkey()), 0); } } } #[test] fn test_process_entries_2_entries_tick() { let GenesisConfigInfo { genesis_config, mint_keypair, .. 
} = create_genesis_config(1000); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); let keypair4 = Keypair::new(); //load accounts let tx = system_transaction::transfer( &mint_keypair, &keypair1.pubkey(), 1, bank.last_blockhash(), ); assert_eq!(bank.process_transaction(&tx), Ok(())); let tx = system_transaction::transfer( &mint_keypair, &keypair2.pubkey(), 1, bank.last_blockhash(), ); assert_eq!(bank.process_transaction(&tx), Ok(())); let blockhash = bank.last_blockhash(); while blockhash == bank.last_blockhash() { bank.register_tick(&Hash::default()); } // ensure bank can process 2 entries that do not have a common account and tick is registered let tx = system_transaction::transfer(&keypair2, &keypair3.pubkey(), 1, blockhash); let entry_1 = next_entry(&blockhash, 1, vec![tx]); let tick = next_entry(&entry_1.hash, 1, vec![]); let tx = system_transaction::transfer(&keypair1, &keypair4.pubkey(), 1, bank.last_blockhash()); let entry_2 = next_entry(&tick.hash, 1, vec![tx]); assert_eq!( process_entries_for_tests( &bank, vec![entry_1, tick, entry_2.clone()], true, None, None ), Ok(()) ); assert_eq!(bank.get_balance(&keypair3.pubkey()), 1); assert_eq!(bank.get_balance(&keypair4.pubkey()), 1); // ensure that an error is returned for an empty account (keypair2) let tx = system_transaction::transfer(&keypair2, &keypair3.pubkey(), 1, bank.last_blockhash()); let entry_3 = next_entry(&entry_2.hash, 1, vec![tx]); assert_eq!( process_entries_for_tests(&bank, vec![entry_3], true, None, None), Err(TransactionError::AccountNotFound) ); } #[test] fn test_update_transaction_statuses() { // Make sure instruction errors still update the signature cache let GenesisConfigInfo { genesis_config, mint_keypair, .. 
} = create_genesis_config(11_000); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let pubkey = solana_sdk::pubkey::new_rand(); bank.transfer(1_000, &mint_keypair, &pubkey).unwrap(); assert_eq!(bank.transaction_count(), 1); assert_eq!(bank.get_balance(&pubkey), 1_000); assert_eq!( bank.transfer(10_001, &mint_keypair, &pubkey), Err(TransactionError::InstructionError( 0, SystemError::ResultWithNegativeLamports.into(), )) ); assert_eq!( bank.transfer(10_001, &mint_keypair, &pubkey), Err(TransactionError::AlreadyProcessed) ); // Make sure other errors don't update the signature cache let tx = system_transaction::transfer(&mint_keypair, &pubkey, 1000, Hash::default()); let signature = tx.signatures[0]; // Should fail with blockhash not found assert_eq!( bank.process_transaction(&tx).map(|_| signature), Err(TransactionError::BlockhashNotFound) ); // Should fail again with blockhash not found assert_eq!( bank.process_transaction(&tx).map(|_| signature), Err(TransactionError::BlockhashNotFound) ); } #[test] fn test_update_transaction_statuses_fail() { let GenesisConfigInfo { genesis_config, mint_keypair, .. } = create_genesis_config(11_000); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let success_tx = system_transaction::transfer( &mint_keypair, &keypair1.pubkey(), 1, bank.last_blockhash(), ); let fail_tx = system_transaction::transfer( &mint_keypair, &keypair2.pubkey(), 2, bank.last_blockhash(), ); let entry_1_to_mint = next_entry( &bank.last_blockhash(), 1, vec![ success_tx, fail_tx.clone(), // will collide ], ); assert_eq!( process_entries_for_tests(&bank, vec![entry_1_to_mint], false, None, None), Err(TransactionError::AccountInUse) ); // Should not see duplicate signature error assert_eq!(bank.process_transaction(&fail_tx), Ok(())); } #[test] fn test_halt_at_slot_starting_snapshot_root() { let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(123); // Create roots at slots 0, 1 let forks = tr(0) / tr(1); let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); blockstore.add_tree( forks, false, true, genesis_config.ticks_per_slot, genesis_config.hash(), ); blockstore.set_roots(vec![0, 1].iter()).unwrap(); // Specify halting at slot 0 let opts = ProcessOptions { poh_verify: true, dev_halt_at_slot: Some(0), accounts_db_test_hash_calculation: true, ..ProcessOptions::default() }; let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, opts); // Should be able to fetch slot 0 because we specified halting at slot 0, even // if there is a greater root at slot 1. assert!(bank_forks.get(0).is_some()); } #[test] fn test_process_blockstore_from_root() { let GenesisConfigInfo { mut genesis_config, .. } = create_genesis_config(123); let ticks_per_slot = 1; genesis_config.ticks_per_slot = ticks_per_slot; let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); /* Build a blockstore in the ledger with the following fork structure: slot 0 (all ticks) | slot 1 (all ticks) | slot 2 (all ticks) | slot 3 (all ticks) -> root | slot 4 (all ticks) | slot 5 (all ticks) -> root | slot 6 (all ticks) */ let mut last_hash = blockhash; for i in 0..6 { last_hash = fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, i + 1, i, last_hash); } blockstore.set_roots(vec![3, 5].iter()).unwrap(); // Set up bank1 let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); let opts = ProcessOptions { poh_verify: true, accounts_db_test_hash_calculation: true, ..ProcessOptions::default() }; let recyclers = VerifyRecyclers::default(); process_bank_0(&bank0, &blockstore, &opts, &recyclers, None); let bank1 = Arc::new(Bank::new_from_parent(&bank0, &Pubkey::default(), 1)); confirm_full_slot( &blockstore, &bank1, &opts, &recyclers, &mut 
ConfirmationProgress::new(bank0.last_blockhash()), None, None, &mut ExecuteTimings::default(), ) .unwrap(); bank1.squash(); // Test process_blockstore_from_root() from slot 1 onwards let (accounts_package_sender, _) = unbounded(); let (bank_forks, ..) = do_process_blockstore_from_root( &blockstore, bank1, &opts, &recyclers, None, None, None, accounts_package_sender, BankFromArchiveTimings::default(), None, ) .unwrap(); assert_eq!(frozen_bank_slots(&bank_forks), vec![5, 6]); assert_eq!(bank_forks.working_bank().slot(), 6); assert_eq!(bank_forks.root(), 5); // Verify the parents of the head of the fork assert_eq!( &bank_forks[6] .parents() .iter() .map(|bank| bank.slot()) .collect::<Vec<_>>(), &[5] ); // Check that bank forks has the correct banks verify_fork_infos(&bank_forks); } /// Test that processing the blockstore is aware of incremental snapshots. When processing the /// blockstore from a root, like what happens when loading from a snapshot, there may be new /// roots that cross a full snapshot interval. In these cases, a bank snapshot must be taken, /// so that a full snapshot archive is created and available by the time the background /// services spin up. /// /// For this test, process enough roots to cross the full snapshot interval multiple times. /// Ensure afterwards that the snapshots were created. #[test] fn test_process_blockstore_from_root_with_snapshots() { solana_logger::setup(); let GenesisConfigInfo { mut genesis_config, .. 
} = create_genesis_config(123); let ticks_per_slot = 1; genesis_config.ticks_per_slot = ticks_per_slot; let (ledger_path, blockhash) = create_new_tmp_ledger_auto_delete!(&genesis_config); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); const ROOT_INTERVAL_SLOTS: Slot = 2; const FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS: Slot = ROOT_INTERVAL_SLOTS * 5; const LAST_SLOT: Slot = FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS * 4; let mut last_hash = blockhash; for i in 1..=LAST_SLOT { last_hash = fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, i, i - 1, last_hash); } let roots_to_set = (0..=LAST_SLOT) .step_by(ROOT_INTERVAL_SLOTS as usize) .collect_vec(); blockstore.set_roots(roots_to_set.iter()).unwrap(); // Set up bank1 let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); let opts = ProcessOptions { poh_verify: true, accounts_db_test_hash_calculation: true, ..ProcessOptions::default() }; let recyclers = VerifyRecyclers::default(); process_bank_0(&bank0, &blockstore, &opts, &recyclers, None); let slot_start_processing = 1; let bank = Arc::new(Bank::new_from_parent( &bank0, &Pubkey::default(), slot_start_processing, )); confirm_full_slot( &blockstore, &bank, &opts, &recyclers, &mut ConfirmationProgress::new(bank0.last_blockhash()), None, None, &mut ExecuteTimings::default(), ) .unwrap(); bank.squash(); let bank_snapshots_tempdir = TempDir::new().unwrap(); let snapshot_config = SnapshotConfig { full_snapshot_archive_interval_slots: FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, bank_snapshots_dir: bank_snapshots_tempdir.path().to_path_buf(), ..SnapshotConfig::default() }; let (accounts_package_sender, accounts_package_receiver) = unbounded(); do_process_blockstore_from_root( &blockstore, bank, &opts, &recyclers, None, None, Some(&snapshot_config), accounts_package_sender.clone(), BankFromArchiveTimings::default(), None, ) .unwrap(); // The `drop()` is necessary here in order to call `.iter()` on the channel below drop(accounts_package_sender); // Ensure all 
the AccountsPackages were created and sent to the AccountsPackageReceiver let received_accounts_package_slots = accounts_package_receiver .iter() .map(|accounts_package| accounts_package.slot) .collect::<Vec<_>>(); let expected_slots = (slot_start_processing..=LAST_SLOT) .filter(|slot| slot % FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS == 0) .collect::<Vec<_>>(); assert_eq!(received_accounts_package_slots, expected_slots); // Ensure all the bank snapshots were created let bank_snapshots = snapshot_utils::get_bank_snapshots(&bank_snapshots_tempdir); let mut bank_snapshot_slots = bank_snapshots .into_iter() .map(|bank_snapshot| bank_snapshot.slot) .collect::<Vec<_>>(); bank_snapshot_slots.sort_unstable(); assert_eq!(bank_snapshot_slots, expected_slots); } #[test] #[ignore] fn test_process_entries_stress() { // this test throws lots of rayon threads at process_entries() // finds bugs in very low-layer stuff solana_logger::setup(); let GenesisConfigInfo { genesis_config, mint_keypair, .. } = create_genesis_config(1_000_000_000); let mut bank = Arc::new(Bank::new_for_tests(&genesis_config)); const NUM_TRANSFERS_PER_ENTRY: usize = 8; const NUM_TRANSFERS: usize = NUM_TRANSFERS_PER_ENTRY * 32; let keypairs: Vec<_> = (0..NUM_TRANSFERS * 2).map(|_| Keypair::new()).collect(); // give everybody one lamport for keypair in &keypairs { bank.transfer(1, &mint_keypair, &keypair.pubkey()) .expect("funding failed"); } let present_account_key = Keypair::new(); let present_account = AccountSharedData::new(1, 10, &Pubkey::default()); bank.store_account(&present_account_key.pubkey(), &present_account); let mut i = 0; let mut hash = bank.last_blockhash(); let mut root: Option<Arc<Bank>> = None; loop { let entries: Vec<_> = (0..NUM_TRANSFERS) .step_by(NUM_TRANSFERS_PER_ENTRY) .map(|i| { next_entry_mut(&mut hash, 0, { let mut transactions = (i..i + NUM_TRANSFERS_PER_ENTRY) .map(|i| { system_transaction::transfer( &keypairs[i], &keypairs[i + NUM_TRANSFERS].pubkey(), 1, bank.last_blockhash(), ) }) 
.collect::<Vec<_>>(); transactions.push(system_transaction::create_account( &mint_keypair, &present_account_key, // puts a TX error in results bank.last_blockhash(), 100, 100, &solana_sdk::pubkey::new_rand(), )); transactions }) }) .collect(); info!("paying iteration {}", i); process_entries_for_tests(&bank, entries, true, None, None).expect("paying failed"); let entries: Vec<_> = (0..NUM_TRANSFERS) .step_by(NUM_TRANSFERS_PER_ENTRY) .map(|i| { next_entry_mut( &mut hash, 0, (i..i + NUM_TRANSFERS_PER_ENTRY) .map(|i| { system_transaction::transfer( &keypairs[i + NUM_TRANSFERS], &keypairs[i].pubkey(), 1, bank.last_blockhash(), ) }) .collect::<Vec<_>>(), ) }) .collect(); info!("refunding iteration {}", i); process_entries_for_tests(&bank, entries, true, None, None).expect("refunding failed"); // advance to next block process_entries_for_tests( &bank, (0..bank.ticks_per_slot()) .map(|_| next_entry_mut(&mut hash, 1, vec![])) .collect::<Vec<_>>(), true, None, None, ) .expect("process ticks failed"); if i % 16 == 0 { if let Some(old_root) = root { old_root.squash(); } root = Some(bank.clone()); } i += 1; bank = Arc::new(Bank::new_from_parent( &bank, &Pubkey::default(), bank.slot() + thread_rng().gen_range(1, 3), )); } } #[test] fn test_process_ledger_ticks_ordering() { let GenesisConfigInfo { genesis_config, mint_keypair, .. 
} = create_genesis_config(100); let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); let genesis_hash = genesis_config.hash(); let keypair = Keypair::new(); // Simulate a slot of virtual ticks, creates a new blockhash let mut entries = create_ticks(genesis_config.ticks_per_slot, 1, genesis_hash); // The new blockhash is going to be the hash of the last tick in the block let new_blockhash = entries.last().unwrap().hash; // Create an transaction that references the new blockhash, should still // be able to find the blockhash if we process transactions all in the same // batch let tx = system_transaction::transfer(&mint_keypair, &keypair.pubkey(), 1, new_blockhash); let entry = next_entry(&new_blockhash, 1, vec![tx]); entries.push(entry); process_entries_for_tests(&bank0, entries, true, None, None).unwrap(); assert_eq!(bank0.get_balance(&keypair.pubkey()), 1) } fn get_epoch_schedule( genesis_config: &GenesisConfig, account_paths: Vec<PathBuf>, ) -> EpochSchedule { let bank = Bank::new_with_paths_for_tests( genesis_config, account_paths, None, None, AccountSecondaryIndexes::default(), false, AccountShrinkThreshold::default(), false, ); *bank.epoch_schedule() } fn frozen_bank_slots(bank_forks: &BankForks) -> Vec<Slot> { let mut slots: Vec<_> = bank_forks.frozen_banks().keys().cloned().collect(); slots.sort_unstable(); slots } // Check that `bank_forks` contains all the ancestors and banks for each fork identified in // `bank_forks_info` fn verify_fork_infos(bank_forks: &BankForks) { for slot in frozen_bank_slots(bank_forks) { let head_bank = &bank_forks[slot]; let mut parents = head_bank.parents(); parents.push(head_bank.clone()); // Ensure the tip of each fork and all its parents are in the given bank_forks for parent in parents { let parent_bank = &bank_forks[parent.slot()]; assert_eq!(parent_bank.slot(), parent.slot()); assert!(parent_bank.is_frozen()); } } } #[test] fn test_get_first_error() { let GenesisConfigInfo { genesis_config, mint_keypair, .. 
} = create_genesis_config(1_000_000_000); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let present_account_key = Keypair::new(); let present_account = AccountSharedData::new(1, 10, &Pubkey::default()); bank.store_account(&present_account_key.pubkey(), &present_account); let keypair = Keypair::new(); // Create array of two transactions which throw different errors let account_not_found_tx = system_transaction::transfer( &keypair, &solana_sdk::pubkey::new_rand(), 42, bank.last_blockhash(), ); let account_not_found_sig = account_not_found_tx.signatures[0]; let invalid_blockhash_tx = system_transaction::transfer( &mint_keypair, &solana_sdk::pubkey::new_rand(), 42, Hash::default(), ); let txs = vec![account_not_found_tx, invalid_blockhash_tx]; let batch = bank.prepare_batch_for_tests(txs); let ( TransactionResults { fee_collection_results, .. }, _balances, ) = batch.bank().load_execute_and_commit_transactions( &batch, MAX_PROCESSING_AGE, false, false, false, &mut ExecuteTimings::default(), ); let (err, signature) = get_first_error(&batch, fee_collection_results).unwrap(); assert_eq!(err.unwrap_err(), TransactionError::AccountNotFound); assert_eq!(signature, account_not_found_sig); } #[test] fn test_replay_vote_sender() { let validator_keypairs: Vec<_> = (0..10).map(|_| ValidatorVoteKeypairs::new_rand()).collect(); let GenesisConfigInfo { genesis_config, voting_keypair: _, .. 
} = create_genesis_config_with_vote_accounts( 1_000_000_000, &validator_keypairs, vec![100; validator_keypairs.len()], ); let bank0 = Arc::new(Bank::new_for_tests(&genesis_config)); bank0.freeze(); let bank1 = Arc::new(Bank::new_from_parent( &bank0, &solana_sdk::pubkey::new_rand(), 1, )); // The new blockhash is going to be the hash of the last tick in the block let bank_1_blockhash = bank1.last_blockhash(); // Create an transaction that references the new blockhash, should still // be able to find the blockhash if we process transactions all in the same // batch let mut expected_successful_voter_pubkeys = BTreeSet::new(); let vote_txs: Vec<_> = validator_keypairs .iter() .enumerate() .map(|(i, validator_keypairs)| { if i % 3 == 0 { // These votes are correct expected_successful_voter_pubkeys .insert(validator_keypairs.vote_keypair.pubkey()); vote_transaction::new_vote_transaction( vec![0], bank0.hash(), bank_1_blockhash, &validator_keypairs.node_keypair, &validator_keypairs.vote_keypair, &validator_keypairs.vote_keypair, None, ) } else if i % 3 == 1 { // These have the wrong authorized voter vote_transaction::new_vote_transaction( vec![0], bank0.hash(), bank_1_blockhash, &validator_keypairs.node_keypair, &validator_keypairs.vote_keypair, &Keypair::new(), None, ) } else { // These have an invalid vote for non-existent bank 2 vote_transaction::new_vote_transaction( vec![bank1.slot() + 1], bank0.hash(), bank_1_blockhash, &validator_keypairs.node_keypair, &validator_keypairs.vote_keypair, &validator_keypairs.vote_keypair, None, ) } }) .collect(); let entry = next_entry(&bank_1_blockhash, 1, vote_txs); let (replay_vote_sender, replay_vote_receiver) = unbounded(); let _ = process_entries_for_tests(&bank1, vec![entry], true, None, Some(&replay_vote_sender)); let successes: BTreeSet<Pubkey> = replay_vote_receiver .try_iter() .map(|(vote_pubkey, _, _)| vote_pubkey) .collect(); assert_eq!(successes, expected_successful_voter_pubkeys); } fn make_slot_with_vote_tx( 
blockstore: &Blockstore, ticks_per_slot: u64, tx_landed_slot: Slot, parent_slot: Slot, parent_blockhash: &Hash, vote_tx: Transaction, slot_leader_keypair: &Arc<Keypair>, ) { // Add votes to `last_slot` so that `root` will be confirmed let vote_entry = next_entry(parent_blockhash, 1, vec![vote_tx]); let mut entries = create_ticks(ticks_per_slot, 0, vote_entry.hash); entries.insert(0, vote_entry); blockstore .write_entries( tx_landed_slot, 0, 0, ticks_per_slot, Some(parent_slot), true, slot_leader_keypair, entries, 0, ) .unwrap(); } fn run_test_process_blockstore_with_supermajority_root(blockstore_root: Option<Slot>) { solana_logger::setup(); /* Build fork structure: slot 0 | slot 1 <- (blockstore root) / \ slot 2 | | | slot 4 | slot 5 | `expected_root_slot` / \ ... minor fork / `last_slot` | `really_last_slot` */ let starting_fork_slot = 5; let mut main_fork = tr(starting_fork_slot); let mut main_fork_ref = main_fork.root_mut().get_mut(); // Make enough slots to make a root slot > blockstore_root let expected_root_slot = starting_fork_slot + blockstore_root.unwrap_or(0); let really_expected_root_slot = expected_root_slot + 1; let last_main_fork_slot = expected_root_slot + MAX_LOCKOUT_HISTORY as u64 + 1; let really_last_main_fork_slot = last_main_fork_slot + 1; // Make `minor_fork` let last_minor_fork_slot = really_last_main_fork_slot + 1; let minor_fork = tr(last_minor_fork_slot); // Make 'main_fork` for slot in starting_fork_slot + 1..last_main_fork_slot { if slot - 1 == expected_root_slot { main_fork_ref.push_front(minor_fork.clone()); } main_fork_ref.push_front(tr(slot)); main_fork_ref = main_fork_ref.front_mut().unwrap().get_mut(); } let forks = tr(0) / (tr(1) / (tr(2) / (tr(4))) / main_fork); let validator_keypairs = ValidatorVoteKeypairs::new_rand(); let GenesisConfigInfo { genesis_config, .. 
} = genesis_utils::create_genesis_config_with_vote_accounts( 10_000, &[&validator_keypairs], vec![100], ); let ticks_per_slot = genesis_config.ticks_per_slot(); let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); blockstore.add_tree(forks, false, true, ticks_per_slot, genesis_config.hash()); if let Some(blockstore_root) = blockstore_root { blockstore .set_roots(std::iter::once(&blockstore_root)) .unwrap(); } let opts = ProcessOptions { poh_verify: true, accounts_db_test_hash_calculation: true, ..ProcessOptions::default() }; let (bank_forks, ..) = test_process_blockstore(&genesis_config, &blockstore, opts.clone()); // prepare to add votes let last_vote_bank_hash = bank_forks.get(last_main_fork_slot - 1).unwrap().hash(); let last_vote_blockhash = bank_forks .get(last_main_fork_slot - 1) .unwrap() .last_blockhash(); let slots: Vec<_> = (expected_root_slot..last_main_fork_slot).collect(); let vote_tx = vote_transaction::new_vote_transaction( slots, last_vote_bank_hash, last_vote_blockhash, &validator_keypairs.node_keypair, &validator_keypairs.vote_keypair, &validator_keypairs.vote_keypair, None, ); // Add votes to `last_slot` so that `root` will be confirmed let leader_keypair = Arc::new(validator_keypairs.node_keypair); make_slot_with_vote_tx( &blockstore, ticks_per_slot, last_main_fork_slot, last_main_fork_slot - 1, &last_vote_blockhash, vote_tx, &leader_keypair, ); let (bank_forks, ..) 
= test_process_blockstore(&genesis_config, &blockstore, opts.clone()); assert_eq!(bank_forks.root(), expected_root_slot); assert_eq!( bank_forks.frozen_banks().len() as u64, last_minor_fork_slot - really_expected_root_slot + 1 ); // Minor fork at `last_main_fork_slot + 1` was above the `expected_root_slot` // so should not have been purged // // Fork at slot 2 was purged because it was below the `expected_root_slot` for slot in 0..=last_minor_fork_slot { // this slot will be created below if slot == really_last_main_fork_slot { continue; } if slot >= expected_root_slot { let bank = bank_forks.get(slot).unwrap(); assert_eq!(bank.slot(), slot); assert!(bank.is_frozen()); } else { assert!(bank_forks.get(slot).is_none()); } } // really prepare to add votes let last_vote_bank_hash = bank_forks.get(last_main_fork_slot).unwrap().hash(); let last_vote_blockhash = bank_forks .get(last_main_fork_slot) .unwrap() .last_blockhash(); let slots: Vec<_> = vec![last_main_fork_slot]; let vote_tx = vote_transaction::new_vote_transaction( slots, last_vote_bank_hash, last_vote_blockhash, &leader_keypair, &validator_keypairs.vote_keypair, &validator_keypairs.vote_keypair, None, ); // Add votes to `really_last_slot` so that `root` will be confirmed again make_slot_with_vote_tx( &blockstore, ticks_per_slot, really_last_main_fork_slot, last_main_fork_slot, &last_vote_blockhash, vote_tx, &leader_keypair, ); let (bank_forks, ..) 
= test_process_blockstore(&genesis_config, &blockstore, opts); assert_eq!(bank_forks.root(), really_expected_root_slot); } #[test] fn test_process_blockstore_with_supermajority_root_without_blockstore_root() { run_test_process_blockstore_with_supermajority_root(None); } #[test] fn test_process_blockstore_with_supermajority_root_with_blockstore_root() { run_test_process_blockstore_with_supermajority_root(Some(1)) } #[test] #[allow(clippy::field_reassign_with_default)] fn test_supermajority_root_from_vote_accounts() { let convert_to_vote_accounts = |roots_stakes: Vec<(Slot, u64)>| -> HashMap<Pubkey, (u64, VoteAccount)> { roots_stakes .into_iter() .map(|(root, stake)| { let mut vote_state = VoteState::default(); vote_state.root_slot = Some(root); let mut vote_account = AccountSharedData::new( 1, VoteState::size_of(), &solana_vote_program::id(), ); let versioned = VoteStateVersions::new_current(vote_state); VoteState::serialize(&versioned, vote_account.data_as_mut_slice()).unwrap(); ( solana_sdk::pubkey::new_rand(), (stake, VoteAccount::from(vote_account)), ) }) .collect() }; let total_stake = 10; let slot = 100; // Supermajority root should be None assert!( supermajority_root_from_vote_accounts(slot, total_stake, &HashMap::default()).is_none() ); // Supermajority root should be None let roots_stakes = vec![(8, 1), (3, 1), (4, 1), (8, 1)]; let accounts = convert_to_vote_accounts(roots_stakes); assert!(supermajority_root_from_vote_accounts(slot, total_stake, &accounts).is_none()); // Supermajority root should be 4, has 7/10 of the stake let roots_stakes = vec![(8, 1), (3, 1), (4, 1), (8, 5)]; let accounts = convert_to_vote_accounts(roots_stakes); assert_eq!( supermajority_root_from_vote_accounts(slot, total_stake, &accounts).unwrap(), 4 ); // Supermajority root should be 8, it has 7/10 of the stake let roots_stakes = vec![(8, 1), (3, 1), (4, 1), (8, 6)]; let accounts = convert_to_vote_accounts(roots_stakes); assert_eq!( supermajority_root_from_vote_accounts(slot, 
total_stake, &accounts).unwrap(), 8 ); } }
35.488494
167
0.558206
f56983c14b1bc88e560746d0b7c483bea01f1a67
975
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // force-host #![feature(plugin_registrar)] #![feature(box_syntax, rustc_private)] extern crate rustc; extern crate rustc_plugin; use std::any::Any; use std::cell::RefCell; use rustc_plugin::Registry; struct Foo { foo: isize } impl Drop for Foo { fn drop(&mut self) {} } #[plugin_registrar] pub fn registrar(_: &mut Registry) { thread_local!(static FOO: RefCell<Option<Box<Any+Send>>> = RefCell::new(None)); FOO.with(|s| *s.borrow_mut() = Some(box Foo { foo: 10 } as Box<Any+Send>)); }
27.083333
83
0.708718
6980a9f392343693feaca9275a3485073167e468
25,604
use super::*; use crate::{ constants::{aggregations::*, output_fields::*}, schema::{IntoArc, ObjectTypeStrongRef, OutputType, OutputTypeRef, ScalarType}, CoreError, DatabaseEnumType, EnumType, OutputFieldRef, QueryResult, RecordAggregations, RecordSelection, }; use bigdecimal::ToPrimitive; use connector::{AggregationResult, RelAggregationResult, RelAggregationRow}; use indexmap::IndexMap; use itertools::Itertools; use prisma_models::{CompositeFieldRef, Field, PrismaValue, SelectionResult}; use std::{borrow::Borrow, collections::HashMap}; /// A grouping of items to their parent record. /// The item implicitly holds the information of the type of item contained. /// E.g., if the output type of a field designates a single object, the item will be /// Item::Map(map), if it's a list, Item::List(list), etc. (hence "checked") type CheckedItemsWithParents = IndexMap<Option<SelectionResult>, Item>; /// A grouping of items to their parent record. /// As opposed to the checked mapping, this map isn't holding final information about /// the contained items, i.e. the Items are all unchecked. type UncheckedItemsWithParents = IndexMap<Option<SelectionResult>, Vec<Item>>; /// The query validation makes sure that the output selection already has the correct shape. /// This means that we can make the following assumptions: /// - Objects don't need to check required fields. /// - Objects don't need to check extra fields - just pick the selected ones and ignore the rest. /// /// The output validation has to make sure that returned values: /// - Are of the correct type. /// - Are nullable if not present. /// /// The is_list and is_optional flags dictate how object checks are done. 
/// // todo more here
///
/// Returns a map of pairs of (parent ID, response)
#[tracing::instrument(skip(result, field, is_list))]
pub fn serialize_internal(
    result: QueryResult,
    field: &OutputFieldRef,
    is_list: bool,
) -> crate::Result<CheckedItemsWithParents> {
    // Dispatch on the variant of the query result; record selections and
    // aggregations have dedicated serializers, `Count` is handled inline.
    match result {
        QueryResult::RecordSelection(rs) => serialize_record_selection(*rs, field, &field.field_type, is_list),
        QueryResult::RecordAggregations(ras) => serialize_aggregations(field, ras),
        QueryResult::Count(c) => {
            // Todo needs a real implementation or needs to move to RecordAggregation
            let mut map: Map = IndexMap::with_capacity(1);
            let mut result = CheckedItemsWithParents::new();

            map.insert(AFFECTED_COUNT.into(), Item::Value(PrismaValue::Int(c as i64)));
            // Count results have no parent record, hence the `None` key.
            result.insert(None, Item::Map(map));

            Ok(result)
        }
        QueryResult::Json(_) => unimplemented!(),
        QueryResult::Id(_) => unimplemented!(),
        QueryResult::Unit => unimplemented!(),
    }
}

/// Serializes `RecordAggregations` into the shape of `output_field`.
///
/// Each aggregation row is first flattened into a `"_<op>_<field>"`-keyed map,
/// then reassembled in the order recorded in `selection_order`.
#[tracing::instrument(skip(output_field, record_aggregations))]
fn serialize_aggregations(
    output_field: &OutputFieldRef,
    record_aggregations: RecordAggregations,
) -> crate::Result<CheckedItemsWithParents> {
    let ordering = record_aggregations.selection_order;
    let aggregate_object_type = extract_aggregate_object_type(output_field.field_type.borrow());
    let mut results = vec![];

    for row in record_aggregations.results {
        // Flattened view of the row: "_<op>_<field>" (or "_count__all") -> Item.
        let mut flattened = HashMap::with_capacity(ordering.len());

        for result in row {
            match result {
                AggregationResult::Field(field, value) => {
                    let output_field = aggregate_object_type.find_field(&field.name).unwrap();
                    flattened.insert(field.name.clone(), serialize_scalar(&output_field, value)?);
                }

                AggregationResult::Count(field, count) => {
                    if let Some(f) = field {
                        flattened.insert(format!("_count_{}", &f.name), Item::Value(count));
                    } else {
                        // Count over all records rather than a specific field.
                        flattened.insert("_count__all".to_owned(), Item::Value(count));
                    }
                }

                AggregationResult::Average(field, value) => {
                    let output_field =
                        find_nested_aggregate_output_field(&aggregate_object_type, UNDERSCORE_AVG, &field.name);
                    flattened.insert(format!("_avg_{}", &field.name), serialize_scalar(&output_field, value)?);
                }

                AggregationResult::Sum(field, value) => {
                    let output_field =
                        find_nested_aggregate_output_field(&aggregate_object_type, UNDERSCORE_SUM, &field.name);
                    flattened.insert(format!("_sum_{}", &field.name), serialize_scalar(&output_field, value)?);
                }

                AggregationResult::Min(field, value) => {
                    let output_field =
                        find_nested_aggregate_output_field(&aggregate_object_type, UNDERSCORE_MIN, &field.name);
                    flattened.insert(
                        format!("_min_{}", &field.name),
                        // Min/max over non-numeric fields may come back as 0; coerce to Null.
                        serialize_scalar(&output_field, coerce_non_numeric(value, &output_field.field_type))?,
                    );
                }

                AggregationResult::Max(field, value) => {
                    let output_field =
                        find_nested_aggregate_output_field(&aggregate_object_type, UNDERSCORE_MAX, &field.name);
                    flattened.insert(
                        format!("_max_{}", &field.name),
                        serialize_scalar(&output_field, coerce_non_numeric(value, &output_field.field_type))?,
                    );
                }
            }
        }

        // Reorder fields based on the original query selection.
        // Temporary: The original selection may be done with _ or no underscore (deprecated).
        let mut inner_map: Map = IndexMap::with_capacity(ordering.len());
        for (query, field_order) in ordering.iter() {
            if let Some(order) = field_order {
                // Nested selection (e.g. `_avg { field }`): rebuild the nested map in order.
                let mut nested_map = Map::new();

                for field in order {
                    // Try the underscored key first, then the deprecated prefixed form.
                    let item = flattened
                        .remove(&format!("{}_{}", query, field))
                        .or_else(|| flattened.remove(&format!("_{}_{}", query, field)))
                        .unwrap();

                    nested_map.insert(field.clone(), item);
                }

                inner_map.insert(query.clone(), Item::Map(nested_map));
            } else {
                let item = flattened
                    .remove(&query.clone())
                    .or_else(|| flattened.remove(&format!("_{}", query)))
                    .unwrap();

                inner_map.insert(query.clone(), item);
            }
        }

        results.push(Item::Map(inner_map));
    }

    // Wrap the rows according to the declared output type: a list keeps all
    // rows, a plain object keeps only the last one.
    let mut envelope = CheckedItemsWithParents::new();

    match output_field.field_type.borrow() {
        OutputType::List(_) => {
            envelope.insert(None, Item::List(results.into()));
        }
        OutputType::Object(_) => {
            if let Some(item) = results.pop() {
                envelope.insert(None, item);
            };
        }
        _ => unreachable!(),
    };

    Ok(envelope)
}

/// Merges one row of relational aggregations (currently only `_count`) into
/// `map`, creating the `_count` sub-map on first use.
fn write_rel_aggregation_row(row: &RelAggregationRow, map: &mut HashMap<String, Item>) {
    for result in row.iter() {
        match result {
            RelAggregationResult::Count(rf, count) => match map.get_mut(UNDERSCORE_COUNT) {
                Some(item) => match item {
                    Item::Map(inner_map) => inner_map.insert(rf.name.clone(), Item::Value(count.clone())),
                    _ => unreachable!(),
                },
                None => {
                    let mut inner_map: Map = Map::new();
                    inner_map.insert(rf.name.clone(), Item::Value(count.clone()));
                    map.insert(UNDERSCORE_COUNT.to_owned(), Item::Map(inner_map))
                }
            },
        };
    }
}

/// Unwraps list types recursively until the underlying aggregate object type
/// is reached.
fn extract_aggregate_object_type(output_type: &OutputType) -> ObjectTypeStrongRef {
    match output_type {
        OutputType::Object(obj) => obj.into_arc(),
        OutputType::List(inner) => extract_aggregate_object_type(inner),
        _ => unreachable!("Aggregate output must be a list or an object."),
    }
}

// Workaround until we streamline serialization.
fn find_nested_aggregate_output_field( object_type: &ObjectTypeStrongRef, nested_obj_name: &str, nested_field_name: &str, ) -> OutputFieldRef { let nested_field = object_type.find_field(nested_obj_name).unwrap(); let nested_object_type = match nested_field.field_type.borrow() { OutputType::Object(obj) => obj.into_arc(), _ => unreachable!("{} output must be an object.", nested_obj_name), }; nested_object_type.find_field(nested_field_name).unwrap() } fn coerce_non_numeric(value: PrismaValue, output: &OutputType) -> PrismaValue { match (value, output.borrow()) { (PrismaValue::Int(x), OutputType::Scalar(ScalarType::String)) if x == 0 => PrismaValue::Null, (x, _) => x, } } #[tracing::instrument(skip(record_selection, field, typ, is_list))] fn serialize_record_selection( record_selection: RecordSelection, field: &OutputFieldRef, typ: &OutputTypeRef, // We additionally pass the type to allow recursing into nested type definitions of a field. is_list: bool, ) -> crate::Result<CheckedItemsWithParents> { let name = record_selection.name.clone(); match typ.borrow() { OutputType::List(inner) => serialize_record_selection(record_selection, field, inner, true), OutputType::Object(obj) => { let result = serialize_objects(record_selection, obj.into_arc())?; let is_optional = field.is_nullable; // Items will be ref'ed on the top level to allow cheap clones in nested scenarios. 
match (is_list, is_optional) { // List(Opt(_)) | List(_) (true, opt) => { result .into_iter() .map(|(parent, items)| { if !opt { // Check that all items are non-null if items.iter().any(|item| matches!(item, Item::Value(PrismaValue::Null))) { return Err(CoreError::null_serialization_error(&name)); } } Ok((parent, Item::Ref(ItemRef::new(Item::list(items))))) }) .collect() } // Opt(_) (false, opt) => { result .into_iter() .map(|(parent, mut items)| { // As it's not a list, we require a single result if items.len() > 1 { items.reverse(); let first = items.pop().unwrap(); // Simple return the first record in the list. Ok((parent, Item::Ref(ItemRef::new(first)))) } else if items.is_empty() && opt { Ok((parent, Item::Ref(ItemRef::new(Item::Value(PrismaValue::Null))))) } else if items.is_empty() && opt { Err(CoreError::null_serialization_error(&name)) } else { Ok((parent, Item::Ref(ItemRef::new(items.pop().unwrap())))) } }) .collect() } } } _ => unreachable!(), // We always serialize record selections into objects or lists on the top levels. Scalars and enums are handled separately. } } /// Serializes the given result into objects of given type. /// Doesn't validate the shape of the result set ("unchecked" result). /// Returns a vector of serialized objects (as Item::Map), grouped into a map by parent, if present. #[tracing::instrument(skip(result, typ))] fn serialize_objects( mut result: RecordSelection, typ: ObjectTypeStrongRef, ) -> crate::Result<UncheckedItemsWithParents> { // The way our query execution works, we only need to look at nested + lists if we hit an object. // Move nested out of result for separate processing. 
let nested = std::mem::take(&mut result.nested); // { <nested field name> -> { parent ID -> items } } let mut nested_mapping: HashMap<String, CheckedItemsWithParents> = process_nested_results(nested, &typ)?; // We need the Arcs to solve the issue where we have multiple parents claiming the same data (we want to move the data out of the nested structure // to prevent expensive copying during serialization). // Finally, serialize the objects based on the selected fields. let mut object_mapping = UncheckedItemsWithParents::with_capacity(result.scalars.records.len()); let db_field_names = result.scalars.field_names; let model = result.model; let fields: Vec<_> = db_field_names .iter() .filter_map(|f| model.fields().find_from_non_virtual_by_db_name(f).ok()) .collect(); // Write all fields, nested and list fields unordered into a map, afterwards order all into the final order. // If nothing is written to the object, write null instead. for (r_index, record) in result.scalars.records.into_iter().enumerate() { let record_id = Some(record.extract_selection_result(&db_field_names, &model.primary_identifier())?); if !object_mapping.contains_key(&record.parent_id) { object_mapping.insert(record.parent_id.clone(), Vec::new()); } // Write scalars and composites, but skip objects (relations) and scalar lists, which while they are in the selection, are handled separately. 
let values = record.values; let mut object = HashMap::with_capacity(values.len()); for (val, field) in values.into_iter().zip(fields.iter()) { let out_field = typ.find_field(field.name()).unwrap(); match field { Field::Composite(cf) => { object.insert(field.name().to_owned(), serialize_composite(cf, &out_field, val)?); } _ if !out_field.field_type.is_object() => { object.insert(field.name().to_owned(), serialize_scalar(&out_field, val)?); } _ => (), } } // Write nested results write_nested_items(&record_id, &mut nested_mapping, &mut object, &typ)?; let aggr_row = result.aggregation_rows.as_ref().map(|rows| rows.get(r_index).unwrap()); if let Some(aggr_row) = aggr_row { write_rel_aggregation_row(aggr_row, &mut object); } let mut aggr_fields = aggr_row .map(|row| { row.iter() .map(|aggr_result| match aggr_result { RelAggregationResult::Count(_, _) => UNDERSCORE_COUNT.to_owned(), }) .unique() .collect() }) .unwrap_or(vec![]); let mut all_fields = result.fields.clone(); all_fields.append(&mut aggr_fields); let map = all_fields .iter() .fold(Map::with_capacity(all_fields.len()), |mut acc, field_name| { acc.insert(field_name.to_owned(), object.remove(field_name).unwrap()); acc }); // TODO: Find out how to easily determine when a result is null. // If the object is null or completely empty, coerce into null instead. let result = Item::Map(map); // let result = if result.is_null_or_empty() { // Item::Value(PrismaValue::Null) // } else { // result // }; object_mapping.get_mut(&record.parent_id).unwrap().push(result); } Ok(object_mapping) } /// Unwraps are safe due to query validation. 
/// Writes the nested (relation) items belonging to `record_id` into `into`,
/// substituting a type-appropriate default (empty list / Null) when a nested
/// field has no result for this parent.
#[tracing::instrument(skip(record_id, items_with_parent, into, enclosing_type))]
fn write_nested_items(
    record_id: &Option<SelectionResult>,
    items_with_parent: &mut HashMap<String, CheckedItemsWithParents>,
    into: &mut HashMap<String, Item>,
    enclosing_type: &ObjectTypeStrongRef,
) -> crate::Result<()> {
    for (field_name, inner) in items_with_parent.iter_mut() {
        let val = inner.get(record_id);

        // The value must be a reference (or None - handle default), everything else is an error in the serialization logic.
        match val {
            Some(Item::Ref(ref r)) => {
                into.insert(field_name.to_owned(), Item::Ref(ItemRef::clone(r)));
            }

            None => {
                let field = enclosing_type.find_field(field_name).unwrap();
                let default = match field.field_type.borrow() {
                    OutputType::List(_) => Item::list(Vec::new()),
                    _ if field.is_nullable => Item::Value(PrismaValue::Null),
                    _ => return Err(CoreError::null_serialization_error(field_name)),
                };

                into.insert(field_name.to_owned(), Item::Ref(ItemRef::new(default)));
            }

            _ => panic!("Invariant error: Nested items have to be wrapped as a Item::Ref."),
        };
    }

    Ok(())
}

/// Processes nested results into a more ergonomic structure of { <nested field name> -> { parent ID -> item (list, map, ...) } }.
#[tracing::instrument(skip(nested, enclosing_type))]
fn process_nested_results(
    nested: Vec<QueryResult>,
    enclosing_type: &ObjectTypeStrongRef,
) -> crate::Result<HashMap<String, CheckedItemsWithParents>> {
    // For each nested selected field we need to map the parents to their items.
    let mut nested_mapping = HashMap::with_capacity(nested.len());

    // Parse and validate all nested objects with their respective output type.
    // Unwraps are safe due to query validation.
    for nested_result in nested {
        // todo Workaround, tb changed with flat reads.
        // Only RecordSelection variants are expected here; anything else is ignored.
        if let QueryResult::RecordSelection(ref rs) = nested_result {
            let name = rs.name.clone();
            let field = enclosing_type.find_field(&name).unwrap();
            let result = serialize_internal(nested_result, &field, false)?;

            nested_mapping.insert(name, result);
        }
    }

    Ok(nested_mapping)
}

// Problem: order of selections
/// Serializes a composite field value (`Null`, list of composites, or object)
/// into an `Item`, recursing for nested composites.
fn serialize_composite(cf: &CompositeFieldRef, out_field: &OutputFieldRef, value: PrismaValue) -> crate::Result<Item> {
    match value {
        PrismaValue::Null if !cf.is_required() => Ok(Item::Value(PrismaValue::Null)),

        PrismaValue::List(values) if cf.is_list() => {
            let values = values
                .into_iter()
                .map(|value| serialize_composite(cf, out_field, value))
                .collect::<crate::Result<Vec<_>>>();

            Ok(Item::List(values?.into()))
        }

        PrismaValue::Object(pairs) => {
            let mut map = Map::new();
            let object_type = out_field
                .field_type
                .as_object_type()
                .expect("Composite output field is not an object.");

            let composite_type = &cf.typ;

            for (field_name, value) in pairs {
                // The field on the composite type.
                // This will cause clashes if one field has an @map("name") and the other field is named "field" directly.
                let inner_field = composite_type
                    .find_field(&field_name)
                    .or(composite_type.find_field_by_db_name(&field_name))
                    .unwrap();

                // The field on the output object type. Used for the actual serialization process.
                let inner_out_field = object_type.find_field(inner_field.name()).unwrap();

                match inner_field {
                    Field::Composite(cf) => {
                        map.insert(
                            inner_field.name().to_owned(),
                            serialize_composite(cf, &inner_out_field, value)?,
                        );
                    }

                    _ if !inner_out_field.field_type.is_object() => {
                        map.insert(
                            inner_field.name().to_owned(),
                            serialize_scalar(&inner_out_field, value)?,
                        );
                    }

                    _ => (),
                }
            }

            Ok(Item::Map(map))
        }

        val => Err(CoreError::SerializationError(format!(
            "Attempted to serialize '{}' with non-composite compatible type '{:?}' for field {}.",
            val, cf.typ.name, cf.name
        ))),
    }
}

/// Serializes a scalar (or enum, or scalar list) value against the field's
/// declared output type. Nullable fields accept `Null`; everything else must
/// match a `(value, type)` arm below or an error is returned.
fn serialize_scalar(field: &OutputFieldRef, value: PrismaValue) -> crate::Result<Item> {
    match (&value, field.field_type.as_ref()) {
        (PrismaValue::Null, _) if field.is_nullable => Ok(Item::Value(PrismaValue::Null)),
        (_, OutputType::Enum(et)) => match et.borrow() {
            EnumType::Database(ref db) => convert_enum(value, db),
            _ => unreachable!(),
        },
        (PrismaValue::List(_), OutputType::List(arc_type)) => match arc_type.as_ref() {
            OutputType::Scalar(subtype) => {
                let items = unwrap_prisma_value(value)
                    .into_iter()
                    .map(|v| convert_prisma_value(field, v, subtype))
                    .map(|pv| pv.map(Item::Value))
                    .collect::<Result<Vec<Item>, CoreError>>()?;
                Ok(Item::list(items))
            }
            OutputType::Enum(et) => {
                let items = unwrap_prisma_value(value)
                    .into_iter()
                    .map(|v| match et.borrow() {
                        EnumType::Database(ref dbt) => convert_enum(v, dbt),
                        _ => unreachable!(),
                    })
                    .collect::<Result<Vec<Item>, CoreError>>()?;
                Ok(Item::list(items))
            }
            _ => Err(CoreError::SerializationError(format!(
                "Attempted to serialize scalar list which contained non-scalar items of type '{:?}' for field {}.",
                arc_type, field.name
            ))),
        },
        (_, OutputType::Scalar(st)) => Ok(Item::Value(convert_prisma_value(field, value, st)?)),
        (pv, ot) => Err(CoreError::SerializationError(format!(
            "Attempted to serialize scalar '{}' with non-scalar compatible type '{:?}' for field {}.",
            pv, ot, field.name
        ))),
    }
}

/// Coerces a `PrismaValue` into the representation required by the target
/// `ScalarType`; unsupported `(type, value)` pairs become a field-conversion
/// error.
fn convert_prisma_value(field: &OutputFieldRef, value: PrismaValue, st:
&ScalarType) -> Result<PrismaValue, CoreError> {
    let item_value = match (st, value) {
        (ScalarType::String, PrismaValue::String(s)) => PrismaValue::String(s),

        (ScalarType::Json, PrismaValue::String(s)) => PrismaValue::Json(s),
        (ScalarType::Json, PrismaValue::Json(s)) => PrismaValue::Json(s),

        (ScalarType::Int, PrismaValue::Float(f)) => PrismaValue::Int(f.to_i64().unwrap()),
        (ScalarType::Int, PrismaValue::Int(i)) => PrismaValue::Int(i),

        (ScalarType::Float, PrismaValue::Float(f)) => PrismaValue::Float(f),
        // NOTE(review): an Int arriving for a Float-typed field is returned as
        // PrismaValue::Int, not Float — looks inconsistent with the arm above;
        // confirm downstream consumers accept Int here before changing.
        (ScalarType::Float, PrismaValue::Int(i)) => {
            PrismaValue::Int(i.to_i64().expect("Unable to convert BigDecimal to i64."))
        }

        (ScalarType::Decimal, PrismaValue::Int(i)) => PrismaValue::String(i.to_string()),
        (ScalarType::Decimal, PrismaValue::Float(f)) => PrismaValue::String(f.to_string()),

        (ScalarType::BigInt, PrismaValue::BigInt(i)) => PrismaValue::BigInt(i),
        (ScalarType::BigInt, PrismaValue::Int(i)) => PrismaValue::BigInt(i),
        (ScalarType::BigInt, PrismaValue::Float(f)) => PrismaValue::BigInt(f.to_i64().unwrap()),

        (ScalarType::Boolean, PrismaValue::Boolean(b)) => PrismaValue::Boolean(b),
        (ScalarType::Int, PrismaValue::Boolean(b)) => PrismaValue::Int(b as i64),
        (ScalarType::DateTime, PrismaValue::DateTime(dt)) => PrismaValue::DateTime(dt),
        (ScalarType::UUID, PrismaValue::Uuid(u)) => PrismaValue::Uuid(u),
        (ScalarType::Bytes, PrismaValue::Bytes(b)) => PrismaValue::Bytes(b),
        (ScalarType::Xml, PrismaValue::Xml(b)) => PrismaValue::Xml(b),
        (ScalarType::String, PrismaValue::Xml(s)) => PrismaValue::String(s),

        (st, pv) => {
            return Err(crate::FieldConversionError::create(
                field.name.clone(),
                format!("{:?}", st),
                format!("{}", pv),
            ))
        }
    };

    Ok(item_value)
}

/// Maps a string/enum `PrismaValue` through the database enum's output
/// mapping; unknown values and non-string values are serialization errors.
fn convert_enum(value: PrismaValue, dbt: &DatabaseEnumType) -> Result<Item, CoreError> {
    match value {
        PrismaValue::String(s) | PrismaValue::Enum(s) => match dbt.map_output_value(&s) {
            Some(inum) => Ok(Item::Value(inum)),
            None => Err(CoreError::SerializationError(format!(
                "Value '{}' not found in enum '{}'",
                s, dbt.name
            ))),
        },

        val => Err(CoreError::SerializationError(format!(
            "Attempted to serialize non-enum-compatible value '{}' for enum '{}'",
            val, dbt.name
        ))),
    }
}

/// Extracts the inner vector of a `PrismaValue::List`; panics on any other
/// variant (callers guarantee a list via the match in `serialize_scalar`).
fn unwrap_prisma_value(pv: PrismaValue) -> Vec<PrismaValue> {
    match pv {
        PrismaValue::List(l) => l,
        _ => panic!("Invariant error: Called unwrap list value on non-list."),
    }
}
41.97377
152
0.570809
e9258d581956ed327028be21918cd2f5bf87bd29
5,186
// Copyright (c) 2018-2021  Brendan Molloy <[email protected]>,
//                          Ilya Solovyiov <[email protected]>,
//                          Kai Ren <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! [`Writer`]-wrapper for transforming [`Skipped`] [`Step`]s into [`Failed`].
//!
//! [`Failed`]: event::Step::Failed
//! [`Skipped`]: event::Step::Skipped
//! [`Step`]: gherkin::Step

use std::sync::Arc;

use async_trait::async_trait;
use derive_more::Deref;

use crate::{event, parser, writer, Event, World, Writer};

/// [`Writer`]-wrapper for transforming [`Skipped`] [`Step`]s into [`Failed`].
///
/// [`Failed`]: event::Step::Failed
/// [`Skipped`]: event::Step::Skipped
/// [`Step`]: gherkin::Step
#[derive(Debug, Deref)]
pub struct FailOnSkipped<W, F = SkipFn> {
    /// Original [`Writer`] to pass transformed event into.
    #[deref]
    pub writer: W,

    /// [`Fn`] to determine whether [`Skipped`] test should be considered as
    /// [`Failed`] or not.
    ///
    /// [`Failed`]: event::Step::Failed
    /// [`Skipped`]: event::Step::Skipped
    should_fail: F,
}

/// Alias for a [`fn`] used to determine whether [`Skipped`] test should be
/// considered as [`Failed`] or not.
///
/// [`Failed`]: event::Step::Failed
/// [`Skipped`]: event::Step::Skipped
pub type SkipFn =
    fn(&gherkin::Feature, Option<&gherkin::Rule>, &gherkin::Scenario) -> bool;

#[async_trait(?Send)]
impl<W, Wr, F> Writer<W> for FailOnSkipped<Wr, F>
where
    W: World,
    F: Fn(
        &gherkin::Feature,
        Option<&gherkin::Rule>,
        &gherkin::Scenario,
    ) -> bool,
    Wr: for<'val> writer::Arbitrary<'val, W, String>,
{
    type Cli = Wr::Cli;

    async fn handle_event(
        &mut self,
        event: parser::Result<Event<event::Cucumber<W>>>,
        cli: &Self::Cli,
    ) {
        use event::{
            Cucumber, Feature, Rule, Scenario, Step,
            StepError::Panic,
        };

        // Rewrites a `Step::Skipped` event into `Step::Failed` when the
        // `should_fail` predicate says skipping is not allowed; otherwise
        // leaves it skipped.
        let map_failed = |f: Arc<_>, r: Option<Arc<_>>, sc: Arc<_>, st: _| {
            let ev = if (self.should_fail)(&f, r.as_deref(), &sc) {
                Step::Failed(None, None, Panic(Arc::new("not allowed to skip")))
            } else {
                Step::Skipped
            };

            Cucumber::scenario(f, r, sc, Scenario::Step(st, ev))
        };

        // Only `Step::Skipped` events (inside or outside a `Rule`) are
        // remapped; every other event passes through untouched.
        let event = event.map(|outer| {
            outer.map(|ev| match ev {
                Cucumber::Feature(
                    f,
                    Feature::Rule(
                        r,
                        Rule::Scenario(sc, Scenario::Step(st, Step::Skipped)),
                    ),
                ) => map_failed(f, Some(r), sc, st),
                Cucumber::Feature(
                    f,
                    Feature::Scenario(sc, Scenario::Step(st, Step::Skipped)),
                ) => map_failed(f, None, sc, st),
                Cucumber::Started
                | Cucumber::Feature(..)
                | Cucumber::Finished => ev,
            })
        });

        self.writer.handle_event(event, cli).await;
    }
}

#[async_trait(?Send)]
impl<'val, W, Wr, Val, F> writer::Arbitrary<'val, W, Val>
    for FailOnSkipped<Wr, F>
where
    W: World,
    Self: Writer<W>,
    Wr: writer::Arbitrary<'val, W, Val>,
    Val: 'val,
{
    // Forwards arbitrary values to the wrapped `Writer` unchanged.
    async fn write(&mut self, val: Val)
    where
        'val: 'async_trait,
    {
        self.writer.write(val).await;
    }
}

// Failure statistics are delegated to the wrapped `Writer` as-is.
impl<W, Wr, F> writer::Failure<W> for FailOnSkipped<Wr, F>
where
    Wr: writer::Failure<W>,
    Self: Writer<W>,
{
    fn failed_steps(&self) -> usize {
        self.writer.failed_steps()
    }

    fn parsing_errors(&self) -> usize {
        self.writer.parsing_errors()
    }

    fn hook_errors(&self) -> usize {
        self.writer.hook_errors()
    }
}

impl<Wr: writer::Normalized, F> writer::Normalized for FailOnSkipped<Wr, F> {}

impl<Writer> From<Writer> for FailOnSkipped<Writer> {
    fn from(writer: Writer) -> Self {
        Self {
            writer,
            // Default predicate: a skipped scenario fails unless it carries
            // the `allow_skipped` tag.
            should_fail: |_, _, sc| {
                !sc.tags.iter().any(|tag| tag == "allow_skipped")
            },
        }
    }
}

impl<Writer> FailOnSkipped<Writer> {
    /// Wraps the given [`Writer`] in a new [`FailOnSkipped`] one.
    #[must_use]
    pub fn new(writer: Writer) -> Self {
        Self::from(writer)
    }

    /// Wraps the given [`Writer`] in a new [`FailOnSkipped`] one with the given
    /// `predicate` indicating when a [`Skipped`] [`Step`] is considered
    /// [`Failed`].
    ///
    /// [`Failed`]: event::Step::Failed
    /// [`Skipped`]: event::Step::Skipped
    /// [`Step`]: gherkin::Step
    #[must_use]
    pub fn with<P>(writer: Writer, predicate: P) -> FailOnSkipped<Writer, P>
    where
        P: Fn(
            &gherkin::Feature,
            Option<&gherkin::Rule>,
            &gherkin::Scenario,
        ) -> bool,
    {
        FailOnSkipped {
            writer,
            should_fail: predicate,
        }
    }
}
28.338798
80
0.539337
0e144fb332ee0847cf2130ab8407223dff33cfe6
5,245
use super::bitmask::BitMask;
use super::EMPTY;
use core::{mem, ptr};

// Use the native word size as the group size. Using a 64-bit group size on
// a 32-bit architecture will just end up being more expensive because
// shifts and multiplies will need to be emulated.
#[cfg(any(
    target_pointer_width = "64",
    target_arch = "aarch64",
    target_arch = "x86_64",
))]
type GroupWord = u64;
#[cfg(all(
    target_pointer_width = "32",
    not(target_arch = "aarch64"),
    not(target_arch = "x86_64"),
))]
type GroupWord = u32;

pub type BitMaskWord = GroupWord;
pub const BITMASK_STRIDE: usize = 8;
// We only care about the highest bit of each byte for the mask.
// (The `as GroupWord` cast truncates to the low 32 bits on 32-bit targets.)
pub const BITMASK_MASK: BitMaskWord = 0x8080_8080_8080_8080u64 as GroupWord;

/// Helper function to replicate a byte across a `GroupWord`.
#[inline]
fn repeat(byte: u8) -> GroupWord {
    let repeat = byte as GroupWord;
    let repeat = repeat | repeat.wrapping_shl(8);
    let repeat = repeat | repeat.wrapping_shl(16);
    // This last line is a no-op with a 32-bit GroupWord
    repeat | repeat.wrapping_shl(32)
}

/// Abstraction over a group of control bytes which can be scanned in
/// parallel.
///
/// This implementation uses a word-sized integer.
#[derive(Copy, Clone)]
pub struct Group(GroupWord);

// We perform all operations in the native endianness, and convert to
// little-endian just before creating a BitMask. This can potentially
// enable the compiler to eliminate unnecessary byte swaps if we are
// only checking whether a BitMask is empty.
impl Group {
    /// Number of bytes in the group.
    pub const WIDTH: usize = mem::size_of::<Self>();

    /// Returns a full group of empty bytes, suitable for use as the initial
    /// value for an empty hash table.
    ///
    /// This is guaranteed to be aligned to the group size.
    #[inline]
    pub fn static_empty() -> &'static [u8] {
        // The union forces the byte array to share `Group`'s alignment.
        union AlignedBytes {
            _align: Group,
            bytes: [u8; Group::WIDTH],
        };
        const ALIGNED_BYTES: AlignedBytes = AlignedBytes {
            bytes: [EMPTY; Group::WIDTH],
        };
        unsafe { &ALIGNED_BYTES.bytes }
    }

    /// Loads a group of bytes starting at the given address.
    #[inline]
    pub unsafe fn load(ptr: *const u8) -> Group {
        Group(ptr::read_unaligned(ptr as *const _))
    }

    /// Loads a group of bytes starting at the given address, which must be
    /// aligned to `mem::align_of::<Group>()`.
    #[inline]
    pub unsafe fn load_aligned(ptr: *const u8) -> Group {
        debug_assert_eq!(ptr as usize & (mem::align_of::<Group>() - 1), 0);
        Group(ptr::read(ptr as *const _))
    }

    /// Stores the group of bytes to the given address, which must be
    /// aligned to `mem::align_of::<Group>()`.
    #[inline]
    pub unsafe fn store_aligned(&self, ptr: *mut u8) {
        debug_assert_eq!(ptr as usize & (mem::align_of::<Group>() - 1), 0);
        ptr::write(ptr as *mut _, self.0);
    }

    /// Returns a `BitMask` indicating all bytes in the group which *may*
    /// have the given value.
    ///
    /// This function may return a false positive in certain cases where
    /// the byte in the group differs from the searched value only in its
    /// lowest bit. This is fine because:
    /// - This never happens for `EMPTY` and `DELETED`, only full entries.
    /// - The check for key equality will catch these.
    /// - This only happens if there is at least 1 true match.
    /// - The chance of this happening is very low (< 1% chance per byte).
    #[inline]
    pub fn match_byte(&self, byte: u8) -> BitMask {
        // This algorithm is derived from
        // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
        let cmp = self.0 ^ repeat(byte);
        BitMask((cmp.wrapping_sub(repeat(0x01)) & !cmp & repeat(0x80)).to_le())
    }

    /// Returns a `BitMask` indicating all bytes in the group which are
    /// `EMPTY`.
    #[inline]
    pub fn match_empty(&self) -> BitMask {
        // If the high bit is set, then the byte must be either:
        // 1111_1111 (EMPTY) or 1000_0000 (DELETED).
        // So we can just check if the top two bits are 1 by ANDing them.
        BitMask((self.0 & (self.0 << 1) & repeat(0x80)).to_le())
    }

    /// Returns a `BitMask` indicating all bytes in the group which are
    /// `EMPTY` or `DELETED`.
    #[inline]
    pub fn match_empty_or_deleted(&self) -> BitMask {
        // A byte is EMPTY or DELETED iff the high bit is set
        BitMask((self.0 & repeat(0x80)).to_le())
    }

    /// Performs the following transformation on all bytes in the group:
    /// - `EMPTY => EMPTY`
    /// - `DELETED => EMPTY`
    /// - `FULL => DELETED`
    #[inline]
    pub fn convert_special_to_empty_and_full_to_deleted(&self) -> Group {
        // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111
        // and high_bit = 0 (FULL) to 1000_0000
        //
        // Here's this logic expanded to concrete values:
        //   let full = 1000_0000 (true) or 0000_0000 (false)
        //   !1000_0000 + 1 = 0111_1111 + 1 = 1000_0000 (no carry)
        //   !0000_0000 + 0 = 1111_1111 + 0 = 1111_1111 (no carry)
        let full = !self.0 & repeat(0x80);
        Group(!full + (full >> 7))
    }
}
36.93662
79
0.629933
873fedf49db606a50f103b5d837629281abdb517
6,582
#![allow(clippy::type_repetition_in_bounds)]

// Entity factories for the game's ECS world. Each factory is a `SystemData`
// bundle of the component storages needed to spawn one kind of entity.
// The "client" feature gates everything render-related (sprites, meshes,
// materials) so the headless server builds the same entities without visuals.

#[cfg(feature = "client")]
use amethyst::{
    assets::Handle,
    ecs::ReadExpect,
    renderer::{Material, Mesh, SpriteRender},
};
use amethyst::{
    core::Transform,
    ecs::{prelude::World, Entities, Entity, WriteStorage},
    shred::{ResourceId, SystemData},
    utils::tag::Tag,
};
#[cfg(feature = "client")]
use gv_client_shared::ecs::resources::{AssetHandles, EntityGraphics};
use gv_core::{
    actions::{mob::MobAction, Action},
    ecs::{
        components::{damage_history::DamageHistory, *},
        tags::*,
    },
    math::{Vector2, ZeroVector},
};

use crate::ecs::resources::MonsterDefinition;

/// Storages required to spawn a player entity.
#[derive(SystemData)]
pub struct PlayerFactory<'s> {
    entities: Entities<'s>,
    transforms: WriteStorage<'s, Transform>,
    player_actions: WriteStorage<'s, PlayerActions>,
    world_positions: WriteStorage<'s, WorldPosition>,
    net_world_positions: WriteStorage<'s, NetWorldPosition>,
    players: WriteStorage<'s, Player>,
    player_last_casted_spells: WriteStorage<'s, PlayerLastCastedSpells>,
    damage_histories: WriteStorage<'s, DamageHistory>,
}

impl<'s> PlayerFactory<'s> {
    /// Spawns a new player entity at the world origin with default action
    /// state and an empty damage history, returning the created entity.
    pub fn create(&mut self) -> Entity {
        let mut transform = Transform::default();
        // z = 10.0 draws players above the landscape (z = -1.0) and monsters (z = 5.0).
        transform.set_translation_z(10.0);

        self.entities
            .build_entity()
            .with(transform, &mut self.transforms)
            .with(PlayerActions::default(), &mut self.player_actions)
            .with(
                WorldPosition::new(Vector2::zero()),
                &mut self.world_positions,
            )
            .with(
                NetWorldPosition::new(Vector2::zero()),
                &mut self.net_world_positions,
            )
            .with(Player::new(), &mut self.players)
            .with(
                PlayerLastCastedSpells::default(),
                &mut self.player_last_casted_spells,
            )
            // NOTE(review): players start their damage history at frame 0,
            // while monsters use the frame they were spawned on — confirm
            // this asymmetry is intentional.
            .with(DamageHistory::new(0), &mut self.damage_histories)
            .build()
    }
}

/// Storages required to spawn the background landscape entity.
#[derive(SystemData)]
pub struct LandscapeFactory<'s> {
    entities: Entities<'s>,
    #[cfg(feature = "client")]
    asset_handles: ReadExpect<'s, AssetHandles>,
    tags: WriteStorage<'s, Tag<Landscape>>,
    transforms: WriteStorage<'s, Transform>,
    #[cfg(feature = "client")]
    sprite_renders: WriteStorage<'s, SpriteRender>,
}

impl<'s> LandscapeFactory<'s> {
    /// Client build: spawns the landscape entity with its sprite attached.
    #[cfg(feature = "client")]
    pub fn create(&mut self) -> Entity {
        let AssetHandles { landscape, .. } = self.asset_handles.clone();

        let mut transform = Transform::default();
        // z = -1.0 puts the landscape behind every other entity.
        transform.set_translation_z(-1.0);

        self.entities
            .build_entity()
            .with(Tag::<Landscape>::default(), &mut self.tags)
            .with(transform, &mut self.transforms)
            .with(
                SpriteRender {
                    sprite_sheet: landscape,
                    sprite_number: 0,
                },
                &mut self.sprite_renders,
            )
            .build()
    }

    /// Server build: same entity but without any render components.
    #[cfg(not(feature = "client"))]
    pub fn create(&mut self) -> Entity {
        let mut transform = Transform::default();
        transform.set_translation_z(-1.0);

        self.entities
            .build_entity()
            .with(Tag::<Landscape>::default(), &mut self.tags)
            .with(transform, &mut self.transforms)
            .build()
    }
}

/// Storages required to spawn a monster entity.
#[derive(SystemData)]
pub struct MonsterFactory<'s> {
    pub entities: Entities<'s>,
    pub transforms: WriteStorage<'s, Transform>,
    #[cfg(feature = "client")]
    pub meshes: WriteStorage<'s, Handle<Mesh>>,
    #[cfg(feature = "client")]
    pub materials: WriteStorage<'s, Handle<Material>>,
    pub monsters: WriteStorage<'s, Monster>,
    pub damage_histories: WriteStorage<'s, DamageHistory>,
    pub world_positions: WriteStorage<'s, WorldPosition>,
}

impl<'s> MonsterFactory<'s> {
    /// Client build: spawns a monster from its definition, including its
    /// mesh and material, at `position`, heading towards `destination`.
    ///
    /// `frame_spawned` seeds the monster's damage history.
    #[cfg(feature = "client")]
    pub fn create(
        &mut self,
        frame_spawned: u64,
        definition: MonsterDefinition,
        position: Vector2,
        destination: Vector2,
        action: Action<MobAction<Entity>>,
    ) -> Entity {
        let mut transform = Transform::default();
        // z = 5.0 layers monsters between the landscape and players.
        transform.set_translation_xyz(position.x, position.y, 5.0);

        let MonsterDefinition {
            name,
            base_health: health,
            base_speed: _base_speed,
            base_attack_damage: attack_damage,
            graphics: EntityGraphics { mesh, material },
            radius,
            ..
        } = definition;
        self.entities
            .build_entity()
            .with(mesh, &mut self.meshes)
            .with(material, &mut self.materials)
            .with(transform, &mut self.transforms)
            .with(WorldPosition::new(position), &mut self.world_positions)
            .with(
                Monster {
                    health,
                    attack_damage,
                    destination,
                    velocity: Vector2::zero(),
                    action,
                    name,
                    radius,
                },
                &mut self.monsters,
            )
            .with(
                DamageHistory::new(frame_spawned),
                &mut self.damage_histories,
            )
            .build()
    }

    /// Server build: identical to the client version minus mesh/material.
    #[cfg(not(feature = "client"))]
    pub fn create(
        &mut self,
        frame_spawned: u64,
        definition: MonsterDefinition,
        position: Vector2,
        destination: Vector2,
        action: Action<MobAction<Entity>>,
    ) -> Entity {
        let mut transform = Transform::default();
        transform.set_translation_xyz(position.x, position.y, 5.0);

        let MonsterDefinition {
            name,
            base_health: health,
            base_speed: _base_speed,
            base_attack_damage: attack_damage,
            radius,
            ..
        } = definition;
        self.entities
            .build_entity()
            .with(transform, &mut self.transforms)
            .with(WorldPosition::new(position), &mut self.world_positions)
            .with(
                Monster {
                    health,
                    attack_damage,
                    destination,
                    velocity: Vector2::zero(),
                    action,
                    name,
                    radius,
                },
                &mut self.monsters,
            )
            .with(
                DamageHistory::new(frame_spawned),
                &mut self.damage_histories,
            )
            .build()
    }
}
30.054795
74
0.544515
e4de2a110d083ac9fb7cf1ffc31ffee6d5e8f8b8
2,590
// Auto-generated (svd2rust-style) accessors for the MATCHREL15 register:
// two 16-bit reload fields packed into one 32-bit register.
#[doc = "Reader of register MATCHREL15"]
pub type R = crate::R<u32, super::MATCHREL15>;
#[doc = "Writer for register MATCHREL15"]
pub type W = crate::W<u32, super::MATCHREL15>;
#[doc = "Register MATCHREL15 `reset()`'s with value 0"]
impl crate::ResetValue for super::MATCHREL15 {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Reader of field `RELOADn_L`"]
pub type RELOADN_L_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `RELOADn_L`"]
pub struct RELOADN_L_W<'a> {
    w: &'a mut W,
}
impl<'a> RELOADN_L_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u16) -> &'a mut W {
        // Clear bits 0:15, then insert the new low half-word.
        self.w.bits = (self.w.bits & !0xffff) | ((value as u32) & 0xffff);
        self.w
    }
}
#[doc = "Reader of field `RELOADn_H`"]
pub type RELOADN_H_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `RELOADn_H`"]
pub struct RELOADN_H_W<'a> {
    w: &'a mut W,
}
impl<'a> RELOADN_H_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u16) -> &'a mut W {
        // Clear bits 16:31, then insert the new high half-word.
        self.w.bits = (self.w.bits & !(0xffff << 16)) | (((value as u32) & 0xffff) << 16);
        self.w
    }
}
impl R {
    #[doc = "Bits 0:15 - When UNIFY = 0, specifies the 16-bit value to be loaded into the MATCHn_L register. When UNIFY = 1, specifies the lower 16 bits of the 32-bit value to be loaded into the MATCHn register."]
    #[inline(always)]
    pub fn reloadn_l(&self) -> RELOADN_L_R {
        RELOADN_L_R::new((self.bits & 0xffff) as u16)
    }
    #[doc = "Bits 16:31 - When UNIFY = 0, specifies the 16-bit to be loaded into the MATCHn_H register. When UNIFY = 1, specifies the upper 16 bits of the 32-bit value to be loaded into the MATCHn register."]
    #[inline(always)]
    pub fn reloadn_h(&self) -> RELOADN_H_R {
        RELOADN_H_R::new(((self.bits >> 16) & 0xffff) as u16)
    }
}
impl W {
    #[doc = "Bits 0:15 - When UNIFY = 0, specifies the 16-bit value to be loaded into the MATCHn_L register. When UNIFY = 1, specifies the lower 16 bits of the 32-bit value to be loaded into the MATCHn register."]
    #[inline(always)]
    pub fn reloadn_l(&mut self) -> RELOADN_L_W {
        RELOADN_L_W { w: self }
    }
    #[doc = "Bits 16:31 - When UNIFY = 0, specifies the 16-bit to be loaded into the MATCHn_H register. When UNIFY = 1, specifies the upper 16 bits of the 32-bit value to be loaded into the MATCHn register."]
    #[inline(always)]
    pub fn reloadn_h(&mut self) -> RELOADN_H_W {
        RELOADN_H_W { w: self }
    }
}
39.846154
213
0.627027
16da6e4c4839af083a0dc86c0a38ebc660aedb7e
1,537
// An imaginary magical school has a report card generation system written in Rust!
// Report cards are generic over the grade representation, so both numeric
// grades (e.g. 1.0 -> 5.5) and alphabetical grades (A+ -> F-) can be printed —
// anything that implements `Display` works.

use std::fmt::Display;

/// A single student's report card, generic over how the grade is represented.
pub struct ReportCard<T> {
    pub grade: T,
    pub student_name: String,
    pub student_age: u8,
}

impl<T: Display> ReportCard<T> {
    /// Renders the card as `"<name> (<age>) - achieved a grade of <grade>"`.
    pub fn print(&self) -> String {
        // Destructure by reference so any `Display` grade type is accepted.
        let ReportCard {
            grade,
            student_name,
            student_age,
        } = self;
        format!(
            "{} ({}) - achieved a grade of {}",
            student_name, student_age, grade
        )
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn generate_numeric_report_card() {
        let card = ReportCard::<f32> {
            grade: 2.1,
            student_name: "Tom Wriggle".to_string(),
            student_age: 12,
        };
        assert_eq!(card.print(), "Tom Wriggle (12) - achieved a grade of 2.1");
    }

    #[test]
    fn generate_alphabetic_report_card() {
        let card = ReportCard::<&str> {
            grade: "A+",
            student_name: "Gary Plotter".to_string(),
            student_age: 11,
        };
        assert_eq!(card.print(), "Gary Plotter (11) - achieved a grade of A+");
    }
}
30.74
87
0.620039
64d85645b27d24366d4c570ba2ab9a5e0c026f79
7,966
use crate::parser::expression::*; use crate::parser::util::*; use crate::tokenizer::*; #[derive(Debug)] pub enum StatementNode<'a> { Null, Expression(Expression<'a>), If { condition: Box<Expression<'a>>, statement: Box<StatementNode<'a>>, }, Switch { condition: Box<Expression<'a>>, statement: Box<StatementNode<'a>>, }, Labeled(&'a str), While { condition: Box<Expression<'a>>, statement: Box<StatementNode<'a>>, }, DoWhile { condition: Box<Expression<'a>>, statement: Box<StatementNode<'a>>, }, For { initialization: Box<Expression<'a>>, condition: Box<Expression<'a>>, afterthought: Box<Expression<'a>>, statement: Box<StatementNode<'a>>, }, Compound(Vec<StatementNode<'a>>), Return(Box<Expression<'a>>), Break, Continue, Goto(&'a str), } //parser body pub fn statement<'a, 'b>(context: &'a mut ParseContext<'b>) -> Result<StatementNode<'b>, String> { if let Some(result) = null_statement(context)? { Ok(result) } else if let Some(result) = if_statement(context)? { Ok(result) } else if let Some(result) = switch_statement(context)? { Ok(result) } else if let Some(result) = labeled_statement(context)? { Ok(result) } else if let Some(result) = while_statement(context)? { Ok(result) } else if let Some(result) = do_while_statement(context)? { Ok(result) } else if let Some(result) = for_statement(context)? { Ok(result) } else if let Some(result) = compound_statement(context)? { Ok(result) } else if let Some(result) = return_statement(context)? { Ok(result) } else if let Some(result) = break_statement(context)? { Ok(result) } else if let Some(result) = continue_statement(context)? { Ok(result) } else if let Some(result) = goto_statement(context)? { Ok(result) } else if let Some(result) = expression_statement(context)? 
{ Ok(result) } else { Err("statement is expected but not found.".to_string()) } } #[allow(clippy::unnecessary_wraps)] fn null_statement<'a, 'b>( context: &'a mut ParseContext<'b>, ) -> Result<Option<StatementNode<'b>>, String> { let result = if consume_punctuator(context, PunctuatorKind::Semicolon).is_some() { Some(StatementNode::Null) } else { None }; Ok(result) } fn expression_statement<'a, 'b>( context: &'a mut ParseContext<'b>, ) -> Result<Option<StatementNode<'b>>, String> { let result = expression(context)?; expect_punctuator(context, PunctuatorKind::Semicolon)?; let result = StatementNode::Expression(result); Ok(Some(result)) } fn if_statement<'a, 'b>( context: &'a mut ParseContext<'b>, ) -> Result<Option<StatementNode<'b>>, String> { if consume_keyword(context, KeywordKind::If).is_none() { return Ok(None); } expect_punctuator(context, PunctuatorKind::LeftRoundBracket)?; let cond = expression(context)?; expect_punctuator(context, PunctuatorKind::RightRoundBracket)?; let body = statement(context)?; Ok(Some(StatementNode::If { condition: Box::new(cond), statement: Box::new(body), })) } fn switch_statement<'a, 'b>( context: &'a mut ParseContext<'b>, ) -> Result<Option<StatementNode<'b>>, String> { if consume_keyword(context, KeywordKind::Switch).is_none() { return Ok(None); } expect_punctuator(context, PunctuatorKind::LeftRoundBracket)?; let cond = expression(context)?; expect_punctuator(context, PunctuatorKind::RightRoundBracket)?; let body = statement(context)?; Ok(Some(StatementNode::Switch { condition: Box::new(cond), statement: Box::new(body), })) } fn labeled_statement<'a, 'b>( context: &'a mut ParseContext<'b>, ) -> Result<Option<StatementNode<'b>>, String> { let label = match consume_identifier(context) { Some(result) => { expect_punctuator(context, PunctuatorKind::Colon)?; StatementNode::Labeled(result) } None => return Ok(None), }; Ok(Some(label)) } fn while_statement<'a, 'b>( context: &'a mut ParseContext<'b>, ) -> Result<Option<StatementNode<'b>>, 
String> { if consume_keyword(context, KeywordKind::While).is_none() { return Ok(None); } expect_punctuator(context, PunctuatorKind::LeftRoundBracket)?; let cond = expression(context)?; expect_punctuator(context, PunctuatorKind::RightRoundBracket)?; let body = statement(context)?; Ok(Some(StatementNode::While { condition: Box::new(cond), statement: Box::new(body), })) } fn do_while_statement<'a, 'b>( context: &'a mut ParseContext<'b>, ) -> Result<Option<StatementNode<'b>>, String> { if consume_keyword(context, KeywordKind::Do).is_none() { return Ok(None); } expect_punctuator(context, PunctuatorKind::LeftRoundBracket)?; let cond = expression(context)?; expect_punctuator(context, PunctuatorKind::RightRoundBracket)?; let body = statement(context)?; expect_keyword(context, KeywordKind::While)?; Ok(Some(StatementNode::DoWhile { condition: Box::new(cond), statement: Box::new(body), })) } fn for_statement<'a, 'b>( context: &'a mut ParseContext<'b>, ) -> Result<Option<StatementNode<'b>>, String> { if consume_keyword(context, KeywordKind::For).is_none() { return Ok(None); } expect_punctuator(context, PunctuatorKind::LeftRoundBracket)?; let init = expression(context)?; expect_punctuator(context, PunctuatorKind::Semicolon)?; let cond = expression(context)?; expect_punctuator(context, PunctuatorKind::Semicolon)?; let after = expression(context)?; expect_punctuator(context, PunctuatorKind::RightRoundBracket)?; let body = statement(context)?; Ok(Some(StatementNode::For { initialization: Box::new(init), condition: Box::new(cond), afterthought: Box::new(after), statement: Box::new(body), })) } fn compound_statement<'a, 'b>( context: &'a mut ParseContext<'b>, ) -> Result<Option<StatementNode<'b>>, String> { if consume_punctuator(context, PunctuatorKind::LeftCurlyBracket).is_none() { return Ok(None); } let mut result = Vec::new(); while consume_punctuator(context, PunctuatorKind::RightCurlyBracket).is_none() { result.push(statement(context)?); } 
Ok(Some(StatementNode::Compound(result))) } fn return_statement<'a, 'b>( context: &'a mut ParseContext<'b>, ) -> Result<Option<StatementNode<'b>>, String> { if consume_keyword(context, KeywordKind::Return).is_none() { return Ok(None); } let content = expression(context)?; Ok(Some(StatementNode::Return(Box::new(content)))) } fn break_statement<'a, 'b>( context: &'a mut ParseContext<'b>, ) -> Result<Option<StatementNode<'b>>, String> { let result = match consume_keyword(context, KeywordKind::Break) { Some(_) => { expect_punctuator(context, PunctuatorKind::Semicolon)?; Some(StatementNode::Break) } None => None, }; Ok(result) } fn continue_statement<'a, 'b>( context: &'a mut ParseContext<'b>, ) -> Result<Option<StatementNode<'b>>, String> { let result = match consume_keyword(context, KeywordKind::Continue) { Some(_) => { expect_punctuator(context, PunctuatorKind::Semicolon)?; Some(StatementNode::Continue) } None => None, }; Ok(result) } fn goto_statement<'a, 'b>( context: &'a mut ParseContext<'b>, ) -> Result<Option<StatementNode<'b>>, String> { if consume_keyword(context, KeywordKind::Goto).is_none() { return Ok(None); } let label = expect_identifier(context)?; Ok(Some(StatementNode::Goto(label))) }
29.179487
98
0.630429
eb83d940838b9b57094f0d500076897dbfb09ac8
512
// WebAssembly module exposing a multi-threaded sort to JavaScript via
// wasm-bindgen + rayon (thread pool backed by Web Workers).
extern crate console_error_panic_hook;

use js_sys::Int32Array;
use rayon::prelude::*;
use std::panic;
use wasm_bindgen::prelude::*;

// Re-exported so the JS side can spin up the rayon worker pool before any
// parallel call is made.
pub use wasm_bindgen_rayon::init_thread_pool;

/// Module initialization hook, run once when the wasm module is instantiated.
#[wasm_bindgen(start)]
pub fn start() -> Result<(), JsValue> {
    // Forward Rust panic messages to the browser console instead of the
    // default opaque "unreachable" trap.
    panic::set_hook(Box::new(console_error_panic_hook::hook));
    Ok(())
}

/// Sorts a JS `Int32Array` in parallel and returns a new sorted array.
/// The input array is not modified; its contents are copied into wasm
/// memory, sorted with rayon, and copied back out.
#[wasm_bindgen(js_name = sortParallel)]
pub fn sort_parallel(numbers: Int32Array) -> Int32Array {
    let mut vec = numbers.to_vec();
    vec.par_sort();
    Int32Array::from(vec.as_slice())
}
25.6
62
0.712891
edbbf60530e460e2e6b8605f95d55be9c61d7821
16,577
// Request/response models for the Roblox web APIs (part 1 of the file):
// error envelope, experience, place, and developer-product endpoints.
use std::{clone::Clone, fmt, str};

use reqwest::StatusCode;
use serde::{Deserialize, Serialize};
use serde_repr::{Deserialize_repr, Serialize_repr};
use url::Url;

// Numeric id Roblox assigns to every asset/universe/place.
pub type AssetId = u64;

pub const DEFAULT_PLACE_NAME: &str = "Untitled Game";

#[derive(Deserialize, Debug)]
pub struct RobloxApiErrorResponse {
    // There are some other possible properties but we currently have no use for them so they are not
    // included

    // Most error models have a `message` property
    #[serde(alias = "Message")]
    pub message: Option<String>,

    // Some error models (500) have a `title` property instead
    #[serde(alias = "Title")]
    pub title: Option<String>,

    // Some error models on older APIs have an errors array
    #[serde(alias = "Errors")]
    pub errors: Option<Vec<RobloxApiErrorResponse>>,

    // Some errors return a `success` property which can be used to check for errors
    #[serde(alias = "Success")]
    pub success: Option<bool>,
}

impl RobloxApiErrorResponse {
    /// Extracts a human-readable reason: `message` first, then `title`,
    /// then the first reason found by recursing into nested `errors`.
    pub fn reason(self) -> Option<String> {
        if let Some(message) = self.message {
            Some(message)
        } else if let Some(title) = self.title {
            Some(title)
        } else if let Some(errors) = self.errors {
            for error in errors {
                if let Some(message) = error.reason() {
                    return Some(message);
                }
            }
            None
        } else {
            None
        }
    }

    /// Like [`reason`](Self::reason), but falls back to
    /// `"Unknown error (<status>)"` when the body carried no reason.
    pub fn reason_or_status_code(self, status_code: StatusCode) -> String {
        self.reason()
            .unwrap_or_else(|| format!("Unknown error ({})", status_code))
    }
}

#[derive(Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct CreateExperienceResponse {
    pub universe_id: AssetId,
    pub root_place_id: AssetId,
}

// Who owns an experience: an individual user account or a group.
#[derive(Serialize, Deserialize, Clone)]
#[serde(rename_all = "PascalCase")]
pub enum CreatorType {
    User,
    Group,
}

impl fmt::Display for CreatorType {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "{}",
            match self {
                CreatorType::User => "User",
                CreatorType::Group => "Group",
            }
        )
    }
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct GetExperienceResponse {
    pub root_place_id: AssetId,
    pub is_active: bool,
    pub creator_type: CreatorType,
    pub creator_target_id: AssetId,
}

#[derive(Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct CreatePlaceResponse {
    pub place_id: AssetId,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct GetPlaceResponse {
    pub id: AssetId,
    pub current_saved_version: u32,
    pub name: String,
    pub description: String,
    pub max_player_count: u32,
    pub allow_copying: bool,
    pub social_slot_type: SocialSlotType,
    pub custom_social_slots_count: Option<u32>,
    pub is_root_place: bool,
}

// Projects the API response down to the locally-managed configuration
// (drops id/version/is_root_place, which are server-owned).
impl From<GetPlaceResponse> for PlaceConfigurationModel {
    fn from(response: GetPlaceResponse) -> Self {
        PlaceConfigurationModel {
            name: response.name,
            description: response.description,
            max_player_count: response.max_player_count,
            allow_copying: response.allow_copying,
            social_slot_type: response.social_slot_type,
            custom_social_slots_count: response.custom_social_slots_count,
        }
    }
}

// Delete-place endpoint returns an empty JSON body.
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RemovePlaceResponse {}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ListPlacesResponse {
    // Cursor for the next page; `None` on the last page.
    pub next_page_cursor: Option<String>,
    pub data: Vec<ListPlaceResponse>,
}

#[derive(Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct ListPlaceResponse {
    pub id: AssetId,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct UploadImageResponse {
    pub target_id: AssetId,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CreateDeveloperProductResponse {
    pub id: AssetId,
}

#[derive(Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct ListDeveloperProductsResponse {
    pub developer_products: Vec<ListDeveloperProductResponseItem>,
    // Older API paginates with a boolean instead of a cursor.
    pub final_page: bool,
}

#[derive(Deserialize, Clone)]
#[serde(rename_all = "PascalCase")]
pub struct ListDeveloperProductResponseItem {
    pub product_id: AssetId,
    pub developer_product_id: AssetId,
    pub name: String,
    pub description: Option<String>,
    pub icon_image_asset_id: Option<AssetId>,
    pub price_in_robux: u32,
}
// Request/response models for the Roblox web APIs (part 2 of the file):
// game passes, badges, asset aliases, thumbnails, social links, quotas,
// and the experience/place configuration models with their defaults.
#[derive(Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct GetDeveloperProductResponse {
    pub id: AssetId,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ListGamePassesResponse {
    pub next_page_cursor: Option<String>,
    pub data: Vec<ListGamePassResponse>,
}

#[derive(Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct ListGamePassResponse {
    pub id: AssetId,
}

#[derive(Deserialize, Clone)]
#[serde(rename_all = "PascalCase")]
pub struct GetGamePassResponse {
    pub target_id: AssetId,
    pub name: String,
    pub description: String,
    pub icon_image_asset_id: AssetId,
    pub price_in_robux: Option<u32>,
}

// Built locally from two separate API calls, not deserialized.
pub struct CreateGamePassResponse {
    pub asset_id: AssetId,
    pub icon_asset_id: AssetId,
}

#[derive(Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct CreateBadgeResponse {
    pub id: AssetId,
    pub icon_image_id: AssetId,
}

#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ListBadgesResponse {
    pub next_page_cursor: Option<String>,
    pub data: Vec<ListBadgeResponse>,
}

#[derive(Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct ListBadgeResponse {
    pub id: AssetId,
    pub name: String,
    pub description: String,
    pub icon_image_id: AssetId,
    pub enabled: bool,
}

#[derive(Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct ListAssetAliasesResponse {
    pub aliases: Vec<GetAssetAliasResponse>,
    pub final_page: bool,
}

#[derive(Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct GetAssetAliasResponse {
    pub name: String,
    pub target_id: AssetId,
    pub asset: GetAssetResponse,
}

#[derive(Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct GetAssetResponse {
    // Corresponds to the numeric `AssetTypeId` values below.
    pub type_id: u32,
}

#[derive(Deserialize, Clone)]
#[serde(rename_all = "PascalCase")]
pub struct CreateImageAssetResponse {
    pub asset_id: AssetId,
    pub backing_asset_id: AssetId,
}

#[derive(Deserialize, Clone)]
#[serde(rename_all = "PascalCase")]
pub struct CreateAudioAssetResponse {
    pub id: AssetId,
}

#[derive(Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct GetGameIconsResponse {
    pub data: Vec<GetThumbnailResponse>,
}

#[derive(Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct GetExperienceThumbnailsResponse {
    pub data: Vec<GetExperienceThumbnailResponse>,
}

#[derive(Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct GetExperienceThumbnailResponse {
    pub id: AssetId,
}

#[derive(Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct GetThumbnailResponse {
    pub target_id: AssetId,
}

#[derive(Serialize, Deserialize, Clone)]
#[serde(rename_all = "PascalCase")]
pub enum SocialLinkType {
    Facebook,
    Twitter,
    YouTube,
    Twitch,
    Discord,
    RobloxGroup,
    Guilded,
}

#[derive(Deserialize, Clone)]
#[serde(rename_all = "PascalCase")]
pub struct CreateSocialLinkResponse {
    pub id: AssetId,
}

#[derive(Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct ListSocialLinksResponse {
    pub data: Vec<GetSocialLinkResponse>,
}

#[derive(Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct GetSocialLinkResponse {
    pub id: AssetId,
    pub title: String,
    pub url: Url,
    #[serde(rename = "type")]
    pub link_type: SocialLinkType,
}

#[derive(Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct CreateAssetQuotasResponse {
    pub quotas: Vec<CreateAssetQuota>,
}

#[derive(Deserialize, Clone)]
#[serde(rename_all = "PascalCase")]
pub enum QuotaDuration {
    Month,
}

#[derive(Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct CreateAssetQuota {
    pub duration: QuotaDuration,
    pub usage: u32,
    pub capacity: u32,
    pub expiration_time: Option<String>,
}

#[derive(Serialize, Deserialize, Clone)]
pub enum ExperienceGenre {
    All,
    Adventure,
    Tutorial,
    Funny,
    Ninja,
    #[serde(rename = "FPS")]
    Fps,
    Scary,
    Fantasy,
    War,
    Pirate,
    #[serde(rename = "RPG")]
    Rpg,
    SciFi,
    Sports,
    TownAndCity,
    WildWest,
}

#[derive(Serialize, Deserialize, Clone, Copy)]
#[serde(rename_all = "PascalCase")]
pub enum ExperiencePlayableDevice {
    Computer,
    Phone,
    Tablet,
    Console,
}

#[derive(Serialize, Deserialize, Clone)]
pub enum ExperienceAvatarType {
    MorphToR6,
    MorphToR15,
    PlayerChoice,
}

#[derive(Serialize, Deserialize, Clone, Copy)]
#[serde(rename_all = "PascalCase")]
pub enum ExperienceAnimationType {
    Standard,
    PlayerChoice,
}

#[derive(Serialize, Deserialize, Clone, Copy)]
#[serde(rename_all = "PascalCase")]
pub enum ExperienceCollisionType {
    OuterBox,
    InnerBox,
}

// Avatar scale factors; the API exchanges them as decimal strings.
#[derive(Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct ExperienceAvatarScales {
    pub height: String,
    pub width: String,
    pub head: String,
    pub body_type: String,
    pub proportion: String,
}

// Roblox asset type ids, serialized as their numeric value (repr(u8)).
#[derive(Serialize_repr, Deserialize_repr, Clone, Debug)]
#[repr(u8)]
pub enum AssetTypeId {
    Image = 1,
    TShirt = 2,
    Audio = 3,
    Mesh = 4,
    Lua = 5,
    Hat = 8,
    Place = 9,
    Model = 10,
    Shirt = 11,
    Pants = 12,
    Decal = 13,
    Head = 17,
    Face = 18,
    Gear = 19,
    Badge = 21,
    Animation = 24,
    Torso = 27,
    RightArm = 28,
    LeftArm = 29,
    LeftLeg = 30,
    RightLeg = 31,
    Package = 32,
    GamePass = 34,
    Plugin = 38,
    MeshPart = 40,
    HairAccessory = 41,
    FaceAccessory = 42,
    NeckAccessory = 43,
    ShoulderAccessory = 44,
    FrontAccessory = 45,
    BackAccessory = 46,
    WaistAccessory = 47,
    ClimbAnimation = 48,
    DeathAnimation = 49,
    FallAnimation = 50,
    IdleAnimation = 51,
    JumpAnimation = 52,
    RunAnimation = 53,
    SwimAnimation = 54,
    WalkAnimation = 55,
    PoseAnimation = 56,
    EarAccessory = 57,
    EyeAccessory = 58,
    EmoteAnimation = 61,
    Video = 62,
    TShirtAccessory = 64,
    ShirtAccessory = 65,
    PantsAccessory = 66,
    JacketAccessory = 67,
    SweaterAccessory = 68,
    ShortsAccessory = 69,
    LeftShoeAccessory = 70,
    RightShoeAccessory = 71,
    DressSkirtAccessory = 72,
}

impl fmt::Display for AssetTypeId {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Displays the serde_repr form, i.e. the numeric id.
        write!(f, "{}", serde_json::to_string(&self).unwrap(),)
    }
}

#[derive(Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct ExperienceAvatarAssetOverride {
    #[serde(rename = "assetTypeID")]
    pub asset_type_id: AssetTypeId,
    pub is_player_choice: bool,
    #[serde(rename = "assetID")]
    pub asset_id: Option<AssetId>,
}

impl ExperienceAvatarAssetOverride {
    /// Convenience constructor for a "player chooses" override (no fixed asset).
    pub fn player_choice(asset_type_id: AssetTypeId) -> Self {
        Self {
            asset_type_id,
            is_player_choice: true,
            asset_id: None,
        }
    }
}

#[derive(Serialize, Deserialize, Clone, Default)]
#[serde(rename_all = "PascalCase")]
pub struct ExperiencePermissionsModel {
    pub is_third_party_purchase_allowed: bool,
    pub is_third_party_teleport_allowed: bool,
}

#[derive(Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct ExperienceConfigurationModel {
    pub genre: ExperienceGenre,
    pub playable_devices: Vec<ExperiencePlayableDevice>,
    pub is_friends_only: Option<bool>,

    #[serde(default)]
    pub allow_private_servers: bool,
    pub private_server_price: Option<u32>,
    pub is_for_sale: bool,
    pub price: Option<u32>,

    #[serde(default)]
    pub studio_access_to_apis_allowed: bool,
    #[serde(default)]
    pub permissions: ExperiencePermissionsModel,

    pub universe_avatar_type: ExperienceAvatarType,
    pub universe_animation_type: ExperienceAnimationType,
    pub universe_collision_type: ExperienceCollisionType,
    // The `default_*` helpers below supply the crate defaults when the
    // API response omits these fields.
    #[serde(default = "default_min_scales")]
    pub universe_avatar_min_scales: ExperienceAvatarScales,
    #[serde(default = "default_max_scales")]
    pub universe_avatar_max_scales: ExperienceAvatarScales,
    #[serde(default = "default_asset_overrides")]
    pub universe_avatar_asset_overrides: Vec<ExperienceAvatarAssetOverride>,

    pub is_archived: bool,
}

// Serde `default = "..."` requires free functions; these delegate to the
// model's `Default` impl so defaults are defined in one place.
fn default_min_scales() -> ExperienceAvatarScales {
    ExperienceConfigurationModel::default().universe_avatar_min_scales
}

fn default_max_scales() -> ExperienceAvatarScales {
    ExperienceConfigurationModel::default().universe_avatar_max_scales
}

fn default_asset_overrides() -> Vec<ExperienceAvatarAssetOverride> {
    ExperienceConfigurationModel::default().universe_avatar_asset_overrides
}

impl Default for ExperienceConfigurationModel {
    fn default() -> Self {
        ExperienceConfigurationModel {
            genre: ExperienceGenre::All,
            playable_devices: vec![
                ExperiencePlayableDevice::Computer,
                ExperiencePlayableDevice::Phone,
                ExperiencePlayableDevice::Tablet,
            ],
            is_friends_only: Some(true),
            allow_private_servers: false,
            private_server_price: None,
            is_for_sale: false,
            price: None,
            studio_access_to_apis_allowed: false,
            permissions: ExperiencePermissionsModel {
                is_third_party_purchase_allowed: false,
                is_third_party_teleport_allowed: false,
            },
            universe_avatar_type: ExperienceAvatarType::MorphToR15,
            universe_animation_type: ExperienceAnimationType::PlayerChoice,
            universe_collision_type: ExperienceCollisionType::OuterBox,
            universe_avatar_min_scales: ExperienceAvatarScales {
                height: 0.9.to_string(),
                width: 0.7.to_string(),
                head: 0.95.to_string(),
                body_type: 0.0.to_string(),
                proportion: 0.0.to_string(),
            },
            universe_avatar_max_scales: ExperienceAvatarScales {
                height: 1.05.to_string(),
                width: 1.0.to_string(),
                head: 1.0.to_string(),
                body_type: 1.0.to_string(),
                proportion: 1.0.to_string(),
            },
            universe_avatar_asset_overrides: vec![
                ExperienceAvatarAssetOverride::player_choice(AssetTypeId::Face),
                ExperienceAvatarAssetOverride::player_choice(AssetTypeId::Head),
                ExperienceAvatarAssetOverride::player_choice(AssetTypeId::Torso),
                ExperienceAvatarAssetOverride::player_choice(AssetTypeId::LeftArm),
                ExperienceAvatarAssetOverride::player_choice(AssetTypeId::RightArm),
                ExperienceAvatarAssetOverride::player_choice(AssetTypeId::LeftLeg),
                ExperienceAvatarAssetOverride::player_choice(AssetTypeId::RightLeg),
                ExperienceAvatarAssetOverride::player_choice(AssetTypeId::TShirt),
                ExperienceAvatarAssetOverride::player_choice(AssetTypeId::Shirt),
                ExperienceAvatarAssetOverride::player_choice(AssetTypeId::Pants),
            ],
            is_archived: false,
        }
    }
}

#[derive(Serialize, Deserialize, Clone)]
pub enum SocialSlotType {
    Automatic,
    Empty,
    Custom,
}

#[derive(Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct PlaceConfigurationModel {
    pub name: String,
    pub description: String,
    pub max_player_count: u32,
    pub allow_copying: bool,
    pub social_slot_type: SocialSlotType,
    pub custom_social_slots_count: Option<u32>,
}

impl Default for PlaceConfigurationModel {
    fn default() -> Self {
        PlaceConfigurationModel {
            name: DEFAULT_PLACE_NAME.to_owned(),
            description: "Created with Mantle".to_owned(),
            max_player_count: 50,
            allow_copying: false,
            social_slot_type: SocialSlotType::Automatic,
            custom_social_slots_count: None,
        }
    }
}

// On-disk place file formats.
pub enum PlaceFileFormat {
    Xml,
    Binary,
}
26.312698
101
0.668999
7a941221ab433a8c8789523bdb856fac2367e703
16,240
use std::io::{Error, ErrorKind}; use std::iter::FromIterator; use std::path::{Path, PathBuf}; use std::sync::Arc; use local_encoding::{Encoder, Encoding}; use crate::cmd; use crate::compiler::{ Arg, CommandInfo, CompilationArgs, CompilationTask, InputKind, OutputKind, Scope, }; use std::fs; enum ParamValue<T> { None, Single(T), Many(Vec<T>), } pub fn create_tasks(command: CommandInfo, args: &[String]) -> Result<Vec<CompilationTask>, String> { load_arguments(&command.current_dir, args.iter()) .map_err(|e: Error| format!("IO error: {}", e)) .and_then(|a| parse_arguments(a.iter())) .and_then(|parsed_args| { // Source file name. let input_sources: Vec<PathBuf> = parsed_args .iter() .filter_map(|arg| match arg { Arg::Input { ref kind, ref file, .. } if *kind == InputKind::Source => Some(Path::new(file).to_path_buf()), _ => None, }) .collect(); if input_sources.is_empty() { return Err("Can't find source file path.".to_string()); } // Precompiled header file name. let precompiled_file = match find_param(&parsed_args, |arg: &Arg| -> Option<PathBuf> { match *arg { Arg::Input { ref kind, ref file, .. } if *kind == InputKind::Precompiled => Some(Path::new(file).to_path_buf()), _ => None, } }) { ParamValue::None => None, ParamValue::Single(v) => Some(v), ParamValue::Many(v) => { return Err(format!("Found too many precompiled header files: {:?}", v)); } }; let cwd = command.current_dir.clone(); // Precompiled header file name. let marker_precompiled; let input_precompiled; let output_precompiled; match find_param(&parsed_args, |arg: &Arg| -> Option<(bool, String)> { match *arg { Arg::Input { ref kind, ref file, .. } if *kind == InputKind::Marker => Some((true, file.clone())), Arg::Output { ref kind, ref file, .. 
} if *kind == OutputKind::Marker => Some((false, file.clone())), _ => None, } }) { ParamValue::None => { marker_precompiled = None; input_precompiled = None; output_precompiled = None; } ParamValue::Single((input, path)) => { let precompiled_path = match precompiled_file { Some(v) => v, None => Path::new(&path).with_extension(".pch"), }; marker_precompiled = if path.is_empty() { None } else { Some(path) }; if input { output_precompiled = None; input_precompiled = Some(precompiled_path); } else { input_precompiled = None; output_precompiled = Some(precompiled_path); } } ParamValue::Many(v) => { return Err(format!( "Found too many precompiled header markers: {}", v.iter().map(|item| item.1.clone()).collect::<String>() )); } }; // Output object file name. let output_object: Option<PathBuf> = match find_param(&parsed_args, |arg: &Arg| -> Option<PathBuf> { match *arg { Arg::Output { ref kind, ref file, .. } if *kind == OutputKind::Object => Some(Path::new(file).to_path_buf()), _ => None, } }) { ParamValue::None => None, ParamValue::Single(v) => Some(v), ParamValue::Many(v) => { return Err(format!("Found too many output object files: {:?}", v)); } } .map(|path| cwd.as_ref().map(|cwd| cwd.join(&path)).unwrap_or(path)); // Language let language: Option<String> = match find_param(&parsed_args, |arg: &Arg| -> Option<String> { match arg { Arg::Param { ref flag, ref value, .. 
} if *flag == "T" => Some(value.clone()), _ => None, } }) { ParamValue::None => None, ParamValue::Single(v) => Some(v), ParamValue::Many(v) => { return Err(format!("Found too many output object files: {:?}", v)); } }; let shared = Arc::new(CompilationArgs { args: parsed_args, input_precompiled: input_precompiled.map(|path| command.current_dir_join(&path)), output_precompiled: output_precompiled.map(|path| command.current_dir_join(&path)), marker_precompiled, command, }); input_sources .into_iter() .map(|source| { let input_source = cwd.as_ref().map(|cwd| cwd.join(&source)).unwrap_or(source); Ok(CompilationTask { shared: shared.clone(), language: language .as_ref() .map_or_else( || { input_source .extension() .and_then(|ext| match ext.to_str() { Some(e) if e.eq_ignore_ascii_case("cpp") => Some("P"), Some(e) if e.eq_ignore_ascii_case("c") => Some("C"), _ => None, }) .map(|ext| ext.to_string()) }, |lang| Some(lang.clone()), ) .ok_or_else(|| { format!( "Can't detect file language by extension: {}", input_source.to_string_lossy() ) })?, output_object: get_output_object(&input_source, &output_object)?, input_source, }) }) .collect() }) } fn get_output_object( input_source: &Path, output_object: &Option<PathBuf>, ) -> Result<PathBuf, String> { output_object.as_ref().map_or_else( || Ok(input_source.with_extension("obj")), |path| { if path.is_dir() { input_source .file_name() .map(|name| path.join(name).with_extension("obj")) .ok_or_else(|| { format!( "Input file path does not contains file name: {}", input_source.to_string_lossy() ) }) } else { Ok(path.clone()) } }, ) } fn find_param<T, R, F: Fn(&T) -> Option<R>>(args: &[T], filter: F) -> ParamValue<R> { let mut found = Vec::from_iter(args.iter().filter_map(filter)); match found.len() { 0 => ParamValue::None, 1 => ParamValue::Single(found.pop().unwrap()), _ => ParamValue::Many(found), } } fn load_arguments<S: AsRef<str>, I: Iterator<Item = S>>( base: &Option<PathBuf>, iter: I, ) -> Result<Vec<String>, Error> { let mut 
result: Vec<String> = Vec::new(); for item in iter { if item.as_ref().starts_with('@') { let path = match base { Some(ref p) => p.join(&item.as_ref()[1..]), None => Path::new(&item.as_ref()[1..]).to_path_buf(), }; let data = fs::read(path)?; let text = decode_string(&data)?; let mut args = cmd::native::parse(&text)?; result.append(&mut args); } else { result.push(item.as_ref().to_string()); } } Ok(result) } fn decode_string(data: &[u8]) -> Result<String, Error> { if data.starts_with(&[0xEF, 0xBB, 0xBF]) { String::from_utf8(data[3..].to_vec()).map_err(|e| Error::new(ErrorKind::InvalidInput, e)) } else if data.starts_with(&[0xFE, 0xFF]) { decode_utf16(&data[2..], |a, b| (a << 8) + b) } else if data.starts_with(&[0xFF, 0xFE]) { decode_utf16(&data[2..], |a, b| (b << 8) + a) } else { Encoding::ANSI.to_string(data) } } fn decode_utf16<F: Fn(u16, u16) -> u16>(data: &[u8], endian: F) -> Result<String, Error> { let mut utf16 = Vec::new(); if data.len() % 2 != 0 { return Err(Error::new( ErrorKind::InvalidInput, "Invalid UTF-16 line: odd bytes length", )); } let mut i = 0; while i < data.len() { utf16.push(endian(u16::from(data[i]), u16::from(data[i + 1]))); i += 2; } String::from_utf16(&utf16).map_err(|e| Error::new(ErrorKind::InvalidInput, e)) } fn parse_arguments<S: AsRef<str>, I: Iterator<Item = S>>(mut iter: I) -> Result<Vec<Arg>, String> { let mut result: Vec<Arg> = Vec::new(); let mut errors: Vec<String> = Vec::new(); while let Some(parse_result) = parse_argument(&mut iter) { match parse_result { Ok(arg) => { result.push(arg); } Err(e) => { errors.push(e); } } } if !errors.is_empty() { return Err(format!( "Found unknown command line arguments: {:?}", errors )); } Ok(result) } #[allow(clippy::cognitive_complexity)] fn parse_argument<S: AsRef<str>, I: Iterator<Item = S>>( iter: &mut I, ) -> Option<Result<Arg, String>> { match iter.next() { Some(arg) => Some(if has_param_prefix(arg.as_ref()) { let flag = &arg.as_ref()[1..]; match is_spaceable_param(flag) { Some((prefix, 
scope)) => { if flag == prefix { match iter.next() { Some(value) => { if !has_param_prefix(value.as_ref()) { Ok(Arg::param(scope, prefix, value.as_ref())) } else { Err(arg.as_ref().to_string()) } } _ => Err(arg.as_ref().to_string()), } } else { Ok(Arg::param(scope, prefix, &flag[prefix.len()..])) } } None => match flag { "c" | "nologo" => Ok(Arg::flag(Scope::Ignore, flag)), "bigobj" => Ok(Arg::flag(Scope::Compiler, flag)), s if s.starts_with('T') => Ok(Arg::param(Scope::Ignore, "T", &s[1..])), s if s.starts_with('O') => Ok(Arg::flag(Scope::Shared, flag)), s if s.starts_with('G') => Ok(Arg::flag(Scope::Shared, flag)), s if s.starts_with("RTC") => Ok(Arg::flag(Scope::Shared, flag)), s if s.starts_with('Z') => Ok(Arg::flag(Scope::Shared, flag)), s if s.starts_with("d2Zi+") => Ok(Arg::flag(Scope::Shared, flag)), s if s.starts_with("MP") => Ok(Arg::flag(Scope::Compiler, flag)), s if s.starts_with("MD") => Ok(Arg::flag(Scope::Shared, flag)), s if s.starts_with("MT") => Ok(Arg::flag(Scope::Shared, flag)), s if s.starts_with("EH") => Ok(Arg::flag(Scope::Shared, flag)), s if s.starts_with("fp:") => Ok(Arg::flag(Scope::Shared, flag)), s if s.starts_with("arch:") => Ok(Arg::flag(Scope::Shared, flag)), s if s.starts_with("errorReport:") => Ok(Arg::flag(Scope::Shared, flag)), s if s.starts_with("source-charset:") => Ok(Arg::flag(Scope::Shared, flag)), s if s.starts_with("execution-charset:") => Ok(Arg::flag(Scope::Shared, flag)), s if s.starts_with("Fo") => Ok(Arg::output(OutputKind::Object, "Fo", &s[2..])), s if s.starts_with("Fp") => { Ok(Arg::input(InputKind::Precompiled, "Fp", &s[2..])) } s if s.starts_with("Yc") => Ok(Arg::output(OutputKind::Marker, "Yc", &s[2..])), s if s.starts_with("Yu") => Ok(Arg::input(InputKind::Marker, "Yu", &s[2..])), s if s.starts_with("Yl") => Ok(Arg::flag(Scope::Shared, flag)), s if s.starts_with("FI") => Ok(Arg::param(Scope::Preprocessor, "FI", &s[2..])), s if s.starts_with("analyze") => Ok(Arg::flag(Scope::Shared, flag)), _ => 
Err(arg.as_ref().to_string()), }, } } else { Ok(Arg::Input { kind: InputKind::Source, flag: String::new(), file: arg.as_ref().to_string(), }) }), None => None, } } fn is_spaceable_param(flag: &str) -> Option<(&str, Scope)> { for prefix in ["D"].iter() { if flag.starts_with(*prefix) { return Some((*prefix, Scope::Shared)); } } for prefix in ["I"].iter() { if flag.starts_with(*prefix) { return Some((*prefix, Scope::Preprocessor)); } } for prefix in ["W", "wd", "we", "wo", "w"].iter() { if flag.starts_with(*prefix) { return Some((*prefix, Scope::Compiler)); } } None } fn has_param_prefix(arg: &str) -> bool { arg.starts_with('/') || arg.starts_with('-') } #[test] fn test_parse_argument() { let args = Vec::from_iter( "/TP /c /Yusample.h /Fpsample.h.pch /Fosample.cpp.o /DTEST /D TEST2 /arch:AVX \ sample.cpp" .split(' ') .map(|x| x.to_string()), ); assert_eq!( parse_arguments(args.iter()).unwrap(), [ Arg::param(Scope::Ignore, "T", "P"), Arg::flag(Scope::Ignore, "c"), Arg::input(InputKind::Marker, "Yu", "sample.h"), Arg::input(InputKind::Precompiled, "Fp", "sample.h.pch"), Arg::output(OutputKind::Object, "Fo", "sample.cpp.o"), Arg::param(Scope::Shared, "D", "TEST"), Arg::param(Scope::Shared, "D", "TEST2"), Arg::flag(Scope::Shared, "arch:AVX"), Arg::input(InputKind::Source, "", "sample.cpp") ] ) } #[test] fn test_decode_string() { // ANSI assert_eq!(&decode_string(b"test").unwrap(), "test"); // UTF-8 assert_eq!( &decode_string(b"\xEF\xBB\xBFtest \xD1\x80\xD1\x83\xD1\x81").unwrap(), "test рус" ); // UTF-16LE assert_eq!( &decode_string(b"\xFF\xFEt\x00e\x00s\x00t\x00 \x00\x40\x04\x43\x04\x41\x04").unwrap(), "test рус" ); // UTF-16BE assert_eq!( &decode_string(b"\xFE\xFF\x00t\x00e\x00s\x00t\x00 \x04\x40\x04\x43\x04\x41").unwrap(), "test рус" ); }
39.706601
100
0.440948
628676c7051193e864cee8ea6e93066104319539
3,706
pub use tdd_kata::mst_kata::day_1::{EdgeWeightedGraph, WeightedEdge, LazyMst}; pub use std::cmp::Ordering; pub use expectest::prelude::{be_equal_to, be_some, be_ok, be_err}; describe! edge_weighted_graph { describe! weighted_edge { before_each { let edge = WeightedEdge::new(1, 2, 1); } it "should create a new edge with weight" { expect!(edge.weight()).to(be_equal_to(1)); } it "should return other vertex" { expect!(edge.either()).to(be_equal_to(1)); expect!(edge.other(edge.either())).to(be_equal_to(2)); expect!(edge.other(edge.other(edge.either()))).to(be_equal_to(1)); } it "should be comparable by weight" { let bigger = WeightedEdge::new(1, 2, 3); let smaller = WeightedEdge::new(2, 3, 1); expect!(bigger.cmp(&smaller)).to(be_equal_to(Ordering::Less)); expect!(smaller.cmp(&bigger)).to(be_equal_to(Ordering::Greater)); let edge_1 = WeightedEdge::new(1, 2, 2); let edge_2 = WeightedEdge::new(1, 3, 2); expect!(edge_1.cmp(&edge_2)).to(be_equal_to(Ordering::Equal)); } } describe! graph { before_each { let mut graph = EdgeWeightedGraph::default(); } it "should create a new empty edge weighted graph" { expect!(graph.vertices()).to(be_equal_to(0)); expect!(graph.edges()).to(be_equal_to(0)); } it "should add a weighted edge to a graph" { graph.add_edge(WeightedEdge::new(1, 2, 1)); expect!(graph.vertices()).to(be_equal_to(2)); expect!(graph.edges()).to(be_equal_to(1)); } it "should add weighted edges to a graph" { graph.add_edge(WeightedEdge::new(1, 2, 1)); graph.add_edge(WeightedEdge::new(2, 3, 1)); graph.add_edge(WeightedEdge::new(1, 4, 1)); expect!(graph.vertices()).to(be_equal_to(4)); expect!(graph.edges()).to(be_equal_to(3)); } it "should be adjacent to each other" { let edge = WeightedEdge::new(1, 2, 1); graph.add_edge(edge); expect!(graph.adjacent_to(1)).to(be_some().value(&vec![edge])); expect!(graph.adjacent_to(2)).to(be_some().value(&vec![edge])); } } describe! 
lazy_mst { it "should create a lazy mst" { let mut graph = EdgeWeightedGraph::default(); graph.add_edge(WeightedEdge::new(1, 2, 1)); graph.add_edge(WeightedEdge::new(2, 3, 1)); graph.add_edge(WeightedEdge::new(1, 4, 1)); expect!(LazyMst::new(&graph)).to(be_ok()); } it "should not create a lazy mst from an empty graph" { let graph = EdgeWeightedGraph::default(); expect!(LazyMst::new(&graph)).to(be_err()); } it "should be weight of 3" { let mut graph = EdgeWeightedGraph::default(); graph.add_edge(WeightedEdge::new(1, 2, 1)); graph.add_edge(WeightedEdge::new(2, 3, 1)); graph.add_edge(WeightedEdge::new(1, 4, 1)); let mst = LazyMst::new(&graph).unwrap(); expect!(mst.weight()).to(be_equal_to(3)); } it "should be weight of 2" { let mut graph = EdgeWeightedGraph::default(); graph.add_edge(WeightedEdge::new(1, 2, 1)); graph.add_edge(WeightedEdge::new(2, 3, 1)); graph.add_edge(WeightedEdge::new(1, 3, 1)); let mst = LazyMst::new(&graph).unwrap(); expect!(mst.weight()).to(be_equal_to(2)); } } }
31.40678
78
0.550459
fcbc41806ea3f692e65c2d6f1b9e1784465c03d4
14,338
use std::error; use std::fmt; use std::io; use std::result; /// A convenient type alias for `Result<T, snap::Error>`. pub type Result<T> = result::Result<T, Error>; /// `IntoInnerError` occurs when consuming a `Writer` fails. /// /// Consuming the `Writer` causes a flush to happen. If the flush fails, then /// this error is returned, which contains both the original `Writer` and /// the error that occurred. /// /// The type parameter `W` is the unconsumed writer. pub struct IntoInnerError<W> { wtr: W, err: io::Error, } /// Creates a new `IntoInnerError`. /// /// (This is a visibility hack. It's public in this module, but not in the /// crate.) pub fn new_into_inner_error<W>(wtr: W, err: io::Error) -> IntoInnerError<W> { IntoInnerError { wtr: wtr, err: err } } impl<W> IntoInnerError<W> { /// Returns the error which caused the call to `into_inner` to fail. /// /// This error was returned when attempting to flush the internal buffer. pub fn error(&self) -> &io::Error { &self.err } /// Returns the underlying writer which generated the error. /// /// The returned value can be used for error recovery, such as /// re-inspecting the buffer. pub fn into_inner(self) -> W { self.wtr } } impl<W: ::std::any::Any> error::Error for IntoInnerError<W> { fn description(&self) -> &str { self.err.description() } fn cause(&self) -> Option<&error::Error> { self.err.cause() } } impl<W> fmt::Display for IntoInnerError<W> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.err.fmt(f) } } impl<W> fmt::Debug for IntoInnerError<W> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { self.err.fmt(f) } } /// Error describes all the possible errors that may occur during Snappy /// compression or decompression. /// /// Note that it's unlikely that you'll need to care about the specific error /// reported since all of them indicate a corrupt Snappy data or a limitation /// that cannot be worked around. 
Therefore, /// `From<snap::Error> for std::io::Error` is provided so that any Snappy /// errors will be converted to a `std::io::Error` automatically when using /// `try!`. #[derive(Debug)] pub enum Error { /// This error occurs when the given input is too big. This can happen /// during compression or decompression. TooBig { /// The size of the given input. given: u64, /// The maximum allowed size of an input buffer. max: u64, }, /// This error occurs when the given buffer is too small to contain the /// maximum possible compressed bytes or the total number of decompressed /// bytes. BufferTooSmall { /// The size of the given output buffer. given: u64, /// The minimum size of the output buffer. min: u64, }, /// This error occurs when trying to decompress a zero length buffer. Empty, /// This error occurs when an invalid header is found during decompression. Header, /// This error occurs when there is a mismatch between the number of /// decompressed bytes reported in the header and the number of /// actual decompressed bytes. In this error case, the number of actual /// decompressed bytes is always less than the number reported in the /// header. HeaderMismatch { /// The total number of decompressed bytes expected (i.e., the header /// value). expected_len: u64, /// The total number of actual decompressed bytes. got_len: u64, }, /// This error occurs during decompression when there was a problem /// reading a literal. Literal { /// The expected length of the literal. len: u64, /// The number of remaining bytes in the compressed bytes. src_len: u64, /// The number of remaining slots in the decompression buffer. dst_len: u64, }, /// This error occurs during decompression when there was a problem /// reading a copy. CopyRead { /// The expected length of the copy (as encoded in the compressed /// bytes). len: u64, /// The number of remaining bytes in the compressed bytes. 
src_len: u64, }, /// This error occurs during decompression when there was a problem /// writing a copy to the decompression buffer. CopyWrite { /// The length of the copy (i.e., the total number of bytes to be /// produced by this copy in the decompression buffer). len: u64, /// The number of remaining bytes in the decompression buffer. dst_len: u64, }, /// This error occurs during decompression when an invalid copy offset /// is found. An offset is invalid if it is zero or if it is out of bounds. Offset { /// The offset that was read. offset: u64, /// The current position in the decompression buffer. If the offset is /// non-zero, then the offset must be greater than this position. dst_pos: u64, }, /// This error occurs when a stream header chunk type was expected but got /// a different chunk type. /// This error only occurs when reading a Snappy frame formatted stream. StreamHeader { /// The chunk type byte that was read. byte: u8, }, /// This error occurs when the magic stream headers bytes do not match /// what is expected. /// This error only occurs when reading a Snappy frame formatted stream. StreamHeaderMismatch { /// The bytes that were read. bytes: Vec<u8>, }, /// This error occurs when an unsupported chunk type is seen. /// This error only occurs when reading a Snappy frame formatted stream. UnsupportedChunkType { /// The chunk type byte that was read. byte: u8, }, /// This error occurs when trying to read a chunk with length greater than /// that supported by this library when reading a Snappy frame formatted /// stream. /// This error only occurs when reading a Snappy frame formatted stream. UnsupportedChunkLength { /// The length of the chunk encountered. len: u64, /// True when this error occured while reading the stream header. header: bool, }, /// This error occurs when a checksum validity check fails. /// This error only occurs when reading a Snappy frame formatted stream. Checksum { /// The expected checksum read from the stream. 
expected: u32, /// The computed checksum. got: u32, }, } impl From<Error> for io::Error { fn from(err: Error) -> io::Error { io::Error::new(io::ErrorKind::Other, err) } } impl Eq for Error {} /// This implementation of `PartialEq` returns `false` when comparing two /// errors whose underlying type is `std::io::Error`. impl PartialEq for Error { fn eq(&self, other: &Error) -> bool { use self::Error::*; match (self, other) { (&TooBig { given: given1, max: max1 }, &TooBig { given: given2, max: max2 }) => { (given1, max1) == (given2, max2) } (&BufferTooSmall { given: given1, min: min1 }, &BufferTooSmall { given: given2, min: min2 }) => { (given1, min1) == (given2, min2) } (&Empty, &Empty) | (&Header, &Header) => true, (&HeaderMismatch { expected_len: elen1, got_len: glen1 }, &HeaderMismatch { expected_len: elen2, got_len: glen2 }) => { (elen1, glen1) == (elen2, glen2) } (&Literal { len: len1, src_len: src_len1, dst_len: dst_len1 }, &Literal { len: len2, src_len: src_len2, dst_len: dst_len2 }) => { (len1, src_len1, dst_len1) == (len2, src_len2, dst_len2) } (&CopyRead { len: len1, src_len: src_len1 }, &CopyRead { len: len2, src_len: src_len2 }) => { (len1, src_len1) == (len2, src_len2) } (&CopyWrite { len: len1, dst_len: dst_len1 }, &CopyWrite { len: len2, dst_len: dst_len2 }) => { (len1, dst_len1) == (len2, dst_len2) } (&Offset { offset: offset1, dst_pos: dst_pos1 }, &Offset { offset: offset2, dst_pos: dst_pos2 }) => { (offset1, dst_pos1) == (offset2, dst_pos2) } (&StreamHeader { byte: byte1 }, &StreamHeader { byte: byte2 }) => { byte1 == byte2 } (&StreamHeaderMismatch { bytes: ref bytes1 }, &StreamHeaderMismatch { bytes: ref bytes2 }) => { bytes1 == bytes2 } (&UnsupportedChunkType { byte: byte1 }, &UnsupportedChunkType { byte: byte2 }) => { byte1 == byte2 } (&UnsupportedChunkLength { len: len1, header: header1 }, &UnsupportedChunkLength { len: len2, header: header2 }) => { (len1, header1) == (len2, header2) } (&Checksum { expected: e1, got: g1 }, &Checksum { expected: 
e2, got: g2 }) => { (e1, g1) == (e2, g2) } _ => false, } } } impl error::Error for Error { fn description(&self) -> &str { match *self { Error::TooBig { .. } => "snappy: input buffer too big", Error::BufferTooSmall { .. } => "snappy: output buffer too small", Error::Empty => "snappy: corrupt input (empty)", Error::Header => "snappy: corrupt input (invalid header)", Error::HeaderMismatch { .. } => "snappy: corrupt input \ (header mismatch)", Error::Literal { .. } => "snappy: corrupt input (bad literal)", Error::CopyRead { .. } => "snappy: corrupt input (bad copy read)", Error::CopyWrite { .. } => "snappy: corrupt input \ (bad copy write)", Error::Offset { .. } => "snappy: corrupt input (bad offset)", Error::StreamHeader { .. } => { "snappy: corrupt input (missing stream header)" } Error::StreamHeaderMismatch { .. } => { "snappy: corrupt input (stream header mismatch)" } Error::UnsupportedChunkType { .. } => { "snappy: corrupt input (unsupported chunk type)" } Error::UnsupportedChunkLength { header: false, .. } => { "snappy: corrupt input (unsupported chunk length)" } Error::UnsupportedChunkLength { header: true, .. } => { "snappy: corrupt input (invalid stream header)" } Error::Checksum { .. 
} => "snappy: corrupt input (bad checksum)", } } fn cause(&self) -> Option<&error::Error> { None } } impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Error::TooBig { given, max } => { write!(f, "snappy: input buffer (size = {}) is larger than \ allowed (size = {})", given, max) } Error::BufferTooSmall { given, min } => { write!(f, "snappy: output buffer (size = {}) is smaller than \ required (size = {})", given, min) } Error::Empty => { write!(f, "snappy: corrupt input (empty)") } Error::Header => { write!(f, "snappy: corrupt input (invalid header)") } Error::HeaderMismatch { expected_len, got_len } => { write!(f, "snappy: corrupt input (header mismatch; expected \ {} decompressed bytes but got {})", expected_len, got_len) } Error::Literal { len, src_len, dst_len } => { write!(f, "snappy: corrupt input (expected literal read of \ length {}; remaining src: {}; remaining dst: {})", len, src_len, dst_len) } Error::CopyRead { len, src_len } => { write!(f, "snappy: corrupt input (expected copy read of \ length {}; remaining src: {})", len, src_len) } Error::CopyWrite { len, dst_len } => { write!(f, "snappy: corrupt input (expected copy write of \ length {}; remaining dst: {})", len, dst_len) } Error::Offset { offset, dst_pos } => { write!(f, "snappy: corrupt input (expected valid offset but \ got offset {}; dst position: {})", offset, dst_pos) } Error::StreamHeader { byte } => { write!(f, "snappy: corrupt input (expected stream header but \ got unexpected chunk type byte {})", byte) } Error::StreamHeaderMismatch { ref bytes } => { write!(f, "snappy: corrupt input (expected sNaPpY stream \ header but got {})", escape(&**bytes)) } Error::UnsupportedChunkType { byte } => { write!(f, "snappy: corrupt input (unsupported chunk type: {})", byte) } Error::UnsupportedChunkLength { len, header: false } => { write!(f, "snappy: corrupt input \ (unsupported chunk length: {})", len) } Error::UnsupportedChunkLength { len, header: true 
} => { write!(f, "snappy: corrupt input \ (invalid stream header length: {})", len) } Error::Checksum { expected, got } => { write!(f, "snappy: corrupt input (bad checksum; \ expected: {}, got: {})", expected, got) } } } } fn escape(bytes: &[u8]) -> String { use std::ascii::escape_default; bytes.iter().flat_map(|&b| escape_default(b)).map(|b| b as char).collect() }
39.174863
79
0.547566
90dd7ec90e6051ec18d3b12816de33a808396f88
4,030
use macaw::{Mat2, UVec4, Vec2, Vec4}; #[repr(C)] #[derive(Copy, Clone)] #[cfg_attr(not(target_arch = "spirv"), derive(Debug))] pub struct MeshDescriptor { pub vertex_core_offset: u32, // position, normal packed in one pub vertex_uv_offset: u32, pub vertex_mat_offset: u32, pub vertex_aux_offset: u32, pub vertex_tangent_offset: u32, pub mat_data_offset: u32, pub index_offset: u32, } #[repr(C, align(16))] #[derive(Copy, Clone)] pub struct InstanceDynamicConstants { pub emissive_multiplier: f32, } #[derive(Clone, Copy)] #[repr(C)] #[cfg_attr(not(target_arch = "spirv"), derive(Debug))] pub struct TextureMaps(UVec4); impl TextureMaps { #[inline(always)] pub fn normal(&self) -> usize { self.0.x as usize } #[inline(always)] pub fn metallic_roughness(&self) -> usize { self.0.y as usize } #[inline(always)] pub fn albedo(&self) -> usize { self.0.z as usize } #[inline(always)] pub fn emissive(&self) -> usize { self.0.w as usize } } #[derive(Clone, Copy, Default)] pub struct TextureMapsBuilder(UVec4); impl TextureMapsBuilder { pub fn new() -> Self { Default::default() } pub fn with_normal(mut self, normal: u32) -> Self { self.0.x = normal; self } pub fn with_metallic_roughness(mut self, metallic_roughness: u32) -> Self { self.0.y = metallic_roughness; self } pub fn with_albedo(mut self, albedo: u32) -> Self { self.0.z = albedo; self } pub fn with_emissive(mut self, emissive: u32) -> Self { self.0.w = emissive; self } pub fn build(self) -> TextureMaps { TextureMaps(self.0) } } #[derive(Clone, Copy)] #[repr(C)] #[cfg_attr(not(target_arch = "spirv"), derive(Debug))] pub struct MaterialDescriptor { pub base_color_mult: Vec4, pub maps: TextureMaps, pub roughness_mult: f32, pub metalness_factor: f32, pub emissive: Vec4, pub flags: u32, pub map_transforms: [[f32; 6]; 4], } impl MaterialDescriptor { pub fn load(data: &[u32], byte_offset: u32) -> Self { let offset = (byte_offset >> 2) as usize; let base_color_mult = load_vec4(data, offset); let maps = TextureMaps(UVec4::new( data[offset 
+ 4], data[offset + 5], data[offset + 6], data[offset + 7], )); let roughness_mult = f32::from_bits(data[offset + 8]); let metalness_factor = f32::from_bits(data[offset + 9]); let emissive = load_vec4(data, offset + 10); let flags = data[offset + 15]; let map_transforms = load_map_transforms(data, offset + 16); Self { base_color_mult, maps, roughness_mult, metalness_factor, emissive, flags, map_transforms, } } pub fn transform_uv(&self, uv: Vec2, map_idx: usize) -> Vec2 { let mat = &self.map_transforms[map_idx]; let rot_scl: Mat2 = Mat2::from_cols(Vec2::new(mat[0], mat[2]), Vec2::new(mat[1], mat[3])); let offset: Vec2 = Vec2::new(mat[4], mat[5]); rot_scl * uv + offset } } fn load_vec4(data: &[u32], offset: usize) -> Vec4 { Vec4::new( f32::from_bits(data[offset]), f32::from_bits(data[offset + 1]), f32::from_bits(data[offset + 2]), f32::from_bits(data[offset + 3]), ) } fn load_f32_6(data: &[u32], offset: usize) -> [f32; 6] { [ f32::from_bits(data[offset]), f32::from_bits(data[offset + 1]), f32::from_bits(data[offset + 2]), f32::from_bits(data[offset + 3]), f32::from_bits(data[offset + 4]), f32::from_bits(data[offset + 5]), ] } fn load_map_transforms(data: &[u32], offset: usize) -> [[f32; 6]; 4] { [ load_f32_6(data, offset), load_f32_6(data, offset + 6), load_f32_6(data, offset + 12), load_f32_6(data, offset + 18), ] }
25.66879
98
0.57866
642c7cdc7e93d161ba98d009433bb6aa17ff172a
803
/* Copyright 2020 Takashi Ogura Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ //! Joint related structs #[allow(clippy::module_inception)] mod joint; mod joint_type; mod mimic; mod range; mod velocity; pub use joint::*; pub use joint_type::*; pub use mimic::*; pub use range::*; pub use velocity::*;
27.689655
74
0.744707
76def79f93414af7fa63d63b1f5d4cad3c76aaac
1,182
// Copyright © 2021 VMware, Inc. All Rights Reserved. // SPDX-License-Identifier: BSD-2-Clause //! PVRDMA device for VMs //! //! # Additional Documentation //! - https://github.com/qemu/qemu/blob/master/docs/pvrdma.txt //! - https://blog.linuxplumbersconf.org/2017/ocw/system/presentations/4730/original/lpc-2017-pvrdma-marcel-apfelbaum-yuval-shaia.pdf //! mod defs; mod pci; use alloc::boxed::Box; use custom_error::custom_error; use pci::BarAccess; custom_error! {pub PVRDMAError DeviceNotSupported = "Unknown device/version", InterruptModeNotSupported = "Device requested an interrupt mode that is not supported by driver", OutOfMemory = "Unable to allocate raw memory.", } pub struct PVRDMA { pci: BarAccess, /// Is link active? link_active: bool, } impl PVRDMA { pub fn new(nrx: usize, trx: usize) -> Result<Box<PVRDMA>, PVRDMAError> { Err(PVRDMAError::OutOfMemory) } } /* static inline void pvrdma_write_reg(struct pvrdma_dev *dev, u32 reg, u32 val) { writel(cpu_to_le32(val), dev->regs + reg); } static inline u32 pvrdma_read_reg(struct pvrdma_dev *dev, u32 reg) { return le32_to_cpu(readl(dev->regs + reg)); } */
23.64
133
0.70643
08a602dc1077d8933ccd3c5a23b203f50a72a7c1
22,264
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ mod scope; use super::get_applied_fragment_name; use crate::{ match_::SplitOperationMetadata, no_inline::{is_raw_response_type_enabled, NO_INLINE_DIRECTIVE_NAME, PARENT_DOCUMENTS_ARG}, util::get_normalization_operation_name, }; use common::{Diagnostic, DiagnosticsResult, FeatureFlag, NamedItem, WithLocation}; use fnv::{FnvHashMap, FnvHashSet}; use graphql_ir::{ Condition, ConditionValue, ConstantValue, Directive, FragmentDefinition, FragmentSpread, InlineFragment, OperationDefinition, Program, Selection, Transformed, TransformedMulti, TransformedValue, Transformer, ValidationMessage, Value, Variable, VariableDefinition, }; use graphql_syntax::OperationKind; use indexmap::IndexMap; use intern::string_key::{Intern, StringKey}; use itertools::Itertools; use scope::{format_local_variable, Scope}; use std::sync::Arc; /// A transform that converts a set of documents containing fragments/fragment /// spreads *with* arguments to one where all arguments have been inlined. This /// is effectively static currying of functions. Nodes are changed as follows: /// - Fragment spreads with arguments are replaced with references to an inlined /// version of the referenced fragment. /// - Fragments with argument definitions are cloned once per unique set of /// arguments, with the name changed to original name + hash and all nested /// variable references changed to the value of that variable given its /// arguments. /// - Field & directive argument variables are replaced with the value of those /// variables in context. /// - Definitions of provided variables are added to the root operation. /// - All nodes are cloned with updated children. 
/// /// The transform also handles statically passing/failing Condition nodes: /// - Literal Conditions with a passing value are elided and their selections /// inlined in their parent. /// - Literal Conditions with a failing value are removed. /// - Nodes that would become empty as a result of the above are removed. /// /// Note that unreferenced fragments are not added to the output. pub fn apply_fragment_arguments( program: &Program, is_normalization: bool, no_inline_feature: &FeatureFlag, base_fragment_names: &FnvHashSet<StringKey>, ) -> DiagnosticsResult<Program> { let mut transform = ApplyFragmentArgumentsTransform { base_fragment_names, errors: Vec::new(), fragments: Default::default(), is_normalization, no_inline_feature, program, provided_variables: Default::default(), scope: Default::default(), split_operations: Default::default(), }; let mut next_program = transform .transform_program(program) .replace_or_else(|| program.clone()); for (fragment_name, used_fragment) in transform.fragments { match used_fragment { PendingFragment::Resolved(Some(fragment)) => next_program.insert_fragment(fragment), PendingFragment::Resolved(None) => { // The fragment ended up empty, do not add to result Program. 
} PendingFragment::Pending => panic!("Unexpected case, {}", fragment_name), } } for (_, operation) in transform.split_operations { next_program.insert_operation(Arc::new(operation)); } if transform.errors.is_empty() { Ok(next_program) } else { Err(transform.errors) } } #[derive(Debug)] enum PendingFragment { Pending, Resolved(Option<Arc<FragmentDefinition>>), } struct ApplyFragmentArgumentsTransform<'flags, 'program, 'base_fragments> { base_fragment_names: &'base_fragments FnvHashSet<StringKey>, errors: Vec<Diagnostic>, fragments: FnvHashMap<StringKey, PendingFragment>, is_normalization: bool, no_inline_feature: &'flags FeatureFlag, program: &'program Program, provided_variables: IndexMap<StringKey, VariableDefinition>, scope: Scope, split_operations: FnvHashMap<StringKey, OperationDefinition>, } impl Transformer for ApplyFragmentArgumentsTransform<'_, '_, '_> { const NAME: &'static str = "ApplyFragmentArgumentsTransform"; const VISIT_ARGUMENTS: bool = true; const VISIT_DIRECTIVES: bool = true; fn transform_operation( &mut self, operation: &OperationDefinition, ) -> Transformed<OperationDefinition> { self.scope = Scope::root_scope(); self.provided_variables = Default::default(); let transform_result = self.default_transform_operation(operation); if self.provided_variables.is_empty() { transform_result } else { match transform_result { Transformed::Keep => { let mut new_operation = operation.clone(); new_operation.variable_definitions.append( &mut self .provided_variables .drain(..) .map(|(_, definition)| definition) .collect_vec(), ); Transformed::Replace(new_operation) } Transformed::Replace(mut new_operation) => { new_operation.variable_definitions.append( &mut self .provided_variables .drain(..) 
.map(|(_, definition)| definition) .collect_vec(), ); Transformed::Replace(new_operation) } Transformed::Delete => Transformed::Delete, } } } fn transform_fragment( &mut self, fragment: &FragmentDefinition, ) -> Transformed<FragmentDefinition> { if self.is_normalization { let no_inline_directive = fragment.directives.named(*NO_INLINE_DIRECTIVE_NAME); if let Some(no_inline_directive) = no_inline_directive { self.transform_no_inline_fragment(fragment, no_inline_directive); } } // Non-inlined fragments are promoted to operations; other fragments are deleted // unless they are referenced Transformed::Delete } fn transform_fragment_spread(&mut self, spread: &FragmentSpread) -> Transformed<Selection> { let fragment = self .program .fragment(spread.fragment.item) .unwrap_or_else(|| { panic!( "Tried to spread missing fragment: `{}`.", spread.fragment.item ); }); if self.is_normalization { if let Some(directive) = fragment.directives.named(*NO_INLINE_DIRECTIVE_NAME) { let transformed_arguments = spread .arguments .iter() .map(|arg| { let mut arg = self.transform_argument(arg).unwrap_or_else(|| arg.clone()); arg.name.item = format_local_variable(fragment.name.item, arg.name.item); arg }) .collect(); let mut directives = Vec::with_capacity(spread.directives.len() + 1); directives.extend(spread.directives.iter().cloned()); directives.push(directive.clone()); let normalization_name = get_normalization_operation_name(fragment.name.item).intern(); let next_spread = Selection::FragmentSpread(Arc::new(FragmentSpread { arguments: transformed_arguments, directives, fragment: WithLocation::new(fragment.name.location, normalization_name), })); // If the fragment type is abstract, we need to ensure that it's only evaluated at runtime if the // type of the object matches the fragment's type condition. 
Rather than reimplement type refinement // for fragment spreads, we wrap the fragment spread in an inlinefragment (which may be inlined away) // that ensures it will go through type-refinement at runtime. return if fragment.type_condition.is_abstract_type() { Transformed::Replace(Selection::InlineFragment(Arc::new(InlineFragment { directives: Default::default(), selections: vec![next_spread], type_condition: Some(fragment.type_condition), }))) } else { Transformed::Replace(next_spread) }; } } if let Some(applied_fragment) = self.apply_fragment(spread, fragment) { let directives = self .transform_directives(&spread.directives) .replace_or_else(|| spread.directives.clone()); Transformed::Replace(Selection::FragmentSpread(Arc::new(FragmentSpread { fragment: applied_fragment.name, arguments: Vec::new(), directives, }))) } else { Transformed::Delete } } fn transform_selections( &mut self, selections: &[Selection], ) -> TransformedValue<Vec<Selection>> { self.transform_list_multi(selections, Self::transform_selection_multi) } fn transform_value(&mut self, value: &Value) -> TransformedValue<Value> { match value { Value::Variable(prev_variable) => { if let Some(scope_value) = self.scope.get(prev_variable.name.item) { match scope_value { Value::Variable(replacement_variable) => { TransformedValue::Replace(Value::Variable(Variable { // Update the name/location to the applied variable name name: replacement_variable.name, // But keep the type of the previous variable, which reflects the type // expected at this location type_: prev_variable.type_.clone(), })) } _ => TransformedValue::Replace(scope_value.clone()), } } else { // Assume a global variable if the variable has no local // bindings. 
TransformedValue::Keep } } Value::Constant(_) => TransformedValue::Keep, Value::List(items) => self .transform_list(items, Self::transform_value) .map(Value::List), Value::Object(arguments) => self.transform_arguments(arguments).map(Value::Object), } } fn transform_condition_value( &mut self, condition_value: &ConditionValue, ) -> TransformedValue<ConditionValue> { match condition_value { ConditionValue::Variable(prev_variable) => { match self.scope.get(prev_variable.name.item) { Some(Value::Variable(replacement_variable)) => { TransformedValue::Replace(ConditionValue::Variable(Variable { // Update the name/location to the applied variable name name: replacement_variable.name, // But keep the type of the previous variable, which reflects the type // expected at this location type_: prev_variable.type_.clone(), })) } Some(Value::Constant(ConstantValue::Boolean(constant_value))) => { TransformedValue::Replace(ConditionValue::Constant(*constant_value)) } None => { // Assume a global variable if the variable has no local // bindings. 
TransformedValue::Keep } Some(other_binding) => { panic!("Invalid variable value for condition: {:?}", other_binding); } } } ConditionValue::Constant(_) => TransformedValue::Keep, } } } impl ApplyFragmentArgumentsTransform<'_, '_, '_> { fn transform_no_inline_fragment( &mut self, fragment: &FragmentDefinition, directive: &Directive, ) { // We do not need to to write normalization files for base fragments if self.base_fragment_names.contains(&fragment.name.item) { return; } if !self.no_inline_feature.is_enabled_for(fragment.name.item) { self.errors.push(Diagnostic::error( format!( "Invalid usage of @no_inline on fragment '{}': this feature is gated and currently set to: {}", fragment.name.item, self.no_inline_feature ), directive.name.location, )); } self.scope = no_inline_fragment_scope(fragment); let fragment = self .default_transform_fragment(fragment) .unwrap_or_else(|| fragment.clone()); let FragmentDefinition { name, mut directives, mut variable_definitions, selections, type_condition, .. } = fragment; for variable in &mut variable_definitions { variable.name.item = format_local_variable(fragment.name.item, variable.name.item); } let mut metadata = SplitOperationMetadata { derived_from: fragment.name.item, parent_documents: Default::default(), raw_response_type: is_raw_response_type_enabled(directive), }; // - A fragment with user defined @no_inline always produces a $normalization file. The `parent_document` of // that file is the fragment itself as it gets deleted iff that fragment is deleted or no longer // has the @no_inline directive. 
// - A fragment with @no_inline generated by @module, `parent_documents` also include fragments that // spread the current fragment with @module metadata.parent_documents.insert(fragment.name.item); let parent_documents_arg = directive.arguments.named(*PARENT_DOCUMENTS_ARG); if let Some(Value::Constant(ConstantValue::List(parent_documents))) = parent_documents_arg.map(|arg| &arg.value.item) { for val in parent_documents { if let ConstantValue::String(name) = val { metadata.parent_documents.insert(*name); } else { panic!("Expected item in the parent_documents to be a StringKey.") } } } directives.push(metadata.to_directive()); let normalization_name = get_normalization_operation_name(name.item).intern(); let operation = OperationDefinition { name: WithLocation::new(name.location, normalization_name), type_: type_condition, variable_definitions, directives, selections, kind: OperationKind::Query, }; if self.program.operation(normalization_name).is_some() { self.errors.push(Diagnostic::error( format!( "Invalid usage of @no_inline on fragment '{}' - @no_inline is only allowed on allowlisted fragments loaded with @module", fragment.name.item, ), directive.name.location, )); } self.split_operations.insert(fragment.name.item, operation); } fn extract_provided_variables(&mut self, fragment: &FragmentDefinition) { let provided_arguments = fragment .used_global_variables .iter() .filter(|variable_definition| { variable_definition .directives .named(*graphql_ir::PROVIDER_MODULE) .is_some() }); for definition in provided_arguments { self.provided_variables .insert(fragment.name.item, definition.clone()); } } fn apply_fragment( &mut self, spread: &FragmentSpread, fragment: &FragmentDefinition, ) -> Option<Arc<FragmentDefinition>> { let transformed_arguments = self .transform_arguments(&spread.arguments) .replace_or_else(|| spread.arguments.clone()); let applied_fragment_name = get_applied_fragment_name(spread.fragment.item, &transformed_arguments); if let Some(applied_fragment) 
= self.fragments.get(&applied_fragment_name) { return match applied_fragment { PendingFragment::Resolved(resolved) => resolved.clone(), PendingFragment::Pending => { let mut error = Diagnostic::error( ValidationMessage::CircularFragmentReference { fragment_name: spread.fragment.item, }, spread.fragment.location, ); for location in self.scope.locations() { error = error.annotate("other member of the cycle", location); } self.errors.push(error); None } }; } self.fragments .insert(applied_fragment_name, PendingFragment::Pending); self.scope .push(spread.fragment.location, &transformed_arguments, fragment); self.extract_provided_variables(fragment); let selections = self .transform_selections(&fragment.selections) .replace_or_else(|| fragment.selections.clone()); let transformed_fragment = if selections.is_empty() { None } else { Some(Arc::new(FragmentDefinition { name: WithLocation::new(fragment.name.location, applied_fragment_name), variable_definitions: Vec::new(), type_condition: fragment.type_condition, // TODO update globals used_global_variables: Vec::new(), directives: fragment.directives.clone(), selections, })) }; self.fragments.insert( applied_fragment_name, PendingFragment::Resolved(transformed_fragment.clone()), ); self.scope.pop(); transformed_fragment } fn transform_selection_multi(&mut self, selection: &Selection) -> TransformedMulti<Selection> { match selection { Selection::FragmentSpread(selection) => { self.transform_fragment_spread(selection).into() } Selection::InlineFragment(selection) => { self.transform_inline_fragment(selection).into() } Selection::LinkedField(selection) => self.transform_linked_field(selection).into(), Selection::ScalarField(selection) => self.transform_scalar_field(selection).into(), Selection::Condition(selection) => self.transform_condition_multi(selection), } } fn transform_condition_multi(&mut self, condition: &Condition) -> TransformedMulti<Selection> { let condition_value = 
self.transform_condition_value(&condition.value); // If we replace with a constant condition, remove the condition node. if let TransformedValue::Replace(ConditionValue::Constant(const_condition_value)) = condition_value { return if const_condition_value == condition.passing_value { let selections = self .transform_selections(&condition.selections) .replace_or_else(|| condition.selections.clone()); TransformedMulti::ReplaceMultiple(selections) } else { TransformedMulti::Delete }; } // If selections are empty, delete let selections = self.transform_selections(&condition.selections); if let TransformedValue::Replace(selections) = &selections { if selections.is_empty() { return TransformedMulti::Delete; } } if selections.should_keep() && condition_value.should_keep() { TransformedMulti::Keep } else { TransformedMulti::Replace(Selection::Condition(Arc::new(Condition { value: condition_value.replace_or_else(|| condition.value.clone()), selections: selections.replace_or_else(|| condition.selections.clone()), ..condition.clone() }))) } } } fn no_inline_fragment_scope(fragment: &FragmentDefinition) -> Scope { let mut bindings = FnvHashMap::with_capacity_and_hasher( fragment.variable_definitions.len(), Default::default(), ); for variable_definition in &fragment.variable_definitions { let variable_name = variable_definition.name.item; let scoped_variable_name = format_local_variable(fragment.name.item, variable_name); bindings.insert( variable_name, Value::Variable(Variable { name: WithLocation::new(variable_definition.name.location, scoped_variable_name), type_: variable_definition.type_.clone(), }), ); } let mut scope = Scope::root_scope(); scope.push_bindings(fragment.name.location, bindings); scope }
41.614953
141
0.585879
ed26b7e6e4bf6118eddf12a6932b84bae0bc204b
11,604
// Copyright 2021 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::thread; use std::time::Duration; use crate::common::{parse_file_to_u64, set_epp}; use crate::memory::{ calculate_available_memory_kb, calculate_reserved_free_kb, parse_margins, parse_meminfo, parse_psi_memory, total_mem_to_margins_bps, MemInfo, }; use crate::gpu_freq_scaling::amd_device::AmdDeviceConfig; use crate::gpu_freq_scaling::{evaluate_gpu_frequency, init_gpu_params, init_gpu_scaling_thread}; use tempfile::TempDir; #[test] fn test_parse_file_to_u64() { assert_eq!( parse_file_to_u64("123".to_string().as_bytes()).unwrap(), 123 ); assert_eq!( parse_file_to_u64("456\n789".to_string().as_bytes()).unwrap(), 456 ); assert!(parse_file_to_u64("".to_string().as_bytes()).is_err()); assert!(parse_file_to_u64("abc".to_string().as_bytes()).is_err()); } #[test] fn test_calculate_reserved_free_kb() { let mock_partial_zoneinfo = r#" Node 0, zone DMA pages free 3968 min 137 low 171 high 205 spanned 4095 present 3999 managed 3976 protection: (0, 1832, 3000, 3786) Node 0, zone DMA32 pages free 422432 min 16270 low 20337 high 24404 spanned 1044480 present 485541 managed 469149 protection: (0, 0, 1953, 1500) Node 0, zone Normal pages free 21708 min 17383 low 21728 high 26073 spanned 524288 present 524288 managed 501235 protection: (0, 0, 0, 0)"#; let page_size_kb = 4; let high_watermarks = 205 + 24404 + 26073; let lowmem_reserves = 3786 + 1953; let reserved = calculate_reserved_free_kb(mock_partial_zoneinfo.as_bytes()).unwrap(); assert_eq!(reserved, (high_watermarks + lowmem_reserves) * page_size_kb); } #[test] fn test_parse_psi_memory() { let mock_psi_memory = r#" some avg10=57.25 avg60=35.97 avg300=10.18 total=32748793 full avg10=29.29 avg60=19.01 avg300=5.44 total=17589167"#; let pressure = 
parse_psi_memory(mock_psi_memory.as_bytes()).unwrap(); assert!((pressure - 57.25).abs() < f64::EPSILON); } #[test] fn test_parse_meminfo() { let mock_meminfo = r#" MemTotal: 8025656 kB MemFree: 4586928 kB MemAvailable: 6704404 kB Buffers: 659640 kB Cached: 1949056 kB SwapCached: 0 kB Active: 1430416 kB Inactive: 1556968 kB Active(anon): 489640 kB Inactive(anon): 29188 kB Active(file): 940776 kB Inactive(file): 1527780 kB Unevictable: 151128 kB Mlocked: 41008 kB SwapTotal: 11756332 kB SwapFree: 11756332 kB Dirty: 5712 kB Writeback: 0 kB AnonPages: 529800 kB Mapped: 321468 kB Shmem: 140156 kB Slab: 169252 kB SReclaimable: 115540 kB SUnreclaim: 53712 kB KernelStack: 7072 kB PageTables: 13340 kB NFS_Unstable: 0 kB Bounce: 0 kB WritebackTmp: 0 kB CommitLimit: 15769160 kB Committed_AS: 2483600 kB VmallocTotal: 34359738367 kB VmallocUsed: 0 kB VmallocChunk: 0 kB Percpu: 2464 kB AnonHugePages: 40960 kB ShmemHugePages: 0 kB ShmemPmdMapped: 0 kB DirectMap4k: 170216 kB DirectMap2M: 5992448 kB DirectMap1G: 3145728 kB"#; let meminfo = parse_meminfo(mock_meminfo.as_bytes()).unwrap(); assert_eq!(meminfo.free, 4586928); assert_eq!(meminfo.active_anon, 489640); assert_eq!(meminfo.inactive_anon, 29188); assert_eq!(meminfo.active_file, 940776); assert_eq!(meminfo.inactive_file, 1527780); assert_eq!(meminfo.dirty, 5712); assert_eq!(meminfo.swap_free, 11756332); } #[test] fn test_calculate_available_memory_kb() { let mut info = MemInfo::default(); let min_filelist = 400 * 1024; let reserved_free = 0; let ram_swap_weight = 4; // Available determined by file cache. info.active_file = 500 * 1024; info.inactive_file = 500 * 1024; info.dirty = 10 * 1024; let file = info.active_file + info.inactive_file; let available = calculate_available_memory_kb(&info, reserved_free, min_filelist, ram_swap_weight); assert_eq!(available, file - min_filelist - info.dirty); // Available determined by swap free. 
info.swap_free = 1200 * 1024; info.active_anon = 1000 * 1024; info.inactive_anon = 1000 * 1024; info.active_file = 0; info.inactive_file = 0; info.dirty = 0; let available = calculate_available_memory_kb(&info, reserved_free, min_filelist, ram_swap_weight); assert_eq!(available, info.swap_free / ram_swap_weight); // Available determined by anonymous. info.swap_free = 6000 * 1024; info.active_anon = 500 * 1024; info.inactive_anon = 500 * 1024; let anon = info.active_anon + info.inactive_anon; let available = calculate_available_memory_kb(&info, reserved_free, min_filelist, ram_swap_weight); assert_eq!(available, anon / ram_swap_weight); // When ram_swap_weight is 0, swap is ignored in available. info.swap_free = 1200 * 1024; info.active_anon = 1000 * 1024; info.inactive_anon = 1000 * 1024; info.active_file = 500 * 1024; info.inactive_file = 500 * 1024; let file = info.active_file + info.inactive_file; let ram_swap_weight = 0; let available = calculate_available_memory_kb(&info, reserved_free, min_filelist, ram_swap_weight); assert_eq!(available, file - min_filelist); } #[test] fn test_parse_margins() { assert!(parse_margins("".to_string().as_bytes()).is_err()); assert!(parse_margins("123 4a6".to_string().as_bytes()).is_err()); assert!(parse_margins("123.2 412.3".to_string().as_bytes()).is_err()); assert!(parse_margins("123".to_string().as_bytes()).is_err()); let margins = parse_margins("123 456".to_string().as_bytes()).unwrap(); assert_eq!(margins.len(), 2); assert_eq!(margins[0], 123); assert_eq!(margins[1], 456); } #[test] fn test_bps_to_margins_bps() { let (critical, moderate) = total_mem_to_margins_bps( 100000, /* 100mb */ 1200, /* 12% */ 3600, /* 36% */ ); assert_eq!(critical, 12000 /* 12mb */); assert_eq!(moderate, 36000 /* 36mb */); let (critical, moderate) = total_mem_to_margins_bps( 1000000, /* 1000mb */ 1250, /* 12.50% */ 7340, /* 73.4% */ ); assert_eq!(critical, 125000 /* 125mb */); assert_eq!(moderate, 734000 /* 734mb */); } #[test] fn test_set_epp() { 
let dir = TempDir::new().unwrap(); // Create the fake sysfs paths in temp directory let mut tpb0 = dir.path().to_owned(); tpb0.push("sys/devices/system/cpu/cpufreq/policy0/"); // let dirpath_str0 = tpb0.clone().into_os_string().into_string().unwrap(); std::fs::create_dir_all(&tpb0).unwrap(); let mut tpb1 = dir.path().to_owned(); tpb1.push("sys/devices/system/cpu/cpufreq/policy1/"); std::fs::create_dir_all(&tpb1).unwrap(); tpb0.push("energy_performance_preference"); tpb1.push("energy_performance_preference"); // Create energy_performance_preference files. std::fs::write(&tpb0, "balance_performance").unwrap(); std::fs::write(&tpb1, "balance_performance").unwrap(); // Set the EPP set_epp(dir.path().to_str().unwrap(), "179").unwrap(); // Verify that files were written assert_eq!(std::fs::read_to_string(&tpb0).unwrap(), "179".to_string()); assert_eq!(std::fs::read_to_string(&tpb1).unwrap(), "179".to_string()); } #[test] fn test_amd_device_true() { let mock_cpuinfo = r#" processor : 0 vendor_id : AuthenticAMD cpu family : 23 model : 24"#; assert!(AmdDeviceConfig::has_amd_tag_in_cpu_info( mock_cpuinfo.as_bytes() )); } #[test] fn test_amd_device_false() { // Incorrect vendor ID let mock_cpuinfo = r#" processor : 0 vendor_id : GenuineIntel cpu family : 23 model : 24"#; assert!(!AmdDeviceConfig::has_amd_tag_in_cpu_info( mock_cpuinfo.as_bytes() )); // missing vendor ID assert!(!AmdDeviceConfig::has_amd_tag_in_cpu_info( "".to_string().as_bytes() )); assert!(!AmdDeviceConfig::has_amd_tag_in_cpu_info( "processor: 0".to_string().as_bytes() )); } #[test] fn test_amd_parse_sclk_valid() { let dev: AmdDeviceConfig = AmdDeviceConfig::new("mock_file", "mock_sclk"); // trailing space is intentional, reflects sysfs output. 
let mock_sclk = r#" 0: 200Mhz 1: 700Mhz * 2: 1400Mhz "#; let (sclk, sel) = dev.parse_sclk(mock_sclk.as_bytes()).unwrap(); assert_eq!(1, sel); assert_eq!(3, sclk.len()); assert_eq!(200, sclk[0]); assert_eq!(700, sclk[1]); assert_eq!(1400, sclk[2]); } #[test] fn test_amd_parse_sclk_invalid() { let dev: AmdDeviceConfig = AmdDeviceConfig::new("mock_file", "mock_sclk"); // trailing space is intentional, reflects sysfs output. let mock_sclk = r#" 0: nonint 1: 700Mhz * 2: 1400Mhz "#; assert!(dev.parse_sclk(mock_sclk.as_bytes()).is_err()); assert!(dev.parse_sclk("nonint".to_string().as_bytes()).is_err()); assert!(dev.parse_sclk("0: 1400 ".to_string().as_bytes()).is_err()); assert!(dev.parse_sclk("0: 1400 *".to_string().as_bytes()).is_err()); assert!(dev .parse_sclk("x: nonint *".to_string().as_bytes()) .is_err()); } #[test] fn test_amd_device_filter_pass() { let dev: AmdDeviceConfig = AmdDeviceConfig::new("mock_file", "mock_sclk"); let mock_cpuinfo = r#" processor : 0 vendor_id : AuthenticAMD cpu family : 23 model : 24 model name : AMD Ryzen 7 3700C with Radeon Vega Mobile Gfx stepping : 1 microcode : 0x8108109"#; assert!(dev .is_supported_dev_family(mock_cpuinfo.as_bytes()) .unwrap()); assert!(dev .is_supported_dev_family("model name : AMD Ryzen 5 3700C".as_bytes()) .unwrap()); } #[test] fn test_amd_device_filter_fail() { let dev: AmdDeviceConfig = AmdDeviceConfig::new("mock_file", "mock_sclk"); let mock_cpuinfo = r#" processor : 0 vendor_id : AuthenticAMD cpu family : 23 model : 24 model name : AMD Ryzen 3 3700C with Radeon Vega Mobile Gfx stepping : 1 microcode : 0x8108109"#; assert!(!dev .is_supported_dev_family(mock_cpuinfo.as_bytes()) .unwrap()); assert!(!dev .is_supported_dev_family("model name : AMD Ryzen 5 2700C".as_bytes()) .unwrap()); assert!(!dev .is_supported_dev_family("model name : AMD Ryzen 3 3700C".as_bytes()) .unwrap()); assert!(!dev .is_supported_dev_family("model name : malformed".as_bytes()) .unwrap()); 
assert!(!dev.is_supported_dev_family("".as_bytes()).unwrap()); } #[test] #[allow(unused_must_use)] fn test_gpu_thread_on_off() { println!("test gpu thread"); let config = init_gpu_params().unwrap(); // TODO: break this function to make is unit testable evaluate_gpu_frequency(&config, 150); let game_mode_on = Arc::new(AtomicBool::new(true)); let game_mode_on_clone = Arc::clone(&game_mode_on); init_gpu_scaling_thread(game_mode_on_clone, 1000); thread::sleep(Duration::from_millis(500)); game_mode_on.store(false, Ordering::Relaxed); thread::sleep(Duration::from_millis(500)); println!("gpu thread exit gracefully"); }
30.536842
96
0.654516
5d96d57f635573650465095a4e66572969919371
26,007
use std::borrow::Cow; use std::collections::HashMap; use std::iter::FromIterator; use std::ops::Range; use failure::Error; use crate::psd_channel::PsdChannelCompression; use crate::psd_channel::PsdChannelKind; use crate::sections::image_data_section::ChannelBytes; use crate::sections::layer_and_mask_information_section::container::NamedItems; use crate::sections::layer_and_mask_information_section::layer::{GroupDivider, LayerChannels, LayerRecord, PsdGroup, PsdLayer}; use crate::sections::PsdCursor; /// One of the possible additional layer block signatures const SIGNATURE_EIGHT_BIM: [u8; 4] = [56, 66, 73, 77]; /// One of the possible additional layer block signatures const SIGNATURE_EIGHT_B64: [u8; 4] = [56, 66, 54, 52]; /// Additional Layer Information constants. /// Key of `Unicode layer name (Photoshop 5.0)`, "luni" const KEY_UNICODE_LAYER_NAME: &[u8; 4] = b"luni"; /// Key of `Section divider setting (Photoshop 6.0)`, "lsct" const KEY_SECTION_DIVIDER_SETTING: &[u8; 4] = b"lsct"; pub mod layer; pub mod container; /// The LayerAndMaskInformationSection comes from the bytes in the fourth section of the PSD. /// /// When possible we'll make the data easier to work with by storing it structures such as HashMaps. /// /// # Note /// /// We do not currently store all of the information that is present in the layer and mask /// information section of the PSD. If something that you need is missing please open an issue. /// /// # [Adobe Docs](https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/) /// /// The fourth section of a Photoshop file contains information about layers and masks. This section of the document describes the formats of layer and mask records. /// /// The complete merged image data is not stored here. The complete merged/composite image resides in the last section of the file. See See Image Data Section. If maximize compatibility is unchecked then the merged/composite is not created and the layer data must be read to reproduce the final image. 
/// /// See Layer and mask information section shows the overall structure of this section. If there are no layers or masks, this section is just 4 bytes: the length field, which is set to zero. (**PSB** length is 8 bytes /// /// 'Layr', 'Lr16' and 'Lr32' start at See Layer info. NOTE: The length of the section may already be known.) /// /// When parsing this section pay close attention to the length of sections. /// /// | Length | Description | /// |----------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| /// | 4 | Length of the layer and mask information section.<br> (**PSB** length is 8 bytes.) | /// | Variable | Layer info (see See Layer info for details). | /// | Variable | Global layer mask info (see See Global layer mask info for details). | /// | Variable | (Photoshop 4.0 and later) <br> Series of tagged blocks containing various types of data. See See Additional Layer Information for the list of the types of data that can be included here. | #[derive(Debug)] pub struct LayerAndMaskInformationSection { pub(crate) layers: NamedItems<PsdLayer>, pub(crate) groups: NamedItems<PsdGroup>, } /// Frame represents a group stack frame #[derive(Debug)] struct Frame { start_idx: usize, name: String, group_id: u32, parent_group_id: u32, } impl LayerAndMaskInformationSection { /// Create a LayerAndMaskInformationSection from the bytes in the corresponding section in a /// PSD file (including the length marker). pub fn from_bytes( bytes: &[u8], psd_width: u32, psd_height: u32, ) -> Result<LayerAndMaskInformationSection, Error> { let mut cursor = PsdCursor::new(bytes); // The first four bytes of the section is the length marker for the layer and mask // information section. 
// // We do not currently use it since the number of bytes passed into this function was // the exact number of bytes in the layer and information mask section of the PSD file, // so there's no way for us to accidentally read too many bytes. If we did the program // would panic. cursor.read_4()?; // Read the next four bytes to get the length of the layer info section. let _layer_info_section_len = cursor.read_u32()?; // Next 2 bytes is the layer count // // NOTE: Appears to be -1 when we create a new PSD and don't create any new layers but // instead only manipulate the default background layer. // // # [Adobe Docs](https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/) // // Layer count. If it is a negative number, its absolute value is the number of layers and // the first alpha channel contains the transparency data for the merged result. let layer_count = cursor.read_i16()?; // TODO: If the layer count was negative we were supposed to treat the first alpha // channel as transparency data for the merged result.. So add a new test with a transparent // PSD and make sure that we're handling this case properly. 
let layer_count: u16 = layer_count.abs() as u16; let ( group_count, layer_records ) = LayerAndMaskInformationSection::read_layer_records(&mut cursor, layer_count)?; LayerAndMaskInformationSection::decode_layers( layer_records, group_count, (psd_width, psd_height), ) } fn decode_layers( layer_records: Vec<(LayerRecord, LayerChannels)>, group_count: usize, psd_size: (u32, u32), ) -> Result<LayerAndMaskInformationSection, Error> { let mut layers = NamedItems::with_capacity(layer_records.len()); let mut groups: NamedItems<PsdGroup> = NamedItems::with_capacity(group_count); // Create stack with root-level let mut stack: Vec<Frame> = vec![Frame { start_idx: 0, name: String::from("root"), group_id: 0, parent_group_id: 0, }]; // Viewed group counter let mut already_viewed = 0; // Read each layer's channel image data for (layer_record, channels) in layer_records.into_iter() { // get current group from stack let current_group_id = stack.last().unwrap().group_id; match layer_record.divider_type { // open the folder Some(GroupDivider::CloseFolder) | Some(GroupDivider::OpenFolder) => { already_viewed = already_viewed + 1; let frame = Frame { start_idx: layers.len(), name: layer_record.name, group_id: already_viewed, parent_group_id: current_group_id, }; stack.push(frame); } // close the folder Some(GroupDivider::BoundingSection) => { let frame = stack.pop().unwrap(); let range = Range { start: frame.start_idx, end: layers.len(), }; groups.push(frame.name.clone(), PsdGroup::new( frame.name, frame.group_id, range, &layer_record, psd_size.0, psd_size.1, if frame.parent_group_id > 0 { Some(frame.parent_group_id) } else { None }, )); } _ => { let psd_layer = LayerAndMaskInformationSection::read_layer( &layer_record, current_group_id, psd_size, channels, )?; layers.push(psd_layer.name.clone(), psd_layer); } }; } Ok(LayerAndMaskInformationSection { layers, groups, }) } fn read_layer_records(cursor: &mut PsdCursor, layer_count: u16) -> Result<(usize, Vec<(LayerRecord, LayerChannels)>), 
Error> { let mut groups_count = 0; let mut layer_records = vec![]; // Read each layer record for _layer_num in 0..layer_count { let layer_record = read_layer_record(cursor)?; match layer_record.divider_type { Some(GroupDivider::BoundingSection) => { groups_count = groups_count + 1; } _ => {} } layer_records.push(layer_record); } let mut result = vec![]; for layer_record in layer_records { let channels = read_layer_channels( cursor, &layer_record.channel_data_lengths, layer_record.height() as usize, )?; result.push((layer_record, channels)); } // Photoshop stores layers in reverse order result.reverse(); Ok((groups_count, result)) } fn read_layer( layer_record: &LayerRecord, parent_id: u32, psd_size: (u32, u32), channels: LayerChannels, ) -> Result<PsdLayer, Error> { Ok(PsdLayer::new( &layer_record, psd_size.0, psd_size.1, if parent_id > 0 { Some(parent_id) } else { None }, channels, )) } } /// Reads layer channels fn read_layer_channels( cursor: &mut PsdCursor, channel_data_lengths: &Vec<(PsdChannelKind, u32)>, scanlines: usize, ) -> Result<LayerChannels, Error> { let capacity = channel_data_lengths.len(); let mut channels = HashMap::with_capacity(capacity); for (channel_kind, channel_length) in channel_data_lengths.iter() { let compression = cursor.read_u16()?; let compression = PsdChannelCompression::new(compression)?; let channel_data = cursor.read(*channel_length)?; let channel_bytes = match compression { PsdChannelCompression::RawData => ChannelBytes::RawData(channel_data.into()), PsdChannelCompression::RleCompressed => { // We're skipping over the bytes that describe the length of each scanline since // we don't currently use them. We might re-think this in the future when we // implement serialization of a Psd back into bytes.. But not a concern at the // moment. 
// Compressed bytes per scanline are encoded at the beginning as 2 bytes // per scanline let channel_data = &channel_data[2 * scanlines..]; ChannelBytes::RleCompressed(channel_data.into()) } _ => unimplemented!("Zip compression currently unsupported"), }; channels.insert(*channel_kind, channel_bytes); } Ok(channels) } /// Read bytes, starting from the cursor, until we've processed all of the data for a layer in /// the layer records section. /// /// At the moment we skip over some of the data. /// /// Please open an issue if there is data in here that you need that we don't currently parse. /// /// # [Adobe Docs](https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/) /// /// Information about each layer. /// /// | Length | Description | /// |------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| /// | 4 * 4 | Rectangle containing the contents of the layer. Specified as top, left, bottom, right coordinates | /// | 2 | Number of channels in the layer | /// | 6 * number of channels | Channel information. Six bytes per channel,<br> consisting of: 2 bytes for Channel ID: 0 = red, 1 = green, etc.; <br> -1 = transparency mask; -2 = user supplied layer mask, -3 real user supplied layer mask (when both a user mask and a vector mask are present) <br> 4 bytes for length of corresponding channel data. (**PSB** 8 bytes for length of corresponding channel data.) See See Channel image data for structure of channel data. 
| /// | 4 | Blend mode signature: '8BIM' | /// | 4 | Blend mode key: <br> 'pass' = pass through, 'norm' = normal, 'diss' = dissolve, 'dark' = darken, 'mul ' = multiply, 'idiv' = color burn, 'lbrn' = linear burn, 'dkCl' = darker color, 'lite' = lighten, 'scrn' = screen, 'div ' = color dodge, 'lddg' = linear dodge, 'lgCl' = lighter color, 'over' = overlay, 'sLit' = soft light, 'hLit' = hard light, 'vLit' = vivid light, 'lLit' = linear light, 'pLit' = pin light, 'hMix' = hard mix, 'diff' = difference, 'smud' = exclusion, 'fsub' = subtract, 'fdiv' = divide 'hue ' = hue, 'sat ' = saturation, 'colr' = color, 'lum ' = luminosity, | /// | 1 | Opacity. 0 = transparent ... 255 = opaque | /// | 1 | Clipping: 0 = base, 1 = non-base | /// | 1 | Flags: <br> bit 0 = transparency protected; <br> bit 1 = visible; <br> bit 2 = obsolete; <br> bit 3 = 1 for Photoshop 5.0 and later, tells if bit 4 has useful information; <br> bit 4 = pixel data irrelevant to appearance of document | /// | 1 | Filler (zero) | /// | 4 | Length of the extra data field ( = the total length of the next five fields). | /// | Variable | Layer mask data: See See Layer mask / adjustment layer data for structure. Can be 40 bytes, 24 bytes, or 4 bytes if no layer mask. | /// | Variable | Layer blending ranges: See See Layer blending ranges data. | /// | Variable | Layer name: Pascal string, padded to a multiple of 4 bytes. | fn read_layer_record(cursor: &mut PsdCursor) -> Result<LayerRecord, Error> { let mut channel_data_lengths = vec![]; // FIXME: // Ran into a bug where a PSD file had a top and left of over 4billion. // The PSD file was 128x128 yet the single layer in the file was 1024x1024. // Manually changing the layer's dimensions fixed the problem.. but this is something // that we should look into handling automatically since the file opened just fine in // Photoshop. // Read the rectangle that encloses the layer mask. 
let top = cursor.read_i32()?; let left = cursor.read_i32()?; // Subtract one in order to zero index. If a layer is fully transparent it's bottom will // already be 0 so we don't subtract let bottom = cursor.read_i32()?; let bottom = if bottom == 0 { 0 } else { bottom - 1 }; // Subtract one in order to zero index. If a layer is fully transparent it's right will // already be zero so we don't subtract. let right = cursor.read_i32()?; let right = if right == 0 { 0 } else { right - 1 }; // Get the number of channels in the layer let channel_count = cursor.read_u16()?; // Read the channel information for _ in 0..channel_count { let channel_id = cursor.read_i16()?; let channel_id = PsdChannelKind::new(channel_id)?; let channel_length = cursor.read_u32()?; // The first two bytes encode the compression, the rest of the bytes // are the channel data. let channel_data_length = channel_length - 2; channel_data_lengths.push((channel_id, channel_data_length)); } // We do not currently parse the blend mode signature, skip it cursor.read_4()?; // We do not currently parse the blend mode key, skip it cursor.read_4()?; // We do not currently parse the opacity, skip it cursor.read_1()?; // We do not currently parse the clipping, skip it cursor.read_1()?; // We do not currently parse the flags, skip it cursor.read_1()?; // We do not currently parse the filter, skip it cursor.read_1()?; // We do not currently use the length of the extra data field, skip it cursor.read_4()?; // We do not currently use the layer mask data, skip it let layer_mask_data_len = cursor.read_u32()?; cursor.read(layer_mask_data_len)?; // We do not currently use the layer blending range, skip it let layer_blending_range_data_len = cursor.read_u32()?; cursor.read(layer_blending_range_data_len)?; // Read the layer name let name_len = cursor.read_u8()?; let name = cursor.read(name_len as u32)?; let name = String::from_utf8_lossy(name); let mut name = name.to_string(); // Layer name is padded to the next multiple of 
4 bytes. // So if the name length is 9, there will be three throwaway bytes // after it. Here we skip over those throwaday bytes. // // The 1 is the 1 byte that we read for the name length let bytes_mod_4 = (name_len + 1) % 4; let padding = (4 - bytes_mod_4) % 4; cursor.read(padding as u32)?; let mut divider_type = None; // There can be multiple additional layer information sections so we'll loop // until we stop seeing them. while cursor.peek_4()? == SIGNATURE_EIGHT_BIM || cursor.peek_4()? == SIGNATURE_EIGHT_B64 { let _signature = cursor.read_4()?; let mut key = [0; 4]; key.copy_from_slice(cursor.read_4()?); let additional_layer_info_len = cursor.read_u32()?; match &key { KEY_UNICODE_LAYER_NAME => { name = cursor.read_unicode_string()?; } KEY_SECTION_DIVIDER_SETTING => { divider_type = GroupDivider::match_divider(cursor.read_i32()?); // data present only if length >= 12 if additional_layer_info_len >= 12 { let _signature = cursor.read_4()?; let _key = cursor.read_4()?; } // data present only if length >= 16 if additional_layer_info_len >= 16 { cursor.read_4()?; } } // TODO: Skipping other keys until we implement parsing for them _ => { cursor.read(additional_layer_info_len)?; } } } Ok(LayerRecord { name, channel_data_lengths, top, left, bottom, right, divider_type, }) }
57.793333
610
0.428077
de3d72033a039361ede631525f19023018b0aee4
10,432
// DO NOT EDIT ! // This file was generated automatically from 'src/mako/api/lib.rs.mako' // DO NOT EDIT ! //! This documentation was generated from *Cloud Build* crate version *1.0.14+20200704*, where *20200704* is the exact revision of the *cloudbuild:v1* schema built by the [mako](http://www.makotemplates.org/) code generator *v1.0.14*. //! //! Everything else about the *Cloud Build* *v1* API can be found at the //! [official documentation site](https://cloud.google.com/cloud-build/docs/). //! The original source code is [on github](https://github.com/Byron/google-apis-rs/tree/master/gen/cloudbuild1). //! # Features //! //! Handle the following *Resources* with ease from the central [hub](CloudBuild) ... //! //! * [operations](api::Operation) //! * [*cancel*](api::OperationCancelCall) and [*get*](api::OperationGetCall) //! * projects //! * [*builds cancel*](api::ProjectBuildCancelCall), [*builds create*](api::ProjectBuildCreateCall), [*builds get*](api::ProjectBuildGetCall), [*builds list*](api::ProjectBuildListCall), [*builds retry*](api::ProjectBuildRetryCall), [*locations operations cancel*](api::ProjectLocationOperationCancelCall), [*locations operations get*](api::ProjectLocationOperationGetCall), [*triggers create*](api::ProjectTriggerCreateCall), [*triggers delete*](api::ProjectTriggerDeleteCall), [*triggers get*](api::ProjectTriggerGetCall), [*triggers list*](api::ProjectTriggerListCall), [*triggers patch*](api::ProjectTriggerPatchCall) and [*triggers run*](api::ProjectTriggerRunCall) //! //! //! //! //! Not what you are looking for ? Find all other Google APIs in their Rust [documentation index](http://byron.github.io/google-apis-rs). //! //! # Structure of this Library //! //! The API is structured into the following primary items: //! //! * **[Hub](CloudBuild)** //! * a central object to maintain state and allow accessing all *Activities* //! * creates [*Method Builders*](client::MethodsBuilder) which in turn //! 
allow access to individual [*Call Builders*](client::CallBuilder) //! * **[Resources](client::Resource)** //! * primary types that you can apply *Activities* to //! * a collection of properties and *Parts* //! * **[Parts](client::Part)** //! * a collection of properties //! * never directly used in *Activities* //! * **[Activities](client::CallBuilder)** //! * operations to apply to *Resources* //! //! All *structures* are marked with applicable traits to further categorize them and ease browsing. //! //! Generally speaking, you can invoke *Activities* like this: //! //! ```Rust,ignore //! let r = hub.resource().activity(...).doit() //! ``` //! //! Or specifically ... //! //! ```ignore //! let r = hub.operations().cancel(...).doit() //! let r = hub.operations().get(...).doit() //! let r = hub.projects().builds_create(...).doit() //! let r = hub.projects().builds_retry(...).doit() //! let r = hub.projects().locations_operations_get(...).doit() //! let r = hub.projects().triggers_run(...).doit() //! ``` //! //! The `resource()` and `activity(...)` calls create [builders][builder-pattern]. The second one dealing with `Activities` //! supports various methods to configure the impending operation (not shown here). It is made such that all required arguments have to be //! specified right away (i.e. `(...)`), whereas all optional ones can be [build up][builder-pattern] as desired. //! The `doit()` method performs the actual communication with the server and returns the respective result. //! //! # Usage //! //! ## Setting up your Project //! //! To use this library, you would put the following lines into your `Cargo.toml` file: //! //! ```toml //! [dependencies] //! google-cloudbuild1 = "*" //! # This project intentionally uses an old version of Hyper. See //! # https://github.com/Byron/google-apis-rs/issues/173 for more //! # information. //! hyper = "^0.14" //! hyper-rustls = "^0.22" //! serde = "^1.0" //! serde_json = "^1.0" //! yup-oauth2 = "^5.0" //! ``` //! //! 
## A complete example //! //! ```test_harness,no_run //! extern crate hyper; //! extern crate hyper_rustls; //! extern crate yup_oauth2 as oauth2; //! extern crate google_cloudbuild1 as cloudbuild1; //! use cloudbuild1::api::RetryBuildRequest; //! use cloudbuild1::{Result, Error}; //! # #[test] fn egal() { //! use std::default::Default; //! use oauth2; //! use cloudbuild1::CloudBuild; //! //! // Get an ApplicationSecret instance by some means. It contains the `client_id` and //! // `client_secret`, among other things. //! let secret: ApplicationSecret = Default::default(); //! // Instantiate the authenticator. It will choose a suitable authentication flow for you, //! // unless you replace `None` with the desired Flow. //! // Provide your own `AuthenticatorDelegate` to adjust the way it operates and get feedback about //! // what's going on. You probably want to bring in your own `TokenStorage` to persist tokens and //! // retrieve them from storage. //! let auth = yup_oauth2::InstalledFlowAuthenticator::builder( //! secret, //! yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect, //! ).build().await.unwrap(); //! let mut hub = CloudBuild::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); //! // As the method needs a request, you would usually fill it with the desired information //! // into the respective structure. Some of the parts shown here might not be applicable ! //! // Values shown here are possibly random and not representative ! //! let mut req = RetryBuildRequest::default(); //! //! // You can configure optional parameters by calling the respective setters at will, and //! // execute the final call using `doit()`. //! // Values shown here are possibly random and not representative ! //! let result = hub.projects().builds_retry(req, "projectId", "id") //! .doit(); //! //! match result { //! Err(e) => match e { //! // The Error enum provides details about what exactly happened. //! 
// You can also just use its `Debug`, `Display` or `Error` traits //! Error::HttpError(_) //! |Error::MissingAPIKey //! |Error::MissingToken(_) //! |Error::Cancelled //! |Error::UploadSizeLimitExceeded(_, _) //! |Error::Failure(_) //! |Error::BadRequest(_) //! |Error::FieldClash(_) //! |Error::JsonDecodeError(_, _) => println!("{}", e), //! }, //! Ok(res) => println!("Success: {:?}", res), //! } //! # } //! ``` //! ## Handling Errors //! //! All errors produced by the system are provided either as [Result](client::Result) enumeration as return value of //! the doit() methods, or handed as possibly intermediate results to either the //! [Hub Delegate](client::Delegate), or the [Authenticator Delegate](https://docs.rs/yup-oauth2/*/yup_oauth2/trait.AuthenticatorDelegate.html). //! //! When delegates handle errors or intermediate values, they may have a chance to instruct the system to retry. This //! makes the system potentially resilient to all kinds of errors. //! //! ## Uploads and Downloads //! If a method supports downloads, the response body, which is part of the [Result](client::Result), should be //! read by you to obtain the media. //! If such a method also supports a [Response Result](client::ResponseResult), it will return that by default. //! You can see it as meta-data for the actual media. To trigger a media download, you will have to set up the builder by making //! this call: `.param("alt", "media")`. //! //! Methods supporting uploads can do so using up to 2 different protocols: //! *simple* and *resumable*. The distinctiveness of each is represented by customized //! `doit(...)` methods, which are then named `upload(...)` and `upload_resumable(...)` respectively. //! //! ## Customization and Callbacks //! //! You may alter the way an `doit()` method is called by providing a [delegate](client::Delegate) to the //! [Method Builder](client::CallBuilder) before making the final `doit()` call. //! 
Respective methods will be called to provide progress information, as well as determine whether the system should //! retry on failure. //! //! The [delegate trait](client::Delegate) is default-implemented, allowing you to customize it with minimal effort. //! //! ## Optional Parts in Server-Requests //! //! All structures provided by this library are made to be [encodable](client::RequestValue) and //! [decodable](client::ResponseResult) via *json*. Optionals are used to indicate that partial requests are responses //! are valid. //! Most optionals are are considered [Parts](client::Part) which are identifiable by name, which will be sent to //! the server to indicate either the set parts of the request or the desired parts in the response. //! //! ## Builder Arguments //! //! Using [method builders](client::CallBuilder), you are able to prepare an action call by repeatedly calling it's methods. //! These will always take a single argument, for which the following statements are true. //! //! * [PODs][wiki-pod] are handed by copy //! * strings are passed as `&str` //! * [request values](client::RequestValue) are moved //! //! Arguments will always be copied or cloned into the builder, to make them independent of their original life times. //! //! [wiki-pod]: http://en.wikipedia.org/wiki/Plain_old_data_structure //! [builder-pattern]: http://en.wikipedia.org/wiki/Builder_pattern //! [google-go-api]: https://github.com/google/google-api-go-client //! //! // Unused attributes happen thanks to defined, but unused structures // We don't warn about this, as depending on the API, some data structures or facilities are never used. // Instead of pre-determining this, we just disable the lint. It's manually tuned to not have any // unused imports in fully featured APIs. Same with unused_mut ... . #![allow(unused_imports, unused_mut, dead_code)] // DO NOT EDIT ! // This file was generated automatically from 'src/mako/api/lib.rs.mako' // DO NOT EDIT ! 
#[macro_use] extern crate serde_derive; extern crate hyper; extern crate serde; extern crate serde_json; extern crate yup_oauth2 as oauth2; extern crate mime; extern crate url; pub mod api; pub mod client; // Re-export the hub type and some basic client structs pub use api::CloudBuild; pub use client::{Result, Error, Delegate};
47.418182
672
0.68587
1aa9b0951dd6d68473479c5a39ded0a05ae5414a
4,887
// DO NOT EDIT -- generated code use byteorder::{ ByteOrder, ReadBytesExt }; use serde::Serialize; #[allow(unused_imports)] use crate::bits::BitReader; #[allow(unused_imports)] use crate::fields::{ Field, FieldContent, FieldDefinition, }; #[derive(Debug, Default, Serialize)] pub struct BloodPressure { #[serde(skip_serializing_if = "Option::is_none")] diastolic_pressure: Option<crate::fields::Pressure>, #[serde(skip_serializing_if = "Option::is_none")] heart_rate: Option<crate::fields::Frequency>, #[serde(skip_serializing_if = "Option::is_none")] heart_rate_type: Option<crate::profile::enums::HrType>, #[serde(skip_serializing_if = "Option::is_none")] map_3_sample_mean: Option<crate::fields::Pressure>, #[serde(skip_serializing_if = "Option::is_none")] map_evening_values: Option<crate::fields::Pressure>, #[serde(skip_serializing_if = "Option::is_none")] map_morning_values: Option<crate::fields::Pressure>, #[serde(skip_serializing_if = "Option::is_none")] mean_arterial_pressure: Option<crate::fields::Pressure>, #[serde(skip_serializing_if = "Option::is_none")] status: Option<crate::profile::enums::BpStatus>, #[serde(skip_serializing_if = "Option::is_none")] systolic_pressure: Option<crate::fields::Pressure>, #[serde(skip_serializing_if = "Option::is_none")] timestamp: Option<crate::fields::DateTime>, #[serde(skip_serializing_if = "Option::is_none")] user_profile_index: Option<crate::profile::enums::MessageIndex>, } impl BloodPressure { pub fn from_fields<Order, Reader>(reader: &mut Reader, field_defs: &Vec<FieldDefinition>) -> Result<Self, std::io::Error> where Order: ByteOrder, Reader: ReadBytesExt, { let mut msg: Self = Default::default(); for field_def in field_defs { let (number, field) = field_def.content_from::<Order, Reader>(reader)?; msg.from_content(number, field); } Ok(msg) } fn from_content(&mut self, number: u8, field: Field) { match number { 0 => { self.systolic_pressure =field.one().map(|v| { let value = u16::from(v); 
(crate::fields::Pressure::new::<uom::si::pressure::millimeter_of_mercury, u16>)(value) }) }, 1 => { self.diastolic_pressure =field.one().map(|v| { let value = u16::from(v); (crate::fields::Pressure::new::<uom::si::pressure::millimeter_of_mercury, u16>)(value) }) }, 2 => { self.mean_arterial_pressure =field.one().map(|v| { let value = u16::from(v); (crate::fields::Pressure::new::<uom::si::pressure::millimeter_of_mercury, u16>)(value) }) }, 3 => { self.map_3_sample_mean =field.one().map(|v| { let value = u16::from(v); (crate::fields::Pressure::new::<uom::si::pressure::millimeter_of_mercury, u16>)(value) }) }, 4 => { self.map_morning_values =field.one().map(|v| { let value = u16::from(v); (crate::fields::Pressure::new::<uom::si::pressure::millimeter_of_mercury, u16>)(value) }) }, 5 => { self.map_evening_values =field.one().map(|v| { let value = u16::from(v); (crate::fields::Pressure::new::<uom::si::pressure::millimeter_of_mercury, u16>)(value) }) }, 6 => { self.heart_rate =field.one().map(|v| { let value = u8::from(v); (crate::fields::Frequency::new::<uom::si::frequency::cycle_per_minute, u8>)(value) }) }, 7 => { self.heart_rate_type =field.one().map(|v| { let value = crate::profile::enums::HrType::from(v); value }) }, 8 => { self.status =field.one().map(|v| { let value = crate::profile::enums::BpStatus::from(v); value }) }, 9 => { self.user_profile_index =field.one().map(|v| { let value = crate::profile::enums::MessageIndex::from(v); value }) }, 253 => { self.timestamp =field.one().map(|v| { let value = crate::fields::DateTime::from(v); value }) }, _ => (), } } }
31.529032
106
0.512789
1867a7996cb8d2bef5148f923f5bdc8089c0d618
23,930
//! RISCV Support #![allow(clippy::inconsistent_digit_grouping)] use crate::core::Architecture; use crate::CoreInterface; use anyhow::{anyhow, Result}; use communication_interface::{ AbstractCommandErrorKind, AccessRegisterCommand, DebugRegister, RiscvCommunicationInterface, RiscvError, }; use crate::core::{CoreInformation, RegisterFile}; use crate::{CoreRegisterAddress, CoreStatus, Error, HaltReason, MemoryInterface}; use bitfield::bitfield; use register::RISCV_REGISTERS; use std::time::{Duration, Instant}; #[macro_use] mod register; pub(crate) mod assembly; pub mod communication_interface; pub struct Riscv32<'probe> { interface: &'probe mut RiscvCommunicationInterface, } impl<'probe> Riscv32<'probe> { pub fn new(interface: &'probe mut RiscvCommunicationInterface) -> Self { Self { interface } } fn read_csr(&mut self, address: u16) -> Result<u32, RiscvError> { // We need to sue the "Access Register Command", // which has cmdtype 0 // write needs to be clear // transfer has to be set log::debug!("Reading CSR {:#x}", address); // always try to read register with abstract command, fallback to program buffer, // if not supported match self.interface.abstract_cmd_register_read(address) { Ok(v) => Ok(v), Err(RiscvError::AbstractCommand(AbstractCommandErrorKind::NotSupported)) => { log::debug!("Could not read core register {:#x} with abstract command, falling back to program buffer", address); let reg_value = self.read_csr_progbuf(address)?; Ok(reg_value) } Err(e) => Err(e.into()), } } fn read_csr_progbuf(&mut self, address: u16) -> Result<u32, RiscvError> { log::debug!("Reading CSR {:#04x}", address); let s0 = self.interface.abstract_cmd_register_read(&register::S0)?; // We need to perform the csrr instruction, which reads a CSR. // This is a pseudo instruction, which actually is encoded as a // csrrs instruction, with the rs1 register being x0, // so no bits are changed in the CSR, but the CSR is read into rd, i.e. s0. 
// // csrrs, // with rd = s0 // rs1 = x0 // csr = address let mut csrrs_cmd: u32 = 0b_00000_010_01000_1110011; csrrs_cmd |= ((address as u32) & 0xfff) << 20; self.interface.setup_program_buffer(&[csrrs_cmd])?; // command: postexec let mut postexec_cmd = AccessRegisterCommand(0); postexec_cmd.set_postexec(true); self.interface.execute_abstract_command(postexec_cmd.0)?; // read the s0 value let reg_value = self.interface.abstract_cmd_register_read(&register::S0)?; // restore original value in s0 self.interface .abstract_cmd_register_write(&register::S0, s0)?; Ok(reg_value) } fn write_csr(&mut self, address: u16, value: u32) -> Result<(), RiscvError> { log::debug!("Writing CSR {:#x}", address); match self.interface.abstract_cmd_register_write(address, value) { Ok(_) => Ok(()), Err(RiscvError::AbstractCommand(AbstractCommandErrorKind::NotSupported)) => { log::debug!("Could not write core register {:#x} with abstract command, falling back to program buffer", address); self.write_csr_progbuf(address, value)?; Ok(()) } Err(e) => Err(e.into()), } } fn write_csr_progbuf(&mut self, address: u16, value: u32) -> Result<(), RiscvError> { log::debug!("Writing CSR {:#04x}={}", address, value); // Backup register s0 let s0 = self.interface.abstract_cmd_register_read(&register::S0)?; // We need to perform the csrw instruction, which writes a CSR. // This is a pseudo instruction, which actually is encoded as a // csrrw instruction, with the destination register being x0, // so the read is ignored. 
// // csrrw, // with rd = x0 // rs1 = s0 // csr = address // Write value into s0 self.interface .abstract_cmd_register_write(&register::S0, value)?; let mut csrrw_cmd: u32 = 0b_01000_001_00000_1110011; csrrw_cmd |= ((address as u32) & 0xfff) << 20; // write progbuf0: csrr xxxxxx s0, (address) // lookup correct command self.interface.setup_program_buffer(&[csrrw_cmd])?; // command: postexec let mut postexec_cmd = AccessRegisterCommand(0); postexec_cmd.set_postexec(true); self.interface.execute_abstract_command(postexec_cmd.0)?; // command: transfer, regno = 0x1008 // restore original value in s0 self.interface .abstract_cmd_register_write(&register::S0, s0)?; Ok(()) } } impl<'probe> CoreInterface for Riscv32<'probe> { fn wait_for_core_halted(&mut self, timeout: Duration) -> Result<(), crate::Error> { let start = Instant::now(); while start.elapsed() < timeout { let dmstatus: Dmstatus = self.interface.read_dm_register()?; log::trace!("{:?}", dmstatus); if dmstatus.allhalted() { return Ok(()); } } Err(RiscvError::Timeout.into()) } fn core_halted(&mut self) -> Result<bool, crate::Error> { let dmstatus: Dmstatus = self.interface.read_dm_register()?; Ok(dmstatus.allhalted()) } fn halt(&mut self, timeout: Duration) -> Result<CoreInformation, crate::Error> { // write 1 to the haltreq register, which is part // of the dmcontrol register // read the current dmcontrol register let current_dmcontrol: Dmcontrol = self.interface.read_dm_register()?; log::debug!("{:?}", current_dmcontrol); let mut dmcontrol = Dmcontrol(0); dmcontrol.set_haltreq(true); dmcontrol.set_dmactive(true); self.interface.write_dm_register(dmcontrol)?; self.wait_for_core_halted(timeout)?; // clear the halt request let mut dmcontrol = Dmcontrol(0); dmcontrol.set_dmactive(true); self.interface.write_dm_register(dmcontrol)?; let pc = self.read_core_reg(register::RISCV_REGISTERS.program_counter.address)?; Ok(CoreInformation { pc }) } fn run(&mut self) -> Result<(), crate::Error> { // TODO: test if core halted? 
// set resume request let mut dmcontrol = Dmcontrol(0); dmcontrol.set_dmactive(true); dmcontrol.set_resumereq(true); self.interface.write_dm_register(dmcontrol)?; // check if request has been acknowleged let status: Dmstatus = self.interface.read_dm_register()?; if !status.allresumeack() { return Err(RiscvError::RequestNotAcknowledged.into()); }; // clear resume request let mut dmcontrol = Dmcontrol(0); dmcontrol.set_dmactive(true); self.interface.write_dm_register(dmcontrol)?; Ok(()) } fn reset(&mut self) -> Result<(), crate::Error> { log::debug!("Resetting core, setting hartreset bit"); let mut dmcontrol = Dmcontrol(0); dmcontrol.set_dmactive(true); dmcontrol.set_hartreset(true); self.interface.write_dm_register(dmcontrol)?; // Read back register to verify reset is supported let readback: Dmcontrol = self.interface.read_dm_register()?; if readback.hartreset() { log::debug!("Clearing hartreset bit"); // Reset is performed by setting the bit high, and then low again let mut dmcontrol = Dmcontrol(0); dmcontrol.set_dmactive(true); dmcontrol.set_hartreset(false); self.interface.write_dm_register(dmcontrol)?; } else { // Hartreset is not supported, whole core needs to be reset // // TODO: Cache this log::debug!("Hartreset bit not supported, using ndmreset"); let mut dmcontrol = Dmcontrol(0); dmcontrol.set_dmactive(true); dmcontrol.set_ndmreset(true); self.interface.write_dm_register(dmcontrol)?; log::debug!("Clearing ndmreset bit"); let mut dmcontrol = Dmcontrol(0); dmcontrol.set_dmactive(true); dmcontrol.set_ndmreset(false); self.interface.write_dm_register(dmcontrol)?; } // check that cores have reset let readback: Dmstatus = self.interface.read_dm_register()?; if !readback.allhavereset() { log::warn!("Dmstatue: {:?}", readback); return Err(RiscvError::RequestNotAcknowledged.into()); } // acknowledge the reset let mut dmcontrol = Dmcontrol(0); dmcontrol.set_dmactive(true); dmcontrol.set_ackhavereset(true); self.interface.write_dm_register(dmcontrol)?; Ok(()) } fn 
reset_and_halt( &mut self, _timeout: Duration, ) -> Result<crate::core::CoreInformation, crate::Error> { log::debug!("Resetting core, setting hartreset bit"); let mut dmcontrol = Dmcontrol(0); dmcontrol.set_dmactive(true); dmcontrol.set_hartreset(true); dmcontrol.set_haltreq(true); self.interface.write_dm_register(dmcontrol)?; // Read back register to verify reset is supported let readback: Dmcontrol = self.interface.read_dm_register()?; if readback.hartreset() { log::debug!("Clearing hartreset bit"); // Reset is performed by setting the bit high, and then low again let mut dmcontrol = Dmcontrol(0); dmcontrol.set_dmactive(true); dmcontrol.set_haltreq(true); dmcontrol.set_hartreset(false); self.interface.write_dm_register(dmcontrol)?; } else { // Hartreset is not supported, whole core needs to be reset // // TODO: Cache this log::debug!("Hartreset bit not supported, using ndmreset"); let mut dmcontrol = Dmcontrol(0); dmcontrol.set_dmactive(true); dmcontrol.set_ndmreset(true); dmcontrol.set_haltreq(true); self.interface.write_dm_register(dmcontrol)?; log::debug!("Clearing ndmreset bit"); let mut dmcontrol = Dmcontrol(0); dmcontrol.set_dmactive(true); dmcontrol.set_ndmreset(false); dmcontrol.set_haltreq(true); self.interface.write_dm_register(dmcontrol)?; } // check that cores have reset let readback: Dmstatus = self.interface.read_dm_register()?; if !(readback.allhavereset() && readback.allhalted()) { return Err(RiscvError::RequestNotAcknowledged.into()); } // acknowledge the reset, clear the halt request let mut dmcontrol = Dmcontrol(0); dmcontrol.set_dmactive(true); dmcontrol.set_ackhavereset(true); self.interface.write_dm_register(dmcontrol)?; let pc = self.read_core_reg(CoreRegisterAddress(0x7b1))?; Ok(CoreInformation { pc }) } fn step(&mut self) -> Result<crate::core::CoreInformation, crate::Error> { let mut dcsr = Dcsr(self.read_core_reg(CoreRegisterAddress(0x7b0))?); dcsr.set_step(true); self.write_csr(0x7b0, dcsr.0)?; self.run()?; 
self.wait_for_core_halted(Duration::from_millis(100))?; let pc = self.read_core_reg(CoreRegisterAddress(0x7b1))?; // clear step request let mut dcsr = Dcsr(self.read_core_reg(CoreRegisterAddress(0x7b0))?); dcsr.set_step(false); self.write_csr(0x7b0, dcsr.0)?; Ok(CoreInformation { pc }) } fn read_core_reg(&mut self, address: crate::CoreRegisterAddress) -> Result<u32, crate::Error> { self.read_csr(address.0).map_err(|e| e.into()) } fn write_core_reg(&mut self, address: crate::CoreRegisterAddress, value: u32) -> Result<()> { self.write_csr(address.0, value).map_err(|e| e.into()) } fn get_available_breakpoint_units(&mut self) -> Result<u32, crate::Error> { // TODO: This should probably only be done once, when initialising log::debug!("Determining number of HW breakpoints supported"); let tselect = 0x7a0; let tdata1 = 0x7a1; let tinfo = 0x7a4; let mut tselect_index = 0; // These steps follow the debug specification 0.13, section 5.1 Enumeration loop { log::debug!("Trying tselect={}", tselect_index); if let Err(e) = self.write_csr(tselect, tselect_index) { match e { RiscvError::AbstractCommand(AbstractCommandErrorKind::Exception) => break, other_error => return Err(other_error.into()), } } let readback = self.read_csr(tselect)?; if readback != tselect_index { break; } match self.read_csr(tinfo) { Ok(tinfo_val) => { if tinfo_val & 0xffff == 1 { // Trigger doesn't exist, break the loop break; } else { log::info!( "Discovered trigger with index {} and type {}", tselect_index, tinfo_val & 0xffff ); } } Err(RiscvError::AbstractCommand(AbstractCommandErrorKind::Exception)) => { // An exception means we have to read tdata1 to discover the type let tdata_val = self.read_csr(tdata1)?; // TODO: Proper handle xlen let xlen = 32; let trigger_type = tdata_val >> (xlen - 4); if trigger_type == 0 { break; } log::info!( "Discovered trigger with index {} and type {}", tselect_index, trigger_type, ); } Err(other) => return Err(other.into()), } tselect_index += 1; } log::debug!("Target 
supports {} breakpoints.", tselect_index); Ok(tselect_index) } fn enable_breakpoints(&mut self, _state: bool) -> Result<(), crate::Error> { // seems not needed on RISCV Ok(()) } fn set_breakpoint(&mut self, bp_unit_index: usize, addr: u32) -> Result<(), crate::Error> { // select requested trigger let tselect = 0x7a0; let tdata1 = 0x7a1; let tdata2 = 0x7a2; self.write_csr(tselect, bp_unit_index as u32)?; // verify the trigger has the correct type let tdata_value = Mcontrol(self.read_csr(tdata1)?); // This should not happen let trigger_type = tdata_value.type_(); if trigger_type != 0b10 { return Err(RiscvError::UnexpectedTriggerType(trigger_type).into()); } // Setup the trigger let mut instruction_breakpoint = Mcontrol(0); instruction_breakpoint.set_action(1); instruction_breakpoint.set_match(0); instruction_breakpoint.set_m(true); instruction_breakpoint.set_s(true); instruction_breakpoint.set_u(true); instruction_breakpoint.set_execute(true); instruction_breakpoint.set_dmode(true); self.write_csr(tdata1, instruction_breakpoint.0)?; self.write_csr(tdata2, addr)?; Ok(()) } fn clear_breakpoint(&mut self, unit_index: usize) -> Result<(), crate::Error> { let tselect = 0x7a0; let tdata1 = 0x7a1; let tdata2 = 0x7a2; self.write_csr(tselect, unit_index as u32)?; self.write_csr(tdata1, 0)?; self.write_csr(tdata2, 0)?; Ok(()) } fn registers(&self) -> &'static RegisterFile { &RISCV_REGISTERS } fn hw_breakpoints_enabled(&self) -> bool { // No special enable on RISC true } fn architecture(&self) -> Architecture { Architecture::Riscv } fn status(&mut self) -> Result<crate::core::CoreStatus, crate::Error> { // TODO: We should use hartsum to determine if any hart is halted // quickly let status: Dmstatus = self.interface.read_dm_register()?; if status.allhalted() { // determine reason for halt let dcsr = Dcsr(self.read_core_reg(CoreRegisterAddress::from(0x7b0))?); let reason = match dcsr.cause() { // An ebreak instruction was hit 1 => HaltReason::Breakpoint, // Trigger module caused 
halt 2 => HaltReason::Breakpoint, // Debugger requested a halt 3 => HaltReason::Request, // Core halted after single step 4 => HaltReason::Step, // Core halted directly after reset 5 => HaltReason::Exception, // Reserved for future use in specification _ => HaltReason::Unknown, }; Ok(CoreStatus::Halted(reason)) } else if status.allrunning() { Ok(CoreStatus::Running) } else { Err( anyhow!("Some cores are running while some are halted, this should not happen.") .into(), ) } } } impl<'probe> MemoryInterface for Riscv32<'probe> { fn read_word_32(&mut self, address: u32) -> Result<u32, Error> { self.interface.read_word_32(address) } fn read_word_8(&mut self, address: u32) -> Result<u8, Error> { self.interface.read_word_8(address) } fn read_32(&mut self, address: u32, data: &mut [u32]) -> Result<(), Error> { self.interface.read_32(address, data) } fn read_8(&mut self, address: u32, data: &mut [u8]) -> Result<(), Error> { self.interface.read_8(address, data) } fn write_word_32(&mut self, address: u32, data: u32) -> Result<(), Error> { self.interface.write_word_32(address, data) } fn write_word_8(&mut self, address: u32, data: u8) -> Result<(), Error> { self.interface.write_word_8(address, data) } fn write_32(&mut self, address: u32, data: &[u32]) -> Result<(), Error> { self.interface.write_32(address, data) } fn write_8(&mut self, address: u32, data: &[u8]) -> Result<(), Error> { self.interface.write_8(address, data) } fn flush(&mut self) -> Result<(), Error> { self.interface.flush() } } bitfield! 
{ // `dmcontrol` register, located at // address 0x10 pub struct Dmcontrol(u32); impl Debug; _, set_haltreq: 31; _, set_resumereq: 30; hartreset, set_hartreset: 29; _, set_ackhavereset: 28; hasel, set_hasel: 26; hartsello, set_hartsello: 25, 16; hartselhi, set_hartselhi: 15, 6; _, set_resethaltreq: 3; _, set_clrresethaltreq: 2; ndmreset, set_ndmreset: 1; dmactive, set_dmactive: 0; } impl DebugRegister for Dmcontrol { const ADDRESS: u8 = 0x10; const NAME: &'static str = "dmcontrol"; } impl From<Dmcontrol> for u32 { fn from(register: Dmcontrol) -> Self { register.0 } } impl From<u32> for Dmcontrol { fn from(value: u32) -> Self { Self(value) } } bitfield! { /// Readonly `dmstatus` register. /// /// Located at address 0x11 pub struct Dmstatus(u32); impl Debug; impebreak, _: 22; allhavereset, _: 19; anyhavereset, _: 18; allresumeack, _: 17; anyresumeack, _: 16; allnonexistent, _: 15; anynonexistent, _: 14; allunavail, _: 13; anyunavail, _: 12; allrunning, _: 11; anyrunning, _: 10; allhalted, _: 9; anyhalted, _: 8; authenticated, _: 7; authbusy, _: 6; hasresethaltreq, _: 5; confstrptrvalid, _: 4; version, _: 3, 0; } impl DebugRegister for Dmstatus { const ADDRESS: u8 = 0x11; const NAME: &'static str = "dmstatus"; } impl From<u32> for Dmstatus { fn from(value: u32) -> Self { Self(value) } } impl From<Dmstatus> for u32 { fn from(register: Dmstatus) -> Self { register.0 } } bitfield! { struct Dcsr(u32); impl Debug; xdebugver, _: 31, 28; ebreakm, set_ebreakm: 15; ebreaks, set_ebreaks: 13; ebreaku, set_ebreaku: 12; stepie, set_stepie: 11; stopcount, set_stopcount: 10; stoptime, set_stoptime: 9; cause, _: 8, 6; mprven, set_mprven: 4; nmip, _: 3; step, set_step: 2; prv, set_prv: 1,0; } bitfield! 
{ pub struct Abstractcs(u32); impl Debug; progbufsize, _: 28, 24; busy, _: 12; cmderr, set_cmderr: 10, 8; datacount, _: 3, 0; } impl DebugRegister for Abstractcs { const ADDRESS: u8 = 0x16; const NAME: &'static str = "abstractcs"; } impl From<Abstractcs> for u32 { fn from(register: Abstractcs) -> Self { register.0 } } impl From<u32> for Abstractcs { fn from(value: u32) -> Self { Self(value) } } bitfield! { pub struct Hartinfo(u32); impl Debug; nscratch, _: 23, 20; dataaccess, _: 16; datasize, _: 15, 12; dataaddr, _: 11, 0; } impl DebugRegister for Hartinfo { const ADDRESS: u8 = 0x12; const NAME: &'static str = "hartinfo"; } impl From<Hartinfo> for u32 { fn from(register: Hartinfo) -> Self { register.0 } } impl From<u32> for Hartinfo { fn from(value: u32) -> Self { Self(value) } } data_register! { pub Data0, 0x04, "data0" } data_register! { pub Data1, 0x05, "data1" } data_register! { pub Data2, 0x06, "data2" } data_register! { pub Data3, 0x07, "data3" } data_register! { pub Data4, 0x08, "data4" } data_register! { pub Data5, 0x09, "data5" } data_register! { pub Data6, 0x0A, "data6" } data_register! { pub Data7, 0x0B, "data7" } data_register! { pub Data8, 0x0C, "data8" } data_register! { pub Data9, 0x0D, "data9" } data_register! { pub Data10, 0x0E, "data10" } data_register! { pub Data11, 0x0f, "data11" } data_register! { Command, 0x17, "command" } data_register! { pub Progbuf0, 0x20, "progbuf0" } data_register! { pub Progbuf1, 0x21, "progbuf1" } data_register! { pub Progbuf2, 0x22, "progbuf2" } data_register! { pub Progbuf3, 0x23, "progbuf3" } data_register! { pub Progbuf4, 0x24, "progbuf4" } data_register! { pub Progbuf5, 0x25, "progbuf5" } data_register! { pub Progbuf6, 0x26, "progbuf6" } data_register! { pub Progbuf7, 0x27, "progbuf7" } data_register! { pub Progbuf8, 0x28, "progbuf8" } data_register! { pub Progbuf9, 0x29, "progbuf9" } data_register! { pub Progbuf10, 0x2A, "progbuf10" } data_register! { pub Progbuf11, 0x2B, "progbuf11" } data_register! 
{ pub Progbuf12, 0x2C, "progbuf12" } data_register! { pub Progbuf13, 0x2D, "progbuf13" } data_register! { pub Progbuf14, 0x2E, "progbuf14" } data_register! { pub Progbuf15, 0x2F, "progbuf15" } bitfield! { struct Mcontrol(u32); impl Debug; type_, set_type: 31, 28; dmode, set_dmode: 27; maskmax, _: 26, 21; hit, set_hit: 20; select, set_select: 19; timing, set_timing: 18; sizelo, set_sizelo: 17, 16; action, set_action: 15, 12; chain, set_chain: 11; match_, set_match: 10, 7; m, set_m: 6; s, set_s: 4; u, set_u: 3; execute, set_execute: 2; store, set_store: 1; load, set_load: 0; }
30.252845
130
0.590221
26a9955555719c426d93e60fd19cac9f588d9251
9,080
//! A module for implementing some helpful functions for `monotree`. use crate::*; use num::{NumCast, PrimInt}; use rand::Rng; use std::cmp; use std::ops::Range; #[macro_export] /// std::cmp::max() extension for use with multiple arguments. macro_rules! max { ($x:expr) => ($x); ($x:expr, $($e:expr),+) => (cmp::max($x, max!($($e),+))); } #[macro_export] /// std::cmp::min() extension for use with multiple arguments. macro_rules! min { ($x:expr) => ($x); ($x:expr, $($e:expr),+) => (cmp::min($x, min!($($e),+))); } #[macro_export] /// Convert `bytes` slice into `hex` string. macro_rules! hex { ($bytes:expr) => {{ hex::encode($bytes) }}; } #[macro_export] /// Convert elapsed time in nano second (`u128`) into appropriate format of time (`String`). macro_rules! fmtime { ($t:expr) => {{ match $t as f64 { t if t > 1e9 => format!("{:.4} s", 1e-9 * t), t if t > 1e6 => format!("{:.4} ms", 1e-6 * t), t if t > 1e3 => format!("{:.4} us", 1e-3 * t), t if t > 1e0 => format!("{:.4} ns", 1e-0 * t), _ => format!("under 1 ns"), } }}; } #[macro_export] /// Simple benchmark tool for runtime measure of a code block. /// /// For the given block code, it adds up total time during `LOOPS`-time run /// and then print and return the total time. /// When `LOOPS` is 0, it only runs once without `STDOUT`. macro_rules! 
perf { ($n:expr, $label:expr, $code:block) => {{ let label = match $label.trim().len() { 0 => stringify!($code), _ => $label, }; let tick = std::time::Instant::now(); match $n { 0 => $code, _ => (0..$n).for_each(|_| $code), } let tock = tick.elapsed().as_nanos(); match $n { 0 => tock, _ => { let elapsed = fmtime!(tock); let div = if $n == 0 { 1 } else { $n }; let mean = fmtime!(tock / div as u128); println!("\n{}", label); println!("{} loops: {} ({}, on average)", $n, elapsed, mean); tock } } }}; } /// Cast from a typed scalar to another based on `num_traits` pub fn cast<T: NumCast, U: NumCast>(n: T) -> U { NumCast::from(n).expect("cast(): Numcast") } /// Generate a random byte based on `rand::random`. pub fn random_byte() -> u8 { rand::random::<u8>() } /// Generate random bytes of the given length. pub fn random_bytes(n: usize) -> Vec<u8> { (0..n).map(|_| random_byte()).collect() } /// Generate a random `Hash`, byte-array of `HASH_LEN` length. pub fn random_hash() -> Hash { slice_to_hash(&random_bytes(HASH_LEN)) } /// Generate a vector of random `Hash` with the given length. pub fn random_hashes(n: usize) -> Vec<Hash> { (0..n).map(|_| random_hash()).collect() } /// Get a fixed lenght byte-array or `Hash` from slice. pub fn slice_to_hash(slice: &[u8]) -> Hash { let mut hash = [0x00; HASH_LEN]; hash.copy_from_slice(slice); hash } /// Shuffle a slice using _Fisher-Yates_ algorithm. pub fn shuffle<T: Clone>(slice: &mut [T]) { let mut rng = rand::thread_rng(); let s = slice.len(); (0..s).for_each(|i| { let q = rng.gen_range(0, s); slice.swap(i, q); }); } /// Get sorted indices from unsorted slice. 
pub fn get_sorted_indices<T>(slice: &[T], reverse: bool) -> Vec<usize> where T: Clone + cmp::Ord, { let mut t: Vec<_> = slice.iter().enumerate().collect(); if reverse { t.sort_unstable_by(|(_, a), (_, b)| b.cmp(a)); } else { t.sort_unstable_by(|(_, a), (_, b)| a.cmp(b)); } t.iter().map(|(i, _)| *i).collect() } /// Get length of the longest common prefix bits for the given two slices. pub fn len_lcp<T>(a: &[u8], m: &Range<T>, b: &[u8], n: &Range<T>) -> T where T: PrimInt + NumCast, Range<T>: Iterator<Item = T>, { let count = (cast(0)..min!(m.end - m.start, n.end - n.start)) .take_while(|&i| bit(a, m.start + i) == bit(b, n.start + i)) .count(); cast(count) } /// Get `i`-th bit from bytes slice. Index `i` starts from 0. pub fn bit<T: PrimInt + NumCast>(bytes: &[u8], i: T) -> bool { let q = i.to_usize().expect("bit(): usize") / 8; let r = i.to_u8().expect("bit(): u8") % 8; (bytes[q] >> (7 - r)) & 0x01 == 0x01 } /// Get the required length of bytes from a `Range`, bits indices across the bytes. pub fn nbytes_across<T: PrimInt + NumCast>(start: T, end: T) -> T { let n = (end - (start - start % cast(8))) / cast(8); if end % cast(8) == cast(0) { n } else { n + cast(1) } } /// Adjust the bytes representation for `Bits` when shifted. /// Returns a bytes shift, `n` and thereby resulting shifted range, `R`. pub fn offsets<T: PrimInt + NumCast>(range: &Range<T>, n: T, tail: bool) -> (T, Range<T>) { let x = range.start + n; let e: T = cast(8); if tail { (nbytes_across(range.start, x), range.start..x) } else { (x / e, x % e..range.end - e * (x / e)) } } /// Convert big-endian bytes into base10 or decimal number. pub fn bytes_to_int<T: PrimInt + NumCast>(bytes: &[u8]) -> T { let l = bytes.len(); let sum = (0..l).fold(0, |sum, i| { sum + (1 << ((l - i - 1) * 8)) * bytes[i] as usize }); cast(sum) } /// Get a compressed bytes (leading-zero-truncated big-endian bytes) from a `u64`. 
pub fn int_to_bytes(number: u64) -> Vec<u8> { match number { 0 => vec![0x00], _ => number .to_be_bytes() .iter() .skip_while(|&x| *x == 0x00) .copied() .collect(), } } /// Convert a Vec slice of bit or `bool` into a number as `usize`. pub fn bits_to_usize(bits: &[bool]) -> usize { let l = bits.len(); (0..l).fold(0, |sum, i| sum + ((bits[i] as usize) << (l - 1 - i))) } /// Convert a bytes slice into a Vec of bit. pub fn bytes_to_bits(bytes: &[u8]) -> Vec<bool> { bytes_to_slicebit(bytes, &(0..bytes.len() * 8)) } /// Convert (bytes slice + Range) representation into bits in forms of `Vec<bool>`. pub fn bytes_to_slicebit<T>(bytes: &[u8], range: &Range<T>) -> Vec<bool> where T: PrimInt + NumCast, Range<T>: Iterator<Item = T>, { range.clone().map(|x| bit(bytes, x)).collect() } /// Convert bits, Vec slice of `bool` into bytes, `Vec<u8>`. pub fn bits_to_bytes(bits: &[bool]) -> Vec<u8> { bits.rchunks(8) .rev() .map(|v| bits_to_usize(v) as u8) .collect() } #[cfg(test)] mod tests { use super::*; #[test] fn test_bit() { let bytes = [0x73, 0x6f, 0x66, 0x69, 0x61]; assert_eq!(bit(&bytes, 10), true); assert_eq!(bit(&bytes, 20), false); assert_eq!(bit(&bytes, 30), false); } #[test] fn test_nbyte_across() { assert_eq!(nbytes_across(0, 8), 1); assert_eq!(nbytes_across(1, 7), 1); assert_eq!(nbytes_across(5, 9), 2); assert_eq!(nbytes_across(9, 16), 1); assert_eq!(nbytes_across(7, 19), 3); } #[test] fn test_offsets() { assert_eq!(offsets(&(0..8), 1, false), (0, 1..8)); assert_eq!(offsets(&(0..8), 1, true), (1, 0..1)); assert_eq!(offsets(&(3..20), 10, false), (1, 5..12)); assert_eq!(offsets(&(3..20), 10, true), (2, 3..13)); assert_eq!(offsets(&(9..16), 5, false), (1, 6..8)); assert_eq!(offsets(&(9..16), 5, true), (1, 9..14)); } #[test] fn test_bytes_to_int() { let number: usize = bytes_to_int(&[0x73, 0x6f, 0x66, 0x69, 0x61]); assert_eq!(number, 495790221665usize); } #[test] fn test_usize_to_bytes() { assert_eq!( int_to_bytes(495790221665u64), [0x73, 0x6f, 0x66, 0x69, 0x61] ); } #[test] 
fn test_bytes_to_bits() { assert_eq!( bytes_to_bits(&[0x33, 0x33]), [ false, false, true, true, false, false, true, true, false, false, true, true, false, false, true, true, ] ); } #[test] fn test_bits_to_bytes() { let bits = [ false, false, true, true, false, false, true, true, false, false, true, true, false, false, true, true, ]; assert_eq!(bits_to_bytes(&bits), [0x33, 0x33]); } #[test] fn test_bits_to_usize() { assert_eq!( bits_to_usize(&[ false, false, true, true, false, false, true, true, false, false, true, true, false, false, true, true, ]), 13107usize ); } #[test] fn test_len_lcp() { let sofia = [0x73, 0x6f, 0x66, 0x69, 0x61]; let maria = [0x6d, 0x61, 0x72, 0x69, 0x61]; assert_eq!(len_lcp(&sofia, &(0..3), &maria, &(0..3)), 3); assert_eq!(len_lcp(&sofia, &(0..3), &maria, &(5..9)), 0); assert_eq!(len_lcp(&sofia, &(2..9), &maria, &(18..30)), 5); assert_eq!(len_lcp(&sofia, &(20..30), &maria, &(3..15)), 4); } }
29.480519
96
0.528965
62fd5a28f6fb08dfcff9fe880ab9cd23d04f7156
3,655
//! FFI shim that decodes a WAV byte buffer (via `hound`) and encodes it to
//! MP3 with LAME (via `libmp3lame_sys`), writing the result to a timestamped
//! file on disk and returning that filename to the C caller.
extern crate core;

use hound;
use libmp3lame_sys::*;
use num_rational::Rational64;
use std::ffi::CString;
use std::fs::File;
use std::i16;
use std::io::prelude::*;
use std::io::{self};
use std::os::raw::c_char;
use std::ptr::NonNull;
use std::time::SystemTime;
use std::time::UNIX_EPOCH;

pub const SAMPLE_RATE: u32 = 48_000;

/// A chunk of interleaved PCM samples plus a presentation timestamp.
#[derive(Clone)]
pub struct RawFrame {
    // Timestamp as an exact rational (seconds).
    pub timestamp: Rational64,
    // Interleaved i16 PCM samples.
    pub data: Vec<i16>,
}

impl RawFrame {
    /// Duration of this frame in seconds, as an exact rational.
    ///
    /// NOTE(review): the denominator hard-codes 48_000 instead of using
    /// `SAMPLE_RATE` — keep the two in sync if the rate ever changes.
    fn duration(&self, num_channels: usize) -> Rational64 {
        Rational64::new((self.data.len() / num_channels) as i64, 48_000)
    }
}

/// Owns the LAME global-flags handle and accumulates encoded MP3 bytes
/// across `encode` calls.
pub struct Encoder {
    lame: NonNull<lame_global_flags>,
    data: Vec<u8>,
}

/// C entry point: interpret `data[..length]` as a complete WAV file,
/// encode it to MP3, write the MP3 to `<unix-timestamp>.mp3`, and return
/// the filename as a heap-allocated C string.
///
/// Safety: `data` must point to `length` valid, writable bytes for the
/// duration of the call (required by `from_raw_parts_mut`). The returned
/// pointer comes from `CString::into_raw` and is leaked unless the caller
/// eventually frees it by handing it back to Rust.
#[no_mangle]
pub extern "C" fn encode(data: *mut u8, length: u32) -> *const c_char {
    unsafe {
        let buf: &mut [u8] = core::slice::from_raw_parts_mut(data, length as usize);
        let mut ptr = io::Cursor::new(buf);
        let mut wav_reader = hound::WavReader::new(&mut ptr).unwrap();
        // Collect every sample; panics on the first malformed sample.
        let mut data = Vec::new();
        for sample in wav_reader.samples() {
            match sample {
                Ok(smpl) => data.push(smpl),
                _ => panic!("failed"),
            }
        }
        let timestamp = Rational64::from_integer(0);
        let raw_frame = RawFrame { timestamp, data };
        let mut encoder = Encoder::new();
        let output = encoder.encode(raw_frame);
        return CString::new(output)
            .expect("failed to encode mp3")
            .into_raw();
    }
}

impl Encoder {
    /// Initialize a LAME encoder: 48 kHz input, 128 kbps, quality 2, stereo,
    /// automatic ID3 tags disabled. Panics if LAME rejects the parameters.
    fn new() -> Encoder {
        let lame = unsafe {
            let lame = NonNull::new(lame_init()).expect("Failed to allocate lame global flags");
            lame_set_in_samplerate(lame.as_ptr(), SAMPLE_RATE as i32);
            lame_set_brate(lame.as_ptr(), 128);
            lame_set_quality(lame.as_ptr(), 2); // 2=high 5=medium 7=low
            lame_set_num_channels(lame.as_ptr(), 2);
            lame_set_write_id3tag_automatic(lame.as_ptr(), 0);
            let ret = lame_init_params(lame.as_ptr());
            if ret < 0 {
                panic!("failed to lame_init_params(): {}", ret);
            }
            lame
        };
        Encoder {
            lame,
            data: Vec::new(),
        }
    }

    /// Encode one stereo frame to MP3, append it to the running buffer,
    /// write the whole buffer to `<unix-seconds>.mp3`, and return that
    /// filename.
    ///
    /// NOTE(review): `lame_encode_flush` is never called, so samples still
    /// buffered inside LAME at the end of the stream are dropped — confirm
    /// whether that is intentional.
    fn encode(&mut self, mut raw_frame: RawFrame) -> String {
        // Worst-case output size: duration(2) * SAMPLE_RATE recovers the
        // per-channel sample count, so this is 1.25 * num_samples + 7200 —
        // LAME's documented recommended buffer bound.
        let estimated_encoded_bytes = (raw_frame.duration(2) * SAMPLE_RATE as i64 * 5 / 4 + 7200)
            .ceil()
            .to_integer() as usize;
        let mut encoded: Vec<u8> = Vec::new();
        encoded.resize(estimated_encoded_bytes, 0);
        // num_samples is per channel, hence the division by 2.
        let encoded_bytes = unsafe {
            lame_encode_buffer_interleaved(
                self.lame.as_ptr(),
                raw_frame.data.as_mut_ptr(),
                raw_frame.data.len() as i32 / 2,
                encoded.as_mut_ptr(),
                encoded.len() as i32,
            )
        };
        // Negative return values are LAME error codes.
        if encoded_bytes < 0 {
            panic!(
                "Failed to lame_encode_buffer_interleaved: error={}",
                encoded_bytes
            );
        }
        // Shrink to the bytes actually produced, then accumulate.
        encoded.resize(encoded_bytes as usize, 0);
        self.data.append(&mut encoded);
        let data = self.data.clone();
        let now = SystemTime::now();
        let unixtime = now.duration_since(UNIX_EPOCH).expect("back to the future");
        let timestamp = unixtime.as_secs();
        let tempfile = format!("{}.mp3", timestamp);
        File::create(tempfile.clone())
            .expect("oops")
            .write_all(&data)
            .expect("oops");
        return tempfile;
    }
}

impl Drop for Encoder {
    // Release the LAME global-flags handle allocated in `new`.
    fn drop(&mut self) {
        unsafe { lame_close(self.lame.as_ptr()) };
    }
}
29.24
97
0.55513
e2b984959060c2b270e9536b509fbcaaa67e512b
16,847
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// Generalized type folding mechanism.
//
// NOTE(review): this uses a historical rustc-internal dialect
// (`ty::t`, `OwnedSlice`, `impl<T:TypeFoldable>`); kept verbatim.

use middle::subst;
use middle::subst::VecPerParamSpace;
use middle::ty;
use middle::typeck;
use std::rc::Rc;
use syntax::ast;
use syntax::owned_slice::OwnedSlice;
use util::ppaux::Repr;

///////////////////////////////////////////////////////////////////////////
// Two generic traits

/// The TypeFoldable trait is implemented for every type that can be folded.
/// Basically, every type that has a corresponding method in TypeFolder.
pub trait TypeFoldable {
    fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> Self;
}

/// The TypeFolder trait defines the actual *folding*. There is a
/// method defined for every foldable type. Each of these has a
/// default implementation that does an "identity" fold. Within each
/// identity fold, it should invoke `foo.fold_with(self)` to fold each
/// sub-item.
pub trait TypeFolder {
    // The type context; the only method without a default.
    fn tcx<'a>(&'a self) -> &'a ty::ctxt;

    // Each default below delegates to the matching free `super_fold_*`
    // function, which recurses into sub-items via `fold_with`.
    fn fold_ty(&mut self, t: ty::t) -> ty::t {
        super_fold_ty(self, t)
    }

    fn fold_mt(&mut self, t: &ty::mt) -> ty::mt {
        super_fold_mt(self, t)
    }

    fn fold_trait_ref(&mut self, t: &ty::TraitRef) -> ty::TraitRef {
        super_fold_trait_ref(self, t)
    }

    fn fold_sty(&mut self, sty: &ty::sty) -> ty::sty {
        super_fold_sty(self, sty)
    }

    fn fold_substs(&mut self,
                   substs: &subst::Substs)
                   -> subst::Substs {
        super_fold_substs(self, substs)
    }

    fn fold_sig(&mut self,
                sig: &ty::FnSig)
                -> ty::FnSig {
        super_fold_sig(self, sig)
    }

    fn fold_bare_fn_ty(&mut self,
                       fty: &ty::BareFnTy)
                       -> ty::BareFnTy {
        super_fold_bare_fn_ty(self, fty)
    }

    fn fold_closure_ty(&mut self,
                       fty: &ty::ClosureTy)
                       -> ty::ClosureTy {
        super_fold_closure_ty(self, fty)
    }

    // Regions are leaves: identity by default, no recursion needed.
    fn fold_region(&mut self, r: ty::Region) -> ty::Region {
        r
    }

    fn fold_trait_store(&mut self, s: ty::TraitStore) -> ty::TraitStore {
        super_fold_trait_store(self, s)
    }

    fn fold_autoref(&mut self, ar: &ty::AutoRef) -> ty::AutoRef {
        super_fold_autoref(self, ar)
    }

    fn fold_item_substs(&mut self, i: ty::ItemSubsts) -> ty::ItemSubsts {
        super_fold_item_substs(self, i)
    }
}

///////////////////////////////////////////////////////////////////////////
// TypeFoldable implementations.
//
// Ideally, each type should invoke `folder.fold_foo(self)` and
// nothing else. In some cases, though, we haven't gotten around to
// adding methods on the `folder` yet, and thus the folding is
// hard-coded here. This is less-flexible, because folders cannot
// override the behavior, but there are a lot of random types and one
// can easily refactor the folding into the TypeFolder trait as
// needed.

// Generic containers: fold each contained element (or the referent).

impl<T:TypeFoldable> TypeFoldable for Option<T> {
    fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> Option<T> {
        self.as_ref().map(|t| t.fold_with(folder))
    }
}

impl<T:TypeFoldable> TypeFoldable for Rc<T> {
    fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> Rc<T> {
        // Note: always re-allocates a new Rc, even for identity folds.
        Rc::new((**self).fold_with(folder))
    }
}

impl<T:TypeFoldable> TypeFoldable for Vec<T> {
    fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> Vec<T> {
        self.iter().map(|t| t.fold_with(folder)).collect()
    }
}

impl<T:TypeFoldable> TypeFoldable for OwnedSlice<T> {
    fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> OwnedSlice<T> {
        self.iter().map(|t| t.fold_with(folder)).collect()
    }
}

impl<T:TypeFoldable> TypeFoldable for VecPerParamSpace<T> {
    fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> VecPerParamSpace<T> {
        self.map(|t| t.fold_with(folder))
    }
}

// Single types: delegate to the corresponding folder method.

impl TypeFoldable for ty::TraitStore {
    fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::TraitStore {
        folder.fold_trait_store(*self)
    }
}

impl TypeFoldable for ty::t {
    fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::t {
        folder.fold_ty(*self)
    }
}

impl TypeFoldable for ty::BareFnTy {
    fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::BareFnTy {
        folder.fold_bare_fn_ty(self)
    }
}

impl TypeFoldable for ty::ClosureTy {
    fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::ClosureTy {
        folder.fold_closure_ty(self)
    }
}

impl TypeFoldable for ty::mt {
    fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::mt {
        folder.fold_mt(self)
    }
}

impl TypeFoldable for ty::FnSig {
    fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::FnSig {
        folder.fold_sig(self)
    }
}

impl TypeFoldable for ty::sty {
    fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::sty {
        folder.fold_sty(self)
    }
}

impl TypeFoldable for ty::TraitRef {
    fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::TraitRef {
        folder.fold_trait_ref(self)
    }
}

impl TypeFoldable for ty::Region {
    fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::Region {
        folder.fold_region(*self)
    }
}

impl TypeFoldable for subst::Substs {
    fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> subst::Substs {
        folder.fold_substs(self)
    }
}

impl TypeFoldable for ty::ItemSubsts {
    fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::ItemSubsts {
        ty::ItemSubsts {
            substs: self.substs.fold_with(folder),
        }
    }
}

impl TypeFoldable for ty::AutoRef {
    fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::AutoRef {
        folder.fold_autoref(self)
    }
}

impl TypeFoldable for typeck::vtable_origin {
    fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> typeck::vtable_origin {
        match *self {
            // Only the static case carries foldable payloads (substs and
            // nested origins); the rest are reconstructed unchanged.
            typeck::vtable_static(def_id, ref substs, ref origins) => {
                let r_substs = substs.fold_with(folder);
                let r_origins = origins.fold_with(folder);
                typeck::vtable_static(def_id, r_substs, r_origins)
            }
            typeck::vtable_param(n, b) => {
                typeck::vtable_param(n, b)
            }
            typeck::vtable_unboxed_closure(def_id) => {
                typeck::vtable_unboxed_closure(def_id)
            }
            typeck::vtable_error => {
                typeck::vtable_error
            }
        }
    }
}

impl TypeFoldable for ty::BuiltinBounds {
    // Contains nothing foldable; identity.
    fn fold_with<F:TypeFolder>(&self, _folder: &mut F) -> ty::BuiltinBounds {
        *self
    }
}

impl TypeFoldable for ty::ParamBounds {
    fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::ParamBounds {
        ty::ParamBounds {
            builtin_bounds: self.builtin_bounds.fold_with(folder),
            trait_bounds: self.trait_bounds.fold_with(folder),
        }
    }
}

impl TypeFoldable for ty::TypeParameterDef {
    fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::TypeParameterDef {
        ty::TypeParameterDef {
            ident: self.ident,
            def_id: self.def_id,
            space: self.space,
            index: self.index,
            bounds: self.bounds.fold_with(folder),
            default: self.default.fold_with(folder),
        }
    }
}

impl TypeFoldable for ty::RegionParameterDef {
    // Contains nothing foldable; identity.
    fn fold_with<F:TypeFolder>(&self, _folder: &mut F) -> ty::RegionParameterDef {
        *self
    }
}

impl TypeFoldable for ty::Generics {
    fn fold_with<F:TypeFolder>(&self, folder: &mut F) -> ty::Generics {
        ty::Generics {
            types: self.types.fold_with(folder),
            regions: self.regions.fold_with(folder),
        }
    }
}
///////////////////////////////////////////////////////////////////////////
// "super" routines: these are the default implementations for TypeFolder.
//
// They should invoke `foo.fold_with()` to do recursive folding.
// NOTE(review): pre-1.0 rustc code; comments only, tokens unchanged.

pub fn super_fold_ty<T:TypeFolder>(this: &mut T, t: ty::t) -> ty::t {
    // Fold the structural sty, then re-intern it in the type context.
    let sty = ty::get(t).sty.fold_with(this);
    ty::mk_t(this.tcx(), sty)
}

pub fn super_fold_substs<T:TypeFolder>(this: &mut T,
                                       substs: &subst::Substs)
                                       -> subst::Substs {
    // Erased regions have no content to fold; non-erased ones are folded
    // element-wise.
    let regions = match substs.regions {
        subst::ErasedRegions => {
            subst::ErasedRegions
        }
        subst::NonerasedRegions(ref regions) => {
            subst::NonerasedRegions(regions.fold_with(this))
        }
    };

    subst::Substs { regions: regions,
                    types: substs.types.fold_with(this) }
}

pub fn super_fold_sig<T:TypeFolder>(this: &mut T,
                                    sig: &ty::FnSig)
                                    -> ty::FnSig {
    // binder_id and variadic are carried through unchanged.
    ty::FnSig { binder_id: sig.binder_id,
                inputs: sig.inputs.fold_with(this),
                output: sig.output.fold_with(this),
                variadic: sig.variadic }
}

pub fn super_fold_bare_fn_ty<T:TypeFolder>(this: &mut T,
                                           fty: &ty::BareFnTy)
                                           -> ty::BareFnTy {
    ty::BareFnTy { sig: fty.sig.fold_with(this),
                   abi: fty.abi,
                   fn_style: fty.fn_style }
}

pub fn super_fold_closure_ty<T:TypeFolder>(this: &mut T,
                                           fty: &ty::ClosureTy)
                                           -> ty::ClosureTy {
    ty::ClosureTy {
        store: fty.store.fold_with(this),
        sig: fty.sig.fold_with(this),
        fn_style: fty.fn_style,
        onceness: fty.onceness,
        bounds: fty.bounds,
        abi: fty.abi,
    }
}

pub fn super_fold_trait_ref<T:TypeFolder>(this: &mut T,
                                          t: &ty::TraitRef)
                                          -> ty::TraitRef {
    ty::TraitRef {
        def_id: t.def_id,
        substs: t.substs.fold_with(this),
    }
}

pub fn super_fold_mt<T:TypeFolder>(this: &mut T,
                                   mt: &ty::mt) -> ty::mt {
    ty::mt {ty: mt.ty.fold_with(this),
            mutbl: mt.mutbl}
}

pub fn super_fold_sty<T:TypeFolder>(this: &mut T,
                                    sty: &ty::sty) -> ty::sty {
    // One arm per type constructor: fold every component, rebuild the sty.
    match *sty {
        ty::ty_box(typ) => {
            ty::ty_box(typ.fold_with(this))
        }
        ty::ty_uniq(typ) => {
            ty::ty_uniq(typ.fold_with(this))
        }
        ty::ty_ptr(ref tm) => {
            ty::ty_ptr(tm.fold_with(this))
        }
        ty::ty_vec(ref tm, sz) => {
            ty::ty_vec(tm.fold_with(this), sz)
        }
        ty::ty_enum(tid, ref substs) => {
            ty::ty_enum(tid, substs.fold_with(this))
        }
        ty::ty_trait(box ty::TyTrait { def_id, ref substs, bounds }) => {
            ty::ty_trait(box ty::TyTrait {
                def_id: def_id,
                substs: substs.fold_with(this),
                bounds: bounds
            })
        }
        ty::ty_tup(ref ts) => {
            ty::ty_tup(ts.fold_with(this))
        }
        ty::ty_bare_fn(ref f) => {
            ty::ty_bare_fn(f.fold_with(this))
        }
        ty::ty_closure(ref f) => {
            ty::ty_closure(box f.fold_with(this))
        }
        ty::ty_rptr(r, ref tm) => {
            ty::ty_rptr(r.fold_with(this), tm.fold_with(this))
        }
        ty::ty_struct(did, ref substs) => {
            ty::ty_struct(did, substs.fold_with(this))
        }
        ty::ty_unboxed_closure(did) => {
            ty::ty_unboxed_closure(did)
        }
        // Nullary constructors: nothing to fold, clone as-is.
        ty::ty_nil | ty::ty_bot | ty::ty_bool | ty::ty_char | ty::ty_str |
        ty::ty_int(_) | ty::ty_uint(_) | ty::ty_float(_) |
        ty::ty_err | ty::ty_infer(_) | ty::ty_param(..) => {
            (*sty).clone()
        }
    }
}

pub fn super_fold_trait_store<T:TypeFolder>(this: &mut T,
                                            trait_store: ty::TraitStore)
                                            -> ty::TraitStore {
    match trait_store {
        ty::UniqTraitStore => ty::UniqTraitStore,
        ty::RegionTraitStore(r, m) => {
            ty::RegionTraitStore(r.fold_with(this), m)
        }
    }
}

pub fn super_fold_autoref<T:TypeFolder>(this: &mut T,
                                        autoref: &ty::AutoRef)
                                        -> ty::AutoRef {
    match *autoref {
        ty::AutoPtr(r, m) => ty::AutoPtr(r.fold_with(this), m),
        ty::AutoBorrowVec(r, m) => ty::AutoBorrowVec(r.fold_with(this), m),
        ty::AutoBorrowVecRef(r, m) => ty::AutoBorrowVecRef(r.fold_with(this), m),
        ty::AutoUnsafe(m) => ty::AutoUnsafe(m),
        ty::AutoBorrowObj(r, m) => ty::AutoBorrowObj(r.fold_with(this), m),
    }
}

pub fn super_fold_item_substs<T:TypeFolder>(this: &mut T,
                                            substs: ty::ItemSubsts)
                                            -> ty::ItemSubsts {
    ty::ItemSubsts {
        substs: substs.substs.fold_with(this),
    }
}

///////////////////////////////////////////////////////////////////////////
// Some sample folders

// Applies `fldop` to every type, bottom-up (children are folded first via
// `super_fold_ty`, then the op runs on the rebuilt type).
pub struct BottomUpFolder<'a> {
    pub tcx: &'a ty::ctxt,
    pub fldop: |ty::t|: 'a -> ty::t,
}

impl<'a> TypeFolder for BottomUpFolder<'a> {
    fn tcx<'a>(&'a self) -> &'a ty::ctxt { self.tcx }

    fn fold_ty(&mut self, ty: ty::t) -> ty::t {
        let t1 = super_fold_ty(self, ty);
        (self.fldop)(t1)
    }
}

///////////////////////////////////////////////////////////////////////////
// Region folder

/// Folds over the substructure of a type, visiting its component
/// types and all regions that occur *free* within it.
///
/// That is, `ty::t` can contain function or method types that bind
/// regions at the call site (`ReLateBound`), and occurrences of
/// regions (aka "lifetimes") that are bound within a type are not
/// visited by this folder; only regions that occur free will be
/// visited by `fld_r`.
///
/// (The distinction between "free" and "bound" is represented by
/// keeping track of each `FnSig` in the lexical context of the
/// current position of the fold.)
pub struct RegionFolder<'a> {
    tcx: &'a ty::ctxt,
    fld_t: |ty::t|: 'a -> ty::t,
    fld_r: |ty::Region|: 'a -> ty::Region,
    // Stack of binder ids of the fn/closure sigs currently being traversed;
    // a late-bound region whose binder is on this stack is *bound* here.
    within_binder_ids: Vec<ast::NodeId>,
}

impl<'a> RegionFolder<'a> {
    pub fn general(tcx: &'a ty::ctxt,
                   fld_r: |ty::Region|: 'a -> ty::Region,
                   fld_t: |ty::t|: 'a -> ty::t)
                   -> RegionFolder<'a> {
        RegionFolder {
            tcx: tcx,
            fld_t: fld_t,
            fld_r: fld_r,
            within_binder_ids: vec![],
        }
    }

    // Convenience constructor: fold only regions, leave types untouched.
    pub fn regions(tcx: &'a ty::ctxt, fld_r: |ty::Region|: 'a -> ty::Region)
                   -> RegionFolder<'a> {
        fn noop(t: ty::t) -> ty::t { t }

        RegionFolder {
            tcx: tcx,
            fld_t: noop,
            fld_r: fld_r,
            within_binder_ids: vec![],
        }
    }
}

/// If `ty` has `FnSig` (i.e. closure or fn), return its binder_id;
/// else None.
fn opt_binder_id_of_function(t: ty::t) -> Option<ast::NodeId> {
    match ty::get(t).sty {
        ty::ty_closure(ref f) => Some(f.sig.binder_id),
        ty::ty_bare_fn(ref f) => Some(f.sig.binder_id),
        _ => None,
    }
}

impl<'a> TypeFolder for RegionFolder<'a> {
    fn tcx<'a>(&'a self) -> &'a ty::ctxt { self.tcx }

    fn fold_ty(&mut self, ty: ty::t) -> ty::t {
        debug!("RegionFolder.fold_ty({})", ty.repr(self.tcx()));
        // Entering a fn/closure type: push its binder so that regions bound
        // by it are skipped by fold_region; popped again after the fold.
        let opt_binder_id = opt_binder_id_of_function(ty);
        match opt_binder_id {
            Some(binder_id) => self.within_binder_ids.push(binder_id),
            None => {}
        }

        let t1 = super_fold_ty(self, ty);
        let ret = (self.fld_t)(t1);

        if opt_binder_id.is_some() {
            self.within_binder_ids.pop();
        }

        ret
    }

    fn fold_region(&mut self, r: ty::Region) -> ty::Region {
        match r {
            // Bound within a sig we are currently inside: leave untouched.
            ty::ReLateBound(binder_id, _) if self.within_binder_ids.contains(&binder_id) => {
                debug!("RegionFolder.fold_region({}) skipped bound region", r.repr(self.tcx()));
                r
            }
            // Free region: hand to the caller's callback.
            _ => {
                debug!("RegionFolder.fold_region({}) folding free region", r.repr(self.tcx()));
                (self.fld_r)(r)
            }
        }
    }
}
30.686703
96
0.539384
f5796fffbb7d702f91abdce088e45cef9d4714a8
2,287
use crate::{ datatypes::DataType, types::{NativeType, NaturalDataType}, error::ArrowError, }; use super::Scalar; /// The implementation of [`Scalar`] for primitive, semantically equivalent to [`Option<T>`] /// with [`DataType`]. #[derive(Debug, Clone)] pub struct PrimitiveScalar<T: NativeType> { // Not Option<T> because this offers a stabler pointer offset on the struct value: T, is_valid: bool, data_type: DataType, } impl<T: NativeType> PartialEq for PrimitiveScalar<T> { fn eq(&self, other: &Self) -> bool { self.data_type == other.data_type && self.is_valid == other.is_valid && ((!self.is_valid) | (self.value == other.value)) } } impl<T: NativeType> PrimitiveScalar<T> { /// Returns a new [`PrimitiveScalar`]. #[inline] pub fn new(data_type: DataType, v: Option<T>) -> Self { if !T::is_valid(&data_type) { Err(ArrowError::InvalidArgumentError(format!( "Type {} does not support logical type {}", std::any::type_name::<T>(), data_type ))) .unwrap() } let is_valid = v.is_some(); Self { value: v.unwrap_or_default(), is_valid, data_type, } } /// Returns the value irrespectively of its validity. #[inline] pub fn value(&self) -> T { self.value } /// Returns a new `PrimitiveScalar` with the same value but different [`DataType`] /// # Panic /// This function panics if the `data_type` is not valid for self's physical type `T`. pub fn to(self, data_type: DataType) -> Self { let v = if self.is_valid { Some(self.value) } else { None }; Self::new(data_type, v) } } impl<T: NativeType + NaturalDataType> From<Option<T>> for PrimitiveScalar<T> { #[inline] fn from(v: Option<T>) -> Self { Self::new(T::DATA_TYPE, v) } } impl<T: NativeType> Scalar for PrimitiveScalar<T> { #[inline] fn as_any(&self) -> &dyn std::any::Any { self } #[inline] fn is_valid(&self) -> bool { self.is_valid } #[inline] fn data_type(&self) -> &DataType { &self.data_type } }
25.696629
92
0.56056
50128330fbc03d2a5159a30d1ad336f1368836eb
12,728
#![cfg(feature = "serde")]

extern crate serde;

use self::serde::de::{
    Deserialize, DeserializeSeed, Deserializer, EnumAccess, Error, Unexpected, VariantAccess,
    Visitor,
};
use self::serde::ser::{Serialize, Serializer};

use {Level, LevelFilter, LOG_LEVEL_NAMES};

use std::fmt;
use std::str::{self, FromStr};

// The Deserialize impls are handwritten to be case insensitive using FromStr.
// NOTE(review): `LOG_LEVEL_NAMES[0]` is "OFF"; `Level` has no Off variant, so
// its impls use `LOG_LEVEL_NAMES[1..]`, while `LevelFilter` uses the full list.

impl Serialize for Level {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match *self {
            Level::Error => serializer.serialize_unit_variant("Level", 0, "ERROR"),
            Level::Warn => serializer.serialize_unit_variant("Level", 1, "WARN"),
            Level::Info => serializer.serialize_unit_variant("Level", 2, "INFO"),
            Level::Debug => serializer.serialize_unit_variant("Level", 3, "DEBUG"),
            Level::Trace => serializer.serialize_unit_variant("Level", 4, "TRACE"),
        }
    }
}

impl<'de> Deserialize<'de> for Level {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Visitor for the variant *identifier* (string, bytes, or index).
        struct LevelIdentifier;

        impl<'de> Visitor<'de> for LevelIdentifier {
            type Value = Level;

            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                formatter.write_str("log level")
            }

            fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
            where
                E: Error,
            {
                // Case insensitive.
                FromStr::from_str(s).map_err(|_| Error::unknown_variant(s, &LOG_LEVEL_NAMES[1..]))
            }

            fn visit_bytes<E>(self, value: &[u8]) -> Result<Self::Value, E>
            where
                E: Error,
            {
                // Decode as UTF-8, then defer to the string path.
                let variant = str::from_utf8(value)
                    .map_err(|_| Error::invalid_value(Unexpected::Bytes(value), &self))?;

                self.visit_str(variant)
            }

            fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
            where
                E: Error,
            {
                // Map a numeric variant index back to its name, then parse it.
                let variant = LOG_LEVEL_NAMES[1..]
                    .get(v as usize)
                    .ok_or_else(|| Error::invalid_value(Unexpected::Unsigned(v), &self))?;

                self.visit_str(variant)
            }
        }

        impl<'de> DeserializeSeed<'de> for LevelIdentifier {
            type Value = Level;

            fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
            where
                D: Deserializer<'de>,
            {
                deserializer.deserialize_identifier(LevelIdentifier)
            }
        }

        // Visitor for the enum value itself (identifier + unit payload).
        struct LevelEnum;

        impl<'de> Visitor<'de> for LevelEnum {
            type Value = Level;

            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                formatter.write_str("log level")
            }

            fn visit_enum<A>(self, value: A) -> Result<Self::Value, A::Error>
            where
                A: EnumAccess<'de>,
            {
                let (level, variant) = value.variant_seed(LevelIdentifier)?;
                // Every variant is a unit variant.
                variant.unit_variant()?;
                Ok(level)
            }
        }

        deserializer.deserialize_enum("Level", &LOG_LEVEL_NAMES[1..], LevelEnum)
    }
}

impl Serialize for LevelFilter {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match *self {
            LevelFilter::Off => serializer.serialize_unit_variant("LevelFilter", 0, "OFF"),
            LevelFilter::Error => serializer.serialize_unit_variant("LevelFilter", 1, "ERROR"),
            LevelFilter::Warn => serializer.serialize_unit_variant("LevelFilter", 2, "WARN"),
            LevelFilter::Info => serializer.serialize_unit_variant("LevelFilter", 3, "INFO"),
            LevelFilter::Debug => serializer.serialize_unit_variant("LevelFilter", 4, "DEBUG"),
            LevelFilter::Trace => serializer.serialize_unit_variant("LevelFilter", 5, "TRACE"),
        }
    }
}

impl<'de> Deserialize<'de> for LevelFilter {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Mirrors the `Level` deserializer, but over the full name list
        // (including "OFF" at index 0).
        struct LevelFilterIdentifier;

        impl<'de> Visitor<'de> for LevelFilterIdentifier {
            type Value = LevelFilter;

            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                formatter.write_str("log level filter")
            }

            fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
            where
                E: Error,
            {
                // Case insensitive.
                FromStr::from_str(s).map_err(|_| Error::unknown_variant(s, &LOG_LEVEL_NAMES))
            }

            fn visit_bytes<E>(self, value: &[u8]) -> Result<Self::Value, E>
            where
                E: Error,
            {
                let variant = str::from_utf8(value)
                    .map_err(|_| Error::invalid_value(Unexpected::Bytes(value), &self))?;

                self.visit_str(variant)
            }

            fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
            where
                E: Error,
            {
                let variant = LOG_LEVEL_NAMES
                    .get(v as usize)
                    .ok_or_else(|| Error::invalid_value(Unexpected::Unsigned(v), &self))?;

                self.visit_str(variant)
            }
        }

        impl<'de> DeserializeSeed<'de> for LevelFilterIdentifier {
            type Value = LevelFilter;

            fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
            where
                D: Deserializer<'de>,
            {
                deserializer.deserialize_identifier(LevelFilterIdentifier)
            }
        }

        struct LevelFilterEnum;

        impl<'de> Visitor<'de> for LevelFilterEnum {
            type Value = LevelFilter;

            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                formatter.write_str("log level filter")
            }

            fn visit_enum<A>(self, value: A) -> Result<Self::Value, A::Error>
            where
                A: EnumAccess<'de>,
            {
                let (level_filter, variant) = value.variant_seed(LevelFilterIdentifier)?;
                // Every variant is a unit variant.
                variant.unit_variant()?;
                Ok(level_filter)
            }
        }

        deserializer.deserialize_enum("LevelFilter", &LOG_LEVEL_NAMES, LevelFilterEnum)
    }
}

#[cfg(test)]
mod tests {
    extern crate serde_test;
    use self::serde_test::{assert_de_tokens, assert_de_tokens_error, assert_tokens, Token};

    use {Level, LevelFilter};

    // Token-stream builders shared by the table-driven tests below.
    fn level_token(variant: &'static str) -> Token {
        Token::UnitVariant {
            name: "Level",
            variant: variant,
        }
    }

    fn level_bytes_tokens(variant: &'static [u8]) -> [Token; 3] {
        [
            Token::Enum { name: "Level" },
            Token::Bytes(variant),
            Token::Unit,
        ]
    }

    fn level_variant_tokens(variant: u32) -> [Token; 3] {
        [
            Token::Enum { name: "Level" },
            Token::U32(variant),
            Token::Unit,
        ]
    }

    fn level_filter_token(variant: &'static str) -> Token {
        Token::UnitVariant {
            name: "LevelFilter",
            variant: variant,
        }
    }

    fn level_filter_bytes_tokens(variant: &'static [u8]) -> [Token; 3] {
        [
            Token::Enum {
                name: "LevelFilter",
            },
            Token::Bytes(variant),
            Token::Unit,
        ]
    }

    fn level_filter_variant_tokens(variant: u32) -> [Token; 3] {
        [
            Token::Enum {
                name: "LevelFilter",
            },
            Token::U32(variant),
            Token::Unit,
        ]
    }

    #[test]
    fn test_level_ser_de() {
        let cases = [
            (Level::Error, [level_token("ERROR")]),
            (Level::Warn, [level_token("WARN")]),
            (Level::Info, [level_token("INFO")]),
            (Level::Debug, [level_token("DEBUG")]),
            (Level::Trace, [level_token("TRACE")]),
        ];

        for &(s, expected) in &cases {
            assert_tokens(&s, &expected);
        }
    }

    #[test]
    fn test_level_case_insensitive() {
        let cases = [
            (Level::Error, [level_token("error")]),
            (Level::Warn, [level_token("warn")]),
            (Level::Info, [level_token("info")]),
            (Level::Debug, [level_token("debug")]),
            (Level::Trace, [level_token("trace")]),
        ];

        for &(s, expected) in &cases {
            assert_de_tokens(&s, &expected);
        }
    }

    #[test]
    fn test_level_de_bytes() {
        let cases = [
            (Level::Error, level_bytes_tokens(b"ERROR")),
            (Level::Warn, level_bytes_tokens(b"WARN")),
            (Level::Info, level_bytes_tokens(b"INFO")),
            (Level::Debug, level_bytes_tokens(b"DEBUG")),
            (Level::Trace, level_bytes_tokens(b"TRACE")),
        ];

        for &(value, tokens) in &cases {
            assert_de_tokens(&value, &tokens);
        }
    }

    #[test]
    fn test_level_de_variant_index() {
        let cases = [
            (Level::Error, level_variant_tokens(0)),
            (Level::Warn, level_variant_tokens(1)),
            (Level::Info, level_variant_tokens(2)),
            (Level::Debug, level_variant_tokens(3)),
            (Level::Trace, level_variant_tokens(4)),
        ];

        for &(value, tokens) in &cases {
            assert_de_tokens(&value, &tokens);
        }
    }

    #[test]
    fn test_level_de_error() {
        let msg = "unknown variant `errorx`, expected one of \
                   `ERROR`, `WARN`, `INFO`, `DEBUG`, `TRACE`";
        assert_de_tokens_error::<Level>(&[level_token("errorx")], msg);
    }

    #[test]
    fn test_level_filter_ser_de() {
        let cases = [
            (LevelFilter::Off, [level_filter_token("OFF")]),
            (LevelFilter::Error, [level_filter_token("ERROR")]),
            (LevelFilter::Warn, [level_filter_token("WARN")]),
            (LevelFilter::Info, [level_filter_token("INFO")]),
            (LevelFilter::Debug, [level_filter_token("DEBUG")]),
            (LevelFilter::Trace, [level_filter_token("TRACE")]),
        ];

        for &(s, expected) in &cases {
            assert_tokens(&s, &expected);
        }
    }

    #[test]
    fn test_level_filter_case_insensitive() {
        let cases = [
            (LevelFilter::Off, [level_filter_token("off")]),
            (LevelFilter::Error, [level_filter_token("error")]),
            (LevelFilter::Warn, [level_filter_token("warn")]),
            (LevelFilter::Info, [level_filter_token("info")]),
            (LevelFilter::Debug, [level_filter_token("debug")]),
            (LevelFilter::Trace, [level_filter_token("trace")]),
        ];

        for &(s, expected) in &cases {
            assert_de_tokens(&s, &expected);
        }
    }

    #[test]
    fn test_level_filter_de_bytes() {
        let cases = [
            (LevelFilter::Off, level_filter_bytes_tokens(b"OFF")),
            (LevelFilter::Error, level_filter_bytes_tokens(b"ERROR")),
            (LevelFilter::Warn, level_filter_bytes_tokens(b"WARN")),
            (LevelFilter::Info, level_filter_bytes_tokens(b"INFO")),
            (LevelFilter::Debug, level_filter_bytes_tokens(b"DEBUG")),
            (LevelFilter::Trace, level_filter_bytes_tokens(b"TRACE")),
        ];

        for &(value, tokens) in &cases {
            assert_de_tokens(&value, &tokens);
        }
    }

    #[test]
    fn test_level_filter_de_variant_index() {
        let cases = [
            (LevelFilter::Off, level_filter_variant_tokens(0)),
            (LevelFilter::Error, level_filter_variant_tokens(1)),
            (LevelFilter::Warn, level_filter_variant_tokens(2)),
            (LevelFilter::Info, level_filter_variant_tokens(3)),
            (LevelFilter::Debug, level_filter_variant_tokens(4)),
            (LevelFilter::Trace, level_filter_variant_tokens(5)),
        ];

        for &(value, tokens) in &cases {
            assert_de_tokens(&value, &tokens);
        }
    }

    #[test]
    fn test_level_filter_de_error() {
        let msg = "unknown variant `errorx`, expected one of \
                   `OFF`, `ERROR`, `WARN`, `INFO`, `DEBUG`, `TRACE`";
        assert_de_tokens_error::<LevelFilter>(&[level_filter_token("errorx")], msg);
    }
}
31.740648
98
0.530798
ed4e56df59a6590d24dd7cb1674a36e2ab0058ee
376
use std::path;

/// Cargo build script: point the linker at the pre-built libwally artifacts
/// under `./bld/lib` and link `wallycore` plus its bundled `secp256k1`
/// dynamically. Emits the same `cargo:` directives as before.
fn main() {
    let lib_dir = path::PathBuf::from("./bld/lib");
    // `to_str()` only fails on non-UTF-8 paths; the message doubles as a hint
    // that the wally build step must have been run first.
    let search_path = lib_dir
        .as_path()
        .to_str()
        .expect("Please build wally first (in ./bld)");

    println!("cargo:rustc-link-search=native={}", search_path);
    for lib in &["wallycore", "secp256k1"] {
        println!("cargo:rustc-link-lib=dylib={}", lib);
    }
}
34.181818
88
0.667553
18dc9e196d435d285d45a4e2e2053abc40845039
4,271
// NOTE(review): pre-1.0 Rust (`box` expressions, `proc` closures, `as_slice`).
// Comments only; code tokens unchanged.

use cmd;
use channel;
use msg::RawMessage;
use util;
use server::{Server};
use con::Peer;

/// Handles the quit/part event
// Broadcasts the PART/QUIT message (with optional reason) to the channel and
// removes the member; if the client is not a member, replies ERR_NOTONCHANNEL
// for PART only (a QUIT for a non-member is silently ignored).
pub fn do_quit_leave(channel: &mut channel::Channel, client: Peer,
                     command: cmd::Command, reason: Option<Vec<u8>>) {
    let nick = channel.member_with_id(client.id()).map(|v| v.nick().to_string());
    match nick {
        Some(nick) => {
            let msg = {
                let payload = match reason {
                    None => vec![channel.name().as_bytes()],
                    Some(ref reason) => vec![channel.name().as_bytes(), reason.as_slice()],
                };
                RawMessage::new_raw(
                    command, payload.as_slice(), Some(nick.as_bytes())
                )
            };
            channel.broadcast(msg);
            channel.remove_member(&client.id());
        },
        // This error message makes only sense for the part command
        None if command == cmd::PART => channel.send_response(
            &client, cmd::ERR_NOTONCHANNEL,
            &[channel.name(), "You are not on this channel."]
        ),
        _ => {}
    }
}

/// Handles the PART command
pub struct Part {
    raw: RawMessage,
    // Channel names parsed from the comma-separated first parameter.
    channels: Vec<String>,
    // Optional part reason (second parameter), stored as raw bytes.
    reason: Option<Vec<u8>>
}

impl super::MessageHandler for Part {
    // Parses `PART <chan>{,<chan>} [reason]`; rejects the whole command on the
    // first invalid channel name, or when no parameters are given.
    fn from_message(message: RawMessage) -> Result<Box<Part>, Option<RawMessage>> {
        let params = message.params();
        let mut channels = Vec::new();
        if params.len() > 0 {
            for channel_name in params[0].as_slice().split(|c| *c == b',') {
                match util::verify_channel(channel_name) {
                    Some(channel) => {
                        channels.push(channel.to_string());
                    },
                    None => return Err(Some(RawMessage::new(cmd::REPLY(cmd::ERR_NOSUCHCHANNEL), &[
                        "*", String::from_utf8_lossy(channel_name).as_slice(),
                        "Invalid channel name."
                    ], None)))
                }
            }
            Ok(box Part {
                raw: message.clone(),
                channels: channels,
                reason: params.as_slice().get(1).map(|v| v.to_vec())
            })
        } else {
            Err(Some(RawMessage::new(cmd::REPLY(cmd::ERR_NEEDMOREPARAMS), &[
                "*", message.command().to_string().as_slice(),
                "no params given"
            ], None)))
        }
    }

    // Dispatches the part to each named channel's actor; unknown channels get
    // an ERR_NOSUCHCHANNEL reply.
    fn invoke(&self, server: &mut Server, origin: Peer) {
        let host = server.host().to_string(); // clone due to #6393
        for channel_name in self.channels.iter() {
            match server.channels.get_mut(channel_name) {
                Some(channel) => {
                    let reason = self.reason.clone();
                    let proxy = origin.clone();
                    channel.send(channel::HandleMut(proc(channel) {
                        do_quit_leave(channel, proxy, cmd::PART, reason)
                    }))
                },
                None => origin.send_response(cmd::ERR_NOSUCHCHANNEL,
                    &[channel_name.as_slice(), "No such channel"],
                    host.as_slice()
                )
            }
        }
    }

    fn raw_message(&self) -> &RawMessage { &self.raw }
}

/// Handles the QUIT command
pub struct Quit {
    raw: RawMessage,
    // Optional quit reason (first parameter), stored as raw bytes.
    reason: Option<Vec<u8>>
}

impl super::MessageHandler for Quit {
    // QUIT never fails to parse: the reason is simply optional.
    fn from_message(message: RawMessage) -> Result<Box<Quit>, Option<RawMessage>> {
        let reason = message.params().as_slice().get(0).map(
            |&v| v.to_vec());
        Ok(box Quit { raw: message, reason: reason })
    }

    // Closes the connection and leaves *every* channel; each channel actor
    // decides internally whether the peer was actually a member.
    fn invoke(&self, server: &mut Server, origin: Peer) {
        server.close_connection(&origin);
        for (_, channel) in server.channels.iter() {
            // TODO make this more performant, cache channels in user?
            let reason = self.reason.clone();
            let proxy = origin.clone();
            channel.send(channel::HandleMut(proc(channel) {
                do_quit_leave(channel, proxy, cmd::QUIT, reason)
            }))
        }
    }

    fn raw_message(&self) -> &RawMessage { &self.raw }
}
35.008197
98
0.504332
f4454963d45a8769bdea6bb4e28e171e051ea6e4
635
extern crate bio; use std::str; use bio::dna::Dna; /// Code Challenge: Solve the String Composition Problem. /// Input: An integer k and a string Text. /// Output: Compositionk(Text) (the k-mers can be provided in any order). fn main() { let mut s_dna = String::new(); let mut s_k = String::new(); bio::io::read_line(&mut s_k); bio::io::read_line(&mut s_dna); let k = s_k.parse::<usize>().unwrap(); let dna = Dna::from_string(s_dna); let mut res = dna.windows(k) .map(|x| unsafe { str::from_utf8_unchecked(x) }) .collect::<Vec<_>>(); res.sort(); bio::io::println_vec(&res); }
23.518519
73
0.607874
f5530c027d9288f78f3d4a6b3b382fc0bd115317
972
use std::future::Future;
use std::sync::RwLock;

use once_cell::sync::Lazy;
use tokio::runtime::Runtime;
use tokio::task::JoinHandle;
use tracing::trace;

/// Lazily-built global tokio runtime (tokio 0.2 threaded scheduler, threads
/// named "toda"). Wrapped in `Option` so the runtime can be taken out of the
/// slot for shutdown.
pub static RUNTIME: Lazy<RwLock<Option<Runtime>>> = Lazy::new(|| {
    trace!("build tokio runtime");

    RwLock::new(Some(
        tokio::runtime::Builder::new()
            .threaded_scheduler()
            .thread_name("toda")
            .enable_all()
            .build()
            .unwrap(),
    ))
});

/// Spawns `future` onto the global runtime.
///
/// # Panics
/// Panics if the lock is poisoned or the runtime has been taken out of the
/// slot. The original ended with `unreachable!()`, but the `None` state is
/// reachable by design (that is why the slot is an `Option`), so `expect`
/// reports it honestly instead.
pub fn spawn<F>(future: F) -> JoinHandle<F::Output>
where
    F: Future + Send + 'static,
    F::Output: Send + 'static,
{
    RUNTIME
        .read()
        .unwrap()
        .as_ref()
        .expect("tokio runtime has been shut down")
        .spawn(future)
}

/// Runs a blocking closure on the runtime's blocking thread pool.
///
/// # Panics
/// Same conditions as [`spawn`].
pub fn spawn_blocking<F, R>(func: F) -> JoinHandle<R>
where
    R: Send + 'static,
    F: FnOnce() -> R + Send + 'static,
{
    RUNTIME
        .read()
        .unwrap()
        .as_ref()
        .expect("tokio runtime has been shut down")
        .handle()
        .spawn_blocking(func)
}
22.604651
66
0.580247
91b0e011bd1e5cc3f46811f6a6b91d0abac9cbd9
1,223
pub use wasmtime_rust_macro::wasmtime;

// modules used by the macro
#[doc(hidden)]
pub mod __rt {
    pub use anyhow;
    pub use wasmtime;
    pub use wasmtime_wasi;

    use std::convert::{TryFrom, TryInto};

    // Converts a wasm call's result values (`Vec<wasmtime::Val>`) into a
    // strongly-typed Rust tuple; implemented below for tuples of arity 0..=3.
    pub trait FromVecValue: Sized {
        fn from(list: Vec<wasmtime::Val>) -> anyhow::Result<Self>;
    }

    // Generates one `FromVecValue` impl per listed tuple shape: each element
    // is pulled off the iterator and `try_into`-converted; too few or too
    // many values is an error.
    macro_rules! tuple {
        ($(($($a:ident),*),)*) => ($(
            impl<$($a: TryFrom<wasmtime::Val>),*> FromVecValue for ($($a,)*)
            where $(anyhow::Error: From<$a::Error>,)*
            {
                #[allow(non_snake_case)]
                fn from(list: Vec<wasmtime::Val>) -> anyhow::Result<Self> {
                    let mut iter = list.into_iter();
                    $(
                        let $a = iter.next()
                            .ok_or_else(|| anyhow::format_err!("not enough values"))?
                            .try_into()?;
                    )*
                    if iter.next().is_some() {
                        anyhow::bail!("too many return values");
                    }
                    Ok(($($a,)*))
                }
            }
        )*)
    }

    tuple! {
        (),
        (A),
        (A, B),
        (A, B, C),
    }
}
27.177778
85
0.408831
2f18a914b823880476e0d3a14857b02fa54db3fa
4,096
#![allow(non_snake_case, non_upper_case_globals)]
#![allow(non_camel_case_types)]
//! Advanced-timers
//!
//! Used by: stm32f302, stm32f303
// NOTE(review): machine-generated peripheral-access code; comments only. The
// `static mut` + interrupt-free ownership flag is the generator's pattern and
// is deliberately left untouched.

#[cfg(not(feature = "nosync"))]
pub use crate::stm32f3::peripherals::tim8::Instance;
pub use crate::stm32f3::peripherals::tim8::{
    CCMR3_Output, ARR, BDTR, CCER, CCMR1, CCMR2, CCR1, CCR2, CCR3, CCR4, CCR5, CCR6, CNT, CR1, CR2,
    DCR, DIER, DMAR, EGR, OR, PSC, RCR, SMCR, SR,
};
pub use crate::stm32f3::peripherals::tim8::{RegisterBlock, ResetValues};

/// Access functions for the TIM8 peripheral instance
pub mod TIM8 {
    use super::ResetValues;

    #[cfg(not(feature = "nosync"))]
    use super::Instance;

    // TIM8 register block base address on this device family.
    #[cfg(not(feature = "nosync"))]
    const INSTANCE: Instance = Instance {
        addr: 0x40013400,
        _marker: ::core::marker::PhantomData,
    };

    /// Reset values for each field in TIM8
    pub const reset: ResetValues = ResetValues {
        CR1: 0x00000000,
        CR2: 0x00000000,
        SMCR: 0x00000000,
        DIER: 0x00000000,
        SR: 0x00000000,
        EGR: 0x00000000,
        CCMR1: 0x00000000,
        CCMR2: 0x00000000,
        CCER: 0x00000000,
        CNT: 0x00000000,
        PSC: 0x00000000,
        ARR: 0x00000000,
        RCR: 0x00000000,
        CCR1: 0x00000000,
        CCR2: 0x00000000,
        CCR3: 0x00000000,
        CCR4: 0x00000000,
        BDTR: 0x00000000,
        DCR: 0x00000000,
        DMAR: 0x00000000,
        CCMR3_Output: 0x00000000,
        CCR5: 0x00000000,
        CCR6: 0x00000000,
        OR: 0x00000000,
    };

    // Ownership flag for `take`/`release`/`steal`; only ever touched inside
    // `interrupt::free`, which is what makes the `static mut` accesses sound.
    #[cfg(not(feature = "nosync"))]
    #[allow(renamed_and_removed_lints)]
    #[allow(private_no_mangle_statics)]
    #[no_mangle]
    static mut TIM8_TAKEN: bool = false;

    /// Safe access to TIM8
    ///
    /// This function returns `Some(Instance)` if this instance is not
    /// currently taken, and `None` if it is. This ensures that if you
    /// do get `Some(Instance)`, you are ensured unique access to
    /// the peripheral and there cannot be data races (unless other
    /// code uses `unsafe`, of course). You can then pass the
    /// `Instance` around to other functions as required. When you're
    /// done with it, you can call `release(instance)` to return it.
    ///
    /// `Instance` itself dereferences to a `RegisterBlock`, which
    /// provides access to the peripheral's registers.
    #[cfg(not(feature = "nosync"))]
    #[inline]
    pub fn take() -> Option<Instance> {
        external_cortex_m::interrupt::free(|_| unsafe {
            if TIM8_TAKEN {
                None
            } else {
                TIM8_TAKEN = true;
                Some(INSTANCE)
            }
        })
    }

    /// Release exclusive access to TIM8
    ///
    /// This function allows you to return an `Instance` so that it
    /// is available to `take()` again. This function will panic if
    /// you return a different `Instance` or if this instance is not
    /// already taken.
    #[cfg(not(feature = "nosync"))]
    #[inline]
    pub fn release(inst: Instance) {
        external_cortex_m::interrupt::free(|_| unsafe {
            if TIM8_TAKEN && inst.addr == INSTANCE.addr {
                TIM8_TAKEN = false;
            } else {
                panic!("Released a peripheral which was not taken");
            }
        });
    }

    /// Unsafely steal TIM8
    ///
    /// This function is similar to take() but forcibly takes the
    /// Instance, marking it as taken irregardless of its previous
    /// state.
    #[cfg(not(feature = "nosync"))]
    #[inline]
    pub unsafe fn steal() -> Instance {
        TIM8_TAKEN = true;
        INSTANCE
    }
}

/// Raw pointer to TIM8
///
/// Dereferencing this is unsafe because you are not ensured unique
/// access to the peripheral, so you may encounter data races with
/// other users of this peripheral. It is up to you to ensure you
/// will not cause data races.
///
/// This constant is provided for ease of use in unsafe code: you can
/// simply call for example `write_reg!(gpio, GPIOA, ODR, 1);`.
pub const TIM8: *const RegisterBlock = 0x40013400 as *const _;
32
99
0.608398
223d932258b3e58722c5dc1eae42b6d60c0f4198
334
fn main() {
    print_the_pattern(10);
}

/// Prints the pattern for `n` to stdout (signature unchanged; the actual
/// construction lives in the pure, testable `pattern_string` helper).
fn print_the_pattern(n: i32) {
    print!("{}", pattern_string(n));
}

/// Builds the pattern: one line per `i` counting down from `n` to 1; each line
/// writes every `j` from `n` down to 1, repeated `i` times.
/// E.g. `n = 2` yields "2211\n21\n".
fn pattern_string(n: i32) -> String {
    let mut s = String::new();
    for i in (1..=n).rev() {
        for j in (1..=n).rev() {
            for _ in 0..i {
                s.push_str(&j.to_string());
            }
        }
        s.push('\n');
    }
    s
}
16.7
51
0.41018
28fdb9947c9b5c66b4c54f0f2b6a14ef62bce308
4,565
// Criterion benchmarks for private-identity-audit proof generation and
// verification. NOTE(review): comments only; code unchanged. The
// "Parallel"/"Sequential" benchmark bodies are currently identical — the
// split presumably reflects a thread-count knob configured elsewhere (TODO
// confirm against the `private_identity_audit` crate).
use criterion::*;
use cryptography_core::cdd_claim::{compute_cdd_id, CddClaimData};
use cryptography_core::curve25519_dalek::scalar::Scalar;
use private_identity_audit::{
    uuid_to_scalar, verifier::gen_random_uuids, CommittedSetGenerator, PrivateUids, ProofGenerator,
    ProofVerifier, Prover, Verifier, VerifierSetGenerator,
};
use rand::{rngs::StdRng, SeedableRng};
use rand_core::RngCore;
use uuid::Uuid;

const UID_SET_MAX: usize = 100_000;

// Builds the benchmark fixture: 100 claims (random DIDs, all sharing the
// first uid) plus the verifier's full uid set as scalars.
fn setup(rng: &mut StdRng) -> (Vec<CddClaimData>, Vec<Scalar>) {
    let batch_size = 100;
    // Private input of the Verifier.
    let private_uid_set: Vec<Uuid> = gen_random_uuids(UID_SET_MAX, rng);

    // Make a random did for the investor.
    let mut investor_dids = vec![];
    for i in 0..batch_size {
        investor_dids.push([0u8; 32]);
        rng.fill_bytes(&mut investor_dids[i]);
    }

    // Verifier shares one of its uids with the Prover.
    (
        investor_dids
            .iter()
            .map(|did| CddClaimData::new(did, private_uid_set[0].as_bytes()))
            .collect::<Vec<_>>(),
        private_uid_set.into_iter().map(uuid_to_scalar).collect(),
    )
}

fn bench_proof_generation(c: &mut Criterion) {
    let mut group = c.benchmark_group("Proof Generation");
    let mut rng = StdRng::from_seed([20u8; 32]);

    let (claims, private_uid_scalar_set) = setup(&mut rng);

    // V -> P: Prover sends `proofs` and Verifier returns a list of 10 uids and the challenge.
    let (_, committed_uids) = VerifierSetGenerator::generate_committed_set(
        PrivateUids(private_uid_scalar_set),
        Some(UID_SET_MAX),
        &mut rng,
    )
    .unwrap();

    group.bench_function("Parallel", |b| {
        b.iter(|| {
            // On a machine with 20 cpus, the following should create 20 threads since the max
            // thread count is the number of cpus.
            let _ = Prover::generate_proofs(&claims, &committed_uids, &mut rng).unwrap();
        })
    });

    group.bench_function("Sequential", |b| {
        b.iter(|| {
            let _ = Prover::generate_proofs(&claims, &committed_uids, &mut rng).unwrap();
        })
    });

    group.finish();
}

fn bench_verify_proofs(c: &mut Criterion) {
    let mut group = c.benchmark_group("Proof Verification");
    let mut rng = StdRng::from_seed([20u8; 32]);

    let (claims, private_uid_scalar_set) = setup(&mut rng);

    // Prover generates cdd_id and places it on the chain.
    let cdd_ids = claims
        .iter()
        .map(|claim| compute_cdd_id(claim))
        .collect::<Vec<_>>();

    // V -> P: Prover sends `proofs` and Verifier returns a list of 10 uids and the challenge.
    let (verifier_secrets, committed_uids) = VerifierSetGenerator::generate_committed_set(
        PrivateUids(private_uid_scalar_set),
        Some(UID_SET_MAX),
        &mut rng,
    )
    .unwrap();

    // On a machine with 20 cpus, the following should create 20 threads since the max
    // thread count is the number of cpus.
    let (initial_message, final_response, re_committed_uids) =
        Prover::generate_proofs(&claims, &committed_uids, &mut rng).unwrap();

    group.bench_function("Parallel", |b| {
        b.iter(|| {
            // On a machine with 20 cpus, the following should create 20 threads since the max
            // thread count is the number of cpus.
            Verifier::verify_proofs(
                &initial_message,
                &final_response,
                &cdd_ids,
                &verifier_secrets,
                &re_committed_uids,
            )
            .iter()
            .for_each(|res| assert!(res.is_ok()));
        })
    });

    group.bench_function("Sequential", |b| {
        b.iter(|| {
            // On a machine with 20 cpus, the following should create 20 threads since the max
            // thread count is the number of cpus.
            Verifier::verify_proofs(
                &initial_message,
                &final_response,
                &cdd_ids,
                &verifier_secrets,
                &re_committed_uids,
            )
            .iter()
            .for_each(|res| assert!(res.is_ok()));
        })
    });

    group.finish();
}

criterion_group! {
    name = provider_proof_gen;
    // Lower the sample size to run faster; larger shuffle sizes are
    // long so we're not microbenchmarking anyways.
    // 10 is the minimum allowed sample size in Criterion.
    config = Criterion::default()
        .sample_size(10);
    targets = bench_proof_generation, bench_verify_proofs
}

criterion_main!(provider_proof_gen);
33.566176
99
0.618839
3357f252647915896baf296192a0ea5c3ff14580
930
// Page-size and address-space constants for the kernel memory map, plus
// power-of-two rounding helpers.

pub const PAGESIZE: usize = 4096;
pub const PAGESHIFT: usize = 12;

// Physical RAM window managed by the kernel.
pub const KERNELBASE: usize = 0x40000000;
pub const MEMSIZE: usize = 1 << 30; // 1GB
pub const PHYEND: usize = KERNELBASE + MEMSIZE;

// MMIO regions (base/size). NOTE(review): these match what a QEMU aarch64
// `virt`-style board would use (UART, GIC distributor/CPU interface,
// virtio-mmio) — confirm against the target platform.
pub const UARTBASE: usize = 0x09000000;
pub const UARTSIZE: usize = 0x00001000;

pub const GICDBASE: usize = 0x08000000;
pub const GICDSIZE: usize = 0x00010000;
pub const GICCBASE: usize = 0x08010000;
pub const GICCSIZE: usize = 0x00010000;

pub const VIRTMMIOBASE: usize = 0x0a000000;
pub const VIRTMMIOSIZE: usize = 0x00004000;

/// Rounds `v` up to the next multiple of `s`.
///
/// `s` must be a non-zero power of two. The original check `s & (s - 1) == 0`
/// silently accepted `s == 0` in release builds (where the `s - 1` wraps) and
/// then returned garbage; `is_power_of_two()` rejects zero as well.
pub fn round_up_with(v: usize, s: usize) -> usize {
    assert!(s.is_power_of_two());
    (v + s - 1) & !(s - 1)
}

/// Rounds `v` down to the previous multiple of `s` (a non-zero power of two).
pub fn round_down_with(v: usize, s: usize) -> usize {
    assert!(s.is_power_of_two());
    v & !(s - 1)
}

/// Rounds `addr` up to the next page boundary.
pub fn round_up(addr: usize) -> usize {
    round_up_with(addr, PAGESIZE)
}

/// Rounds `addr` down to its page boundary.
pub fn round_down(addr: usize) -> usize {
    round_down_with(addr, PAGESIZE)
}
25.833333
53
0.653763
6a3c34a5197c371266414b46f1dae02cebef30df
1,441
//! Joined locking //! use crate::{Lock, LockToken}; pub fn lock2<'token, A, B>(_: &'token mut LockToken, a: A, b: B) -> (A::Output, B::Output) where A: Lock<'token> + 'token, B: Lock<'token> + 'token, { unsafe { let lock_a = a.lock_info(); let lock_b = b.lock_info(); let mut locks = [&lock_a, &lock_b]; if locks[0].id > locks[1].id { let tmp = locks[0]; locks[0] = locks[1]; locks[1] = tmp; } locks[0].guard.lock(); locks[1].guard.lock(); (a.lock_unchecked(), b.lock_unchecked()) } } // TODO: add more `joinN` functions // TODO: add macro #[cfg(test)] mod tests { use super::*; use crate::*; #[test] fn test_lock2() { let mut group = LockGroup::new(); let mut token = group.token(); let mutex_a = group.mutex(42); let mutex_b = group.mutex(35); let (a, mut b) = lock2(&mut token, mutex_a.read(), mutex_b.write()); assert_eq!(*a, 42); assert_eq!(*b, 35); *b = 15; } #[test] fn test_lock2_rev() { let mut group = LockGroup::new(); let mut token = group.token(); let mutex_b = group.mutex(35); let mutex_a = group.mutex(42); let (a, mut b) = lock2(&mut token, mutex_a.read(), mutex_b.write()); assert_eq!(*a, 42); assert_eq!(*b, 35); *b = 15; } }
20.295775
90
0.501041
87cb749d8af5fbb5aa04b5056a09b1c8fbc14da8
2,138
use glob::glob; use std::{env, path::PathBuf}; const VERSION: &str = env!("CARGO_PKG_VERSION"); fn main() { // Forcibly disable backtraces. env::remove_var("RUST_LIB_BACKTRACE"); env::remove_var("RUST_BACKTRACE"); let mut app = clap::App::new("trash"); app = app.version(VERSION); app = app.about("Move files and folders to the trash"); app = app.arg( clap::Arg::with_name("files") .takes_value(true) .multiple(true) .help("Files to process") .required(true), ); // Ignore all flags of `rm` program. for flag in &["r", "f", "i", "d", "P", "R", "v", "W"] { app = app.arg( clap::Arg::with_name(flag) .takes_value(false) .multiple(false) .short(flag) .hidden(true) .required(false), ); } let globs: Vec<String> = app .get_matches() .values_of("files") .map_or_else(Vec::new, |v| { v.map(::std::convert::From::from) .map(|glob: String| { if glob == "." || glob == ".." || glob == "./" || glob == "../" || glob.ends_with("/.") || glob.ends_with("/..") || glob.ends_with("/./") || glob.ends_with("/../") { eprintln!(r#"Trash: "." and ".." may not be moved to the trash"#); std::process::exit(exitcode::USAGE); } else if glob == "/" { eprintln!(r#"Trash: "/" may not be moved to the trash"#); std::process::exit(exitcode::USAGE); } else { glob } }) .collect::<Vec<String>>() }); for pattern in globs { let paths: Vec<PathBuf> = glob(&pattern).unwrap().filter_map(Result::ok).collect(); trash::delete_all(paths).unwrap(); } }
33.40625
91
0.418148
d7d962c07d6ded530ebc93efdbffab04b27338b0
1,384
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // ignore-android: FIXME(#10381) // compile-flags:-g // gdb-command:rbreak zzz // gdb-command:run // STRUCT // gdb-command:finish // gdb-command:print arg1 // gdb-check:$1 = 1 // gdb-command:print arg2 // gdb-check:$2 = 2 // gdb-command:continue // ENUM // gdb-command:finish // gdb-command:print arg1 // gdb-check:$3 = -3 // gdb-command:print arg2 // gdb-check:$4 = 4.5 // gdb-command:print arg3 // gdb-check:$5 = 5 // gdb-command:continue #![feature(struct_variant)] struct Struct { x: int } impl Struct { fn static_method(arg1: int, arg2: int) -> int { zzz(); arg1 + arg2 } } enum Enum { Variant1 { x: int }, Variant2, Variant3(f64, int, char), } impl Enum { fn static_method(arg1: int, arg2: f64, arg3: uint) -> int { zzz(); arg1 } } fn main() { Struct::static_method(1, 2); Enum::static_method(-3, 4.5, 5); } fn zzz() {()}
20.057971
69
0.641618
0e0b0b518d587efc815ddadf375e76588cc3a349
3,103
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

/*
  A parallel version of fibonacci numbers.

  This version is meant mostly as a way of stressing and benchmarking
  the task system. It supports a lot of old command-line arguments to
  control how it runs.
*/

// NOTE(review): pre-1.0 Rust (`proc()`, `fail!`, `os::args`, `int`/`uint`);
// only builds with a contemporary compiler. Comments below document the
// original behavior; the code itself is unchanged.

extern crate getopts;
extern crate time;

use std::os;
use std::result::{Ok, Err};
use std::task;
use std::uint;

// Computes fib(n) by spawning one task per recursive call; each call of
// pfib sends its result back on `tx`, and a parent sums the two child
// results it receives. Deliberately wasteful — it exists to stress the
// task scheduler, not to compute Fibonacci efficiently.
fn fib(n: int) -> int {
    fn pfib(tx: &Sender<int>, n: int) {
        if n == 0 {
            tx.send(0);
        } else if n <= 2 {
            tx.send(1);
        } else {
            let (tx1, rx) = channel();
            let tx2 = tx1.clone();
            task::spawn(proc() pfib(&tx2, n - 1));
            let tx2 = tx1.clone();
            task::spawn(proc() pfib(&tx2, n - 2));
            // Order doesn't matter: addition is commutative and both
            // children send on clones of the same channel.
            tx.send(rx.recv() + rx.recv());
        }
    }

    let (tx, rx) = channel();
    spawn(proc() pfib(&tx, n) );
    rx.recv()
}

// Parsed command-line configuration; only `--stress` is recognized.
struct Config {
    stress: bool
}

// Parses argv (skipping the program name) and fails the task on any
// getopts error.
fn parse_opts(argv: Vec<String> ) -> Config {
    let opts = vec!(getopts::optflag("", "stress", ""));

    let argv = argv.iter().map(|x| x.to_string()).collect::<Vec<_>>();
    let opt_args = argv.slice(1, argv.len());

    match getopts::getopts(opt_args, opts.as_slice()) {
      Ok(ref m) => {
          return Config {stress: m.opt_present("stress")}
      }
      Err(_) => { fail!(); }
    }
}

// Stress worker: computes fib(15) forever, printing a progress line per
// iteration. Never returns — the enclosing task must be killed externally.
fn stress_task(id: int) {
    let mut i = 0i;
    loop {
        let n = 15i;
        assert_eq!(fib(n), fib(n));
        i += 1;
        println!("{}: Completed {} iterations", id, i);
    }
}

// Spawns `num_tasks` stress workers and blocks on their (never-arriving)
// results; effectively runs until interrupted.
fn stress(num_tasks: int) {
    let mut results = Vec::new();
    for i in range(0, num_tasks) {
        results.push(task::try_future(proc() {
            stress_task(i);
        }));
    }
    for r in results.move_iter() {
        r.unwrap();
    }
}

fn main() {
    let args = os::args();
    // RUST_BENCH forces n = 20; with no argument the default is n = 8.
    let args = if os::getenv("RUST_BENCH").is_some() {
        vec!("".to_string(), "20".to_string())
    } else if args.len() <= 1u {
        vec!("".to_string(), "8".to_string())
    } else {
        args.move_iter().map(|x| x.to_string()).collect()
    };

    let opts = parse_opts(args.clone());

    if opts.stress {
        stress(2);
    } else {
        let max = uint::parse_bytes(args.get(1).as_bytes(), 10u).unwrap() as int;

        let num_trials = 10;

        // Benchmark mode: time fib(n) `num_trials` times for each n up to
        // max, printing tab-separated "n  fib(n)  elapsed_ns" rows.
        for n in range(1, max + 1) {
            for _ in range(0u, num_trials) {
                let start = time::precise_time_ns();
                let fibn = fib(n);
                let stop = time::precise_time_ns();

                let elapsed = stop - start;

                println!("{}\t{}\t{}", n, fibn, elapsed.to_string());
            }
        }
    }
}
25.227642
76
0.536255
18499dfd31f57595bf942c3f0e8ddaec32121602
49,973
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::F24R2 { #[doc = r" Modifies the contents of the register"] #[inline(always)] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline(always)] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline(always)] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline(always)] pub fn reset(&self) { self.write(|w| w) } } #[doc = r" Value of the field"] pub struct FB0R { bits: bool, } impl FB0R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB1R { bits: bool, } impl FB1R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB2R { bits: bool, } impl FB2R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { 
!self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB3R { bits: bool, } impl FB3R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB4R { bits: bool, } impl FB4R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB5R { bits: bool, } impl FB5R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB6R { bits: bool, } impl FB6R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB7R { bits: bool, } impl FB7R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns 
`true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB8R { bits: bool, } impl FB8R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB9R { bits: bool, } impl FB9R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB10R { bits: bool, } impl FB10R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB11R { bits: bool, } impl FB11R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB12R { bits: bool, } impl FB12R { #[doc = r" Value of the field 
as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB13R { bits: bool, } impl FB13R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB14R { bits: bool, } impl FB14R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB15R { bits: bool, } impl FB15R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB16R { bits: bool, } impl FB16R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" 
Value of the field"] pub struct FB17R { bits: bool, } impl FB17R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB18R { bits: bool, } impl FB18R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB19R { bits: bool, } impl FB19R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB20R { bits: bool, } impl FB20R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB21R { bits: bool, } impl FB21R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the 
bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB22R { bits: bool, } impl FB22R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB23R { bits: bool, } impl FB23R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB24R { bits: bool, } impl FB24R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB25R { bits: bool, } impl FB25R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB26R { bits: bool, } impl FB26R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] 
#[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB27R { bits: bool, } impl FB27R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB28R { bits: bool, } impl FB28R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB29R { bits: bool, } impl FB29R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB30R { bits: bool, } impl FB30R { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Value of the field"] pub struct FB31R { bits: bool, } impl FB31R { #[doc = r" Value of the field as raw bits"] 
#[inline(always)] pub fn bit(&self) -> bool { self.bits } #[doc = r" Returns `true` if the bit is clear (0)"] #[inline(always)] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline(always)] pub fn bit_is_set(&self) -> bool { self.bit() } } #[doc = r" Proxy"] pub struct _FB0W<'a> { w: &'a mut W, } impl<'a> _FB0W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB1W<'a> { w: &'a mut W, } impl<'a> _FB1W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 1; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB2W<'a> { w: &'a mut W, } impl<'a> _FB2W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 2; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB3W<'a> { w: &'a mut W, } impl<'a> _FB3W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = 
r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 3; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB4W<'a> { w: &'a mut W, } impl<'a> _FB4W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 4; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB5W<'a> { w: &'a mut W, } impl<'a> _FB5W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 5; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB6W<'a> { w: &'a mut W, } impl<'a> _FB6W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 6; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB7W<'a> { w: &'a mut W, } impl<'a> _FB7W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) 
-> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 7; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB8W<'a> { w: &'a mut W, } impl<'a> _FB8W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 8; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB9W<'a> { w: &'a mut W, } impl<'a> _FB9W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 9; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB10W<'a> { w: &'a mut W, } impl<'a> _FB10W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 10; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB11W<'a> { w: &'a mut W, } impl<'a> _FB11W<'a> { #[doc = 
r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 11; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB12W<'a> { w: &'a mut W, } impl<'a> _FB12W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 12; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB13W<'a> { w: &'a mut W, } impl<'a> _FB13W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 13; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB14W<'a> { w: &'a mut W, } impl<'a> _FB14W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 14; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct 
_FB15W<'a> { w: &'a mut W, } impl<'a> _FB15W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 15; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB16W<'a> { w: &'a mut W, } impl<'a> _FB16W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 16; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB17W<'a> { w: &'a mut W, } impl<'a> _FB17W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 17; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB18W<'a> { w: &'a mut W, } impl<'a> _FB18W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 18; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) 
<< OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB19W<'a> { w: &'a mut W, } impl<'a> _FB19W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 19; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB20W<'a> { w: &'a mut W, } impl<'a> _FB20W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 20; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB21W<'a> { w: &'a mut W, } impl<'a> _FB21W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 21; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB22W<'a> { w: &'a mut W, } impl<'a> _FB22W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 22; self.w.bits &= !((MASK as 
u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB23W<'a> { w: &'a mut W, } impl<'a> _FB23W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 23; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB24W<'a> { w: &'a mut W, } impl<'a> _FB24W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 24; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB25W<'a> { w: &'a mut W, } impl<'a> _FB25W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 25; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB26W<'a> { w: &'a mut W, } impl<'a> _FB26W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = 
true; const OFFSET: u8 = 26; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB27W<'a> { w: &'a mut W, } impl<'a> _FB27W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 27; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB28W<'a> { w: &'a mut W, } impl<'a> _FB28W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 28; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB29W<'a> { w: &'a mut W, } impl<'a> _FB29W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 29; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB30W<'a> { w: &'a mut W, } impl<'a> _FB30W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn 
bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 30; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _FB31W<'a> { w: &'a mut W, } impl<'a> _FB31W<'a> { #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 31; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline(always)] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bit 0 - Filter bits"] #[inline(always)] pub fn fb0(&self) -> FB0R { let bits = { const MASK: bool = true; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB0R { bits } } #[doc = "Bit 1 - Filter bits"] #[inline(always)] pub fn fb1(&self) -> FB1R { let bits = { const MASK: bool = true; const OFFSET: u8 = 1; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB1R { bits } } #[doc = "Bit 2 - Filter bits"] #[inline(always)] pub fn fb2(&self) -> FB2R { let bits = { const MASK: bool = true; const OFFSET: u8 = 2; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB2R { bits } } #[doc = "Bit 3 - Filter bits"] #[inline(always)] pub fn fb3(&self) -> FB3R { let bits = { const MASK: bool = true; const OFFSET: u8 = 3; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB3R { bits } } #[doc = "Bit 4 - Filter bits"] #[inline(always)] pub fn fb4(&self) -> FB4R { let bits = { const MASK: bool = true; const OFFSET: u8 = 4; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB4R { bits } } #[doc = "Bit 5 - Filter bits"] #[inline(always)] pub fn fb5(&self) -> FB5R { let bits = { const MASK: bool = true; const OFFSET: u8 = 5; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB5R 
{ bits } } #[doc = "Bit 6 - Filter bits"] #[inline(always)] pub fn fb6(&self) -> FB6R { let bits = { const MASK: bool = true; const OFFSET: u8 = 6; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB6R { bits } } #[doc = "Bit 7 - Filter bits"] #[inline(always)] pub fn fb7(&self) -> FB7R { let bits = { const MASK: bool = true; const OFFSET: u8 = 7; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB7R { bits } } #[doc = "Bit 8 - Filter bits"] #[inline(always)] pub fn fb8(&self) -> FB8R { let bits = { const MASK: bool = true; const OFFSET: u8 = 8; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB8R { bits } } #[doc = "Bit 9 - Filter bits"] #[inline(always)] pub fn fb9(&self) -> FB9R { let bits = { const MASK: bool = true; const OFFSET: u8 = 9; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB9R { bits } } #[doc = "Bit 10 - Filter bits"] #[inline(always)] pub fn fb10(&self) -> FB10R { let bits = { const MASK: bool = true; const OFFSET: u8 = 10; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB10R { bits } } #[doc = "Bit 11 - Filter bits"] #[inline(always)] pub fn fb11(&self) -> FB11R { let bits = { const MASK: bool = true; const OFFSET: u8 = 11; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB11R { bits } } #[doc = "Bit 12 - Filter bits"] #[inline(always)] pub fn fb12(&self) -> FB12R { let bits = { const MASK: bool = true; const OFFSET: u8 = 12; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB12R { bits } } #[doc = "Bit 13 - Filter bits"] #[inline(always)] pub fn fb13(&self) -> FB13R { let bits = { const MASK: bool = true; const OFFSET: u8 = 13; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB13R { bits } } #[doc = "Bit 14 - Filter bits"] #[inline(always)] pub fn fb14(&self) -> FB14R { let bits = { const MASK: bool = true; const OFFSET: u8 = 14; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB14R { bits } } #[doc = "Bit 15 - Filter bits"] #[inline(always)] pub fn fb15(&self) -> FB15R { let bits = { const MASK: bool = true; const OFFSET: u8 = 15; ((self.bits >> OFFSET) & MASK 
as u32) != 0 }; FB15R { bits } } #[doc = "Bit 16 - Filter bits"] #[inline(always)] pub fn fb16(&self) -> FB16R { let bits = { const MASK: bool = true; const OFFSET: u8 = 16; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB16R { bits } } #[doc = "Bit 17 - Filter bits"] #[inline(always)] pub fn fb17(&self) -> FB17R { let bits = { const MASK: bool = true; const OFFSET: u8 = 17; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB17R { bits } } #[doc = "Bit 18 - Filter bits"] #[inline(always)] pub fn fb18(&self) -> FB18R { let bits = { const MASK: bool = true; const OFFSET: u8 = 18; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB18R { bits } } #[doc = "Bit 19 - Filter bits"] #[inline(always)] pub fn fb19(&self) -> FB19R { let bits = { const MASK: bool = true; const OFFSET: u8 = 19; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB19R { bits } } #[doc = "Bit 20 - Filter bits"] #[inline(always)] pub fn fb20(&self) -> FB20R { let bits = { const MASK: bool = true; const OFFSET: u8 = 20; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB20R { bits } } #[doc = "Bit 21 - Filter bits"] #[inline(always)] pub fn fb21(&self) -> FB21R { let bits = { const MASK: bool = true; const OFFSET: u8 = 21; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB21R { bits } } #[doc = "Bit 22 - Filter bits"] #[inline(always)] pub fn fb22(&self) -> FB22R { let bits = { const MASK: bool = true; const OFFSET: u8 = 22; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB22R { bits } } #[doc = "Bit 23 - Filter bits"] #[inline(always)] pub fn fb23(&self) -> FB23R { let bits = { const MASK: bool = true; const OFFSET: u8 = 23; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB23R { bits } } #[doc = "Bit 24 - Filter bits"] #[inline(always)] pub fn fb24(&self) -> FB24R { let bits = { const MASK: bool = true; const OFFSET: u8 = 24; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB24R { bits } } #[doc = "Bit 25 - Filter bits"] #[inline(always)] pub fn fb25(&self) -> FB25R { let bits = { const MASK: bool = true; const 
OFFSET: u8 = 25; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB25R { bits } } #[doc = "Bit 26 - Filter bits"] #[inline(always)] pub fn fb26(&self) -> FB26R { let bits = { const MASK: bool = true; const OFFSET: u8 = 26; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB26R { bits } } #[doc = "Bit 27 - Filter bits"] #[inline(always)] pub fn fb27(&self) -> FB27R { let bits = { const MASK: bool = true; const OFFSET: u8 = 27; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB27R { bits } } #[doc = "Bit 28 - Filter bits"] #[inline(always)] pub fn fb28(&self) -> FB28R { let bits = { const MASK: bool = true; const OFFSET: u8 = 28; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB28R { bits } } #[doc = "Bit 29 - Filter bits"] #[inline(always)] pub fn fb29(&self) -> FB29R { let bits = { const MASK: bool = true; const OFFSET: u8 = 29; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB29R { bits } } #[doc = "Bit 30 - Filter bits"] #[inline(always)] pub fn fb30(&self) -> FB30R { let bits = { const MASK: bool = true; const OFFSET: u8 = 30; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB30R { bits } } #[doc = "Bit 31 - Filter bits"] #[inline(always)] pub fn fb31(&self) -> FB31R { let bits = { const MASK: bool = true; const OFFSET: u8 = 31; ((self.bits >> OFFSET) & MASK as u32) != 0 }; FB31R { bits } } } impl W { #[doc = r" Reset value of the register"] #[inline(always)] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bit 0 - Filter bits"] #[inline(always)] pub fn fb0(&mut self) -> _FB0W { _FB0W { w: self } } #[doc = "Bit 1 - Filter bits"] #[inline(always)] pub fn fb1(&mut self) -> _FB1W { _FB1W { w: self } } #[doc = "Bit 2 - Filter bits"] #[inline(always)] pub fn fb2(&mut self) -> _FB2W { _FB2W { w: self } } #[doc = "Bit 3 - Filter bits"] #[inline(always)] pub fn fb3(&mut self) -> _FB3W { _FB3W { w: self } } #[doc = "Bit 4 - Filter 
bits"] #[inline(always)] pub fn fb4(&mut self) -> _FB4W { _FB4W { w: self } } #[doc = "Bit 5 - Filter bits"] #[inline(always)] pub fn fb5(&mut self) -> _FB5W { _FB5W { w: self } } #[doc = "Bit 6 - Filter bits"] #[inline(always)] pub fn fb6(&mut self) -> _FB6W { _FB6W { w: self } } #[doc = "Bit 7 - Filter bits"] #[inline(always)] pub fn fb7(&mut self) -> _FB7W { _FB7W { w: self } } #[doc = "Bit 8 - Filter bits"] #[inline(always)] pub fn fb8(&mut self) -> _FB8W { _FB8W { w: self } } #[doc = "Bit 9 - Filter bits"] #[inline(always)] pub fn fb9(&mut self) -> _FB9W { _FB9W { w: self } } #[doc = "Bit 10 - Filter bits"] #[inline(always)] pub fn fb10(&mut self) -> _FB10W { _FB10W { w: self } } #[doc = "Bit 11 - Filter bits"] #[inline(always)] pub fn fb11(&mut self) -> _FB11W { _FB11W { w: self } } #[doc = "Bit 12 - Filter bits"] #[inline(always)] pub fn fb12(&mut self) -> _FB12W { _FB12W { w: self } } #[doc = "Bit 13 - Filter bits"] #[inline(always)] pub fn fb13(&mut self) -> _FB13W { _FB13W { w: self } } #[doc = "Bit 14 - Filter bits"] #[inline(always)] pub fn fb14(&mut self) -> _FB14W { _FB14W { w: self } } #[doc = "Bit 15 - Filter bits"] #[inline(always)] pub fn fb15(&mut self) -> _FB15W { _FB15W { w: self } } #[doc = "Bit 16 - Filter bits"] #[inline(always)] pub fn fb16(&mut self) -> _FB16W { _FB16W { w: self } } #[doc = "Bit 17 - Filter bits"] #[inline(always)] pub fn fb17(&mut self) -> _FB17W { _FB17W { w: self } } #[doc = "Bit 18 - Filter bits"] #[inline(always)] pub fn fb18(&mut self) -> _FB18W { _FB18W { w: self } } #[doc = "Bit 19 - Filter bits"] #[inline(always)] pub fn fb19(&mut self) -> _FB19W { _FB19W { w: self } } #[doc = "Bit 20 - Filter bits"] #[inline(always)] pub fn fb20(&mut self) -> _FB20W { _FB20W { w: self } } #[doc = "Bit 21 - Filter bits"] #[inline(always)] pub fn fb21(&mut self) -> _FB21W { _FB21W { w: self } } #[doc = "Bit 22 - Filter bits"] #[inline(always)] pub fn fb22(&mut self) -> _FB22W { _FB22W { w: self } } #[doc = "Bit 23 - Filter bits"] 
#[inline(always)] pub fn fb23(&mut self) -> _FB23W { _FB23W { w: self } } #[doc = "Bit 24 - Filter bits"] #[inline(always)] pub fn fb24(&mut self) -> _FB24W { _FB24W { w: self } } #[doc = "Bit 25 - Filter bits"] #[inline(always)] pub fn fb25(&mut self) -> _FB25W { _FB25W { w: self } } #[doc = "Bit 26 - Filter bits"] #[inline(always)] pub fn fb26(&mut self) -> _FB26W { _FB26W { w: self } } #[doc = "Bit 27 - Filter bits"] #[inline(always)] pub fn fb27(&mut self) -> _FB27W { _FB27W { w: self } } #[doc = "Bit 28 - Filter bits"] #[inline(always)] pub fn fb28(&mut self) -> _FB28W { _FB28W { w: self } } #[doc = "Bit 29 - Filter bits"] #[inline(always)] pub fn fb29(&mut self) -> _FB29W { _FB29W { w: self } } #[doc = "Bit 30 - Filter bits"] #[inline(always)] pub fn fb30(&mut self) -> _FB30W { _FB30W { w: self } } #[doc = "Bit 31 - Filter bits"] #[inline(always)] pub fn fb31(&mut self) -> _FB31W { _FB31W { w: self } } }
25.587814
59
0.491706
1cad982f92eb04611108edbfd805bfec01b90816
1,272
// Copyright (c) 2017, Dennis Hamester <[email protected]>
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
// FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
// OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
// PERFORMANCE OF THIS SOFTWARE.

//! Constants for the
//! [`VK_EXT_shader_viewport_index_layer`](https://www.khronos.org/registry/vulkan/specs/1.0-extensions/html/vkspec.html#VK_EXT_shader_viewport_index_layer)
//! device extension.

/// Version of the extension specification these constants were generated from.
pub const VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_SPEC_VERSION: u32 = 1;

/// Extension name as a NUL-terminated byte string, suitable for passing
/// through the Vulkan C API (which expects `const char*`).
pub const VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME: &'static [u8; 35] =
    b"VK_EXT_shader_viewport_index_layer\x00";

/// Extension name as a Rust string slice, for comparisons and display.
pub const VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME_STR: &'static str =
    "VK_EXT_shader_viewport_index_layer";
63.6
156
0.802673
f5ee7e63b55a4d57730f2c73752661c637dbfd4d
4,479
// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use {
    crate::flags::build_flag_combinations, fidl::endpoints::create_proxy, fidl_fuchsia_io as io,
    fidl_fuchsia_io_test as io_test, fidl_fuchsia_sys2 as fsys, fuchsia_zircon as zx,
};

/// Helper struct for connecting to an io1 test harness and running a conformance test on it.
pub struct TestHarness {
    /// FIDL proxy to the io1 test harness.
    pub proxy: io_test::Io1HarnessProxy,

    /// Config for the filesystem.
    pub config: io_test::Io1Config,

    /// All rights supported by the filesystem.
    // Bitwise OR of the io1 OPEN_RIGHT_* constants the harness supports,
    // as derived from `config` by `get_supported_rights`.
    pub all_rights: u32,
}

impl TestHarness {
    /// Connects to the test harness and returns a `TestHarness` struct.
    pub async fn new() -> TestHarness {
        let proxy = connect_to_harness().await;
        let config = proxy.get_config().await.expect("Could not get config from proxy");
        let all_rights = get_supported_rights(&config);
        TestHarness { proxy, config, all_rights }
    }

    /// Creates and returns a directory with the given structure from the test harness.
    // The harness populates `server`; the client end is returned to the caller.
    pub fn get_directory(&self, root: io_test::Directory, flags: u32) -> io::DirectoryProxy {
        let (client, server) = create_proxy::<io::DirectoryMarker>().expect("Cannot create proxy");
        self.proxy
            .get_directory(root, flags, server)
            .expect("Cannot get directory from test harness");
        client
    }

    /// Returns all combinations of supported flags.
    pub fn all_flag_combos(&self) -> Vec<u32> {
        build_flag_combinations(0, self.all_rights)
    }

    /// Returns all combinations of supported flags that include `OPEN_RIGHT_READABLE`.
    pub fn readable_flag_combos(&self) -> Vec<u32> {
        build_flag_combinations(io::OPEN_RIGHT_READABLE, self.all_rights)
    }

    /// Returns all combinations of supported flags that include `OPEN_RIGHT_WRITABLE`.
    pub fn writable_flag_combos(&self) -> Vec<u32> {
        build_flag_combinations(io::OPEN_RIGHT_WRITABLE, self.all_rights)
    }

    /// Returns all combinations of supported flags that do not include `OPEN_RIGHT_READABLE`.
    pub fn non_readable_flag_combos(&self) -> Vec<u32> {
        let non_readable_rights = self.all_rights & !io::OPEN_RIGHT_READABLE;
        build_flag_combinations(0, non_readable_rights)
    }

    /// Returns all combinations of supported flags that do not include `OPEN_RIGHT_WRITABLE`.
    pub fn non_writable_flag_combos(&self) -> Vec<u32> {
        let non_writable_rights = self.all_rights & !io::OPEN_RIGHT_WRITABLE;
        build_flag_combinations(0, non_writable_rights)
    }
}

// Binds the `fs_test` child component and connects to its Io1Harness protocol.
// Panics on any failure: this is test-setup code, so failing fast is intended.
async fn connect_to_harness() -> io_test::Io1HarnessProxy {
    // Connect to the realm to get access to the outgoing directory for the harness.
    let (client, server) = zx::Channel::create().expect("Cannot create channel");
    fuchsia_component::client::connect_channel_to_protocol::<fsys::RealmMarker>(server)
        .expect("Cannot connect to Realm service");
    let realm = fsys::RealmSynchronousProxy::new(client);

    // fs_test is the name of the child component defined in the manifest.
    let mut child_ref = fsys::ChildRef { name: "fs_test".to_string(), collection: None };
    let (client, server) = zx::Channel::create().expect("Cannot create channel");
    realm
        .bind_child(
            &mut child_ref,
            fidl::endpoints::ServerEnd::<io::DirectoryMarker>::new(server),
            zx::Time::INFINITE,
        )
        .expect("FIDL error when binding to child in Realm")
        .expect("Cannot bind to test harness child in Realm");

    // `client` now holds the child's exposed directory; wrap it so we can
    // open the harness protocol from it.
    let exposed_dir = io::DirectoryProxy::new(
        fidl::AsyncChannel::from_channel(client).expect("Cannot create async channel"),
    );

    fuchsia_component::client::connect_to_protocol_at_dir_root::<io_test::Io1HarnessMarker>(
        &exposed_dir,
    )
    .expect("Cannot connect to test harness protocol")
}

/// Returns a constant representing the aggregate of all io.fidl supported_rights that are supported by the
/// test harness.
fn get_supported_rights(config: &io_test::Io1Config) -> u32 {
    // Read and write are always supported; exec and admin are opt-out via
    // the harness config (an absent flag means "supported").
    let exec_supported = !config.no_exec.unwrap_or_default();
    let admin_supported = !config.no_admin.unwrap_or_default();

    let mut supported = io::OPEN_RIGHT_READABLE | io::OPEN_RIGHT_WRITABLE;
    if exec_supported {
        supported |= io::OPEN_RIGHT_EXECUTABLE;
    }
    if admin_supported {
        supported |= io::OPEN_RIGHT_ADMIN;
    }
    supported
}
40.718182
107
0.694351
0a60618ac358d1064b188ce3c5171a976a957238
308
use diesel::pg::PgConnection;
use diesel::prelude::*;

use crate::CONFIG;

/// Opens a new PostgreSQL connection using the database URL from the
/// global `CONFIG`.
///
/// # Panics
///
/// Panics if a connection to `CONFIG.server.database_url` cannot be
/// established; the panic message includes the URL and the driver error.
pub fn establish_connection() -> PgConnection {
    // Borrow the URL from the global config instead of cloning the whole
    // config struct just to read one field.
    let database_url = &CONFIG.server.database_url;
    PgConnection::establish(database_url)
        // `unwrap_or_else` avoids building the panic message on the success
        // path (clippy::expect_fun_call) and surfaces the underlying error.
        .unwrap_or_else(|e| panic!("Error connecting to {}: {}", database_url, e))
}
23.692308
73
0.675325
1a2cd901c3e71e86a8df076638ec0dc1468b9662
9,967
use serde::{Deserialize, Serialize}; use tracing::{error, trace}; use ibc::{ ics02_client::client_state::{ClientState, IdentifiedAnyClientState}, ics03_connection::connection::{ ConnectionEnd, IdentifiedConnectionEnd, State as ConnectionState, }, ics04_channel::channel::{ChannelEnd, IdentifiedChannelEnd, State}, ics24_host::identifier::{ChainId, ChannelId, ClientId, ConnectionId, PortChannelId, PortId}, Height, }; use ibc_proto::ibc::core::{ channel::v1::QueryConnectionChannelsRequest, connection::v1::QueryClientConnectionsRequest, }; use crate::channel::ChannelError; use crate::supervisor::Error; use super::handle::ChainHandle; pub fn counterparty_chain_from_connection( src_chain: &dyn ChainHandle, src_connection_id: &ConnectionId, ) -> Result<ChainId, Error> { let connection_end = src_chain .query_connection(&src_connection_id, Height::zero()) .map_err(|e| Error::QueryFailed(format!("{}", e)))?; let client_id = connection_end.client_id(); let client_state = src_chain .query_client_state(&client_id, Height::zero()) .map_err(|e| Error::QueryFailed(format!("{}", e)))?; trace!( chain_id=%src_chain.id(), connection_id=%src_connection_id, "counterparty chain: {}", client_state.chain_id() ); Ok(client_state.chain_id()) } fn connection_on_destination( connection_id_on_source: &ConnectionId, counterparty_client_id: &ClientId, counterparty_chain: &dyn ChainHandle, ) -> Result<Option<ConnectionEnd>, Error> { let req = QueryClientConnectionsRequest { client_id: counterparty_client_id.to_string(), }; let counterparty_connections = counterparty_chain .query_client_connections(req) .map_err(|e| { Error::QueryFailed(format!( "counterparty::query_client_connections({}) failed with error: {}", counterparty_client_id, e )) })?; for counterparty_connection in counterparty_connections.into_iter() { let counterparty_connection_end = counterparty_chain .query_connection(&counterparty_connection, Height::zero()) .map_err(|e| Error::QueryFailed(format!("{}", e)))?; let 
local_connection_end = &counterparty_connection_end.counterparty(); if let Some(local_connection_id) = local_connection_end.connection_id() { if local_connection_id == connection_id_on_source { return Ok(Some(counterparty_connection_end)); } } } Ok(None) } pub fn connection_state_on_destination( connection: IdentifiedConnectionEnd, counterparty_chain: &dyn ChainHandle, ) -> Result<ConnectionState, Error> { if let Some(remote_connection_id) = connection.connection_end.counterparty().connection_id() { let connection_end = counterparty_chain .query_connection(remote_connection_id, Height::zero()) .map_err(|e| Error::QueryFailed(format!("{}", e)))?; Ok(connection_end.state) } else { // The remote connection id (used on `counterparty_chain`) is unknown. // Try to retrieve this id by looking at client connections. let counterparty_client_id = connection.connection_end.counterparty().client_id(); let dst_connection = connection_on_destination( &connection.connection_id, counterparty_client_id, counterparty_chain, )?; dst_connection.map_or_else( || Ok(ConnectionState::Uninitialized), |remote_connection| Ok(remote_connection.state), ) } } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct ChannelConnectionClient { pub channel: IdentifiedChannelEnd, pub connection: IdentifiedConnectionEnd, pub client: IdentifiedAnyClientState, } impl ChannelConnectionClient { pub fn new( channel: IdentifiedChannelEnd, connection: IdentifiedConnectionEnd, client: IdentifiedAnyClientState, ) -> Self { Self { channel, connection, client, } } } /// Returns the [`ChannelConnectionClient`] associated with the /// provided port and channel id. 
pub fn channel_connection_client(
    chain: &dyn ChainHandle,
    port_id: &PortId,
    channel_id: &ChannelId,
) -> Result<ChannelConnectionClient, Error> {
    // Fetch the channel end and reject channels that were never initialized.
    let chan_end = chain
        .query_channel(port_id, channel_id, Height::zero())
        .map_err(|e| Error::QueryFailed(format!("{}", e)))?;

    if chan_end.state_matches(&State::Uninitialized) {
        return Err(Error::ChannelUninitialized(
            port_id.clone(),
            channel_id.clone(),
            chain.id(),
        ));
    }

    // A channel must sit on at least one connection hop.
    let conn_id = chan_end
        .connection_hops()
        .first()
        .ok_or_else(|| Error::MissingConnectionHops(channel_id.clone(), chain.id()))?;

    let conn_end = chain
        .query_connection(conn_id, Height::zero())
        .map_err(|e| Error::QueryFailed(format!("{}", e)))?;

    if !conn_end.is_open() {
        return Err(Error::ConnectionNotOpen(
            conn_id.clone(),
            channel_id.clone(),
            chain.id(),
        ));
    }

    // Finally resolve the client that backs the connection.
    let client_id = conn_end.client_id();
    let client_state = chain
        .query_client_state(client_id, Height::zero())
        .map_err(|e| Error::QueryFailed(format!("{}", e)))?;

    let client = IdentifiedAnyClientState::new(client_id.clone(), client_state);
    let connection = IdentifiedConnectionEnd::new(conn_id.clone(), conn_end);
    let channel = IdentifiedChannelEnd::new(port_id.clone(), channel_id.clone(), chan_end);

    Ok(ChannelConnectionClient::new(channel, connection, client))
}

/// Resolves the counterparty chain id for a channel by walking
/// channel -> connection -> client and reading the client's chain id.
pub fn counterparty_chain_from_channel(
    src_chain: &dyn ChainHandle,
    src_channel_id: &ChannelId,
    src_port_id: &PortId,
) -> Result<ChainId, Error> {
    channel_connection_client(src_chain, src_port_id, src_channel_id)
        .map(|c| c.client.client_state.chain_id())
}

/// Scans all channels on `remote_connection_id` of the counterparty chain for
/// the one whose remote end matches our `port_id`/`channel_id`.
fn fetch_channel_on_destination(
    port_id: &PortId,
    channel_id: &ChannelId,
    counterparty_chain: &dyn ChainHandle,
    remote_connection_id: &ConnectionId,
) -> Result<Option<ChannelEnd>, Error> {
    let req = QueryConnectionChannelsRequest {
        connection: remote_connection_id.to_string(),
        pagination: ibc_proto::cosmos::base::query::pagination::all(),
    };

    let candidates = counterparty_chain
        .query_connection_channels(req)
        .map_err(|e| Error::QueryFailed(format!("{}", e)))?;

    for candidate in candidates {
        let our_side = &candidate.channel_end.remote;
        if let Some(our_channel_id) = our_side.channel_id() {
            if our_channel_id == channel_id && our_side.port_id() == port_id {
                return Ok(Some(candidate.channel_end));
            }
        }
    }

    Ok(None)
}

/// Queries the state of `channel` as seen by `counterparty_chain`.
/// Returns `Uninitialized` when the counterparty knows nothing about it.
pub fn channel_state_on_destination(
    channel: &IdentifiedChannelEnd,
    connection: &IdentifiedConnectionEnd,
    counterparty_chain: &dyn ChainHandle,
) -> Result<State, Error> {
    let remote_channel = channel_on_destination(channel, connection, counterparty_chain)?;
    Ok(match remote_channel {
        Some(remote) => remote.state,
        None => State::Uninitialized,
    })
}

/// Fetches the counterparty channel end. Prefers a direct query when the
/// remote channel id is known; otherwise scans the remote connection's
/// channels; `None` when neither id is available.
pub fn channel_on_destination(
    channel: &IdentifiedChannelEnd,
    connection: &IdentifiedConnectionEnd,
    counterparty_chain: &dyn ChainHandle,
) -> Result<Option<ChannelEnd>, Error> {
    match channel.channel_end.remote.channel_id() {
        Some(remote_channel_id) => {
            let counterparty_end = counterparty_chain
                .query_channel(
                    channel.channel_end.remote.port_id(),
                    remote_channel_id,
                    Height::zero(),
                )
                .map_err(|e| Error::QueryFailed(format!("{}", e)))?;
            Ok(Some(counterparty_end))
        }
        None => match connection.end().counterparty().connection_id() {
            Some(remote_connection_id) => fetch_channel_on_destination(
                &channel.port_id,
                &channel.channel_id,
                counterparty_chain,
                remote_connection_id,
            ),
            None => Ok(None),
        },
    }
}

/// Queries a channel end on a [`ChainHandle`], and verifies
/// that the counterparty field on that channel end matches an
/// expected counterparty.
/// Returns `Ok` if the counterparty matches, and `Err` otherwise.
pub fn check_channel_counterparty( target_chain: Box<dyn ChainHandle>, target_pchan: &PortChannelId, expected: &PortChannelId, ) -> Result<(), ChannelError> { let channel_end_dst = target_chain .query_channel( &target_pchan.port_id, &target_pchan.channel_id, Height::zero(), ) .map_err(|e| ChannelError::QueryError(target_chain.id(), e))?; let counterparty = channel_end_dst.remote; match counterparty.channel_id { Some(actual_channel_id) => { let actual = PortChannelId { channel_id: actual_channel_id, port_id: counterparty.port_id, }; if &actual != expected { return Err(ChannelError::MismatchingChannelEnds( target_pchan.clone(), target_chain.id(), expected.clone(), actual, )); } } None => { error!( "channel {} on chain {} has no counterparty channel id ", target_pchan, target_chain.id() ); return Err(ChannelError::IncompleteChannelState( target_pchan.clone(), target_chain.id(), )); } } Ok(()) }
33.786441
98
0.641818
21789d552ab4cdd28dd1454f71b3e0461e45ae6e
404
/// Builds the "For Want of a Nail" proverb from a chain of items.
///
/// Each adjacent pair `(a, b)` in `list` produces the line
/// `"For want of a <a> the <b> was lost."`, and the proverb closes with
/// `"And all for the want of a <first>."`. An empty `list` yields an
/// empty string; a single item yields only the closing line.
pub fn build_proverb(list: &[&str]) -> String {
    // Build each line once and join at the end; the previous version
    // re-allocated the accumulated string with `format!` on every
    // iteration (O(n^2)) and relied on a trailing trim to clean up.
    let mut lines: Vec<String> = list
        .windows(2)
        .map(|pair| format!("For want of a {} the {} was lost.", pair[0], pair[1]))
        .collect();

    if let Some(first) = list.first() {
        lines.push(format!("And all for the want of a {}.", first));
    }

    lines.join("\n")
}
28.857143
100
0.482673