hexsha
stringlengths
40
40
size
int64
4
1.05M
content
stringlengths
4
1.05M
avg_line_length
float64
1.33
100
max_line_length
int64
1
1k
alphanum_fraction
float64
0.25
1
5b927ad00ce22ccba8dcfc10028075f9fae9c468
9,385
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 #![forbid(unsafe_code)] use anyhow::{format_err, Result}; use serde::{Deserialize, Serialize}; use starcoin_types::account_address::AccountAddress; use starcoin_vm_types::file_format::CodeOffset; use starcoin_vm_types::identifier::{IdentStr, Identifier}; use std::{ collections::BTreeMap, fs::File, io::{BufRead, BufReader, Read, Write}, path::Path, }; pub type FunctionCoverage = BTreeMap<u64, u64>; #[derive(Debug, Serialize, Deserialize)] pub struct CoverageMap { pub exec_maps: BTreeMap<String, ExecCoverageMap>, } #[derive(Debug, Serialize, Deserialize)] pub struct ModuleCoverageMap { pub module_addr: AccountAddress, pub module_name: Identifier, pub function_maps: BTreeMap<Identifier, FunctionCoverage>, } #[derive(Debug, Serialize, Deserialize)] pub struct ExecCoverageMap { pub exec_id: String, pub module_maps: BTreeMap<(AccountAddress, Identifier), ModuleCoverageMap>, } #[derive(Debug, Serialize, Deserialize)] pub struct TraceEntry { pub module_addr: AccountAddress, pub module_name: Identifier, pub func_name: Identifier, pub func_pc: CodeOffset, } #[derive(Debug, Serialize, Deserialize)] pub struct TraceMap { pub exec_maps: BTreeMap<String, Vec<TraceEntry>>, } impl CoverageMap { /// Takes in a file containing a raw VM trace, and returns an updated coverage map. 
pub fn update_coverage_from_trace_file<P: AsRef<Path>>(mut self, filename: P) -> Self { let file = File::open(filename).unwrap(); for line in BufReader::new(file).lines() { let line = line.unwrap(); let mut splits = line.split(','); let exec_id = splits.next().unwrap(); let context = splits.next().unwrap(); let pc = splits.next().unwrap().parse::<u64>().unwrap(); let mut context_segs: Vec<_> = context.split("::").collect(); let is_script = context_segs.len() == 2; if !is_script { let func_name = Identifier::new(context_segs.pop().unwrap()).unwrap(); let module_name = Identifier::new(context_segs.pop().unwrap()).unwrap(); let module_addr = AccountAddress::from_hex_literal(context_segs.pop().unwrap()).unwrap(); self.insert(exec_id, module_addr, module_name, func_name, pc); } else { // Don't count scripts (for now) assert_eq!(context_segs.pop().unwrap(), "main",); assert_eq!(context_segs.pop().unwrap(), "Script",); } } self } /// Takes in a file containing a raw VM trace, and returns a coverage map. pub fn from_trace_file<P: AsRef<Path>>(filename: P) -> Self { let empty_module_map = CoverageMap { exec_maps: BTreeMap::new(), }; empty_module_map.update_coverage_from_trace_file(filename) } /// Takes in a file containing a serialized coverage map and returns a coverage map. 
pub fn from_binary_file<P: AsRef<Path>>(filename: P) -> Self { let mut bytes = Vec::new(); File::open(filename) .ok() .and_then(|mut file| file.read_to_end(&mut bytes).ok()) .ok_or_else(|| format_err!("Error while reading in coverage map binary")) .unwrap(); bcs_ext::from_bytes(&bytes) .map_err(|_| format_err!("Error deserializing into coverage map")) .unwrap() } // add entries in a cascading manner pub fn insert( &mut self, exec_id: &str, module_addr: AccountAddress, module_name: Identifier, func_name: Identifier, pc: u64, ) { let exec_entry = self .exec_maps .entry(exec_id.to_owned()) .or_insert_with(|| ExecCoverageMap::new(exec_id.to_owned())); exec_entry.insert(module_addr, module_name, func_name, pc); } pub fn to_unified_exec_map(&self) -> ExecCoverageMap { let mut unified_map = ExecCoverageMap::new(String::new()); for (_, exec_map) in self.exec_maps.iter() { for ((module_addr, module_name), module_map) in exec_map.module_maps.iter() { for (func_name, func_map) in module_map.function_maps.iter() { for (pc, count) in func_map.iter() { unified_map.insert_multi( *module_addr, module_name.clone(), func_name.clone(), *pc, *count, ); } } } } unified_map } } impl ModuleCoverageMap { pub fn new(module_addr: AccountAddress, module_name: Identifier) -> Self { ModuleCoverageMap { module_addr, module_name, function_maps: BTreeMap::new(), } } pub fn insert_multi(&mut self, func_name: Identifier, pc: u64, count: u64) { let func_entry = self .function_maps .entry(func_name) .or_insert_with(FunctionCoverage::new); let pc_entry = func_entry.entry(pc).or_insert(0); *pc_entry += count; } pub fn insert(&mut self, func_name: Identifier, pc: u64) { self.insert_multi(func_name, pc, 1); } pub fn get_function_coverage(&self, func_name: &IdentStr) -> Option<&FunctionCoverage> { self.function_maps.get(func_name) } } impl ExecCoverageMap { pub fn new(exec_id: String) -> Self { ExecCoverageMap { exec_id, module_maps: BTreeMap::new(), } } pub fn insert_multi( &mut self, module_addr: 
AccountAddress, module_name: Identifier, func_name: Identifier, pc: u64, count: u64, ) { let module_entry = self .module_maps .entry((module_addr, module_name.clone())) .or_insert_with(|| ModuleCoverageMap::new(module_addr, module_name)); module_entry.insert_multi(func_name, pc, count); } pub fn insert( &mut self, module_addr: AccountAddress, module_name: Identifier, func_name: Identifier, pc: u64, ) { self.insert_multi(module_addr, module_name, func_name, pc, 1); } } impl TraceMap { /// Takes in a file containing a raw VM trace, and returns an updated coverage map. pub fn update_from_trace_file<P: AsRef<Path>>(mut self, filename: P) -> Self { let file = File::open(filename).unwrap(); for line in BufReader::new(file).lines() { let line = line.unwrap(); let mut splits = line.split(','); let exec_id = splits.next().unwrap(); let context = splits.next().unwrap(); let pc = splits.next().unwrap().parse::<u64>().unwrap(); let mut context_segs: Vec<_> = context.split("::").collect(); let is_script = context_segs.len() == 2; if !is_script { let func_name = Identifier::new(context_segs.pop().unwrap()).unwrap(); let module_name = Identifier::new(context_segs.pop().unwrap()).unwrap(); let module_addr = AccountAddress::from_hex_literal(context_segs.pop().unwrap()).unwrap(); self.insert(exec_id, module_addr, module_name, func_name, pc); } else { // Don't count scripts (for now) assert_eq!(context_segs.pop().unwrap(), "main",); assert_eq!(context_segs.pop().unwrap(), "Script",); } } self } // Takes in a file containing a raw VM trace, and returns a parsed trace. pub fn from_trace_file<P: AsRef<Path>>(filename: P) -> Self { let trace_map = TraceMap { exec_maps: BTreeMap::new(), }; trace_map.update_from_trace_file(filename) } // Takes in a file containing a serialized trace and deserialize it. 
pub fn from_binary_file<P: AsRef<Path>>(filename: P) -> Self { let mut bytes = Vec::new(); File::open(filename) .ok() .and_then(|mut file| file.read_to_end(&mut bytes).ok()) .ok_or_else(|| format_err!("Error while reading in coverage map binary")) .unwrap(); bcs_ext::from_bytes(&bytes) .map_err(|_| format_err!("Error deserializing into coverage map")) .unwrap() } // add entries in a cascading manner pub fn insert( &mut self, exec_id: &str, module_addr: AccountAddress, module_name: Identifier, func_name: Identifier, pc: u64, ) { let exec_entry = self .exec_maps .entry(exec_id.to_owned()) .or_insert_with(Vec::new); exec_entry.push(TraceEntry { module_addr, module_name, func_name, func_pc: pc as CodeOffset, }); } } pub fn output_map_to_file<M: Serialize, P: AsRef<Path>>(file_name: P, data: &M) -> Result<()> { let bytes = bcs_ext::to_bytes(data)?; let mut file = File::create(file_name)?; file.write_all(&bytes)?; Ok(()) }
34.127273
95
0.581034
79e305e8de39fc386ee5669b50c06de5c934baab
17,552
use crate::config::dictionaries::DictionaryFormat; use { super::{FastlyConfig, LocalServerConfig, RawLocalServerConfig}, crate::{config::DictionaryName, error::FastlyConfigError}, std::{fs::File, io::Write}, tempfile::tempdir, }; #[test] fn error_when_fastly_toml_files_cannot_be_read() { match FastlyConfig::from_file("nonexistent.toml") { Err(FastlyConfigError::IoError { path, .. }) if path == "nonexistent.toml" => {} res => panic!("unexpected result: {:?}", res), } } #[test] fn fastly_toml_files_can_be_read() { // Parse a valid `fastly.toml`, check that it succeeds. let config = FastlyConfig::from_str( r#" name = "simple-toml-example" description = "a simple toml example" authors = ["Jill Bryson <[email protected]>", "Rose McDowall <[email protected]>"] language = "rust" "#, ) .expect("can read toml data"); // Check that the name, description, authors, and language fields were parsed correctly. assert_eq!(config.name(), "simple-toml-example"); assert_eq!(config.description(), "a simple toml example"); assert_eq!( config.authors(), [ "Jill Bryson <[email protected]>", "Rose McDowall <[email protected]>" ] ); assert_eq!(config.language(), "rust"); } /// Show that we can successfully parse a `fastly.toml` with backend configurations. /// /// This provides an example `fastly.toml` file including a `#[local_server.backends]` section. This /// includes various backend definitions, that may or may not include an environment key. 
#[test] fn fastly_toml_files_with_simple_backend_configurations_can_be_read() { let config = FastlyConfig::from_str( r#" manifest_version = "1.2.3" name = "backend-config-example" description = "a toml example with backend configuration" authors = [ "Amelia Watson <[email protected]>", "Inugami Korone <[email protected]>", ] language = "rust" [local_server] [local_server.backends] [local_server.backends.dog] url = "http://localhost:7878/dog-mocks" [local_server.backends."shark.server"] url = "http://localhost:7878/shark-mocks" override_host = "somehost.com" [local_server.backends.detective] url = "http://www.elementary.org/" "#, ) .expect("can read toml data containing backend configurations"); let backend = config .backends() .get("dog") .expect("backend configurations can be accessed"); assert_eq!(backend.uri, "http://localhost:7878/dog-mocks"); assert_eq!(backend.override_host, None); let backend = config .backends() .get("shark.server") .expect("backend configurations can be accessed"); assert_eq!(backend.uri, "http://localhost:7878/shark-mocks"); assert_eq!( backend.override_host, Some("somehost.com".parse().expect("can parse override_host")) ); } /// Show that we can successfully parse a `fastly.toml` with local_server.dictionaries configurations. /// /// This provides an example `fastly.toml` file including a `#[local_server.dictionaries]` section. 
#[test] fn fastly_toml_files_with_simple_dictionary_configurations_can_be_read() { let dir = tempdir().unwrap(); let file_path = dir.path().join("a.json"); let mut file = File::create(&file_path).unwrap(); writeln!(file, "{{}}").unwrap(); let config = FastlyConfig::from_str(format!( r#" manifest_version = "1.2.3" name = "dictionary-config-example" description = "a toml example with dictionary configuration" authors = [ "Amelia Watson <[email protected]>", "Inugami Korone <[email protected]>", ] language = "rust" [local_server] [local_server.dictionaries] [local_server.dictionaries.a] file='{}' format = "json" "#, &file_path.to_str().unwrap() )) .expect("can read toml data containing local dictionary configurations"); let dictionary = config .dictionaries() .get(&DictionaryName::new("a".to_string())) .expect("dictionary configurations can be accessed"); assert_eq!(dictionary.file, file_path); assert_eq!(dictionary.format, DictionaryFormat::Json); } /// Unit tests for the `local_server` section of a `fastly.toml` package manifest. /// /// In particular, these tests check that we deserialize and validate the backend configurations /// section of the TOML data properly. In the interest of brevity, this section works with TOML data /// that would be placed beneath the `local_server` key, rather than an entire package manifest as in /// the tests above. mod local_server_config_tests { use std::fs::File; use std::io::Write; use tempfile::tempdir; use { super::{LocalServerConfig, RawLocalServerConfig}, crate::error::{ BackendConfigError, DictionaryConfigError, FastlyConfigError::{self, InvalidBackendDefinition, InvalidDictionaryDefinition}, }, std::convert::TryInto, }; fn read_toml_config(toml: &str) -> Result<LocalServerConfig, FastlyConfigError> { toml::from_str::<'_, RawLocalServerConfig>(toml) .expect("valid toml data") .try_into() } /// Check that the `local_server` section can be deserialized. 
// This case is technically redundant, but it is nice to have a unit test that demonstrates the // happy path for this group of unit tests. #[test] fn local_server_configs_can_be_deserialized() { let dir = tempdir().unwrap(); let file_path = dir.path().join("secrets.json"); let mut file = File::create(&file_path).unwrap(); writeln!(file, "{{}}").unwrap(); let local_server = format!( r#" [backends] [backends.dog] url = "http://localhost:7878/dog-mocks" [dicionaries] [dicionaries.secrets] file = '{}' format = "json" "#, file_path.to_str().unwrap() ); match read_toml_config(&local_server) { Ok(_) => {} res => panic!("unexpected result: {:?}", res), } } /// Check that backend definitions must be given as TOML tables. #[test] fn backend_configs_must_use_toml_tables() { use BackendConfigError::InvalidEntryType; static BAD_DEF: &str = r#" [backends] "shark" = "https://a.com" "#; match read_toml_config(BAD_DEF) { Err(InvalidBackendDefinition { err: InvalidEntryType, .. }) => {} res => panic!("unexpected result: {:?}", res), } } /// Check that backend definitions cannot contain unrecognized keys. #[test] fn backend_configs_cannot_contain_unrecognized_keys() { use BackendConfigError::UnrecognizedKey; static BAD_DEFAULT: &str = r#" [backends] shark = { url = "https://a.com", shrimp = true } "#; match read_toml_config(BAD_DEFAULT) { Err(InvalidBackendDefinition { err: UnrecognizedKey(key), .. }) if key == "shrimp" => {} res => panic!("unexpected result: {:?}", res), } } /// Check that backend definitions *must* include a `url` field. #[test] fn backend_configs_must_provide_a_url() { use BackendConfigError::MissingUrl; static NO_URL: &str = r#" [backends] "shark" = {} "#; match read_toml_config(NO_URL) { Err(InvalidBackendDefinition { err: MissingUrl, .. }) => {} res => panic!("unexpected result: {:?}", res), } } /// Check that backend definitions *must* include a `url` field. 
#[test] fn backend_configs_must_provide_urls_as_a_string() { use BackendConfigError::InvalidUrlEntry; static BAD_URL_FIELD: &str = r#" [backends] "shark" = { url = 3 } "#; match read_toml_config(BAD_URL_FIELD) { Err(InvalidBackendDefinition { err: InvalidUrlEntry, .. }) => {} res => panic!("unexpected result: {:?}", res), } } /// Check that backend definitions must include a *valid* `url` field. #[test] fn backend_configs_must_provide_a_valid_url() { use BackendConfigError::InvalidUrl; static BAD_URL_FIELD: &str = r#" [backends] "shark" = { url = "http:://[:::1]" } "#; match read_toml_config(BAD_URL_FIELD) { Err(InvalidBackendDefinition { err: InvalidUrl(_), .. }) => {} res => panic!("unexpected result: {:?}", res), } } /// Check that override_host field is a string. #[test] fn backend_configs_must_provide_override_host_as_a_string() { use BackendConfigError::InvalidOverrideHostEntry; static BAD_OVERRIDE_HOST_FIELD: &str = r#" [backends] "shark" = { url = "http://a.com", override_host = 3 } "#; match read_toml_config(BAD_OVERRIDE_HOST_FIELD) { Err(InvalidBackendDefinition { err: InvalidOverrideHostEntry, .. }) => {} res => panic!("unexpected result: {:?}", res), } } /// Check that override_host field is non empty. #[test] fn backend_configs_must_provide_a_non_empty_override_host() { use BackendConfigError::EmptyOverrideHost; static EMPTY_OVERRIDE_HOST_FIELD: &str = r#" [backends] "shark" = { url = "http://a.com", override_host = "" } "#; match read_toml_config(EMPTY_OVERRIDE_HOST_FIELD) { Err(InvalidBackendDefinition { err: EmptyOverrideHost, .. }) => {} res => panic!("unexpected result: {:?}", res), } } /// Check that override_host field is valid. 
#[test] fn backend_configs_must_provide_a_valid_override_host() { use BackendConfigError::InvalidOverrideHost; static BAD_OVERRIDE_HOST_FIELD: &str = r#" [backends] "shark" = { url = "http://a.com", override_host = "somehost.com\n" } "#; match read_toml_config(BAD_OVERRIDE_HOST_FIELD) { Err(InvalidBackendDefinition { err: InvalidOverrideHost(_), .. }) => {} res => panic!("unexpected result: {:?}", res), } } /// Check that dictionary definitions must be given as TOML tables. #[test] fn dictionary_configs_must_use_toml_tables() { use DictionaryConfigError::InvalidEntryType; static BAD_DEF: &str = r#" [dictionaries] "thing" = "stuff" "#; match read_toml_config(BAD_DEF) { Err(InvalidDictionaryDefinition { err: InvalidEntryType, .. }) => {} res => panic!("unexpected result: {:?}", res), } } /// Check that dictionary definitions cannot contain unrecognized keys. #[test] fn dictionary_configs_cannot_contain_unrecognized_keys() { let dir = tempdir().unwrap(); let file_path = dir.path().join("secrets.json"); let mut file = File::create(&file_path).unwrap(); writeln!(file, "{{}}").unwrap(); use DictionaryConfigError::UnrecognizedKey; let bad_default = format!( r#" [dictionaries] thing = {{ file = '{}', format = "json", shrimp = true }} "#, file_path.to_str().unwrap() ); match read_toml_config(&bad_default) { Err(InvalidDictionaryDefinition { err: UnrecognizedKey(key), .. }) if key == "shrimp" => {} res => panic!("unexpected result: {:?}", res), } } /// Check that dictionary definitions *must* include a `file` field. #[test] fn dictionary_configs_must_provide_a_file() { use DictionaryConfigError::MissingFile; static NO_FILE: &str = r#" [dictionaries] thing = {format = "json"} "#; match read_toml_config(NO_FILE) { Err(InvalidDictionaryDefinition { err: MissingFile, .. }) => {} res => panic!("unexpected result: {:?}", res), } } /// Check that dictionary definitions *must* include a `format` field. 
#[test] fn dictionary_configs_must_provide_a_format() { use DictionaryConfigError::MissingFormat; let dir = tempdir().unwrap(); let file_path = dir.path().join("secrets.json"); let mut file = File::create(&file_path).unwrap(); writeln!(file, "{{}}").unwrap(); let no_format_field = format!( r#" [dictionaries] "thing" = {{ file = '{}' }} "#, file_path.to_str().unwrap() ); match read_toml_config(&no_format_field) { Err(InvalidDictionaryDefinition { err: MissingFormat, .. }) => {} res => panic!("unexpected result: {:?}", res), } } /// Check that dictionary definitions must include a *valid* `name` field. #[test] fn dictionary_configs_must_provide_a_valid_name() { use DictionaryConfigError::InvalidName; let dir = tempdir().unwrap(); let file_path = dir.path().join("secrets.json"); let mut file = File::create(&file_path).unwrap(); writeln!(file, "{{}}").unwrap(); let bad_name_field = format!( r#" [dictionaries] "1" = {{ file = '{}', format = "json" }} "#, file_path.to_str().unwrap() ); match read_toml_config(&bad_name_field) { Err(InvalidDictionaryDefinition { err: InvalidName(_), .. }) => {} res => panic!("unexpected result: {:?}", res), } } /// Check that file field is a string. #[test] fn dictionary_configs_must_provide_file_as_a_string() { use DictionaryConfigError::InvalidFileEntry; static BAD_FILE_FIELD: &str = r#" [dictionaries] "thing" = { file = 3, format = "json" } "#; match read_toml_config(BAD_FILE_FIELD) { Err(InvalidDictionaryDefinition { err: InvalidFileEntry, .. }) => {} res => panic!("unexpected result: {:?}", res), } } /// Check that file field is non empty. #[test] fn dictionary_configs_must_provide_a_non_empty_file() { use DictionaryConfigError::EmptyFileEntry; static EMPTY_FILE_FIELD: &str = r#" [dictionaries] "thing" = { file = "", format = "json" } "#; match read_toml_config(EMPTY_FILE_FIELD) { Err(InvalidDictionaryDefinition { err: EmptyFileEntry, .. 
}) => {} res => panic!("unexpected result: {:?}", res), } } /// Check that format field is a string. #[test] fn dictionary_configs_must_provide_format_as_a_string() { use DictionaryConfigError::InvalidFormatEntry; static BAD_FORMAT_FIELD: &str = r#" [dictionaries] "thing" = { format = 3} "#; match read_toml_config(BAD_FORMAT_FIELD) { Err(InvalidDictionaryDefinition { err: InvalidFormatEntry, .. }) => {} res => panic!("unexpected result: {:?}", res), } } /// Check that format field is non empty. #[test] fn dictionary_configs_must_provide_a_non_empty_format() { use DictionaryConfigError::EmptyFormatEntry; static EMPTY_FORMAT_FIELD: &str = r#" [dictionaries] "thing" = { format = "" } "#; match read_toml_config(EMPTY_FORMAT_FIELD) { Err(InvalidDictionaryDefinition { err: EmptyFormatEntry, .. }) => {} res => panic!("unexpected result: {:?}", res), } } /// Check that format field set to json is valid. #[test] fn valid_dictionary_config_with_format_set_to_json() { let dir = tempdir().unwrap(); let file_path = dir.path().join("secrets.json"); let mut file = File::create(&file_path).unwrap(); writeln!(file, "{{}}").unwrap(); let dictionary = format!( r#" [dictionaries] "thing" = {{ file = '{}', format = "json" }} "#, file_path.to_str().unwrap() ); read_toml_config(&dictionary).expect( "can read toml data containing local dictionary configurations using json format", ); } }
34.756436
102
0.557942
3820e5eecfc42c71d7d94713431aa9e743369c70
270
/*! Corpus reading utilities Code is organized in the same manner as the [crate::io::writer] mod, with {text/meta}reader and a reader that contains both for a given language. !*/ pub mod corpus; mod metareader; pub mod reader; mod textreader; pub use corpus::Corpus;
22.5
145
0.748148
14ab8209e1dc45b224bdd3aeeaca9fe86a8b609a
1,952
use e2d2::headers::*; use e2d2::operators::*; use e2d2::utils::{Flow, Ipv4Prefix}; use fnv::FnvHasher; use std::collections::HashSet; use std::hash::BuildHasherDefault; type FnvHash = BuildHasherDefault<FnvHasher>; #[derive(Clone)] pub struct Acl { pub src_ip: Option<Ipv4Prefix>, pub dst_ip: Option<Ipv4Prefix>, pub src_port: Option<u16>, pub dst_port: Option<u16>, pub established: Option<bool>, // Related not done pub drop: bool, } impl Acl { pub fn matches(&self, flow: &Flow, connections: &HashSet<Flow, FnvHash>) -> bool { if (self.src_ip.is_none() || self.src_ip.unwrap().in_range(flow.src_ip)) && (self.dst_ip.is_none() || self.dst_ip.unwrap().in_range(flow.dst_ip)) && (self.src_port.is_none() || flow.src_port == self.src_port.unwrap()) && (self.dst_port.is_none() || flow.dst_port == self.dst_port.unwrap()) { if let Some(established) = self.established { let rev_flow = flow.reverse_flow(); (connections.contains(flow) || connections.contains(&rev_flow)) == established } else { true } } else { false } } } pub fn acl_match<T: 'static + Batch<Header = NullHeader>>(parent: T, acls: Vec<Acl>) -> CompositionBatch { let mut flow_cache = HashSet::<Flow, FnvHash>::with_hasher(Default::default()); parent .parse::<MacHeader>() .transform(box move |p| { p.get_mut_header().swap_addresses(); }) .parse::<IpHeader>() .filter(box move |p| { let flow = p.get_header().flow().unwrap(); for acl in &acls { if acl.matches(&flow, &flow_cache) { if !acl.drop { flow_cache.insert(flow); } return !acl.drop; } } return false; }) .compose() }
33.084746
106
0.554303
f4cd5958255e01c69cace7115eb93aa23adf8e9e
738
use crate::postgres::def::TableDef; use sea_query::{Alias, Table, TableCreateStatement}; impl TableDef { pub fn write(&self) -> TableCreateStatement { let mut table = Table::create(); table.table(Alias::new(self.info.name.as_ref())); for col in self.columns.iter() { table.col(&mut col.write()); } for primary_key in self.primary_key_constraints.iter() { table.primary_key(&mut primary_key.write()); } for unique in self.unique_constraints.iter() { table.index(&mut unique.write()); } for reference in self.reference_constraints.iter() { table.foreign_key(&mut reference.write()); } table } }
32.086957
64
0.594851
711803754408f50a757d4d51534221e99256c98b
859
// Copyright 2013-2016 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // FIXME(31528) we emit a bunch of silly errors here due to continuing past the // first one. This would be easy-ish to address by better recovery in tokenisation. pub fn trace_option(option: Option<isize>) { option.map(|some| 42; //~^ ERROR: expected one of } //~ ERROR: incorrect close delimiter //~^ ERROR: expected expression, found `)` fn main() {}
39.045455
83
0.706636
9007037c538f3c20e96446b2b50cd838e2bc09e7
54
use epoch_conv; fn main() { epoch_conv::run(); }
9
22
0.592593
2631fff3add96d5e9d22a37587b68e114b48f823
1,737
extern crate dbus; use dbus::{Connection, BusType, Message}; use dbus::obj::ObjectPath; // #[cfg(test)] // mod tests { // use super::*; // #[test] // fn test_list_connections() { // let connection_items = list_connection_items(); // assert_eq!(vec!["tan", "stand", "at", "yo"], connection_items); // } // } pub fn get_connection_settings(s: &str) -> Vec<dbus::MessageItem> { let c = Connection::get_private(BusType::System).unwrap(); let m = Message::new_method_call("org.freedesktop.NetworkManager", s, "org.freedesktop.NetworkManager.Settings.Connection", "GetSettings") .unwrap(); c.send_with_reply_and_block(m, 2000).unwrap().get_items() } pub fn get_connection_list() { let c = Connection::get_private(BusType::System); assert_eq!(c.is_ok(), true); let c = c.unwrap(); let m = Message::new_method_call("org.freedesktop.NetworkManager", "/org/freedesktop/NetworkManager/Settings", "org.freedesktop.NetworkManager.Settings", "ListConnections") .unwrap(); let r = c.send_with_reply_and_block(m, 2000); assert_eq!(r.is_ok(), true); let r = r.unwrap(); // println!("{:?}", r); let mut i = r.iter_init(); println!("{:?}", i.get()); // println!("{:?}", i.next()); println!("{:?}", i); // println!("{:?}", r.get1()); // for name in arr { // println!("{}", name); // } } // get_items(&self) -> Vec<MessageItem> // iter_init<'a>(&'a self) -> Iter<'a> // List devices
24.125
90
0.522165
01b4de7a394b3adf62c1200081adc67ac0d9965d
12,545
//! This module contains functionality that allows the usage of JavaScript to define visualizations. //! //! The `Instance` defines a generic way to wrap JS function calls and allow interaction with //! JS code and the visualization system. //! //! An `Instance` can be created via `Instance::from_object` where the a JS object is provided that //! fullfills the spec described in `java_script/definition.rs use crate::prelude::*; use crate::component::visualization::*; use crate::component::visualization::instance::PreprocessorConfiguration; use crate::component::visualization::java_script::binding::JsConsArgs; use crate::component::visualization::java_script::method; use crate::component::visualization; use core::result; use enso_frp as frp; use ensogl::data::color; use ensogl::display::DomScene; use ensogl::display::DomSymbol; use ensogl::display::Scene; use ensogl::display::shape::StyleWatch; use ensogl::display; use ensogl::system::web::JsValue; use ensogl::system::web; use ensogl::system::web::StyleSetter; use js_sys; use std::fmt::Formatter; // ============== // === Errors === // ============== /// Errors that can occur when transforming JS source to a visualization. #[derive(Clone,Debug)] #[allow(missing_docs)] pub enum Error { /// The provided `JsValue` was expected to be of type `object`, but was not. ValueIsNotAnObject { object:JsValue }, /// The object was expected to have the named property but does not. PropertyNotFoundOnObject { object:JsValue, property:String }, /// An error occurred on the javascript side when calling the class constructor. ConstructorError { js_error:JsValue }, } impl Display for Error { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { Error::ValueIsNotAnObject { object } => { f.write_fmt(format_args! ("JsValue was expected to be of type `object`, but was not: {:?}",object)) }, Error::PropertyNotFoundOnObject { object, property } => { f.write_fmt(format_args! 
("Object was expected to have property {:?} but has not: {:?}",property,object)) }, Error::ConstructorError { js_error } => { f.write_fmt(format_args!("Error while constructing object: {:?}",js_error)) }, } } } impl std::error::Error for Error {} /// Internal helper type to propagate results that can fail due to `JsVisualizationError`s. pub type Result<T> = result::Result<T, Error>; // ===================== // === InstanceModel === // ===================== /// Helper type for the callback used to set the preprocessor code. pub trait PreprocessorCallback = Fn(PreprocessorConfiguration); /// Internal helper type to store the preprocessor callback. type PreprocessorCallbackCell = Rc<RefCell<Option<Box<dyn PreprocessorCallback>>>>; /// `JsVisualizationGeneric` allows the use of arbitrary javascript to create visualizations. It /// takes function definitions as strings and proved those functions with data. #[derive(Clone,CloneRef,Derivative)] #[derivative(Debug)] #[allow(missing_docs)] pub struct InstanceModel { pub root_node : DomSymbol, pub logger : Logger, on_data_received : Rc<Option<js_sys::Function>>, set_size : Rc<Option<js_sys::Function>>, #[derivative(Debug="ignore")] object : Rc<java_script::binding::Visualization>, #[derivative(Debug="ignore")] preprocessor_change : PreprocessorCallbackCell, scene : Scene, } impl InstanceModel { fn get_background_color(scene:&Scene) -> color::Rgba { let styles = StyleWatch::new(&scene.style_sheet); styles.get_color(ensogl_theme::graph_editor::visualization::background) } fn create_root(scene:&Scene,logger:&Logger) -> result::Result<DomSymbol, Error> { let div = web::create_div(); let root_node = DomSymbol::new(&div); root_node.dom().set_attribute("class","visualization") .map_err(|js_error|Error::ConstructorError{js_error})?; let bg_color = Self::get_background_color(scene); let bg_red = bg_color.red*255.0; let bg_green = bg_color.green*255.0; let bg_blue = bg_color.blue*255.0; let bg_hex = 
format!("rgba({},{},{},{})",bg_red,bg_green,bg_blue,bg_color.alpha); root_node.dom().set_style_or_warn("background",bg_hex,logger); Ok(root_node) } /// We need to provide a closure to the Visualisation on the JS side, which we then later /// can hook up to the FRP. Here we create a `PreprocessorCallbackCell`, which can hold a /// closure, and a `PreprocessorCallback` which holds a weak reference to the closure inside of /// the `PreprocessorCallbackCell`. This allows us to pass the `PreprocessorCallback` to the /// javascript code, and call from there the closure stored in the `PreprocessorCallbackCell`. /// We will later on set the closure inside of the `PreprocessorCallbackCell` to emit an FRP /// event. fn preprocessor_change_callback () -> (PreprocessorCallbackCell,impl PreprocessorCallback) { let closure_cell = PreprocessorCallbackCell::default(); let weak_closure_cell = Rc::downgrade(&closure_cell); let closure = move |preprocessor_config| { if let Some(callback) = weak_closure_cell.upgrade() { callback.borrow().map_ref(|f|f(preprocessor_config)); } }; (closure_cell,closure) } fn instantiate_class_with_args(class:&JsValue, args:JsConsArgs) -> result::Result<java_script::binding::Visualization,Error> { let js_new = js_sys::Function::new_with_args("cls,arg", "return new cls(arg)"); let context = JsValue::NULL; let object = js_new.call2(&context,class,&args.into()) .map_err(|js_error|Error::ConstructorError {js_error})?; if !object.is_object() { return Err(Error::ValueIsNotAnObject { object } ) } let object:java_script::binding::Visualization = object.into(); Ok(object) } /// Tries to create a InstanceModel from the given visualisation class. 
pub fn from_class(class:&JsValue,scene:&Scene) -> result::Result<Self, Error> { let logger = Logger::new("Instance"); let root_node = Self::create_root(scene,&logger)?; let (preprocessor_change,closure) = Self::preprocessor_change_callback(); let styles = StyleWatch::new(&scene.style_sheet); let init_data = JsConsArgs::new(root_node.clone_ref(), styles, closure); let object = Self::instantiate_class_with_args(class,init_data)?; let on_data_received = get_method(object.as_ref(),method::ON_DATA_RECEIVED).ok(); let on_data_received = Rc::new(on_data_received); let set_size = get_method(object.as_ref(),method::SET_SIZE).ok(); let set_size = Rc::new(set_size); let object = Rc::new(object); let scene = scene.clone_ref(); Ok(InstanceModel{root_node,logger,on_data_received,set_size,object,preprocessor_change, scene}) } /// Hooks the root node into the given scene. /// /// MUST be called to make this visualization visible. pub fn set_dom_layer(&self, scene:&DomScene) { scene.manage(&self.root_node); } fn set_size(&self, size:Vector2) { let data_json = JsValue::from_serde(&size).unwrap(); let _ = self.try_call1(&self.set_size,&data_json); self.root_node.set_size(size); } fn receive_data(&self, data:&Data) -> result::Result<(),DataError> { let data_json = match data { Data::Json {content} => content, _ => return Err(DataError::BinaryNotSupported), }; let data_json:&serde_json::Value = data_json.deref(); let data_js = match JsValue::from_serde(data_json) { Ok(value) => value, Err(_) => return Err(DataError::InvalidDataType), }; self.try_call1(&self.on_data_received, &data_js) .map_err(|_| DataError::InternalComputationError)?; Ok(()) } /// Prompt visualization JS object to emit preprocessor change with its currently desired state. pub fn update_preprocessor(&self) -> result::Result<(),JsValue> { self.object.emitPreprocessorChange() } /// Helper method to call methods on the wrapped javascript object. 
fn try_call1(&self, method:&Option<js_sys::Function>, arg:&JsValue) -> result::Result<(),JsValue> { if let Some(method) = method { if let Err(error) = method.call1(&self.object, arg) { warning!(self.logger,"Failed to call method {method:?} with error: {error:?}"); return Err(error) } } Ok(()) } fn set_layer(&self, layer:Layer) { layer.apply_for_html_component(&self.scene,&self.root_node) } } // ================ // === Instance === // ================ /// Sample visualization that renders the given data as text. Useful for debugging and testing. #[derive(Clone,CloneRef,Debug,Shrinkwrap)] #[allow(missing_docs)] pub struct Instance { #[shrinkwrap(main_field)] model : InstanceModel, frp : visualization::instance::Frp, network : frp::Network, } impl Instance { /// Constructor. pub fn new(class:&JsValue, scene:&Scene) -> result::Result<Instance, Error> { let network = frp::Network::new("js_visualization_instance"); let frp = visualization::instance::Frp::new(&network); let model = InstanceModel::from_class(class,scene)?; model.set_dom_layer(&scene.dom.layers.back); Ok(Instance{model,frp,network}.init_frp(scene).init_preprocessor_change_callback()) } fn init_frp(self, scene:&Scene) -> Self { let network = &self.network; let model = self.model.clone_ref(); let frp = self.frp.clone_ref(); frp::extend! { network eval frp.set_size ((size) model.set_size(*size)); eval frp.send_data ([frp,model](data) { if let Err(e) = model.receive_data(data) { frp.data_receive_error.emit(Some(e)); } }); eval frp.set_layer ((layer) model.set_layer(*layer)); } frp.pass_events_to_dom_if_active(scene,network); self } fn init_preprocessor_change_callback(self) -> Self { // FIXME Does it leak memory? To be checked. 
let change = self.frp.preprocessor_change.clone_ref(); let callback = move |preprocessor_config| { change.emit(preprocessor_config) }; let callback = Box::new(callback); self.model.preprocessor_change.borrow_mut().replace(callback); if let Err(js_error) = self.model.update_preprocessor() { use enso_frp::web::js_to_string; let logger = self.model.logger.clone(); error!(logger,"Failed to trigger initial preprocessor update from JS: \ {js_to_string(&js_error)}"); } self } } impl From<Instance> for visualization::Instance { fn from(t:Instance) -> Self { Self::new(&t,&t.frp,&t.network,Some(t.model.root_node.clone_ref())) } } impl display::Object for Instance { fn display_object(&self) -> &display::object::Instance { self.model.root_node.display_object() } } // === Utils === /// Try to return the method specified by the given name on the given object as a /// `js_sys::Function`. fn get_method(object:&js_sys::Object, property:&str) -> Result<js_sys::Function> { let method_value = js_sys::Reflect::get(object,&property.into()); let method_value = method_value.map_err( |object| Error::PropertyNotFoundOnObject{object,property:property.to_string()})?; if method_value.is_undefined() { let object:JsValue = object.into(); return Err(Error::PropertyNotFoundOnObject{object,property:property.to_string()}); } let method_function:js_sys::Function = method_value.into(); Ok(method_function) }
39.825397
102
0.628218
cc76eeafca02d75def1b44d963ff19e0267773f4
4,511
use std::io::prelude::*; use std::path::Path; use std::sync::Arc; use std::{env, sync::Mutex}; use log::Level; use shrust::{Shell, ShellIO}; use synthizer::{ Buffer, BufferGenerator, LoggingBackend, Protocol, Source, Source3D, Synthizer, SynthizerError, }; struct Data { source: Source3D, generator: BufferGenerator, } fn main() -> Result<(), SynthizerError> { let args = env::args().collect::<Vec<String>>(); let file = args.get(1); if let Some(file) = file { synthizer::set_log_level(Level::Debug); synthizer::configure_logging_backend(LoggingBackend::Stderr)?; let synthizer = Synthizer::new()?; let mut context = synthizer.new_context()?; let path = Path::new(file); if path.exists() { let buffer = Buffer::new(Protocol::File, path, "")?; let generator = context.new_buffer_generator()?; generator.set_buffer(buffer.clone())?; let source = context.new_source3d()?; let data = Data { source: source, generator: generator, }; data.source.add_generator(&data.generator)?; let mut shell = Shell::new(Arc::new(data)); shell.new_command_noargs("play", "Play media.", move |_io, data| { data.source.add_generator(&data.generator)?; Ok(()) }); shell.new_command_noargs("pause", "Pause media.", |_io, data| { data.source.remove_generator(&data.generator)?; Ok(()) }); // Track this here because I'm too lazy to implement `DerefMut` on `Data`. 
let looping = Arc::new(Mutex::new(false)); shell.new_command_noargs("loop", "Toggle looping.", move |io, data| { let looping = looping.clone(); let mut looping = looping.lock().unwrap(); *looping = !*looping; data.generator.set_looping(*looping)?; if *looping { writeln!(io, "Looping")?; } else { writeln!(io, "Not looping")?; } Ok(()) }); shell.new_command( "gain", "Control the gain of the generator, in DB.", 1, |io, data, args| { let gain = args[0].parse::<f64>(); if let Some(gain) = gain.ok() { let base: f64 = 10.; let gain = base.powf(gain / 20.); writeln!(io, "Setting gain to {} DB", args[0])?; data.source.set_gain(gain)?; } else { writeln!(io, "{} not a valid gain", args[0])?; } Ok(()) }, ); shell.new_command("seek", "Seek in seconds.", 1, |io, data, args| { let position = args[0].parse::<f64>(); if let Some(position) = position.ok() { writeln!(io, "Seeking to {}", args[0])?; data.generator.set_position(position)?; } else { writeln!(io, "{} not a valid position", args[0])?; } Ok(()) }); shell.new_command( "pos", "Move the source. X is right, Y is forward, Z is up.", 3, |io, data, args| { let x = args[0].parse::<f64>(); let y = args[1].parse::<f64>(); let z = args[2].parse::<f64>(); if let (Some(x), Some(y), Some(z)) = (x.ok(), y.ok(), z.ok()) { writeln!(io, "Moving to ({}, {}, {})", x, y, z)?; data.source.set_position(x, y, z)?; } else { writeln!(io, "{:?} not a valid position", args)?; } Ok(()) }, ); shell.new_command_noargs("quit", "End this madness.", |_io, _data| { std::process::exit(0); }); shell.run_loop(&mut ShellIO::default()); } else { eprintln!("Path not found"); } } else { eprintln!( "Usage: {} <path>", env::current_exe().unwrap().to_string_lossy() ); } Ok(()) }
38.228814
99
0.441366
b92207cd812c7fabfe7689d7a8609c76accdda4d
3,014
//! //! Common utilities for dealing with PostgreSQL non-relation files. //! use crate::{pg_constants, transaction_id_precedes}; use bytes::BytesMut; use log::*; use crate::MultiXactId; pub fn transaction_id_set_status(xid: u32, status: u8, page: &mut BytesMut) { trace!( "handle_apply_request for RM_XACT_ID-{} (1-commit, 2-abort, 3-sub_commit)", status ); let byteno: usize = ((xid as u32 % pg_constants::CLOG_XACTS_PER_PAGE as u32) / pg_constants::CLOG_XACTS_PER_BYTE) as usize; let bshift: u8 = ((xid % pg_constants::CLOG_XACTS_PER_BYTE) * pg_constants::CLOG_BITS_PER_XACT as u32) as u8; page[byteno] = (page[byteno] & !(pg_constants::CLOG_XACT_BITMASK << bshift)) | (status << bshift); } pub fn transaction_id_get_status(xid: u32, page: &[u8]) -> u8 { let byteno: usize = ((xid as u32 % pg_constants::CLOG_XACTS_PER_PAGE as u32) / pg_constants::CLOG_XACTS_PER_BYTE) as usize; let bshift: u8 = ((xid % pg_constants::CLOG_XACTS_PER_BYTE) * pg_constants::CLOG_BITS_PER_XACT as u32) as u8; ((page[byteno] >> bshift) & pg_constants::CLOG_XACT_BITMASK) as u8 } // See CLOGPagePrecedes in clog.c pub const fn clogpage_precedes(page1: u32, page2: u32) -> bool { let mut xid1 = page1 * pg_constants::CLOG_XACTS_PER_PAGE; xid1 += pg_constants::FIRST_NORMAL_TRANSACTION_ID + 1; let mut xid2 = page2 * pg_constants::CLOG_XACTS_PER_PAGE; xid2 += pg_constants::FIRST_NORMAL_TRANSACTION_ID + 1; transaction_id_precedes(xid1, xid2) && transaction_id_precedes(xid1, xid2 + pg_constants::CLOG_XACTS_PER_PAGE - 1) } // See SlruMayDeleteSegment() in slru.c pub fn slru_may_delete_clogsegment(segpage: u32, cutoff_page: u32) -> bool { let seg_last_page = segpage + pg_constants::SLRU_PAGES_PER_SEGMENT - 1; assert_eq!(segpage % pg_constants::SLRU_PAGES_PER_SEGMENT, 0); clogpage_precedes(segpage, cutoff_page) && clogpage_precedes(seg_last_page, cutoff_page) } // Multixact utils pub fn mx_offset_to_flags_offset(xid: MultiXactId) -> usize { ((xid / pg_constants::MULTIXACT_MEMBERS_PER_MEMBERGROUP as u32) as u16 % 
pg_constants::MULTIXACT_MEMBERGROUPS_PER_PAGE * pg_constants::MULTIXACT_MEMBERGROUP_SIZE) as usize } pub fn mx_offset_to_flags_bitshift(xid: MultiXactId) -> u16 { (xid as u16) % pg_constants::MULTIXACT_MEMBERS_PER_MEMBERGROUP * pg_constants::MXACT_MEMBER_BITS_PER_XACT } /* Location (byte offset within page) of TransactionId of given member */ pub fn mx_offset_to_member_offset(xid: MultiXactId) -> usize { mx_offset_to_flags_offset(xid) + (pg_constants::MULTIXACT_FLAGBYTES_PER_GROUP + (xid as u16 % pg_constants::MULTIXACT_MEMBERS_PER_MEMBERGROUP) * 4) as usize } fn mx_offset_to_member_page(xid: u32) -> u32 { xid / pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32 } pub fn mx_offset_to_member_segment(xid: u32) -> i32 { (mx_offset_to_member_page(xid) / pg_constants::SLRU_PAGES_PER_SEGMENT) as i32 }
36.313253
100
0.723623
2ff185aa56d350274c97b301703a33c50ae8b825
9,241
use crate::prelude::Day; use chrono::prelude::*; /// The amount of Chalakim in an hour. pub(crate) const CHALAKIM_PER_HOUR: u64 = 1080; /// The amount of Chalakim between two Molads. // See https://www.chabad.org/library/article_cdo/aid/947923/jewish/Kiddush-HaChodesh-Chapter-Six.htm#bartnoteRef8a947923 pub(crate) const CHALAKIM_BETWEEN_MOLAD: u64 = 29 * 24 * CHALAKIM_PER_HOUR + 12 * CHALAKIM_PER_HOUR + 793; //An array documenting which years are leap years. The Hebrew calendar has a 19 year cycle of leap //years. const LEAP_YEARS: [bool; 19] = [ false, false, true, false, false, true, false, true, false, false, true, false, false, true, false, false, true, false, true, ]; // There are three starting dates. Right now, we don't work with negative Gregorian dates, so the // Epoch period is the first year of the first 19 year cycle after year 0. // // 1. Epoch - this is the first day, is on 6:00 PM Shabbos (Saturay) afternoon. // 2. FIRST_MOLAD - the amount of Chalakim from Epoch to the first Molad -(Tishrei 3673). It was on Monday, September 23rd at 12:16:6 Chalakim // 3. FIRST_YEAR: Self described - this is the first Hebrew calendar since the epoch. // 4. FIRST_RH: The first Rosh Hashana since the Epoch. pub(crate) const FIRST_MOLAD: u64 = 24 * 1080 + 18 * 1080 + (16 * 1080 / 60) + 6; pub(crate) const FIRST_YEAR: u64 = 3763; lazy_static! { pub(crate) static ref FIRST_RH: chrono::DateTime<Utc> = Utc.ymd(2, 9, 23).and_hms(18, 0, 0); pub(crate) static ref EPOCH: chrono::DateTime<Utc> = Utc.ymd(2, 9, 21).and_hms(18, 0, 0); } // Return the correct schedule for they year. There can be only six possible amount of days, so // short of a bug on my part, this should never panic. 
pub(crate) fn return_year_sched(days: u64) -> usize { match days { 353 => 0, 354 => 1, 355 => 2, 383 => 3, 384 => 4, 385 => 5, _ => panic!(format!("Wrong amount of days {}", days)), } } pub(crate) const YEAR_SCHED: [[u8; 14]; 6] = [ [30, 29, 29, 29, 30, 29, 0, 0, 30, 29, 30, 29, 30, 29], [30, 29, 30, 29, 30, 29, 0, 0, 30, 29, 30, 29, 30, 29], [30, 30, 30, 29, 30, 29, 0, 0, 30, 29, 30, 29, 30, 29], [30, 29, 29, 29, 30, 0, 30, 29, 30, 29, 30, 29, 30, 29], [30, 29, 30, 29, 30, 0, 30, 29, 30, 29, 30, 29, 30, 29], [30, 30, 30, 29, 30, 0, 30, 29, 30, 29, 30, 29, 30, 29], ]; //This calculates the amount of Chalakim per 19 year cycle. pub(crate) const AMNT_CHALAKIM_PER_CYCLE: u64 = 7 * 13 * CHALAKIM_BETWEEN_MOLAD + 12 * 12 * CHALAKIM_BETWEEN_MOLAD; fn get_molad_for_year(year: u64) -> u64 { let amnt_of_cycles = (year - FIRST_YEAR) / 19; let mut amnt_chalakim = AMNT_CHALAKIM_PER_CYCLE * amnt_of_cycles; let cur_year_in_cycle = (year - FIRST_YEAR) % 19; for i in 0..cur_year_in_cycle { amnt_chalakim += if LEAP_YEARS[i as usize] { 13 } else { 12 } * CHALAKIM_BETWEEN_MOLAD; } amnt_chalakim } //Does short calculation if this year is a leap year. pub(crate) fn months_per_year(year: u64) -> u64 { let year_in_cycle = ((year - FIRST_YEAR) % 19) as usize; if LEAP_YEARS[year_in_cycle] { 13 } else { 12 } } //Calculate how many Chalakim between Epoch and Rosh Hashana, and which day of the week does it //fall out on. 
pub(crate) fn get_rosh_hashana(year: u64) -> (u64, Day, u64) { let amnt_chalakim_since_first_molad = get_molad_for_year(year); let amnt_chalakim_since_epoch = amnt_chalakim_since_first_molad + FIRST_MOLAD; let mut amnt_days = amnt_chalakim_since_epoch / (CHALAKIM_PER_HOUR * 24); let amnt_chalakim = amnt_chalakim_since_epoch % (CHALAKIM_PER_HOUR * 24); let mut reg_postpone = false; //If the Molad is in the afternoon, postpone Rosh Hashana by a day if amnt_chalakim > 18 * CHALAKIM_PER_HOUR { amnt_days += 1; reg_postpone = true; } //This shouldn't panic, as there are seven options in Day (seven days in week). let mut dow = Day::from((amnt_days) % 7); // Lo Adu Rosh if dow == Day::Sunday || dow == Day::Wednesday || dow == Day::Friday { amnt_days += 1; reg_postpone = true; } // See Hilchos Kiddush HaChodesh Halacha 4 if !reg_postpone && dow == Day::Tuesday && amnt_chalakim > 9 * CHALAKIM_PER_HOUR + 204 && months_per_year(year) == 12 { amnt_days += 2; } if !reg_postpone && months_per_year(year - 1) == 13 && dow == Day::Monday && amnt_chalakim > 12 * CHALAKIM_PER_HOUR + 3 * CHALAKIM_PER_HOUR + 589 { amnt_days += 1; } //This shouldn't panic, as there are seven options in Day (seven days in week). 
dow = Day::from((amnt_days) % 7); (amnt_days, dow, amnt_chalakim_since_first_molad) } pub(crate) fn day_of_last_rh(days_since_first_rh: u64) -> u64 { let mut cur_year = (FIRST_YEAR) + 19 * days_since_first_rh / 6956; if get_rosh_hashana(cur_year).0 > days_since_first_rh { panic!("get_rosh_hashana(cur_year).0 < days_since_first_rh "); } while get_rosh_hashana(cur_year + 1).0 <= days_since_first_rh { cur_year += 1; } cur_year } #[cfg(test)] mod tests { use crate::convert::HebrewDate; use crate::prelude::*; use chrono::Duration; use std::num::NonZeroI8; use super::*; #[test] fn years_correct_sum() { assert_eq!(YEAR_SCHED[0].iter().map(|x| (*x) as u64).sum::<u64>(), 353); assert_eq!(YEAR_SCHED[1].iter().map(|x| (*x) as u64).sum::<u64>(), 354); assert_eq!(YEAR_SCHED[2].iter().map(|x| (*x) as u64).sum::<u64>(), 355); assert_eq!(YEAR_SCHED[3].iter().map(|x| (*x) as u64).sum::<u64>(), 383); assert_eq!(YEAR_SCHED[4].iter().map(|x| (*x) as u64).sum::<u64>(), 384); assert_eq!(YEAR_SCHED[5].iter().map(|x| (*x) as u64).sum::<u64>(), 385); } #[test] fn years_have_right_days() { use rayon; use rayon::prelude::*; ((FIRST_YEAR + 1)..1000000) .into_par_iter() .map(|i| { let amnt_days_between_rh_and_epoch = get_rosh_hashana(i).0; let amnt_days_in_year = get_rosh_hashana(i + 1).0 - amnt_days_between_rh_and_epoch; return_year_sched(amnt_days_in_year); }) .count(); } #[test] fn compare_hebrew_day_elul_sanity_check() { let mut orig_date = Utc.ymd(1901, 8, 15).and_hms(18, 0, 0); for j in 1..=29 { let heb_day = HebrewDate::from_ymd(5661, HebrewMonth::Elul, NonZeroI8::new(j).unwrap()).unwrap(); let back = heb_day.to_gregorian(); println!("{}", j); assert_eq!(orig_date, back); orig_date = orig_date + Duration::days(1); } } #[test] fn compare_hebrew_day_tishrei_sanity_check() { let mut orig_date = Utc.ymd(1900, 9, 23).and_hms(18, 0, 0); for j in 1..=30 { let heb_day = HebrewDate::from_ymd(5661, HebrewMonth::Tishrei, NonZeroI8::new(j).unwrap()) .unwrap(); let back = heb_day.to_gregorian(); 
println!("{}", j); assert_eq!(orig_date, back); orig_date = orig_date + Duration::days(1); } } #[test] fn compare_hebrew_day_adar1_sanity_check() { let mut orig_date = Utc.ymd(1900, 1, 30).and_hms(18, 0, 0); for j in 1..=30 { let heb_day = HebrewDate::from_ymd(5660, HebrewMonth::Adar1, NonZeroI8::new(j).unwrap()).unwrap(); let back = heb_day.to_gregorian(); println!("{}", j); assert_eq!(orig_date, back); orig_date = orig_date + Duration::days(1); } } #[test] fn test_rh_against_working_list() { test_against_working_list("RoshHashanaList", 1, HebrewMonth::Tishrei); } #[test] fn test_adar1_against_working_list() { test_against_working_list("Adar1List", 1, HebrewMonth::Adar1); } fn test_against_working_list(filename: &str, day: u8, month: HebrewMonth) { let file_contents = std::fs::read_to_string(format!("./testing/{}", filename)).unwrap(); file_contents .split("\n") .filter(|x| *x != "") .for_each(|x| { let res = x.split(" ").collect::<Vec<&str>>(); if res.len() != 1 { let eng_day = HebrewDate::from_ymd( res[0].parse::<u64>().unwrap(), month, NonZeroI8::new(day as i8).unwrap(), ) .unwrap() .to_gregorian() + Duration::days(1); println!("{:?}", eng_day); let sp = res[1].split("/").collect::<Vec<&str>>(); let (month, day, year) = (sp[0], sp[1], sp[2]); assert_eq!(month.parse::<u64>().unwrap() as u32, eng_day.month()); assert_eq!(day.parse::<u64>().unwrap() as u32, eng_day.day()); assert_eq!(year.parse::<u64>().unwrap() as i32, eng_day.year()); } }); } }
38.028807
142
0.584352
188f2b348e345d2541486225f9d29495fc0920af
4,277
// Re-export only the actual code, and then only use this re-export // The `generated` module below is just some boilerplate to properly isolate stuff // and avoid exposing internal details. // // You can use all the types from my_protocol as if they went from `wayland_client::protocol`. pub use generated::server::wl_drm; mod generated { // The generated code tends to trigger a lot of warnings // so we isolate it into a very permissive module #![allow(dead_code,non_camel_case_types,unused_unsafe,unused_variables)] #![allow(non_upper_case_globals,non_snake_case,unused_imports)] pub mod server { use smithay::reexports::{wayland_commons, wayland_server}; // These imports are used by the generated code pub(crate) use wayland_server::{Main, AnonymousObject, Resource, ResourceMap}; pub(crate) use wayland_commons::map::{Object, ObjectMetadata}; pub(crate) use wayland_commons::{Interface, MessageGroup}; pub(crate) use wayland_commons::wire::{Argument, MessageDesc, ArgumentType, Message}; pub(crate) use wayland_commons::smallvec; pub(crate) use wayland_server::sys; pub(crate) use wayland_server::protocol::wl_buffer; include!(concat!(env!("OUT_DIR"), "/wl_drm.rs")); } } use smithay::{ backend::allocator::{ Format, Fourcc, Modifier, dmabuf::{Dmabuf, DmabufFlags}, }, reexports::wayland_server::{Client, Display, Filter, Global, Main}, }; use std::{ convert::TryFrom, path::PathBuf, }; pub fn init_wl_drm_global<F>( display: &mut Display, device_path: PathBuf, mut formats: Vec<Format>, client_filter: F, ) -> Global<wl_drm::WlDrm> where F: FnMut(Client) -> bool + 'static { formats.dedup_by(|f1, f2| f1.code == f2.code); let global = Filter::new(move |(drm, version): (Main<wl_drm::WlDrm>, u32), _, _| { drm.quick_assign(move |drm, req, _| { match req { wl_drm::Request::Authenticate { .. } => drm.authenticated(), wl_drm::Request::CreateBuffer { id, .. 
} => { id.as_ref().post_error(wl_drm::Error::InvalidName.to_raw(), String::from("Flink handles are unsupported, use PRIME")); }, wl_drm::Request::CreatePlanarBuffer { id, .. } => { id.as_ref().post_error(wl_drm::Error::InvalidName.to_raw(), String::from("Flink handles are unsupported, use PRIME")); }, wl_drm::Request::CreatePrimeBuffer { id, name, width, height, format, offset0, stride0, .. } => { let format = match Fourcc::try_from(format) { Ok(format) => format, Err(_) => { id.as_ref().post_error(wl_drm::Error::InvalidFormat.to_raw(), String::from("Format not advertised by wl_drm")); return; } }; if width < 1 || height < 1 { id.as_ref().post_error(wl_drm::Error::InvalidFormat.to_raw(), String::from("width or height not positive")); return; } let mut dma = Dmabuf::builder((width, height), format, DmabufFlags::empty()); dma.add_plane(name, 0, offset0 as u32, stride0 as u32, Modifier::Invalid); id.as_ref().user_data().set_threadsafe(|| dma.build().unwrap()); id.quick_assign(|_, _, _| {}); slog_scope::trace!("Created a new validated dma wl_buffer via wl_drm."); }, } }); drm.device(device_path.to_string_lossy().into_owned()); if version >= 2 { drm.capabilities(wl_drm::Capability::Prime.to_raw()); } for format in &formats { if let Some(converted) = wl_drm::Format::from_raw(format.code as u32) { drm.format(converted.to_raw()); } } }); display.create_global_with_filter(2, global, client_filter) }
40.733333
139
0.559037
f977cb47cd9c0d52be736ef7b8738a290453eeef
1,705
use iron::status;
use iron::{BeforeMiddleware, IronError, IronResult, Request, Response};

use crate::util::StringError;

/// HTTP Basic-auth middleware: rejects any request whose `Authorization`
/// header does not carry the configured username/password pair.
pub struct AuthChecker {
    username: String,
    password: String,
}

impl AuthChecker {
    /// Builds a checker from a `"username:password"` spec string.
    ///
    /// Only the first `:` separates the two fields, so passwords may
    /// themselves contain colons.
    ///
    /// # Panics
    ///
    /// Panics with a descriptive message if `s` contains no `:` (the
    /// previous code died here with an opaque index-out-of-bounds panic).
    pub fn new(s: &str) -> AuthChecker {
        let mut parts = s.splitn(2, ':');
        let username = parts.next().unwrap_or_default().to_owned();
        let password = parts
            .next()
            .expect("auth string must be of the form `username:password`")
            .to_owned();
        AuthChecker { username, password }
    }
}

impl BeforeMiddleware for AuthChecker {
    /// Runs before every request: lets it through when the Basic credentials
    /// match, otherwise answers `401 Unauthorized` (adding a
    /// `WWW-Authenticate` challenge when no credentials were sent at all).
    fn before(&self, req: &mut Request) -> IronResult<()> {
        use iron::headers::{Authorization, Basic};

        match req.headers.get::<Authorization<Basic>>() {
            Some(&Authorization(Basic {
                ref username,
                ref password,
            })) => {
                // Compare by reference: the previous code allocated a clone
                // of the stored password into a fresh `Option<String>` on
                // every single request.
                if username == self.username.as_str()
                    && password.as_deref() == Some(self.password.as_str())
                {
                    Ok(())
                } else {
                    Err(IronError {
                        error: Box::new(StringError("authorization error".to_owned())),
                        response: Response::with((
                            status::Unauthorized,
                            "Wrong username or password.",
                        )),
                    })
                }
            }
            None => {
                // No credentials at all: send a challenge so interactive
                // clients (browsers) prompt for a login.
                let mut resp = Response::with(status::Unauthorized);
                resp.headers
                    .set_raw("WWW-Authenticate", vec![b"Basic realm=\"main\"".to_vec()]);
                Err(IronError {
                    error: Box::new(StringError("authorization error".to_owned())),
                    response: resp,
                })
            }
        }
    }
}
31.574074
99
0.469795
1aa2d0347e6139f3631fd66c4419ba5080157571
2,148
use crate::{client::Client, request::Request, response::ResponseFuture, routing::Route};
use twilight_model::invite::Invite;

// Query flags accumulated by the builder before the request executes.
// Both default to `false` (see `GetInvite::new`).
struct GetInviteFields {
    with_counts: bool,
    with_expiration: bool,
}

/// Get information about an invite by its code.
///
/// If [`with_counts`] is called, the returned invite will contain approximate
/// member counts. If [`with_expiration`] is called, it will contain the
/// expiration date.
///
/// # Examples
///
/// ```rust,no_run
/// use twilight_http::Client;
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let client = Client::new("my token".to_owned());
///
/// let invite = client
///     .invite("code")
///     .with_counts()
///     .exec()
///     .await?;
/// # Ok(()) }
/// ```
///
/// [`with_counts`]: Self::with_counts
/// [`with_expiration`]: Self::with_expiration
#[must_use = "requests must be configured and executed"]
pub struct GetInvite<'a> {
    // Invite code as supplied by the caller; borrowed for the builder's life.
    code: &'a str,
    fields: GetInviteFields,
    http: &'a Client,
}

impl<'a> GetInvite<'a> {
    // Crate-internal constructor used by `Client::invite`; starts with both
    // optional query flags disabled.
    pub(crate) const fn new(http: &'a Client, code: &'a str) -> Self {
        Self {
            code,
            fields: GetInviteFields {
                with_counts: false,
                with_expiration: false,
            },
            http,
        }
    }

    /// Whether the invite returned should contain approximate member counts.
    pub const fn with_counts(mut self) -> Self {
        self.fields.with_counts = true;

        self
    }

    /// Whether the invite returned should contain its expiration date.
    pub const fn with_expiration(mut self) -> Self {
        self.fields.with_expiration = true;

        self
    }

    /// Execute the request, returning a future resolving to a [`Response`].
    ///
    /// [`Response`]: crate::response::Response
    pub fn exec(self) -> ResponseFuture<Invite> {
        // Build the route from the accumulated flags; the HTTP client owns
        // rate limiting and actually issuing the request.
        let request = Request::from_route(&Route::GetInviteWithExpiration {
            code: self.code,
            with_counts: self.fields.with_counts,
            with_expiration: self.fields.with_expiration,
        });

        self.http.request(request)
    }
}
26.85
88
0.599162
61b96feb7e13cd26840a8ef1cbd163773ab829c6
1,209
use failure::Error; use utility; pub fn rosalind_ba3c(filename: &str) -> Result<(), Error> { let contents = utility::io::input_from_file(filename)?; let kmers: Vec<_> = contents.split('\n').collect(); let overlap_graph = get_overlap_graph(&kmers, kmers[0].len() - 1); for (kmer_1, kmer_2) in overlap_graph { println!("{} -> {}", kmer_1, kmer_2); } Ok(()) } pub fn get_overlap_graph(sequences: &[&str], overlap_length: usize) -> Vec<(String, String)> { let nodes = sequences .iter() .map(|sequence| { let length = sequence.len(); ( *sequence, &sequence[0..overlap_length], &sequence[(length - overlap_length)..], ) }) .collect::<Vec<(&str, &str, &str)>>(); let mut edges = Vec::new(); for i in 0..nodes.len() { for j in 0..nodes.len() { if i != j { let (key_0, _, suffix_0) = nodes[i]; let (key_1, prefix_1, _) = nodes[j]; if suffix_0 == prefix_1 { edges.push((key_0.to_owned(), key_1.to_owned())); } } } } edges }
29.487805
94
0.490488
4800d3c9bb7abeb7314874f9982b47a798bdf688
9,223
use std::fmt; use std::convert::From; use std::borrow::Cow; use yansi::Paint; use crate::codegen::StaticRouteInfo; use crate::handler::Handler; use crate::http::{uri, Method, MediaType}; use crate::router::RouteUri; /// A route: a method, its handler, path, rank, and format/media type. #[derive(Clone)] pub struct Route { /// The name of this route, if one was given. pub name: Option<Cow<'static, str>>, /// The method this route matches against. pub method: Method, /// The function that should be called when the route matches. pub handler: Box<dyn Handler>, /// The route URI. pub uri: RouteUri<'static>, /// The rank of this route. Lower ranks have higher priorities. pub rank: isize, /// The media type this route matches against, if any. pub format: Option<MediaType>, } impl Route { /// Creates a new route with the given method, path, and handler with a base /// of `/`. /// /// # Ranking /// /// The default rank prefers static components over dynamic components in /// both paths and queries: the _more_ static a route's path and query are, /// the higher its precedence. /// /// There are three "colors" to paths and queries: /// 1. `static`, meaning all components are static /// 2. `partial`, meaning at least one component is dynamic /// 3. `wild`, meaning all components are dynamic /// /// Static paths carry more weight than static queries. The same is true for /// partial and wild paths. This results in the following default ranking /// table: /// /// | path | query | rank | /// |---------|---------|------| /// | static | static | -12 | /// | static | partial | -11 | /// | static | wild | -10 | /// | static | none | -9 | /// | partial | static | -8 | /// | partial | partial | -7 | /// | partial | wild | -6 | /// | partial | none | -5 | /// | wild | static | -4 | /// | wild | partial | -3 | /// | wild | wild | -2 | /// | wild | none | -1 | /// /// Note that _lower_ ranks have _higher_ precedence. 
/// /// # Example /// /// ```rust /// use rocket::Route; /// use rocket::http::Method; /// # use rocket::handler::{dummy as handler}; /// /// macro_rules! assert_rank { /// ($($uri:expr => $rank:expr,)*) => {$( /// let route = Route::new(Method::Get, $uri, handler); /// assert_eq!(route.rank, $rank); /// )*} /// } /// /// assert_rank! { /// "/?foo" => -12, // static path, static query /// "/foo/bar?a=b&bob" => -12, // static path, static query /// "/?a=b&bob" => -12, // static path, static query /// /// "/?a&<zoo..>" => -11, // static path, partial query /// "/foo?a&<zoo..>" => -11, // static path, partial query /// "/?a&<zoo>" => -11, // static path, partial query /// /// "/?<zoo..>" => -10, // static path, wild query /// "/foo?<zoo..>" => -10, // static path, wild query /// "/foo?<a>&<b>" => -10, // static path, wild query /// /// "/" => -9, // static path, no query /// "/foo/bar" => -9, // static path, no query /// /// "/a/<b>?foo" => -8, // partial path, static query /// "/a/<b..>?foo" => -8, // partial path, static query /// "/<a>/b?foo" => -8, // partial path, static query /// /// "/a/<b>?<b>&c" => -7, // partial path, partial query /// "/a/<b..>?a&<c..>" => -7, // partial path, partial query /// /// "/a/<b>?<c..>" => -6, // partial path, wild query /// "/a/<b..>?<c>&<d>" => -6, // partial path, wild query /// "/a/<b..>?<c>" => -6, // partial path, wild query /// /// "/a/<b>" => -5, // partial path, no query /// "/<a>/b" => -5, // partial path, no query /// "/a/<b..>" => -5, // partial path, no query /// /// "/<b>/<c>?foo&bar" => -4, // wild path, static query /// "/<a>/<b..>?foo" => -4, // wild path, static query /// "/<b..>?cat" => -4, // wild path, static query /// /// "/<b>/<c>?<foo>&bar" => -3, // wild path, partial query /// "/<a>/<b..>?a&<b..>" => -3, // wild path, partial query /// "/<b..>?cat&<dog>" => -3, // wild path, partial query /// /// "/<b>/<c>?<foo>" => -2, // wild path, wild query /// "/<a>/<b..>?<b..>" => -2, // wild path, wild query /// 
"/<b..>?<c>&<dog>" => -2, // wild path, wild query /// /// "/<b>/<c>" => -1, // wild path, no query /// "/<a>/<b..>" => -1, // wild path, no query /// "/<b..>" => -1, // wild path, no query /// } /// ``` /// /// # Panics /// /// Panics if `path` is not a valid origin URI or Rocket route URI. pub fn new<H: Handler>(method: Method, uri: &str, handler: H) -> Route { Route::ranked(None, method, uri, handler) } /// Creates a new route with the given rank, method, path, and handler with /// a base of `/`. /// /// # Example /// /// ```rust /// use rocket::Route; /// use rocket::http::Method; /// # use rocket::handler::{dummy as handler}; /// /// // this is a rank 1 route matching requests to `GET /` /// let index = Route::ranked(1, Method::Get, "/", handler); /// ``` /// /// # Panics /// /// Panics if `path` is not a valid origin URI or Rocket route URI. pub fn ranked<H, R>(rank: R, method: Method, uri: &str, handler: H) -> Route where H: Handler + 'static, R: Into<Option<isize>>, { let uri = RouteUri::new("/", uri); let rank = rank.into().unwrap_or_else(|| uri.default_rank()); Route { name: None, format: None, handler: Box::new(handler), rank, uri, method, } } /// Maps the `base` of this route using `mapper`, returning a new `Route` /// with the returned base. /// /// `mapper` is called with the current base. The returned `String` is used /// as the new base if it is a valid URI. If the returned base URI contains /// a query, it is ignored. Returns an error if the base produced by /// `mapper` is not a valid origin URI. 
/// /// # Example /// /// ```rust /// use rocket::Route; /// use rocket::http::{Method, uri::Origin}; /// # use rocket::handler::{dummy as handler, Outcome, HandlerFuture}; /// /// let index = Route::new(Method::Get, "/foo/bar", handler); /// assert_eq!(index.uri.base(), "/"); /// assert_eq!(index.uri.unmounted_origin.path(), "/foo/bar"); /// assert_eq!(index.uri.path(), "/foo/bar"); /// /// let index = index.map_base(|base| format!("{}{}", "/boo", base)).unwrap(); /// assert_eq!(index.uri.base(), "/boo"); /// assert_eq!(index.uri.unmounted_origin.path(), "/foo/bar"); /// assert_eq!(index.uri.path(), "/boo/foo/bar"); /// ``` pub fn map_base<'a, F>(mut self, mapper: F) -> Result<Self, uri::Error<'static>> where F: FnOnce(uri::Origin<'a>) -> String { let base = mapper(self.uri.base); self.uri = RouteUri::try_new(&base, &self.uri.unmounted_origin.to_string())?; Ok(self) } } impl fmt::Display for Route { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if let Some(ref n) = self.name { write!(f, "{}{}{} ", Paint::cyan("("), Paint::white(n), Paint::cyan(")"))?; } write!(f, "{} ", Paint::green(&self.method))?; if self.uri.base() != "/" { write!(f, "{}", Paint::blue(self.uri.base()).underline())?; } write!(f, "{}", Paint::blue(&self.uri.unmounted_origin))?; if self.rank > 1 { write!(f, " [{}]", Paint::default(&self.rank).bold())?; } if let Some(ref format) = self.format { write!(f, " {}", Paint::yellow(format))?; } Ok(()) } } impl fmt::Debug for Route { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Route") .field("name", &self.name) .field("method", &self.method) .field("uri", &self.uri) .field("rank", &self.rank) .field("format", &self.format) .finish() } } #[doc(hidden)] impl From<StaticRouteInfo> for Route { fn from(info: StaticRouteInfo) -> Route { // This should never panic since `info.path` is statically checked. 
let mut route = Route::new(info.method, info.path, info.handler); route.format = info.format; route.name = Some(info.name.into()); if let Some(rank) = info.rank { route.rank = rank; } route } }
36.892
87
0.47902
89262ac0573f27cda91cab3a8a702fc33d6dcc60
12,203
#![macro_use] use core::marker::PhantomData; use core::ptr; use core::task::Poll; use embassy::interrupt::{Interrupt, InterruptExt}; use embassy::util::Unborrow; use embassy_hal_common::drop::DropBomb; use embassy_hal_common::unborrow; use futures::future::poll_fn; use crate::gpio::sealed::Pin as _; use crate::gpio::{self, Pin as GpioPin}; use crate::pac; pub use crate::pac::qspi::ifconfig0::ADDRMODE_A as AddressMode; pub use crate::pac::qspi::ifconfig0::PPSIZE_A as WritePageSize; pub use crate::pac::qspi::ifconfig0::READOC_A as ReadOpcode; pub use crate::pac::qspi::ifconfig0::WRITEOC_A as WriteOpcode; // TODO // - config: // - 32bit address mode // - SPI freq // - SPI sck delay // - Deep power down mode (DPM) // - SPI mode 3 // - activate/deactivate // - set gpio in high drive pub struct DeepPowerDownConfig { /// Time required for entering DPM, in units of 16us pub enter_time: u16, /// Time required for exiting DPM, in units of 16us pub exit_time: u16, } #[non_exhaustive] pub struct Config { pub xip_offset: u32, pub read_opcode: ReadOpcode, pub write_opcode: WriteOpcode, pub write_page_size: WritePageSize, pub deep_power_down: Option<DeepPowerDownConfig>, } impl Default for Config { fn default() -> Self { Self { read_opcode: ReadOpcode::READ4IO, write_opcode: WriteOpcode::PP4IO, xip_offset: 0, write_page_size: WritePageSize::_256BYTES, deep_power_down: None, } } } #[derive(Debug, Copy, Clone, Eq, PartialEq)] #[cfg_attr(feature = "defmt", derive(defmt::Format))] #[non_exhaustive] pub enum Error { // TODO add "not in data memory" error and check for it } pub struct Qspi<'d, T: Instance> { dpm_enabled: bool, phantom: PhantomData<&'d mut T>, } impl<'d, T: Instance> Qspi<'d, T> { pub async fn new( _qspi: impl Unborrow<Target = T> + 'd, irq: impl Unborrow<Target = T::Interrupt> + 'd, sck: impl Unborrow<Target = impl GpioPin> + 'd, csn: impl Unborrow<Target = impl GpioPin> + 'd, io0: impl Unborrow<Target = impl GpioPin> + 'd, io1: impl Unborrow<Target = impl GpioPin> + 
'd, io2: impl Unborrow<Target = impl GpioPin> + 'd, io3: impl Unborrow<Target = impl GpioPin> + 'd, config: Config, ) -> Qspi<'d, T> { unborrow!(irq, sck, csn, io0, io1, io2, io3); let r = T::regs(); let sck = sck.degrade(); let csn = csn.degrade(); let io0 = io0.degrade(); let io1 = io1.degrade(); let io2 = io2.degrade(); let io3 = io3.degrade(); for pin in [&sck, &csn, &io0, &io1, &io2, &io3] { pin.set_high(); pin.conf().write(|w| w.dir().output().drive().h0h1()); } r.psel.sck.write(|w| unsafe { w.bits(sck.psel_bits()) }); r.psel.csn.write(|w| unsafe { w.bits(csn.psel_bits()) }); r.psel.io0.write(|w| unsafe { w.bits(io0.psel_bits()) }); r.psel.io1.write(|w| unsafe { w.bits(io1.psel_bits()) }); r.psel.io2.write(|w| unsafe { w.bits(io2.psel_bits()) }); r.psel.io3.write(|w| unsafe { w.bits(io3.psel_bits()) }); r.ifconfig0.write(|w| { w.addrmode().variant(AddressMode::_24BIT); w.dpmenable().bit(config.deep_power_down.is_some()); w.ppsize().variant(config.write_page_size); w.readoc().variant(config.read_opcode); w.writeoc().variant(config.write_opcode); w }); if let Some(dpd) = &config.deep_power_down { r.dpmdur.write(|w| unsafe { w.enter().bits(dpd.enter_time); w.exit().bits(dpd.exit_time); w }) } r.ifconfig1.write(|w| unsafe { w.sckdelay().bits(80); w.dpmen().exit(); w.spimode().mode0(); w.sckfreq().bits(3); w }); r.xipoffset.write(|w| unsafe { w.xipoffset().bits(config.xip_offset); w }); irq.set_handler(Self::on_interrupt); irq.unpend(); irq.enable(); // Enable it r.enable.write(|w| w.enable().enabled()); let mut res = Self { dpm_enabled: config.deep_power_down.is_some(), phantom: PhantomData, }; r.events_ready.reset(); r.intenset.write(|w| w.ready().set()); r.tasks_activate.write(|w| w.tasks_activate().bit(true)); res.wait_ready().await; res } fn on_interrupt(_: *mut ()) { let r = T::regs(); let s = T::state(); if r.events_ready.read().bits() != 0 { s.ready_waker.wake(); r.intenclr.write(|w| w.ready().clear()); } } pub async fn custom_instruction( &mut self, 
opcode: u8, req: &[u8], resp: &mut [u8], ) -> Result<(), Error> { let bomb = DropBomb::new(); assert!(req.len() <= 8); assert!(resp.len() <= 8); let mut dat0: u32 = 0; let mut dat1: u32 = 0; for i in 0..4 { if i < req.len() { dat0 |= (req[i] as u32) << (i * 8); } } for i in 0..4 { if i + 4 < req.len() { dat1 |= (req[i + 4] as u32) << (i * 8); } } let len = core::cmp::max(req.len(), resp.len()) as u8; let r = T::regs(); r.cinstrdat0.write(|w| unsafe { w.bits(dat0) }); r.cinstrdat1.write(|w| unsafe { w.bits(dat1) }); r.events_ready.reset(); r.intenset.write(|w| w.ready().set()); r.cinstrconf.write(|w| { let w = unsafe { w.opcode().bits(opcode) }; let w = unsafe { w.length().bits(len + 1) }; let w = w.lio2().bit(true); let w = w.lio3().bit(true); let w = w.wipwait().bit(true); let w = w.wren().bit(true); let w = w.lfen().bit(false); let w = w.lfstop().bit(false); w }); self.wait_ready().await; let r = T::regs(); let dat0 = r.cinstrdat0.read().bits(); let dat1 = r.cinstrdat1.read().bits(); for i in 0..4 { if i < resp.len() { resp[i] = (dat0 >> (i * 8)) as u8; } } for i in 0..4 { if i + 4 < resp.len() { resp[i] = (dat1 >> (i * 8)) as u8; } } bomb.defuse(); Ok(()) } async fn wait_ready(&mut self) { poll_fn(move |cx| { let r = T::regs(); let s = T::state(); s.ready_waker.register(cx.waker()); if r.events_ready.read().bits() != 0 { return Poll::Ready(()); } Poll::Pending }) .await } pub async fn read(&mut self, address: usize, data: &mut [u8]) -> Result<(), Error> { let bomb = DropBomb::new(); assert_eq!(data.as_ptr() as u32 % 4, 0); assert_eq!(data.len() as u32 % 4, 0); assert_eq!(address as u32 % 4, 0); let r = T::regs(); r.read .src .write(|w| unsafe { w.src().bits(address as u32) }); r.read .dst .write(|w| unsafe { w.dst().bits(data.as_ptr() as u32) }); r.read .cnt .write(|w| unsafe { w.cnt().bits(data.len() as u32) }); r.events_ready.reset(); r.intenset.write(|w| w.ready().set()); r.tasks_readstart.write(|w| w.tasks_readstart().bit(true)); self.wait_ready().await; 
bomb.defuse(); Ok(()) } pub async fn write(&mut self, address: usize, data: &[u8]) -> Result<(), Error> { let bomb = DropBomb::new(); assert_eq!(data.as_ptr() as u32 % 4, 0); assert_eq!(data.len() as u32 % 4, 0); assert_eq!(address as u32 % 4, 0); let r = T::regs(); r.write .src .write(|w| unsafe { w.src().bits(data.as_ptr() as u32) }); r.write .dst .write(|w| unsafe { w.dst().bits(address as u32) }); r.write .cnt .write(|w| unsafe { w.cnt().bits(data.len() as u32) }); r.events_ready.reset(); r.intenset.write(|w| w.ready().set()); r.tasks_writestart.write(|w| w.tasks_writestart().bit(true)); self.wait_ready().await; bomb.defuse(); Ok(()) } pub async fn erase(&mut self, address: usize) -> Result<(), Error> { let bomb = DropBomb::new(); assert_eq!(address as u32 % 4096, 0); let r = T::regs(); r.erase .ptr .write(|w| unsafe { w.ptr().bits(address as u32) }); r.erase.len.write(|w| w.len()._4kb()); r.events_ready.reset(); r.intenset.write(|w| w.ready().set()); r.tasks_erasestart.write(|w| w.tasks_erasestart().bit(true)); self.wait_ready().await; bomb.defuse(); Ok(()) } } impl<'d, T: Instance> Drop for Qspi<'d, T> { fn drop(&mut self) { let r = T::regs(); if self.dpm_enabled { trace!("qspi: doing deep powerdown..."); r.ifconfig1.modify(|_, w| w.dpmen().enter()); // Wait for DPM enter. // Unfortunately we must spin. There's no way to do this interrupt-driven. // The READY event does NOT fire on DPM enter (but it does fire on DPM exit :shrug:) while r.status.read().dpm().is_disabled() {} // Wait MORE for DPM enter. // I have absolutely no idea why, but the wait above is not enough :'( // Tested with mx25r64 in nrf52840-dk, and with mx25r16 in custom board cortex_m::asm::delay(4096); } // it seems events_ready is not generated in response to deactivate. nrfx doesn't wait for it. 
r.tasks_deactivate.write(|w| w.tasks_deactivate().set_bit()); // Workaround https://infocenter.nordicsemi.com/topic/errata_nRF52840_Rev1/ERR/nRF52840/Rev1/latest/anomaly_840_122.html?cp=4_0_1_2_1_7 // Note that the doc has 2 register writes, but the first one is really the write to tasks_deactivate, // so we only do the second one here. unsafe { ptr::write_volatile(0x40029054 as *mut u32, 1) } r.enable.write(|w| w.enable().disabled()); // Note: we do NOT deconfigure CSN here. If DPM is in use and we disconnect CSN, // leaving it floating, the flash chip might read it as zero which would cause it to // spuriously exit DPM. gpio::deconfigure_pin(r.psel.sck.read().bits()); gpio::deconfigure_pin(r.psel.io0.read().bits()); gpio::deconfigure_pin(r.psel.io1.read().bits()); gpio::deconfigure_pin(r.psel.io2.read().bits()); gpio::deconfigure_pin(r.psel.io3.read().bits()); trace!("qspi: dropped"); } } pub(crate) mod sealed { use embassy::waitqueue::AtomicWaker; use super::*; pub struct State { pub ready_waker: AtomicWaker, } impl State { pub const fn new() -> Self { Self { ready_waker: AtomicWaker::new(), } } } pub trait Instance { fn regs() -> &'static pac::qspi::RegisterBlock; fn state() -> &'static State; } } pub trait Instance: Unborrow<Target = Self> + sealed::Instance + 'static { type Interrupt: Interrupt; } macro_rules! impl_qspi { ($type:ident, $pac_type:ident, $irq:ident) => { impl crate::qspi::sealed::Instance for peripherals::$type { fn regs() -> &'static pac::qspi::RegisterBlock { unsafe { &*pac::$pac_type::ptr() } } fn state() -> &'static crate::qspi::sealed::State { static STATE: crate::qspi::sealed::State = crate::qspi::sealed::State::new(); &STATE } } impl crate::qspi::Instance for peripherals::$type { type Interrupt = crate::interrupt::$irq; } }; }
29.334135
143
0.525199
0a903e2018cc11f9e91656e6f9ff49729c78838e
1,950
mod array_value;
mod midi_event_value;
mod midi_value;
mod num_value;
mod tuple_value;

pub use self::array_value::{ArrayValue, ARRAY_CAPACITY};
pub use self::midi_event_value::MidiEventValue;
pub use self::midi_value::MidiValue;
pub use self::num_value::NumValue;
pub use self::tuple_value::TupleValue;

use crate::mir::{ConstantValue, VarType};
use inkwell::context::Context;
use inkwell::types::{BasicType, StructType};
use inkwell::values::{BasicValue, BasicValueEnum};

/// Maps a MIR `VarType` onto the LLVM struct type that represents it at runtime.
///
/// # Panics
///
/// Panics on `VarType::Void`, which has no runtime representation.
pub fn remap_type(context: &Context, mir_type: &VarType) -> StructType {
    match mir_type {
        VarType::Num => NumValue::get_type(context),
        VarType::Midi => MidiValue::get_type(context),
        VarType::Tuple(inner_types) => {
            // Remap element types first; the trait-object refs below borrow from this Vec.
            let inner_structs: Vec<_> =
                inner_types.iter().map(|t| remap_type(context, t)).collect();
            let inner_types: Vec<_> = inner_structs.iter().map(|t| t as &BasicType).collect();
            TupleValue::get_type(context, &inner_types)
        }
        VarType::Array(inner_type) => {
            // `inner_type` deref-coerces from &Box<VarType> to &VarType; no extra `&` needed.
            ArrayValue::get_type(context, remap_type(context, inner_type))
        }
        VarType::Void => panic!("Void type cannot be remapped"),
    }
}

/// Returns whether values of `mir_type` are passed by value (only `Num` qualifies).
pub fn pass_type_by_val(mir_type: &VarType) -> bool {
    // Idiomatic form of `match ... { Num => true, _ => false }` (clippy: match_like_matches_macro).
    matches!(mir_type, VarType::Num)
}

/// Lowers a MIR constant into the corresponding LLVM constant value.
pub fn remap_constant(context: &Context, value: &ConstantValue) -> BasicValueEnum {
    match value {
        ConstantValue::Num(num) => {
            NumValue::get_const(context, num.left, num.right, num.form as u8).into()
        }
        ConstantValue::Tuple(tuple) => {
            // Recursively lower each item, then hand trait-object refs to the tuple builder.
            let values: Vec<_> = tuple
                .items
                .iter()
                .map(|val| remap_constant(context, val))
                .collect();
            let value_refs: Vec<_> = values.iter().map(|val| val as &BasicValue).collect();
            TupleValue::get_const(context, &value_refs).into()
        }
    }
}
33.62069
94
0.618462
1aade3ba30e498cb2ebd50e77545d274d3615ccb
8,990
//! Rust FFI bindings for StarkWare's [crypto-cpp](https://github.com/starkware-libs/crypto-cpp) library. use num_bigint::BigInt; use num_integer::Integer; use num_traits::{One, Zero}; const CURVE_ORDER_LE: [u8; 32] = [ 47, 77, 198, 173, 65, 162, 102, 30, 50, 178, 231, 202, 109, 18, 129, 183, 255, 255, 255, 255, 255, 255, 255, 255, 16, 0, 0, 0, 0, 0, 0, 8, ]; extern "C" { fn Hash( in1: *const ::std::os::raw::c_char, in2: *const ::std::os::raw::c_char, out: *mut ::std::os::raw::c_char, ) -> ::std::os::raw::c_int; fn GetPublicKey( private_key: *const ::std::os::raw::c_char, out: *mut ::std::os::raw::c_char, ) -> ::std::os::raw::c_int; fn Verify( stark_key: *const ::std::os::raw::c_char, msg_hash: *const ::std::os::raw::c_char, r_bytes: *const ::std::os::raw::c_char, w_bytes: *const ::std::os::raw::c_char, ) -> ::std::os::raw::c_int; fn Sign( private_key: *const ::std::os::raw::c_char, message: *const ::std::os::raw::c_char, k: *const ::std::os::raw::c_char, out: *mut ::std::os::raw::c_char, ) -> ::std::os::raw::c_int; } /// Stark ECDSA signature #[derive(Debug, PartialEq, Eq, Clone)] pub struct Signature { /// The `r` value of a signature r: [u8; 32], /// The `r` value of a signature s: [u8; 32], } /// Computes the Starkware version of the Pedersen hash of x and y. All inputs are little-endian. /// /// ### Arguments /// /// * `in1`: The x coordinate in **little endian** format /// * `in2`: The y coordinate in **little endian** format pub fn hash(in1: &[u8; 32], in2: &[u8; 32]) -> Result<[u8; 32], i32> { let mut buffer = [0u8; 1024]; let res = unsafe { Hash( in1.as_ptr() as *const i8, in2.as_ptr() as *const i8, buffer.as_mut_ptr() as *mut i8, ) }; if res == 0 { let mut output = [0u8; 32]; output.copy_from_slice(&buffer[0..32]); Ok(output) } else { Err(res) } } /// Computes the public key given a Stark private key. 
/// /// ### Arguments /// /// * `private_key`: The private key in **little endian** format pub fn get_public_key(private_key: &[u8; 32]) -> Result<[u8; 32], i32> { let mut buffer = [0u8; 1024]; let res = unsafe { GetPublicKey( private_key.as_ptr() as *const i8, buffer.as_mut_ptr() as *mut i8, ) }; if res == 0 { let mut output = [0u8; 32]; output.copy_from_slice(&buffer[0..32]); Ok(output) } else { Err(res) } } /// Verifies if a signature is valid over a message hash given a Stark public key. /// /// ### Arguments /// /// * `stark_key`: The public key in **little endian** format /// * `msg_hash`: The message hash in **little endian** format /// * `r_bytes`: The `r` value of the signature in **little endian** format /// * `s_bytes`: The `s` value of the signature in **little endian** format pub fn verify( stark_key: &[u8; 32], msg_hash: &[u8; 32], r_bytes: &[u8; 32], s_bytes: &[u8; 32], ) -> bool { let w_bytes = invert_on_curve(s_bytes); let res = unsafe { Verify( stark_key.as_ptr() as *const i8, msg_hash.as_ptr() as *const i8, r_bytes.as_ptr() as *const i8, w_bytes.as_ptr() as *const i8, ) }; res != 0 } /// Computes ECDSA signature given a Stark private key and message hash. /// /// ### Arguments /// /// * `private_key`: The private key in **little endian** format /// * `message`: The message hash in **little endian** format /// * `k`: A random `k` value in **little endian** format. 
You **MUST NOT** use the same `k` on /// different signatures pub fn sign(private_key: &[u8; 32], message: &[u8; 32], k: &[u8; 32]) -> Result<Signature, i32> { let mut buffer = [0u8; 1024]; let res = unsafe { Sign( private_key.as_ptr() as *const i8, message.as_ptr() as *const i8, k.as_ptr() as *const i8, buffer.as_mut_ptr() as *mut i8, ) }; if res == 0 { let mut output_r = [0u8; 32]; let mut output_w = [0u8; 32]; output_r.copy_from_slice(&buffer[0..32]); output_w.copy_from_slice(&buffer[32..64]); let output_s = invert_on_curve(&output_w); Ok(Signature { r: output_r, s: output_s, }) } else { Err(res) } } fn invert_on_curve(num: &[u8; 32]) -> [u8; 32] { let num = BigInt::from_bytes_le(num_bigint::Sign::Plus, num); let curve_order = BigInt::from_bytes_le(num_bigint::Sign::Plus, &CURVE_ORDER_LE); // Ported from: // https://github.com/dignifiedquire/num-bigint/blob/56576b592fea6341b7e1711a1629e4cc1bfc419c/src/algorithms/mod_inverse.rs#L11 let extended_gcd = num.extended_gcd(&curve_order); if extended_gcd.gcd != BigInt::one() { panic!("GCD must be one"); } let mod_inverse = if extended_gcd.x < BigInt::zero() { extended_gcd.x + curve_order } else { extended_gcd.x }; let (_, buffer) = mod_inverse.to_bytes_le(); let mut result = [0u8; 32]; result[0..buffer.len()].copy_from_slice(&buffer[..]); result } #[cfg(test)] mod tests { use super::*; use hex_literal::hex; // Test cases ported from: // https://github.com/starkware-libs/crypto-cpp/blob/95864fbe11d5287e345432dbe1e80dea3c35fc58/src/starkware/crypto/ffi/crypto_lib_test.go #[test] fn test_hash() { let mut in1 = hex!("03d937c035c878245caf64531a5756109c53068da139362728feb561405371cb"); let mut in2 = hex!("0208a0a10250e382e1e4bbe2880906c2791bf6275695e02fbbc6aeff9cd8b31a"); let mut expected_hash = hex!("030e480bed5fe53fa909cc0f8c4d99b8f9f2c016be4c41e13a4848797979c662"); // Little endian in1.reverse(); in2.reverse(); expected_hash.reverse(); match hash(&in1, &in2) { Ok(output) => assert_eq!(output, expected_hash), Err(err_code) 
=> panic!("Hash() failed with error code: {}", err_code), }; } #[test] fn test_get_public_key() { let mut private_key = hex!("03c1e9550e66958296d11b60f8e8e7a7ad990d07fa65d5f7652c4a6c87d4e3cc"); let mut expected_key = hex!("077a3b314db07c45076d11f62b6f9e748a39790441823307743cf00d6597ea43"); // Little endian private_key.reverse(); expected_key.reverse(); match get_public_key(&private_key) { Ok(output) => assert_eq!(output, expected_key), Err(err_code) => panic!("GetPublicKey() failed with error code: {}", err_code), }; } #[test] fn test_verify_valid_message() { let mut stark_key = hex!("01ef15c18599971b7beced415a40f0c7deacfd9b0d1819e03d723d8bc943cfca"); let mut msg_hash = hex!("0000000000000000000000000000000000000000000000000000000000000002"); let mut r_bytes = hex!("0411494b501a98abd8262b0da1351e17899a0c4ef23dd2f96fec5ba847310b20"); let mut s_bytes = hex!("0405c3191ab3883ef2b763af35bc5f5d15b3b4e99461d70e84c654a351a7c81b"); // Little endian stark_key.reverse(); msg_hash.reverse(); r_bytes.reverse(); s_bytes.reverse(); assert_eq!(verify(&stark_key, &msg_hash, &r_bytes, &s_bytes), true); } #[test] fn test_verify_invalid_message() { let mut stark_key = hex!("077a4b314db07c45076d11f62b6f9e748a39790441823307743cf00d6597ea43"); let mut msg_hash = hex!("0397e76d1667c4454bfb83514e120583af836f8e32a516765497823eabe16a3f"); let mut r_bytes = hex!("0173fd03d8b008ee7432977ac27d1e9d1a1f6c98b1a2f05fa84a21c84c44e882"); let mut s_bytes = hex!("01f2c44a7798f55192f153b4c48ea5c1241fbb69e6132cc8a0da9c5b62a4286e"); // Little endian stark_key.reverse(); msg_hash.reverse(); r_bytes.reverse(); s_bytes.reverse(); assert_eq!(verify(&stark_key, &msg_hash, &r_bytes, &s_bytes), false); } #[test] fn test_sign() { let mut private_key = hex!("0000000000000000000000000000000000000000000000000000000000000001"); let mut message = hex!("0000000000000000000000000000000000000000000000000000000000000002"); let mut k = hex!("0000000000000000000000000000000000000000000000000000000000000003"); // Little 
endian private_key.reverse(); message.reverse(); k.reverse(); let signature = match sign(&private_key, &message, &k) { Ok(output) => output, Err(err_code) => panic!("Sign() failed with error code: {}", err_code), }; let public_key = get_public_key(&private_key).unwrap(); assert_eq!( verify(&public_key, &message, &signature.r, &signature.s), true ); } }
31.107266
143
0.600111
69d25d64edef720a849b18893d452d260889f7ae
20,595
//! Low-level filesystem operation request. //! //! A request represents information about a filesystem operation the kernel driver wants us to //! perform. use fuse_abi::*; use std::convert::TryFrom; use std::ffi::OsString; use std::{error, fmt, mem}; use super::argument::ArgumentIterator; /// Error that may occur while reading and parsing a request from the kernel driver. #[derive(Debug)] pub enum RequestError { /// Not enough data for parsing header (short read). ShortReadHeader(usize), /// Kernel requested an unknown operation. UnknownOperation(u32), /// Not enough data for arguments (short read). ShortRead(usize, usize), /// Insufficient argument data. InsufficientData, } impl fmt::Display for RequestError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { RequestError::ShortReadHeader(len) => write!(f, "Short read of FUSE request header ({} < {})", len, mem::size_of::<fuse_in_header>()), RequestError::UnknownOperation(opcode) => write!(f, "Unknown FUSE opcode ({})", opcode), RequestError::ShortRead(len, total) => write!(f, "Short read of FUSE request ({} < {})", len, total), RequestError::InsufficientData => write!(f, "Insufficient argument data"), } } } impl error::Error for RequestError {} /// Filesystem operation (and arguments) the kernel driver wants us to perform. The fields of each /// variant needs to match the actual arguments the kernel driver sends for the specific operation. 
#[derive(Debug)] pub enum Operation { Lookup { name: OsString, }, Forget { arg: fuse_forget_in, }, GetAttr, SetAttr { arg: fuse_setattr_in, }, ReadLink, SymLink { name: OsString, link: OsString, }, MkNod { arg: fuse_mknod_in, name: OsString, }, MkDir { arg: fuse_mkdir_in, name: OsString, }, Unlink { name: OsString, }, RmDir { name: OsString, }, Rename { arg: fuse_rename_in, name: OsString, newname: OsString, }, Link { arg: fuse_link_in, name: OsString, }, Open { arg: fuse_open_in, }, Read { arg: fuse_read_in, }, Write { arg: fuse_write_in, data: Vec<u8>, }, StatFs, Release { arg: fuse_release_in, }, FSync { arg: fuse_fsync_in, }, SetXAttr { arg: fuse_setxattr_in, name: OsString, value: Vec<u8>, }, GetXAttr { arg: fuse_getxattr_in, name: OsString, }, ListXAttr { arg: fuse_getxattr_in, }, RemoveXAttr { name: OsString, }, Flush { arg: fuse_flush_in, }, Init { arg: fuse_init_in, }, OpenDir { arg: fuse_open_in, }, ReadDir { arg: fuse_read_in, }, ReleaseDir { arg: fuse_release_in, }, FSyncDir { arg: fuse_fsync_in, }, GetLk { arg: fuse_lk_in, }, SetLk { arg: fuse_lk_in, }, SetLkW { arg: fuse_lk_in, }, Access { arg: fuse_access_in, }, Create { arg: fuse_create_in, name: OsString, }, Interrupt { arg: fuse_interrupt_in, }, BMap { arg: fuse_bmap_in, }, Destroy, // TODO: FUSE_IOCTL since ABI 7.11 // IoCtl { // arg: fuse_ioctl_in, // data: Vec<u8>, // }, // TODO: FUSE_POLL since ABI 7.11 // Poll { // arg: fuse_poll_in, // }, // TODO: FUSE_NOTIFY_REPLY since ABI 7.15 // NotifyReply { // data: Vec<u8>, // }, // TODO: FUSE_BATCH_FORGET since ABI 7.16 // BatchForget { // arg: fuse_forget_in, // nodes: Vec<fuse_forget_one>, // }, // TODO: FUSE_FALLOCATE since ABI 7.19 // FAllocate { // arg: fuse_fallocate_in, // }, #[cfg(target_os = "macos")] SetVolName { name: OsString, }, #[cfg(target_os = "macos")] GetXTimes, #[cfg(target_os = "macos")] Exchange { arg: fuse_exchange_in, oldname: OsString, newname: OsString, }, // TODO: CUSE_INIT since ABI 7.12 // CuseInit { // arg: fuse_init_in, 
// }, } impl<'a> fmt::Display for Operation { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Operation::Lookup { name } => write!(f, "LOOKUP name {:?}", name), Operation::Forget { arg } => write!(f, "FORGET nlookup {}", arg.nlookup), Operation::GetAttr => write!(f, "GETATTR"), Operation::SetAttr { arg } => write!(f, "SETATTR valid {:#x}", arg.valid), Operation::ReadLink => write!(f, "READLINK"), Operation::SymLink { name, link } => write!(f, "SYMLINK name {:?}, link {:?}", name, link), Operation::MkNod { arg, name } => write!(f, "MKNOD name {:?}, mode {:#05o}, rdev {}", name, arg.mode, arg.rdev), Operation::MkDir { arg, name } => write!(f, "MKDIR name {:?}, mode {:#05o}", name, arg.mode), Operation::Unlink { name } => write!(f, "UNLINK name {:?}", name), Operation::RmDir { name } => write!(f, "RMDIR name {:?}", name), Operation::Rename { arg, name, newname } => write!(f, "RENAME name {:?}, newdir {:#018x}, newname {:?}", name, arg.newdir, newname), Operation::Link { arg, name } => write!(f, "LINK name {:?}, oldnodeid {:#018x}", name, arg.oldnodeid), Operation::Open { arg } => write!(f, "OPEN flags {:#x}", arg.flags), Operation::Read { arg } => write!(f, "READ fh {}, offset {}, size {}", arg.fh, arg.offset, arg.size), Operation::Write { arg, .. } => write!(f, "WRITE fh {}, offset {}, size {}, write flags {:#x}", arg.fh, arg.offset, arg.size, arg.write_flags), Operation::StatFs => write!(f, "STATFS"), Operation::Release { arg } => write!(f, "RELEASE fh {}, flags {:#x}, release flags {:#x}, lock owner {}", arg.fh, arg.flags, arg.release_flags, arg.lock_owner), Operation::FSync { arg } => write!(f, "FSYNC fh {}, fsync flags {:#x}", arg.fh, arg.fsync_flags), Operation::SetXAttr { arg, name, .. 
} => write!(f, "SETXATTR name {:?}, size {}, flags {:#x}", name, arg.size, arg.flags), Operation::GetXAttr { arg, name } => write!(f, "GETXATTR name {:?}, size {}", name, arg.size), Operation::ListXAttr { arg } => write!(f, "LISTXATTR size {}", arg.size), Operation::RemoveXAttr { name } => write!(f, "REMOVEXATTR name {:?}", name), Operation::Flush { arg } => write!(f, "FLUSH fh {}, lock owner {}", arg.fh, arg.lock_owner), Operation::Init { arg } => write!(f, "INIT kernel ABI {}.{}, flags {:#x}, max readahead {}", arg.major, arg.minor, arg.flags, arg.max_readahead), Operation::OpenDir { arg } => write!(f, "OPENDIR flags {:#x}", arg.flags), Operation::ReadDir { arg } => write!(f, "READDIR fh {}, offset {}, size {}", arg.fh, arg.offset, arg.size), Operation::ReleaseDir { arg } => write!(f, "RELEASEDIR fh {}, flags {:#x}, release flags {:#x}, lock owner {}", arg.fh, arg.flags, arg.release_flags, arg.lock_owner), Operation::FSyncDir { arg } => write!(f, "FSYNCDIR fh {}, fsync flags {:#x}", arg.fh, arg.fsync_flags), Operation::GetLk { arg } => write!(f, "GETLK fh {}, lock owner {}", arg.fh, arg.owner), Operation::SetLk { arg } => write!(f, "SETLK fh {}, lock owner {}", arg.fh, arg.owner), Operation::SetLkW { arg } => write!(f, "SETLKW fh {}, lock owner {}", arg.fh, arg.owner), Operation::Access { arg } => write!(f, "ACCESS mask {:#05o}", arg.mask), Operation::Create { arg, name } => write!(f, "CREATE name {:?}, mode {:#05o}, flags {:#x}", name, arg.mode, arg.flags), Operation::Interrupt { arg } => write!(f, "INTERRUPT unique {}", arg.unique), Operation::BMap { arg } => write!(f, "BMAP blocksize {}, ids {}", arg.blocksize, arg.block), Operation::Destroy => write!(f, "DESTROY"), #[cfg(target_os = "macos")] Operation::SetVolName { name } => write!(f, "SETVOLNAME name {:?}", name), #[cfg(target_os = "macos")] Operation::GetXTimes => write!(f, "GETXTIMES"), #[cfg(target_os = "macos")] Operation::Exchange { arg, oldname, newname } => write!(f, "EXCHANGE olddir {:#018x}, 
oldname {:?}, newdir {:#018x}, newname {:?}, options {:#x}", arg.olddir, oldname, arg.newdir, newname, arg.options), } } } impl Operation { fn parse(opcode: &fuse_opcode, data: &mut ArgumentIterator<'_>) -> Option<Self> { unsafe { Some(match opcode { fuse_opcode::FUSE_LOOKUP => Operation::Lookup { name: data.fetch_str()?.into(), }, fuse_opcode::FUSE_FORGET => Operation::Forget { arg: *data.fetch()? }, fuse_opcode::FUSE_GETATTR => Operation::GetAttr, fuse_opcode::FUSE_SETATTR => Operation::SetAttr { arg: *data.fetch()? }, fuse_opcode::FUSE_READLINK => Operation::ReadLink, fuse_opcode::FUSE_SYMLINK => Operation::SymLink { name: data.fetch_str()?.into(), link: data.fetch_str()?.into(), }, fuse_opcode::FUSE_MKNOD => Operation::MkNod { arg: *data.fetch()?, name: data.fetch_str()?.into(), }, fuse_opcode::FUSE_MKDIR => Operation::MkDir { arg: *data.fetch()?, name: data.fetch_str()?.into(), }, fuse_opcode::FUSE_UNLINK => Operation::Unlink { name: data.fetch_str()?.into(), }, fuse_opcode::FUSE_RMDIR => Operation::RmDir { name: data.fetch_str()?.into(), }, fuse_opcode::FUSE_RENAME => Operation::Rename { arg: *data.fetch()?, name: data.fetch_str()?.into(), newname: data.fetch_str()?.into(), }, fuse_opcode::FUSE_LINK => Operation::Link { arg: *data.fetch()?, name: data.fetch_str()?.into(), }, fuse_opcode::FUSE_OPEN => Operation::Open { arg: *data.fetch()? }, fuse_opcode::FUSE_READ => Operation::Read { arg: *data.fetch()? }, fuse_opcode::FUSE_WRITE => Operation::Write { arg: *data.fetch()?, data: data.fetch_all().to_vec(), }, fuse_opcode::FUSE_STATFS => Operation::StatFs, fuse_opcode::FUSE_RELEASE => Operation::Release { arg: *data.fetch()? }, fuse_opcode::FUSE_FSYNC => Operation::FSync { arg: *data.fetch()? 
}, fuse_opcode::FUSE_SETXATTR => Operation::SetXAttr { arg: *data.fetch()?, name: data.fetch_str()?.into(), value: data.fetch_all().to_vec(), }, fuse_opcode::FUSE_GETXATTR => Operation::GetXAttr { arg: *data.fetch()?, name: data.fetch_str()?.into(), }, fuse_opcode::FUSE_LISTXATTR => Operation::ListXAttr { arg: *data.fetch()? }, fuse_opcode::FUSE_REMOVEXATTR => Operation::RemoveXAttr { name: data.fetch_str()?.into(), }, fuse_opcode::FUSE_FLUSH => Operation::Flush { arg: *data.fetch()? }, fuse_opcode::FUSE_INIT => Operation::Init { arg: *data.fetch()? }, fuse_opcode::FUSE_OPENDIR => Operation::OpenDir { arg: *data.fetch()? }, fuse_opcode::FUSE_READDIR => Operation::ReadDir { arg: *data.fetch()? }, fuse_opcode::FUSE_RELEASEDIR => Operation::ReleaseDir { arg: *data.fetch()? }, fuse_opcode::FUSE_FSYNCDIR => Operation::FSyncDir { arg: *data.fetch()? }, fuse_opcode::FUSE_GETLK => Operation::GetLk { arg: *data.fetch()? }, fuse_opcode::FUSE_SETLK => Operation::SetLk { arg: *data.fetch()? }, fuse_opcode::FUSE_SETLKW => Operation::SetLkW { arg: *data.fetch()? }, fuse_opcode::FUSE_ACCESS => Operation::Access { arg: *data.fetch()? }, fuse_opcode::FUSE_CREATE => Operation::Create { arg: *data.fetch()?, name: data.fetch_str()?.into(), }, fuse_opcode::FUSE_INTERRUPT => Operation::Interrupt { arg: *data.fetch()? }, fuse_opcode::FUSE_BMAP => Operation::BMap { arg: *data.fetch()? }, fuse_opcode::FUSE_DESTROY => Operation::Destroy, #[cfg(target_os = "macos")] fuse_opcode::FUSE_SETVOLNAME => Operation::SetVolName { name: data.fetch_str()?.into(), }, #[cfg(target_os = "macos")] fuse_opcode::FUSE_GETXTIMES => Operation::GetXTimes, #[cfg(target_os = "macos")] fuse_opcode::FUSE_EXCHANGE => Operation::Exchange { arg: *data.fetch()?, oldname: data.fetch_str()?.into(), newname: data.fetch_str()?.into(), }, }) } } } /// Low-level request of a filesystem operation the kernel driver wants to perform. 
#[derive(Debug)] pub struct Request { header: fuse_in_header, operation: Operation, } impl fmt::Display for Request { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "FUSE({:3}) ino {:#018x}: {}", self.header.unique, self.header.nodeid, self.operation) } } impl TryFrom<&[u8]> for Request { type Error = RequestError; fn try_from(data: &[u8]) -> Result<Self, Self::Error> { // Parse a raw packet as sent by the kernel driver into typed data. Every request always // begins with a `fuse_in_header` struct followed by arguments depending on the opcode. let data_len = data.len(); let mut data = ArgumentIterator::new(data); // Parse header let header: &fuse_in_header = unsafe { data.fetch() }.ok_or_else(|| RequestError::ShortReadHeader(data.len()))?; // Parse/check opcode let opcode = fuse_opcode::try_from(header.opcode) .map_err(|_: InvalidOpcodeError| RequestError::UnknownOperation(header.opcode))?; // Check data size if data_len < header.len as usize { return Err(RequestError::ShortRead(data_len, header.len as usize)); } // Parse/check operation arguments let operation = Operation::parse(&opcode, &mut data).ok_or_else(|| RequestError::InsufficientData)?; let header = *header; Ok(Self { header, operation }) } } impl Request { /// Returns the unique identifier of this request. /// /// The FUSE kernel driver assigns a unique id to every concurrent request. This allows to /// distinguish between multiple concurrent requests. The unique id of a request may be /// reused in later requests after it has completed. #[inline] pub fn unique(&self) -> u64 { self.header.unique } /// Returns the node id of the inode this request is targeted to. #[inline] pub fn nodeid(&self) -> u64 { self.header.nodeid } /// Returns the UID that the process that triggered this request runs under. #[inline] pub fn uid(&self) -> u32 { self.header.uid } /// Returns the GID that the process that triggered this request runs under. 
#[inline] pub fn gid(&self) -> u32 { self.header.gid } /// Returns the PID of the process that triggered this request. #[inline] pub fn pid(&self) -> u32 { self.header.pid } /// Returns the filesystem operation (and its arguments) of this request. #[inline] pub fn operation(&self) -> &Operation { &self.operation } } #[cfg(test)] mod tests { use super::*; #[cfg(target_endian = "big")] const INIT_REQUEST: [u8; 56] = [ 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0x1a, // len, opcode 0xde, 0xad, 0xbe, 0xef, 0xba, 0xad, 0xd0, 0x0d, // unique 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, // nodeid 0xc0, 0x01, 0xd0, 0x0d, 0xc0, 0x01, 0xca, 0xfe, // uid, gid 0xc0, 0xde, 0xba, 0x5e, 0x00, 0x00, 0x00, 0x00, // pid, padding 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x08, // major, minor 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, // max_readahead, flags ]; #[cfg(target_endian = "little")] const INIT_REQUEST: [u8; 56] = [ 0x38, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x00, 0x00, // len, opcode 0x0d, 0xf0, 0xad, 0xba, 0xef, 0xbe, 0xad, 0xde, // unique 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11, // nodeid 0x0d, 0xd0, 0x01, 0xc0, 0xfe, 0xca, 0x01, 0xc0, // uid, gid 0x5e, 0xba, 0xde, 0xc0, 0x00, 0x00, 0x00, 0x00, // pid, padding 0x07, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, // major, minor 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // max_readahead, flags ]; #[cfg(target_endian = "big")] const MKNOD_REQUEST: [u8; 56] = [ 0x00, 0x00, 0x00, 0x38, 0x00, 0x00, 0x00, 0x08, // len, opcode 0xde, 0xad, 0xbe, 0xef, 0xba, 0xad, 0xd0, 0x0d, // unique 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, // nodeid 0xc0, 0x01, 0xd0, 0x0d, 0xc0, 0x01, 0xca, 0xfe, // uid, gid 0xc0, 0xde, 0xba, 0x5e, 0x00, 0x00, 0x00, 0x00, // pid, padding 0x00, 0x00, 0x01, 0xa4, 0x00, 0x00, 0x00, 0x00, // mode, rdev 0x66, 0x6f, 0x6f, 0x2e, 0x74, 0x78, 0x74, 0x00, // name ]; #[cfg(target_endian = "little")] const MKNOD_REQUEST: [u8; 56] = [ 0x38, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, // len, opcode 0x0d, 0xf0, 
0xad, 0xba, 0xef, 0xbe, 0xad, 0xde, // unique 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11, // nodeid 0x0d, 0xd0, 0x01, 0xc0, 0xfe, 0xca, 0x01, 0xc0, // uid, gid 0x5e, 0xba, 0xde, 0xc0, 0x00, 0x00, 0x00, 0x00, // pid, padding 0xa4, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // mode, rdev 0x66, 0x6f, 0x6f, 0x2e, 0x74, 0x78, 0x74, 0x00, // name ]; #[test] fn short_read_header() { match Request::try_from(&INIT_REQUEST[..20]) { Err(RequestError::ShortReadHeader(20)) => (), _ => panic!("Unexpected request parsing result"), } } #[test] fn short_read() { match Request::try_from(&INIT_REQUEST[..48]) { Err(RequestError::ShortRead(48, 56)) => (), _ => panic!("Unexpected request parsing result"), } } #[test] fn init() { let req = Request::try_from(&INIT_REQUEST[..]).unwrap(); assert_eq!(req.header.len, 56); assert_eq!(req.header.opcode, 26); assert_eq!(req.unique(), 0xdead_beef_baad_f00d); assert_eq!(req.nodeid(), 0x1122_3344_5566_7788); assert_eq!(req.uid(), 0xc001_d00d); assert_eq!(req.gid(), 0xc001_cafe); assert_eq!(req.pid(), 0xc0de_ba5e); match req.operation() { Operation::Init { arg } => { assert_eq!(arg.major, 7); assert_eq!(arg.minor, 8); assert_eq!(arg.max_readahead, 4096); } _ => panic!("Unexpected request operation"), } } #[test] fn mknod() { let req = Request::try_from(&MKNOD_REQUEST[..]).unwrap(); assert_eq!(req.header.len, 56); assert_eq!(req.header.opcode, 8); assert_eq!(req.unique(), 0xdead_beef_baad_f00d); assert_eq!(req.nodeid(), 0x1122_3344_5566_7788); assert_eq!(req.uid(), 0xc001_d00d); assert_eq!(req.gid(), 0xc001_cafe); assert_eq!(req.pid(), 0xc0de_ba5e); match req.operation() { Operation::MkNod { arg, name } => { assert_eq!(arg.mode, 0o644); assert_eq!(*name, "foo.txt"); } _ => panic!("Unexpected request operation"), } } }
38.858491
214
0.542025
7a9b16a0db551ab598e36caa4c1979a806e4e52d
172
use super::*; pub(super) fn transpile_function_argument_node(node: &FunctionArgumentNode, c: &mut Context) { transpile_ident_node(&node.ident, c); // TODO: add type }
24.571429
94
0.732558
08d50df0b9f1ecaa109bed81f9f0bb32a90eb3fd
13,325
// Copyright (c) 2016 The vulkano developers // Licensed under the Apache License, Version 2.0 // <LICENSE-APACHE or // https://www.apache.org/licenses/LICENSE-2.0> or the MIT // license <LICENSE-MIT or https://opensource.org/licenses/MIT>, // at your option. All files in the project carrying such // notice may not be copied, modified, or distributed except // according to those terms. // An immutable sampler is a sampler that is integrated into the descriptor set layout // (and thus pipeline layout), instead of being written to an individual descriptor set. // Consequently, all descriptor sets with this layout will share the same sampler. // // This example is almost identical to the image example, but with two differences, which have // been commented: // - The sampler is added to the descriptor set layout at pipeline creation. // - No sampler is included when building a descriptor set. use png; use std::io::Cursor; use std::sync::Arc; use vulkano::buffer::{BufferUsage, CpuAccessibleBuffer, TypedBufferAccess}; use vulkano::command_buffer::{AutoCommandBufferBuilder, CommandBufferUsage, SubpassContents}; use vulkano::descriptor_set::PersistentDescriptorSet; use vulkano::device::physical::{PhysicalDevice, PhysicalDeviceType}; use vulkano::device::{Device, DeviceExtensions, Features}; use vulkano::format::Format; use vulkano::image::{ view::ImageView, ImageDimensions, ImageUsage, ImmutableImage, MipmapsCount, SwapchainImage, }; use vulkano::instance::Instance; use vulkano::pipeline::viewport::Viewport; use vulkano::pipeline::{GraphicsPipeline, PipelineBindPoint}; use vulkano::render_pass::{Framebuffer, FramebufferAbstract, RenderPass, Subpass}; use vulkano::sampler::{Filter, MipmapMode, Sampler, SamplerAddressMode}; use vulkano::swapchain; use vulkano::swapchain::{AcquireError, Swapchain, SwapchainCreationError}; use vulkano::sync; use vulkano::sync::{FlushError, GpuFuture}; use vulkano::Version; use vulkano_win::VkSurfaceBuild; use winit::event::{Event, 
WindowEvent}; use winit::event_loop::{ControlFlow, EventLoop}; use winit::window::{Window, WindowBuilder}; fn main() { let required_extensions = vulkano_win::required_extensions(); let instance = Instance::new(None, Version::V1_1, &required_extensions, None).unwrap(); let event_loop = EventLoop::new(); let surface = WindowBuilder::new() .build_vk_surface(&event_loop, instance.clone()) .unwrap(); let device_extensions = DeviceExtensions { khr_swapchain: true, ..DeviceExtensions::none() }; let (physical_device, queue_family) = PhysicalDevice::enumerate(&instance) .filter(|&p| p.supported_extensions().is_superset_of(&device_extensions)) .filter_map(|p| { p.queue_families() .find(|&q| q.supports_graphics() && surface.is_supported(q).unwrap_or(false)) .map(|q| (p, q)) }) .min_by_key(|(p, _)| match p.properties().device_type { PhysicalDeviceType::DiscreteGpu => 0, PhysicalDeviceType::IntegratedGpu => 1, PhysicalDeviceType::VirtualGpu => 2, PhysicalDeviceType::Cpu => 3, PhysicalDeviceType::Other => 4, }) .unwrap(); println!( "Using device: {} (type: {:?})", physical_device.properties().device_name, physical_device.properties().device_type, ); let (device, mut queues) = Device::new( physical_device, &Features::none(), &physical_device .required_extensions() .union(&device_extensions), [(queue_family, 0.5)].iter().cloned(), ) .unwrap(); let queue = queues.next().unwrap(); let (mut swapchain, images) = { let caps = surface.capabilities(physical_device).unwrap(); let composite_alpha = caps.supported_composite_alpha.iter().next().unwrap(); let format = caps.supported_formats[0].0; let dimensions: [u32; 2] = surface.window().inner_size().into(); Swapchain::start(device.clone(), surface.clone()) .num_images(caps.min_image_count) .format(format) .dimensions(dimensions) .usage(ImageUsage::color_attachment()) .sharing_mode(&queue) .composite_alpha(composite_alpha) .build() .unwrap() }; #[derive(Default, Debug, Clone)] struct Vertex { position: [f32; 2], } 
vulkano::impl_vertex!(Vertex, position); let vertex_buffer = CpuAccessibleBuffer::<[Vertex]>::from_iter( device.clone(), BufferUsage::all(), false, [ Vertex { position: [-0.5, -0.5], }, Vertex { position: [-0.5, 0.5], }, Vertex { position: [0.5, -0.5], }, Vertex { position: [0.5, 0.5], }, ] .iter() .cloned(), ) .unwrap(); let vs = vs::Shader::load(device.clone()).unwrap(); let fs = fs::Shader::load(device.clone()).unwrap(); let render_pass = Arc::new( vulkano::single_pass_renderpass!(device.clone(), attachments: { color: { load: Clear, store: Store, format: swapchain.format(), samples: 1, } }, pass: { color: [color], depth_stencil: {} } ) .unwrap(), ); let (texture, tex_future) = { let png_bytes = include_bytes!("image_img.png").to_vec(); let cursor = Cursor::new(png_bytes); let decoder = png::Decoder::new(cursor); let mut reader = decoder.read_info().unwrap(); let info = reader.info(); let dimensions = ImageDimensions::Dim2d { width: info.width, height: info.height, array_layers: 1, }; let mut image_data = Vec::new(); image_data.resize((info.width * info.height * 4) as usize, 0); reader.next_frame(&mut image_data).unwrap(); let (image, future) = ImmutableImage::from_iter( image_data.iter().cloned(), dimensions, MipmapsCount::One, Format::R8G8B8A8_SRGB, queue.clone(), ) .unwrap(); (ImageView::new(image).unwrap(), future) }; let sampler = Sampler::new( device.clone(), Filter::Linear, Filter::Linear, MipmapMode::Nearest, SamplerAddressMode::Repeat, SamplerAddressMode::Repeat, SamplerAddressMode::Repeat, 0.0, 1.0, 0.0, 0.0, ) .unwrap(); let pipeline = Arc::new( GraphicsPipeline::start() .vertex_input_single_buffer::<Vertex>() .vertex_shader(vs.main_entry_point(), ()) .triangle_strip() .viewports_dynamic_scissors_irrelevant(1) .fragment_shader(fs.main_entry_point(), ()) .blend_alpha_blending() .render_pass(Subpass::from(render_pass.clone(), 0).unwrap()) .with_auto_layout(device.clone(), |set_descs| { // Modify the auto-generated layout by setting an immutable sampler 
to // set 0 binding 0. set_descs[0].set_immutable_samplers(0, [sampler]); }) .unwrap(), ); let layout = pipeline.layout().descriptor_set_layouts().get(0).unwrap(); let mut set_builder = PersistentDescriptorSet::start(layout.clone()); // Use `add_image` instead of `add_sampled_image`, since the sampler is already in the layout. set_builder.add_image(texture.clone()).unwrap(); let set = Arc::new(set_builder.build().unwrap()); let mut viewport = Viewport { origin: [0.0, 0.0], dimensions: [0.0, 0.0], depth_range: 0.0..1.0, }; let mut framebuffers = window_size_dependent_setup(&images, render_pass.clone(), &mut viewport); let mut recreate_swapchain = false; let mut previous_frame_end = Some(tex_future.boxed()); event_loop.run(move |event, _, control_flow| match event { Event::WindowEvent { event: WindowEvent::CloseRequested, .. } => { *control_flow = ControlFlow::Exit; } Event::WindowEvent { event: WindowEvent::Resized(_), .. } => { recreate_swapchain = true; } Event::RedrawEventsCleared => { previous_frame_end.as_mut().unwrap().cleanup_finished(); if recreate_swapchain { let dimensions: [u32; 2] = surface.window().inner_size().into(); let (new_swapchain, new_images) = match swapchain.recreate().dimensions(dimensions).build() { Ok(r) => r, Err(SwapchainCreationError::UnsupportedDimensions) => return, Err(e) => panic!("Failed to recreate swapchain: {:?}", e), }; swapchain = new_swapchain; framebuffers = window_size_dependent_setup(&new_images, render_pass.clone(), &mut viewport); recreate_swapchain = false; } let (image_num, suboptimal, acquire_future) = match swapchain::acquire_next_image(swapchain.clone(), None) { Ok(r) => r, Err(AcquireError::OutOfDate) => { recreate_swapchain = true; return; } Err(e) => panic!("Failed to acquire next image: {:?}", e), }; if suboptimal { recreate_swapchain = true; } let clear_values = vec![[0.0, 0.0, 1.0, 1.0].into()]; let mut builder = AutoCommandBufferBuilder::primary( device.clone(), queue.family(), 
CommandBufferUsage::OneTimeSubmit, ) .unwrap(); builder .begin_render_pass( framebuffers[image_num].clone(), SubpassContents::Inline, clear_values, ) .unwrap() .set_viewport(0, [viewport.clone()]) .bind_pipeline_graphics(pipeline.clone()) .bind_descriptor_sets( PipelineBindPoint::Graphics, pipeline.layout().clone(), 0, set.clone(), ) .bind_vertex_buffers(0, vertex_buffer.clone()) .draw(vertex_buffer.len() as u32, 1, 0, 0) .unwrap() .end_render_pass() .unwrap(); let command_buffer = builder.build().unwrap(); let future = previous_frame_end .take() .unwrap() .join(acquire_future) .then_execute(queue.clone(), command_buffer) .unwrap() .then_swapchain_present(queue.clone(), swapchain.clone(), image_num) .then_signal_fence_and_flush(); match future { Ok(future) => { previous_frame_end = Some(future.boxed()); } Err(FlushError::OutOfDate) => { recreate_swapchain = true; previous_frame_end = Some(sync::now(device.clone()).boxed()); } Err(e) => { println!("Failed to flush future: {:?}", e); previous_frame_end = Some(sync::now(device.clone()).boxed()); } } } _ => (), }); } /// This method is called once during initialization, then again whenever the window is resized fn window_size_dependent_setup( images: &[Arc<SwapchainImage<Window>>], render_pass: Arc<RenderPass>, viewport: &mut Viewport, ) -> Vec<Arc<dyn FramebufferAbstract>> { let dimensions = images[0].dimensions(); viewport.dimensions = [dimensions[0] as f32, dimensions[1] as f32]; images .iter() .map(|image| { let view = ImageView::new(image.clone()).unwrap(); Arc::new( Framebuffer::start(render_pass.clone()) .add(view) .unwrap() .build() .unwrap(), ) as Arc<dyn FramebufferAbstract> }) .collect::<Vec<_>>() } mod vs { vulkano_shaders::shader! { ty: "vertex", src: " #version 450 layout(location = 0) in vec2 position; layout(location = 0) out vec2 tex_coords; void main() { gl_Position = vec4(position, 0.0, 1.0); tex_coords = position + vec2(0.5); }" } } mod fs { vulkano_shaders::shader! 
{ ty: "fragment", src: " #version 450 layout(location = 0) in vec2 tex_coords; layout(location = 0) out vec4 f_color; layout(set = 0, binding = 0) uniform sampler2D tex; void main() { f_color = texture(tex, tex_coords); }" } }
33.992347
100
0.558349
fe43c17bf7ab1ca99e04928d6375e27bf69286e9
2,985
use std::any::{type_name, Any}; use egui::{CollapsingHeader, CtxRef, Ui}; use sabi_resources::{Resource, ResourceId, ResourceTrait, SharedData, SharedDataRc}; use sabi_serialize::generate_random_uid; use crate::{UIProperties, UIPropertiesRegistry}; pub type UIWidgetId = ResourceId; pub trait UIWidgetData: Send + Sync + Any + 'static { fn as_any(&self) -> &dyn Any; fn as_any_mut(&mut self) -> &mut dyn Any; } #[macro_export] macro_rules! implement_widget_data { ($Type:ident) => { unsafe impl Sync for $Type {} unsafe impl Send for $Type {} impl $crate::UIWidgetData for $Type { #[inline] fn as_any(&self) -> &dyn std::any::Any { self } #[inline] fn as_any_mut(&mut self) -> &mut dyn std::any::Any { self } } }; } pub struct UIWidget { type_name: String, data: Box<dyn UIWidgetData>, func: Box<dyn FnMut(&mut dyn UIWidgetData, &CtxRef)>, } impl ResourceTrait for UIWidget { fn on_resource_swap(&mut self, _new: &Self) where Self: Sized, { //println!("UIWidget resource swapped {:?}", self.type_name); } } unsafe impl Send for UIWidget {} unsafe impl Sync for UIWidget {} impl UIProperties for UIWidget { fn show( &mut self, id: &ResourceId, _ui_registry: &UIPropertiesRegistry, ui: &mut Ui, collapsed: bool, ) { CollapsingHeader::new(format!( "UIWidget_{:?} [{:?}]", self.type_name, id.as_simple().to_string() )) .show_background(true) .default_open(!collapsed) .show(ui, |ui| { let widget_name = type_name::<Self>() .split(':') .collect::<Vec<&str>>() .last() .unwrap() .to_string(); ui.label(widget_name); }); } } impl UIWidget { pub fn register<D, F>(shared_data: &SharedDataRc, data: D, f: F) -> Resource<Self> where D: UIWidgetData + Sized, F: FnMut(&mut dyn UIWidgetData, &CtxRef) + 'static, { let ui_page = Self { type_name: type_name::<D>().to_string(), data: Box::new(data), func: Box::new(f), }; SharedData::add_resource::<UIWidget>(shared_data, generate_random_uid(), ui_page) } pub fn data<D>(&self) -> Option<&D> where D: UIWidgetData + Sized, { self.data.as_any().downcast_ref::<D>() 
} pub fn data_mut<D>(&mut self) -> Option<&mut D> where D: UIWidgetData + Sized + 'static, { self.data.as_any_mut().downcast_mut::<D>() } pub fn execute(&mut self, ui_context: &CtxRef) { sabi_profiler::scoped_profile!( format!("{} {:?}", "ui_widget::execute", self.type_name).as_str() ); (self.func)(self.data.as_mut(), ui_context); } }
26.415929
89
0.546064
62709d886ce80e65b9bfb6e02b1afe03d45a3df3
39,420
//! Simplified objects for codegen. //! //! This contains the necessary objects for generating actual //! API objects, their builders, impls, etc. pub use super::impls::{ApiObjectBuilderImpl, ApiObjectImpl}; use super::{ emitter::{ANY_GENERIC_PARAMETER, EXTRA_PROPS_FIELD, FILE_MARKER}, RUST_KEYWORDS, }; use crate::v2::models::{Coder, CollectionFormat, HttpMethod, ParameterIn}; use heck::{ToPascalCase, ToSnakeCase}; use once_cell::sync::Lazy; use regex::{Captures, Regex}; use std::{ collections::{BTreeMap, HashSet}, fmt::{self, Display, Write}, sync::Arc, }; /// Regex for appropriate escaping in docs. static DOC_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"\[|\]").expect("invalid doc regex?")); /// Regex for renaming properties with leading @ static AT_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"^@").expect("invalid at regex?")); /// Represents a (simplified) Rust struct or enum. #[derive(Default, Debug, Clone)] pub struct ApiObject { /// Name of the struct (camel-cased). pub name: String, /// Description for this object (if any), to be used for docs. pub description: Option<String>, /// Path to this object from (generated) root module. pub path: String, /// Fields/variants based on whether this is a struct/enum. pub inner: ObjectContainer, /// Paths with operations which address this object. pub paths: BTreeMap<String, PathOps>, } impl ApiObject { /// Get a mutable reference to the struct fields. **Panics** if this /// is not a struct. pub fn fields_mut(&mut self) -> &mut Vec<ObjectField> { match &mut self.inner { ObjectContainer::Struct { fields } => fields, _ => panic!("cannot obtain fields for enum type"), } } /// Get a reference to the struct fields. **Panics** if this /// is not a struct. pub fn fields(&self) -> &[ObjectField] { match &self.inner { ObjectContainer::Struct { fields } => fields, _ => panic!("cannot obtain fields for enum type"), } } /// Get a mutable reference to the enum variants. **Panics** if this /// is not an enum. 
pub fn variants_mut(&mut self) -> &mut Vec<ObjectVariant> { match &mut self.inner { ObjectContainer::Enum { variants, .. } => variants, _ => panic!("cannot obtain fields for enum type"), } } /// Get a reference to the enum variants. **Panics** if this /// is not an enum. pub fn variants(&self) -> &[ObjectVariant] { match &self.inner { ObjectContainer::Enum { variants, .. } => variants, _ => panic!("cannot obtain fields for enum type"), } } } /// Container for the struct/enum containing fields/variants. #[derive(Debug, Clone)] pub enum ObjectContainer { Enum { /// Variants if it's an enum. /// /// **NOTE:** Currently, we only support simple enums. variants: Vec<ObjectVariant>, /// Flag to represent whether this enum is a string. /// When that's the case, we leave de/serialization to /// serde completely. is_string: bool, }, Struct { /// Fields if it's a struct. fields: Vec<ObjectField>, }, } impl ObjectContainer { /// Returns whether this object is an enum. pub fn is_enum(&self) -> bool { matches!(self, ObjectContainer::Enum { .. }) } /// Returns whether this enum is to represent a string. pub fn is_string_enum(&self) -> bool { match self { ObjectContainer::Enum { is_string, .. } => *is_string, _ => false, } } } /// Operations in a path. #[derive(Default, Debug, Clone)] pub struct PathOps { /// Operations for this object and their associated requirements. pub req: BTreeMap<HttpMethod, OpRequirement>, /// Parameters required for all operations in this path. pub params: Vec<Parameter>, } /// Requirement for an object corresponding to some operation. #[derive(Debug, Clone)] pub struct OpRequirement { /// Operation ID (if it's provided in the schema). /// /// If there are multiple operations for the same path, then we /// attempt to use this. pub id: Option<String>, /// Description of this operation (if any), to be used for docs. pub description: Option<String>, /// Whether the operation is deprecated or not. 
pub deprecated: bool, /// Parameters required for this operation. pub params: Vec<Parameter>, /// Whether the object itself is required (in body) for this operation. pub body_required: bool, /// Whether this operation returns a list of the associated `ApiObject`. pub listable: bool, /// Response information for this operation. pub response: Response<String, Vec<Parameter>>, /// Preferred media range and encoder for the client. This is ignored for /// methods that don't accept a body. If there's no coder, then JSON /// encoding is assumed. pub encoding: Option<(String, Arc<Coder>)>, /// Preferred media range and decoder for the client. This is used only /// when objects make use of `Any` type. If there's no coder, then JSON /// encoding is assumed. pub decoding: Option<(String, Arc<Coder>)>, } #[derive(Default, Debug, Clone)] pub struct Response<S, H> { /// Type path for this operation's response (if any). If this is empty, /// then we go for `Any`. pub ty_path: Option<S>, /// Whether the response contains an `Any`. This is useful when operations /// get bound to some other object. pub contains_any: bool, /// Custom response headers for this operation (if any). pub headers: H, } impl<S, H> Response<S, H> where S: AsRef<str>, { /// Returns whether this response is a file. pub fn is_file(&self) -> bool { self.ty_path .as_ref() .map(|s| s.as_ref() == FILE_MARKER) .unwrap_or_default() } } /// Represents some parameter somewhere (header, path, query, etc.). #[derive(Debug, Clone)] pub struct Parameter { /// Name of the parameter. pub name: String, /// Description of this operation (if any), to be used for docs. pub description: Option<String>, /// Type of the parameter as a path. pub ty_path: String, /// Whether this parameter is required. pub required: bool, /// Where the parameter lives. pub presence: ParameterIn, /// If the parameter is an array of values, then the format for collecting them. pub delimiting: Vec<CollectionFormat>, } /// Represents an enum variant. 
#[derive(Debug, Clone)] pub struct ObjectVariant { /// Name of the variant (case unspecified). pub name: String, /// Value of this variant (if any). Note that this is only applicable /// for simple enums, and it's ignored entirely for string enums. pub value: serde_json::Value, } /// Represents a struct field. #[derive(Debug, Clone)] pub struct ObjectField { /// Name of the field. pub name: String, /// Type of the field as a path. pub ty_path: String, /// Description of this operation (if any), to be used for docs. pub description: Option<String>, /// Whether this field is required (i.e., not optional). pub is_required: bool, /// Whether this field's type "is" or "has" an `Any` type. pub needs_any: bool, /// Whether this field should be boxed. pub boxed: bool, /// Required fields of the "deepest" child type in the given definition. /// /// Now, what do I mean by "deepest"? For example, if we had `Vec<Vec<Vec<T>>>` /// or `Vec<BTreeMap<String, Vec<BTreeMap<String, T>>>>`, then "deepest" child /// type is T (as long as it's not a `Vec` or `BTreeMap`). /// /// To understand why we're doing this, see `ApiObjectBuilderImpl::write_builder_ty` /// and `ApiObjectBuilderImpl::write_value_map` functions. /// /// Yours sincerely. pub child_req_fields: Vec<String>, } pub fn to_snake_case(name: &str) -> String { let new_name = AT_REGEX.replace(name, "at_"); new_name.to_snake_case() } pub fn to_pascal_case(name: &str) -> String { let new_name = AT_REGEX.replace(name, "at_"); new_name.to_pascal_case() } impl ApiObject { /// Create an object with the given name. pub fn with_name<S>(name: S) -> Self where S: Into<String>, { ApiObject { name: name.into(), // NOTE: Even though `path` is empty, it'll be replaced by the emitter. ..Default::default() } } /// Writes `Any` as a generic parameter (including `<>`). 
pub(super) fn write_any_generic<F>(f: &mut F) -> fmt::Result where F: Write, { f.write_str("<")?; f.write_str(ANY_GENERIC_PARAMETER)?; f.write_str(">") } /// Writes the given string (if any) as Rust documentation into /// the given formatter. pub(super) fn write_docs<F, S>(stuff: Option<S>, f: &mut F, levels: usize) -> fmt::Result where F: Write, S: AsRef<str>, { let indent = " ".repeat(levels * 4); if let Some(desc) = stuff.as_ref() { desc.as_ref().split('\n').try_for_each(|line| { f.write_str("\n")?; f.write_str(&indent)?; f.write_str("///")?; if line.is_empty() { return Ok(()); } f.write_str(" ")?; f.write_str( DOC_REGEX .replace_all(line, |c: &Captures| match &c[0] { "[" => "\\[", "]" => "\\]", _ => unreachable!(), }) .trim_end(), ) })?; f.write_str("\n")?; } Ok(()) } /// Returns whether this type is simple (i.e., not an object defined by us). #[inline] pub(super) fn is_simple_type(ty: &str) -> bool { !ty.contains("::") || ty.ends_with("Delimited") } /// Assuming that the given type "is" or "has" `Any`, this adds /// the appropriate generic parameter. fn write_field_with_any<F>(ty: &str, f: &mut F) -> fmt::Result where F: Write, { if let Some(i) = ty.find('<') { if ty[..i].ends_with("Vec") { f.write_str(&ty[..=i])?; Self::write_field_with_any(&ty[i + 1..ty.len() - 1], f)?; } else if ty[..i].ends_with("std::collections::BTreeMap") { f.write_str(&ty[..i + 9])?; Self::write_field_with_any(&ty[i + 9..ty.len() - 1], f)?; } else { unreachable!("no other generics expected."); } f.write_str(">")?; return Ok(()); } f.write_str(ty)?; if !Self::is_simple_type(ty) { Self::write_any_generic(f)?; } Ok(()) } } /// Represents a builder struct for some API object. #[derive(Default, Debug, Clone)] pub(super) struct ApiObjectBuilder<'a> { /// Index of this builder. pub idx: usize, /// Description if any, for docs. pub description: Option<&'a str>, /// Whether body is required for this builder. pub body_required: bool, /// Prefix for addressing stuff from crate root. 
pub helper_module_prefix: &'a str, /// Operation ID, if any. pub op_id: Option<&'a str>, /// Whether the operation is deprecated or not. pub deprecated: bool, /// HTTP method for the operation - all builders (other than object builders) /// have this. pub method: Option<HttpMethod>, /// Relative URL path - presence is same as HTTP method. pub rel_path: Option<&'a str>, /// Whether this operation returns a list object. pub is_list_op: bool, /// Response for this operation, if any. pub response: Response<&'a str, &'a [Parameter]>, /// Object to which this builder belongs to. pub object: &'a str, /// Encoding for the operation, if it's not JSON. pub encoding: Option<&'a (String, Arc<Coder>)>, /// Decoding for the operation, if it's not JSON. /// /// **NOTE:** We use this to set the `Accept` header for operations /// which return objects that are (or have) `Any` type. pub decoding: Option<&'a (String, Arc<Coder>)>, /// Whether there are multiple builders for this object. pub multiple_builders_exist: bool, /// Fields in this builder. pub fields: &'a [ObjectField], /// Parameters global to this URL path. pub global_params: &'a [Parameter], /// Parameters local to this operation. pub local_params: &'a [Parameter], /// Whether this builder is generic over `Any` type. pub needs_any: bool, } /// The property we're dealing with. #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub(super) enum Property { RequiredField, OptionalField, RequiredParam, OptionalParam, } impl Property { /// Whether this property is required. pub(super) fn is_required(self) -> bool { matches!(self, Property::RequiredField | Property::RequiredParam) } /// Checks whether this property is a parameter. pub(super) fn is_parameter(self) -> bool { matches!(self, Property::RequiredParam | Property::OptionalParam) } /// Checks whether this property is a field. 
pub(super) fn is_field(self) -> bool { matches!(self, Property::RequiredField | Property::OptionalField) } } /// See `ApiObjectBuilder::write_generics_if_necessary` pub(super) enum TypeParameters<'a> { Generic, ChangeOne(&'a str), ReplaceAll, ChangeAll, } /// Represents a Rust struct field (could be actual object field or a parameter). #[derive(Debug, Clone)] pub(super) struct StructField<'a> { /// Name of this field (case unspecified). pub name: &'a str, /// Type of this field. pub ty: &'a str, /// What this field represents. pub prop: Property, /// If the field is boxed pub boxed: bool, /// Description for this field (if any), for docs. pub desc: Option<&'a str>, /// Whether this field had a collision (i.e., between parameter and object field) pub overridden: bool, /// Required fields of child needed for this field. If they exist, then we /// switch to requiring a builder. pub strict_child_fields: &'a [String], /// Delimiting for array values (if it is a parameter). pub delimiting: &'a [CollectionFormat], /// Location of the parameter (if it is a parameter). pub param_loc: Option<ParameterIn>, /// Whether this field "is" or "has" `Any` type. This is only /// applicable for object fields. pub needs_any: bool, /// Whether this field indicates a file upload. pub needs_file: bool, } impl<'a> ApiObjectBuilder<'a> { /// Name of the constructor function which creates this builder. pub fn constructor_fn_name(&self) -> Option<String> { match (self.op_id, self.method) { // If there's an operation ID, then we go for that ... (Some(id), _) => Some(id.to_snake_case()), // If there's a method and we *don't* have any collisions // (i.e., two or more paths for same object), then we default // to using the method ... (_, Some(meth)) if !self.multiple_builders_exist => { Some(meth.to_string().to_snake_case()) } // If there's a method, then we go for numbered functions ... 
(_, Some(meth)) => { let mut name = meth.to_string().to_snake_case(); if self.idx > 0 { name.push('_'); name.push_str(&self.idx.to_string()); } Some(name) } // We don't know what to do ... // FIXME: Use route and method to generate a name. _ => None, } } /// Returns an iterator of all fields and parameters required for the Rust builder struct. /// /// **NOTE:** The names yielded by this iterator are unique for a builder. /// If there's a collision between a path-specific parameter and an operation-specific /// parameter, then the latter overrides the former. If there's a collision between a field /// and a parameter, then the latter overrides the former. pub(super) fn struct_fields_iter(&self) -> impl Iterator<Item = StructField<'a>> + 'a { let body_required = self.body_required; let field_iter = self.fields.iter().map(move |field| StructField { name: field.name.as_str(), ty: field.ty_path.as_str(), // We "require" the object fields only if the object itself is required. prop: if body_required && field.is_required { Property::RequiredField } else { Property::OptionalField }, boxed: field.boxed, desc: field.description.as_deref(), strict_child_fields: &*field.child_req_fields, param_loc: None, overridden: false, needs_any: field.needs_any, needs_file: field.ty_path == FILE_MARKER, delimiting: &[], }); let param_iter = self .global_params .iter() .chain(self.local_params.iter()) .scan(HashSet::new(), |set, param| { // Local parameters override global parameters. if set.contains(&param.name) { // Workaround because `scan` stops when it encounters // `None`, but we want filtering. 
Some(None) } else { set.insert(&param.name); Some(Some(StructField { name: param.name.as_str(), ty: param.ty_path.as_str(), prop: if param.required { Property::RequiredParam } else { Property::OptionalParam }, boxed: false, desc: param.description.as_deref(), strict_child_fields: &[] as &[_], param_loc: Some(param.presence), overridden: false, needs_any: false, needs_file: param.ty_path == FILE_MARKER, delimiting: &param.delimiting, })) } }) .flatten(); let mut fields = vec![]; // Check parameter-field collisions. for field in param_iter.chain(field_iter) { if let Some(v) = fields .iter_mut() .find(|f: &&mut StructField<'_>| f.name == field.name) { if v.ty == field.ty { v.overridden = true; } // We don't know what we should do when we encounter // parameter-field collision and they have different types. continue; } fields.push(field); } fields.into_iter() } /// Write this builder's name into the given formatter. pub(super) fn write_name<F>(&self, f: &mut F) -> fmt::Result where F: Write, { f.write_str(self.object)?; if let Some(method) = self.method { write!(f, "{}", method)?; } f.write_str("Builder")?; if self.idx > 0 { f.write_str(&self.idx.to_string())?; } Ok(()) } /// Writes generic parameters, if needed. /// /// Also takes an enum to specify whether the one/all/none of the parameters /// should make use of actual types. pub(super) fn write_generics_if_necessary<F>( &self, f: &mut F, any_value: Option<&str>, params: TypeParameters<'_>, ) -> Result<usize, fmt::Error> where F: Write, { let mut num_generics = 0; // Inspect fields and parameters and write generics. 
self.struct_fields_iter() .filter(|f| f.prop.is_required()) .enumerate() .try_for_each(|(i, field)| { num_generics += 1; if i == 0 { f.write_str("<")?; } else { f.write_str(", ")?; } match params { // If the name matches, then change that unit type to `{Name}Exists` TypeParameters::ChangeOne(n) if field.name == n => { f.write_str(self.helper_module_prefix)?; f.write_str("generics::")?; f.write_str(&to_pascal_case(field.name))?; return f.write_str("Exists"); } // All names should be changed to `{Name}Exists` TypeParameters::ChangeAll => { f.write_str(self.helper_module_prefix)?; f.write_str("generics::")?; f.write_str(&to_pascal_case(field.name))?; return f.write_str("Exists"); } // All names should be reset to `Missing{Name}` TypeParameters::ReplaceAll => { f.write_str(self.helper_module_prefix)?; f.write_str("generics::")?; f.write_str("Missing")?; } _ => (), } f.write_str(&to_pascal_case(field.name)) })?; if self.needs_any { if num_generics > 0 { f.write_str(", ")?; } else { f.write_str("<")?; } f.write_str(any_value.unwrap_or(ANY_GENERIC_PARAMETER))?; num_generics += 1; } if num_generics > 0 { f.write_str(">")?; } Ok(num_generics) } /// Returns whether this builder will have at least one field. pub(super) fn has_atleast_one_field(&self) -> bool { self.struct_fields_iter() .any(|f| f.prop.is_parameter() || f.prop.is_required()) } /// Returns whether a separate container is needed for the builder struct. pub(super) fn needs_container(&self) -> bool { // This is perhaps one of those important blocks, because this // decides whether to mark builder structs as `repr(transparent)` // (for unsafely transmuting). It's UB to transmute `repr(Rust)` // structs, so we put stuff into a container and transmute // whenever a builder: // // - Has at least one operation parameter that's required (or) // - Has a body with at least one field that's required and the // operation has at least one parameter. 
// // Because, we need `mem::transmute` only when we use phantom fields // and we use phantom fields only when there's a "required" constraint. // And, we don't need a container if there's just a body (i.e., no params), // because we can transmute the builder directly. self.local_params .iter() .chain(self.global_params.iter()) .any(|p| p.required) || (self.body_required && self.fields.iter().any(|f| f.is_required) && self.local_params.len() + self.global_params.len() > 0) } /// Write this builder's container name into the given formatter. pub(super) fn write_container_name<F>(&self, f: &mut F) -> fmt::Result where F: Write, { self.write_name(f)?; f.write_str("Container") } /// Given the helper module prefix, type and delimiters for that type, /// wraps the type (if needed) and writes the old or new type to the given formatter. pub(super) fn write_wrapped_ty<F>( module_prefix: &str, ty: &str, delims: &[CollectionFormat], f: &mut F, ) -> fmt::Result where F: fmt::Write, { if !ty.contains("Vec") { return f.write_str(ty); } // In parameters, we're limited to basic types and arrays, // so we can assume that whatever `<>` we encounter, they're // all for `Vec`. let delim_ty = String::from(module_prefix) + "util::Delimited"; let mut ty = ty.replace("Vec", &delim_ty); let mut new_ty = String::new(); // From the reverse, because we replace from inside out. let mut delim_idx = delims.len(); while let Some(idx) = ty.find('>') { delim_idx -= 1; new_ty.push_str(&ty[..idx]); new_ty.push_str(", "); write!(new_ty, "{}util::{:?}", module_prefix, delims[delim_idx])?; new_ty.push('>'); if idx == ty.len() - 1 { break; } ty = ty[idx + 1..].into(); } f.write_str(&new_ty) } /// Writes the body field into the formatter if required. fn write_body_field_if_required<F>(&self, f: &mut F) -> fmt::Result where F: Write, { if self.body_required { // We address with 'self::' because it's possible for body type // to collide with type parameters (if any). 
f.write_str("\n body: self::")?; f.write_str(self.object)?; if self.needs_any { ApiObject::write_any_generic(f)?; } f.write_str(",")?; } Ok(()) } /// Writes the parameter into the formatter if required. fn write_parameter_if_required<F>( &self, prop: Property, name: &str, ty: &str, delims: &[CollectionFormat], f: &mut F, ) -> fmt::Result where F: Write, { if !prop.is_parameter() { return Ok(()); } f.write_str("\n param_")?; f.write_str(name)?; f.write_str(": Option<")?; if ty == FILE_MARKER { f.write_str("std::path::PathBuf")?; } else { Self::write_wrapped_ty(self.helper_module_prefix, ty, delims, f)?; } f.write_str(">,") } } impl<'a> Display for ApiObjectBuilder<'a> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("/// Builder ")?; if let (Some(name), Some(m)) = (self.constructor_fn_name(), self.method) { f.write_str("created by [`")?; f.write_str(self.object)?; f.write_str("::")?; f.write_str(&name)?; f.write_str("`](./struct.")?; f.write_str(self.object)?; f.write_str(".html#method.")?; f.write_str(&name)?; f.write_str(") method for a `")?; f.write_str(&m.to_string().to_uppercase())?; f.write_str("` operation associated with `")?; f.write_str(self.object)?; f.write_str("`.\n")?; } else { f.write_str("for [`")?; f.write_str(self.object)?; f.write_str("`](./struct.")?; f.write_str(self.object)?; f.write_str(".html) object.\n")?; } // If the builder "needs" parameters/fields, then we go for a separate // container which holds both the body (if any) and the parameters, // so that we can make the actual builder `#[repr(transparent)]` // for safe transmuting. let needs_container = self.needs_container(); if needs_container { f.write_str("#[repr(transparent)]\n")?; } f.write_str("#[derive(Debug, Clone)]\npub struct ")?; self.write_name(f)?; self.write_generics_if_necessary(f, None, TypeParameters::Generic)?; // If structs don't have any fields, then we go for unit structs. 
let has_fields = self.has_atleast_one_field(); if has_fields || self.body_required || needs_container { f.write_str(" {")?; } let mut container = String::new(); if needs_container { container.push_str("#[derive(Debug, Default, Clone)]\nstruct "); self.write_container_name(&mut container)?; if self.needs_any { ApiObject::write_any_generic(&mut container)?; } container.push_str(" {"); self.write_body_field_if_required(&mut container)?; f.write_str("\n inner: ")?; self.write_container_name(f)?; if self.needs_any { ApiObject::write_any_generic(f)?; } f.write_str(",")?; } else { self.write_body_field_if_required(f)?; } // Write struct fields and the associated markers if needed. self.struct_fields_iter() .try_for_each::<_, fmt::Result>(|field| { let (cc, sk) = (to_pascal_case(field.name), to_snake_case(field.name)); if needs_container { self.write_parameter_if_required( field.prop, &sk, field.ty, field.delimiting, &mut container, )?; } else { self.write_parameter_if_required( field.prop, &sk, field.ty, field.delimiting, f, )?; } if field.prop.is_required() { f.write_str("\n ")?; if field.prop.is_parameter() { f.write_str("_param")?; } f.write_str("_")?; f.write_str(&sk)?; f.write_str(": ")?; f.write_str("core::marker::PhantomData<")?; f.write_str(&cc)?; f.write_str(">,")?; } Ok(()) })?; if has_fields || self.body_required { f.write_str("\n}\n")?; } else { f.write_str(";\n")?; } if needs_container { f.write_str("\n")?; f.write_str(&container)?; f.write_str("\n}\n")?; } Ok(()) } } impl Display for ApiObject { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ApiObject::write_docs(self.description.as_ref(), f, 0)?; if self.inner.is_enum() { return self.write_enum(f); } f.write_str("#[derive(Debug, Default, Clone, Serialize, Deserialize)]\npub struct ")?; f.write_str(&self.name)?; if !self.inner.is_enum() && self.fields().iter().any(|f| f.needs_any) { ApiObject::write_any_generic(f)?; } f.write_str(" {")?; self.fields() .iter() .try_for_each::<_, fmt::Result>(|field| { 
let mut new_name = to_snake_case(&field.name); // Check if the field matches a Rust keyword and add '_' suffix. if RUST_KEYWORDS.iter().any(|&k| k == new_name) { new_name.push('_'); } ApiObject::write_docs(field.description.as_ref(), f, 1)?; if field.description.is_none() { f.write_str("\n")?; } f.write_str(" ")?; if field.name == EXTRA_PROPS_FIELD { f.write_str("#[serde(flatten)]\n ")?; } else if new_name != field.name.as_str() { f.write_str("#[serde(rename = \"")?; f.write_str(&field.name)?; f.write_str("\")]\n ")?; } f.write_str("pub ")?; f.write_str(&new_name)?; f.write_str(": ")?; if !field.is_required { f.write_str("Option<")?; } if field.boxed { f.write_str("Box<")?; } if field.needs_any { Self::write_field_with_any(&field.ty_path, f)?; } else { f.write_str(&field.ty_path)?; } if field.boxed { f.write_str(">")?; } if !field.is_required { f.write_str(">")?; } f.write_str(",")?; Ok(()) })?; if !self.fields().is_empty() { f.write_str("\n")?; } f.write_str("}\n") } } impl ApiObject { /// Writes an enum declaration along with serde impl if needed. fn write_enum<F>(&self, f: &mut F) -> fmt::Result where F: fmt::Write, { let is_string = self.inner.is_string_enum(); f.write_str("#[derive(Debug, Clone")?; if is_string { f.write_str(", Serialize, Deserialize")?; } f.write_str(")]\n#[allow(non_camel_case_types)]\npub enum ")?; f.write_str(&self.name)?; f.write_str(" {")?; self.variants().iter().try_for_each(|var| { if is_string { f.write_str("\n #[serde(rename = ")?; if let serde_json::Value::String(s) = &var.value { write!(f, "{:?}", s)?; } else { write!(f, "{:?}", var.value.to_string())?; } f.write_str(")]")?; } f.write_str("\n ")?; f.write_str(&var.name)?; f.write_str(",") })?; f.write_str("\n}\n")?; // FIXME: Currently, we're implementing the first value as enum default. // If "default" field exists, then we should use that instead. 
if let Some(var) = self.variants().get(0) { writeln!( f, "impl Default for {name} {{ fn default() -> Self {{ {name}::{first_var} }} }}", name = &self.name, first_var = &var.name )?; } if !is_string { EnumSerdeImpl::from(self).write_to(f)?; } Ok(()) } } /// Abstraction for implementing Serialize/Deserialize mechanism /// for non-string enums. struct EnumSerdeImpl<'a> { obj: &'a ApiObject, true_: Option<&'a ObjectVariant>, false_: Option<&'a ObjectVariant>, i64_: Vec<&'a ObjectVariant>, u64_: Vec<&'a ObjectVariant>, f64_: Vec<&'a ObjectVariant>, str_: Vec<&'a ObjectVariant>, } impl<'a> EnumSerdeImpl<'a> { fn from(o: &'a ApiObject) -> Self { use serde_json::Value; let mut writer = EnumSerdeImpl { obj: o, true_: None, false_: None, i64_: vec![], u64_: vec![], f64_: vec![], str_: vec![], }; o.variants().iter().for_each(|var| match var.value { Value::Number(ref n) => { if n.is_u64() { writer.u64_.push(var); } else if n.is_i64() { writer.i64_.push(var); } else if n.is_f64() { writer.f64_.push(var); } } Value::Bool(ref b) if *b => writer.true_ = Some(var), Value::Bool(_) => writer.false_ = Some(var), Value::String(_) => writer.str_.push(var), _ => (), }); writer } /// Writes the serde impl to the given formatter. fn write_to<F>(self, f: &mut F) -> fmt::Result where F: fmt::Write, { f.write_str("impl serde::Serialize for ")?; f.write_str(&self.obj.name)?; f.write_str( " { fn serialize<S: serde::Serializer>(&self, ser: S) -> Result<S::Ok, S::Error> { match self {", )?; // We've already checked that we have non-zero variants when we emitted // the enum, so we're okay here. 
self.obj.variants().iter().try_for_each(|var| { write!( f, " {}::{} => ({}).serialize(ser),", self.obj.name, var.name, var.value ) })?; f.write_str("\n }\n }\n}")?; write!( f, " impl<'de> serde::Deserialize<'de> for {name} {{ fn deserialize<D: serde::Deserializer<'de>>(deser: D) -> Result<Self, D::Error> {{ use serde::de::{{Error, Unexpected, Visitor}}; struct VariantVisitor; const EXPECT_MSG: &str = \"valid value for enum {name}\"; impl<'de> Visitor<'de> for VariantVisitor {{ type Value = {name}; fn expecting(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {{ f.write_str(EXPECT_MSG) }}", name = self.obj.name, )?; self.write_visit_methods(f)?; f.write_str( "\n } deser.deserialize_any(VariantVisitor)\n }\n}\n", ) } /// Writes the necessary visitor methods for deserializing the enum. fn write_visit_methods<F>(&self, f: &mut F) -> fmt::Result where F: fmt::Write, { let mut bool_vis = vec![]; if let Some(var) = self.true_ { bool_vis.push(var); } if let Some(var) = self.false_ { bool_vis.push(var); } let visitors = &[ ("bool", None, &bool_vis, "Bool", bool_vis.len() == 1), ("i64", None, &self.i64_, "Signed", true), ("u64", None, &self.u64_, "Unsigned", true), ("f64", None, &self.f64_, "Float", true), ("str", Some("&str"), &self.str_, "Str", true), ]; for (vis_name, vis_ty, vars, ident, needs_error) in visitors { write!( f, " fn visit_{name}<E: Error>(self, v: {ty}) -> Result<Self::Value, E> {{", name = vis_name, ty = vis_ty.unwrap_or(vis_name) )?; for var in *vars { write!( f, " if v == {} {{ return Ok({}::{}); }}", var.value, self.obj.name, var.name )?; } // For bool, this is needed for "unreachable" code check. if *needs_error { write!( f, " Err(E::invalid_value(Unexpected::{ident}(v), &EXPECT_MSG)) }}", ident = ident, )?; } } Ok(()) } } impl Default for ObjectContainer { fn default() -> Self { ObjectContainer::Struct { fields: vec![] } } }
33.209773
96
0.522146
8a8a072c0eaa576cf0cb81ec2db3a7433e0f3b0e
3,143
#![allow(unused_imports)] use super::*; use wasm_bindgen::prelude::*; #[cfg(web_sys_unstable_apis)] #[wasm_bindgen] extern "wasm-bindgen" { # [wasm_bindgen (extends = :: js_sys :: Object , js_name = GPUPipelineDescriptorBase)] #[derive(Debug, Clone, PartialEq, Eq)] #[doc = "The `GpuPipelineDescriptorBase` dictionary."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `GpuPipelineDescriptorBase`*"] #[doc = ""] #[doc = "*This API is unstable and requires `--cfg=web_sys_unstable_apis` to be activated, as"] #[doc = "[described in the `wasm-bindgen` guide](https://rustwasm.github.io/docs/wasm-bindgen/web-sys/unstable-apis.html)*"] pub type GpuPipelineDescriptorBase; } #[cfg(web_sys_unstable_apis)] impl GpuPipelineDescriptorBase { #[doc = "Construct a new `GpuPipelineDescriptorBase`."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `GpuPipelineDescriptorBase`*"] #[doc = ""] #[doc = "*This API is unstable and requires `--cfg=web_sys_unstable_apis` to be activated, as"] #[doc = "[described in the `wasm-bindgen` guide](https://rustwasm.github.io/docs/wasm-bindgen/web-sys/unstable-apis.html)*"] pub fn new() -> Self { #[allow(unused_mut)] let mut ret: Self = ::wasm_bindgen::JsCast::unchecked_into(::js_sys::Object::new()); ret } #[cfg(web_sys_unstable_apis)] #[doc = "Change the `label` field of this object."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `GpuPipelineDescriptorBase`*"] #[doc = ""] #[doc = "*This API is unstable and requires `--cfg=web_sys_unstable_apis` to be activated, as"] #[doc = "[described in the `wasm-bindgen` guide](https://rustwasm.github.io/docs/wasm-bindgen/web-sys/unstable-apis.html)*"] pub fn label(&mut self, val: &str) -> &mut Self { use wasm_bindgen::JsValue; let r = ::js_sys::Reflect::set(self.as_ref(), &JsValue::from("label"), &JsValue::from(val)); debug_assert!( r.is_ok(), "setting properties should never fail on our dictionary 
objects" ); let _ = r; self } #[cfg(web_sys_unstable_apis)] #[cfg(feature = "GpuPipelineLayout")] #[doc = "Change the `layout` field of this object."] #[doc = ""] #[doc = "*This API requires the following crate features to be activated: `GpuPipelineDescriptorBase`, `GpuPipelineLayout`*"] #[doc = ""] #[doc = "*This API is unstable and requires `--cfg=web_sys_unstable_apis` to be activated, as"] #[doc = "[described in the `wasm-bindgen` guide](https://rustwasm.github.io/docs/wasm-bindgen/web-sys/unstable-apis.html)*"] pub fn layout(&mut self, val: &GpuPipelineLayout) -> &mut Self { use wasm_bindgen::JsValue; let r = ::js_sys::Reflect::set(self.as_ref(), &JsValue::from("layout"), &JsValue::from(val)); debug_assert!( r.is_ok(), "setting properties should never fail on our dictionary objects" ); let _ = r; self } }
46.910448
129
0.637607
18c9500dd1d259c813855b040a2751516c0bba89
409
pub use self::{ export_namespace_from::export_namespace_from, nullish_coalescing::nullish_coalescing, opt_chaining::optional_chaining, }; use swc_common::chain; use swc_ecma_visit::Fold; mod export_namespace_from; mod nullish_coalescing; mod opt_chaining; pub fn es2020() -> impl Fold { chain!( nullish_coalescing(), optional_chaining(), export_namespace_from(), ) }
21.526316
89
0.723716
61852c1151831b72a5c1bd74bb26ad084262bf12
3,863
use std::collections::HashMap; use indexmap::IndexMap; use serde::Deserialize; pub type ElementDefines = Vec<ElementDefine>; pub fn filter_element_define(defines: &[ElementDefine], key: &str) -> Option<ElementDefine> { let def = defines .iter() .filter(|define| define.name == key) .collect::<Vec<&ElementDefine>>(); if def.is_empty() { None } else { Some(def[0].clone()) } } #[derive(Serialize, Deserialize, PartialEq, Debug, Default, Clone)] pub struct ElementDefine { /// element id, such as `<quake-dashboard>` pub name: String, /// element's input attributes, such /// data in `<quake-dashboard data=""></quake-dashboard>` pub attributes: Vec<Attribute>, /// output events pub events: Vec<EventListener>, /// data attributes pub data_properties: Vec<HashMap<String, String>>, } impl ElementDefine { pub fn new(id: String) -> Self { Self { name: id, attributes: vec![], events: vec![], data_properties: vec![], } } pub fn data_map(&self) -> IndexMap<String, String> { let mut result: IndexMap<String, String> = IndexMap::new(); for map in &self.data_properties { for (key, value) in map { result.insert(key.to_string(), value.to_string()); } } result } pub fn from_js(element: &str, attributes: Vec<String>, events: Vec<String>) -> ElementDefine { let mut wce = Self::new(element.to_string()); for attr in attributes { wce.attributes.push(Attribute { typ: None, name: attr, }) } for event in events { wce.events.push(EventListener { event_name: event, event_data: None, }) } wce } pub fn add_event(&mut self, event_name: &str) { self.events.push(EventListener { event_name: event_name.to_string(), event_data: None, }); } } #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] pub struct Attribute { #[serde(rename = "type")] pub typ: Option<String>, pub name: String, } #[derive(Serialize, Deserialize, PartialEq, Debug)] pub enum AttributeType { Array(Vec<AttributeType>), Boolean(bool), Number(usize), String(String), Date(String), Object(HashMap<String, AttributeType>), } pub type 
EventValue = Attribute; #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] pub struct EventListener { pub event_name: String, /// to get `event.detail` pub event_data: Option<Vec<EventValue>>, } #[cfg(test)] mod tests { use std::fs; use std::path::PathBuf; use crate::entry::entry_paths::EntryPaths; use crate::transflow::element_define::ElementDefine; #[test] fn serialize_wc_element() { let quake_path = PathBuf::from("..") .join("_fixtures") .join("demo_quake") .join("_quake") .join(EntryPaths::element_define()); let string = fs::read_to_string(quake_path).unwrap(); let elements: Vec<ElementDefine> = serde_yaml::from_str(&*string).unwrap(); assert_eq!("quake-calendar", elements[0].name); let map = elements[0].data_map(); assert_eq!("String", map.get("title").unwrap()); assert_eq!("String", map.get("content").unwrap()); } #[test] fn test_web_component_element_struct() { let wce = ElementDefine::from_js( "quake-dashboard", vec!["data".to_string()], vec!["onSave".to_string()], ); assert_eq!("quake-dashboard", wce.name); assert_eq!(1, wce.events.len()); assert_eq!(1, wce.attributes.len()); } }
26.101351
98
0.57753
1d423ac34919b421a31e4f77f4e63030746082a6
16,161
// Inspired by Paul Woolcock's cargo-fmt (https://github.com/pwoolcoc/cargo-fmt/). #![deny(warnings)] #![allow(clippy::match_like_matches_macro)] use std::cmp::Ordering; use std::collections::{BTreeMap, BTreeSet}; use std::env; use std::ffi::OsStr; use std::fs; use std::hash::{Hash, Hasher}; use std::io::{self, Write}; use std::iter::FromIterator; use std::path::{Path, PathBuf}; use std::process::Command; use std::str; use structopt::StructOpt; #[path = "test/mod.rs"] #[cfg(test)] mod cargo_fmt_tests; #[derive(StructOpt, Debug)] #[structopt( bin_name = "cargo fmt", about = "This utility formats all bin and lib files of \ the current crate using rustfmt." )] pub struct Opts { /// No output printed to stdout #[structopt(short = "q", long = "quiet")] quiet: bool, /// Use verbose output #[structopt(short = "v", long = "verbose")] verbose: bool, /// Print rustfmt version and exit #[structopt(long = "version")] version: bool, /// Specify package to format #[structopt(short = "p", long = "package", value_name = "package")] packages: Vec<String>, /// Specify path to Cargo.toml #[structopt(long = "manifest-path", value_name = "manifest-path")] manifest_path: Option<String>, /// Specify message-format: short|json|human #[structopt(long = "message-format", value_name = "message-format")] message_format: Option<String>, /// Options passed to rustfmt // 'raw = true' to make `--` explicit. #[structopt(name = "rustfmt_options", raw(true))] rustfmt_options: Vec<String>, /// Format all packages, and also their local path-based dependencies #[structopt(long = "all")] format_all: bool, /// Run rustfmt in check mode #[structopt(long = "check")] check: bool, } fn main() { let exit_status = execute(); std::io::stdout().flush().unwrap(); std::process::exit(exit_status); } const SUCCESS: i32 = 0; const FAILURE: i32 = 1; fn execute() -> i32 { // Drop extra `fmt` argument provided by `cargo`. 
let mut found_fmt = false; let args = env::args().filter(|x| { if found_fmt { true } else { found_fmt = x == "fmt"; x != "fmt" } }); let opts = Opts::from_iter(args); let verbosity = match (opts.verbose, opts.quiet) { (false, false) => Verbosity::Normal, (false, true) => Verbosity::Quiet, (true, false) => Verbosity::Verbose, (true, true) => { print_usage_to_stderr("quiet mode and verbose mode are not compatible"); return FAILURE; } }; if opts.version { return handle_command_status(get_rustfmt_info(&[String::from("--version")])); } if opts.rustfmt_options.iter().any(|s| { ["--print-config", "-h", "--help", "-V", "--version"].contains(&s.as_str()) || s.starts_with("--help=") || s.starts_with("--print-config=") }) { return handle_command_status(get_rustfmt_info(&opts.rustfmt_options)); } let strategy = CargoFmtStrategy::from_opts(&opts); let mut rustfmt_args = opts.rustfmt_options; if opts.check { let check_flag = "--check"; if !rustfmt_args.iter().any(|o| o == check_flag) { rustfmt_args.push(check_flag.to_owned()); } } if let Some(message_format) = opts.message_format { if let Err(msg) = convert_message_format_to_rustfmt_args(&message_format, &mut rustfmt_args) { print_usage_to_stderr(&msg); return FAILURE; } } if let Some(specified_manifest_path) = opts.manifest_path { if !specified_manifest_path.ends_with("Cargo.toml") { print_usage_to_stderr("the manifest-path must be a path to a Cargo.toml file"); return FAILURE; } let manifest_path = PathBuf::from(specified_manifest_path); handle_command_status(format_crate( verbosity, &strategy, rustfmt_args, Some(&manifest_path), )) } else { handle_command_status(format_crate(verbosity, &strategy, rustfmt_args, None)) } } fn rustfmt_command() -> Command { let rustfmt_var = env::var_os("RUSTFMT"); let rustfmt = match &rustfmt_var { Some(rustfmt) => rustfmt, None => OsStr::new("rustfmt"), }; Command::new(rustfmt) } fn convert_message_format_to_rustfmt_args( message_format: &str, rustfmt_args: &mut Vec<String>, ) -> Result<(), 
String> { let mut contains_emit_mode = false; let mut contains_check = false; let mut contains_list_files = false; for arg in rustfmt_args.iter() { if arg.starts_with("--emit") { contains_emit_mode = true; } if arg == "--check" { contains_check = true; } if arg == "-l" || arg == "--files-with-diff" { contains_list_files = true; } } match message_format { "short" => { if !contains_list_files { rustfmt_args.push(String::from("-l")); } Ok(()) } "json" => { if contains_emit_mode { return Err(String::from( "cannot include --emit arg when --message-format is set to json", )); } if contains_check { return Err(String::from( "cannot include --check arg when --message-format is set to json", )); } rustfmt_args.push(String::from("--emit")); rustfmt_args.push(String::from("json")); Ok(()) } "human" => Ok(()), _ => { return Err(format!( "invalid --message-format value: {}. Allowed values are: short|json|human", message_format )); } } } fn print_usage_to_stderr(reason: &str) { eprintln!("{}", reason); let app = Opts::clap(); app.after_help("") .write_help(&mut io::stderr()) .expect("failed to write to stderr"); } #[derive(Debug, Clone, Copy, PartialEq)] pub enum Verbosity { Verbose, Normal, Quiet, } fn handle_command_status(status: Result<i32, io::Error>) -> i32 { match status { Err(e) => { print_usage_to_stderr(&e.to_string()); FAILURE } Ok(status) => status, } } fn get_rustfmt_info(args: &[String]) -> Result<i32, io::Error> { let mut command = rustfmt_command() .stdout(std::process::Stdio::inherit()) .args(args) .spawn() .map_err(|e| match e.kind() { io::ErrorKind::NotFound => io::Error::new( io::ErrorKind::Other, "Could not run rustfmt, please make sure it is in your PATH.", ), _ => e, })?; let result = command.wait()?; if result.success() { Ok(SUCCESS) } else { Ok(result.code().unwrap_or(SUCCESS)) } } fn format_crate( verbosity: Verbosity, strategy: &CargoFmtStrategy, rustfmt_args: Vec<String>, manifest_path: Option<&Path>, ) -> Result<i32, io::Error> { let targets = 
get_targets(strategy, manifest_path)?; // Currently only bin and lib files get formatted. run_rustfmt(&targets, &rustfmt_args, verbosity) } /// Target uses a `path` field for equality and hashing. #[derive(Debug)] pub struct Target { /// A path to the main source file of the target. path: PathBuf, /// A kind of target (e.g., lib, bin, example, ...). kind: String, /// Rust edition for this target. edition: String, } impl Target { pub fn from_target(target: &cargo_metadata::Target) -> Self { let path = PathBuf::from(&target.src_path); let canonicalized = fs::canonicalize(&path).unwrap_or(path); Target { path: canonicalized, kind: target.kind[0].clone(), edition: target.edition.clone(), } } } impl PartialEq for Target { fn eq(&self, other: &Target) -> bool { self.path == other.path } } impl PartialOrd for Target { fn partial_cmp(&self, other: &Target) -> Option<Ordering> { Some(self.path.cmp(&other.path)) } } impl Ord for Target { fn cmp(&self, other: &Target) -> Ordering { self.path.cmp(&other.path) } } impl Eq for Target {} impl Hash for Target { fn hash<H: Hasher>(&self, state: &mut H) { self.path.hash(state); } } #[derive(Debug, PartialEq, Eq)] pub enum CargoFmtStrategy { /// Format every packages and dependencies. All, /// Format packages that are specified by the command line argument. Some(Vec<String>), /// Format the root packages only. Root, } impl CargoFmtStrategy { pub fn from_opts(opts: &Opts) -> CargoFmtStrategy { match (opts.format_all, opts.packages.is_empty()) { (false, true) => CargoFmtStrategy::Root, (true, _) => CargoFmtStrategy::All, (false, false) => CargoFmtStrategy::Some(opts.packages.clone()), } } } /// Based on the specified `CargoFmtStrategy`, returns a set of main source files. 
fn get_targets( strategy: &CargoFmtStrategy, manifest_path: Option<&Path>, ) -> Result<BTreeSet<Target>, io::Error> { let mut targets = BTreeSet::new(); match *strategy { CargoFmtStrategy::Root => get_targets_root_only(manifest_path, &mut targets)?, CargoFmtStrategy::All => { get_targets_recursive(manifest_path, &mut targets, &mut BTreeSet::new())? } CargoFmtStrategy::Some(ref hitlist) => { get_targets_with_hitlist(manifest_path, hitlist, &mut targets)? } } if targets.is_empty() { Err(io::Error::new( io::ErrorKind::Other, "Failed to find targets".to_owned(), )) } else { Ok(targets) } } fn get_targets_root_only( manifest_path: Option<&Path>, targets: &mut BTreeSet<Target>, ) -> Result<(), io::Error> { let metadata = get_cargo_metadata(manifest_path)?; let workspace_root_path = PathBuf::from(&metadata.workspace_root).canonicalize()?; let (in_workspace_root, current_dir_manifest) = if let Some(target_manifest) = manifest_path { ( workspace_root_path == target_manifest, target_manifest.canonicalize()?, ) } else { let current_dir = env::current_dir()?.canonicalize()?; ( workspace_root_path == current_dir, current_dir.join("Cargo.toml"), ) }; let package_targets = match metadata.packages.len() { 1 => metadata.packages.into_iter().next().unwrap().targets, _ => metadata .packages .into_iter() .filter(|p| { in_workspace_root || PathBuf::from(&p.manifest_path) .canonicalize() .unwrap_or_default() == current_dir_manifest }) .map(|p| p.targets) .flatten() .collect(), }; for target in package_targets { targets.insert(Target::from_target(&target)); } Ok(()) } fn get_targets_recursive( manifest_path: Option<&Path>, mut targets: &mut BTreeSet<Target>, visited: &mut BTreeSet<String>, ) -> Result<(), io::Error> { let metadata = get_cargo_metadata(manifest_path)?; for package in &metadata.packages { add_targets(&package.targets, &mut targets); // Look for local dependencies using information available since cargo v1.51 // It's theoretically possible someone could use a newer version 
of rustfmt with // a much older version of `cargo`, but we don't try to explicitly support that scenario. // If someone reports an issue with path-based deps not being formatted, be sure to // confirm their version of `cargo` (not `cargo-fmt`) is >= v1.51 // https://github.com/rust-lang/cargo/pull/8994 for dependency in &package.dependencies { if dependency.path.is_none() || visited.contains(&dependency.name) { continue; } let manifest_path = PathBuf::from(dependency.path.as_ref().unwrap()).join("Cargo.toml"); if manifest_path.exists() && !metadata .packages .iter() .any(|p| p.manifest_path.eq(&manifest_path)) { visited.insert(dependency.name.to_owned()); get_targets_recursive(Some(&manifest_path), &mut targets, visited)?; } } } Ok(()) } fn get_targets_with_hitlist( manifest_path: Option<&Path>, hitlist: &[String], targets: &mut BTreeSet<Target>, ) -> Result<(), io::Error> { let metadata = get_cargo_metadata(manifest_path)?; let mut workspace_hitlist: BTreeSet<&String> = BTreeSet::from_iter(hitlist); for package in metadata.packages { if workspace_hitlist.remove(&package.name) { for target in package.targets { targets.insert(Target::from_target(&target)); } } } if workspace_hitlist.is_empty() { Ok(()) } else { let package = workspace_hitlist.iter().next().unwrap(); Err(io::Error::new( io::ErrorKind::InvalidInput, format!("package `{}` is not a member of the workspace", package), )) } } fn add_targets(target_paths: &[cargo_metadata::Target], targets: &mut BTreeSet<Target>) { for target in target_paths { targets.insert(Target::from_target(target)); } } fn run_rustfmt( targets: &BTreeSet<Target>, fmt_args: &[String], verbosity: Verbosity, ) -> Result<i32, io::Error> { let by_edition = targets .iter() .inspect(|t| { if verbosity == Verbosity::Verbose { println!("[{} ({})] {:?}", t.kind, t.edition, t.path) } }) .fold(BTreeMap::new(), |mut h, t| { h.entry(&t.edition).or_insert_with(Vec::new).push(&t.path); h }); let mut status = vec![]; for (edition, files) in by_edition 
{ let stdout = if verbosity == Verbosity::Quiet { std::process::Stdio::null() } else { std::process::Stdio::inherit() }; if verbosity == Verbosity::Verbose { print!("rustfmt"); print!(" --edition {}", edition); fmt_args.iter().for_each(|f| print!(" {}", f)); files.iter().for_each(|f| print!(" {}", f.display())); println!(); } let mut command = rustfmt_command() .stdout(stdout) .args(files) .args(&["--edition", edition]) .args(fmt_args) .spawn() .map_err(|e| match e.kind() { io::ErrorKind::NotFound => io::Error::new( io::ErrorKind::Other, "Could not run rustfmt, please make sure it is in your PATH.", ), _ => e, })?; status.push(command.wait()?); } Ok(status .iter() .filter_map(|s| if s.success() { None } else { s.code() }) .next() .unwrap_or(SUCCESS)) } fn get_cargo_metadata(manifest_path: Option<&Path>) -> Result<cargo_metadata::Metadata, io::Error> { let mut cmd = cargo_metadata::MetadataCommand::new(); cmd.no_deps(); if let Some(manifest_path) = manifest_path { cmd.manifest_path(manifest_path); } cmd.other_options(vec![String::from("--offline")]); match cmd.exec() { Ok(metadata) => Ok(metadata), Err(_) => { cmd.other_options(vec![]); match cmd.exec() { Ok(metadata) => Ok(metadata), Err(error) => Err(io::Error::new(io::ErrorKind::Other, error.to_string())), } } } }
29.54479
100
0.56525
e59cb43fb87bb81576bfcd8169be25585c5030e6
2,007
// Copyright (c) 2017-present PyO3 Project and Contributors use crate::pymethod; use proc_macro2::TokenStream; use quote::quote; pub fn build_py_methods(ast: &mut syn::ItemImpl) -> syn::Result<TokenStream> { if let Some((_, ref path, _)) = ast.trait_ { Err(syn::Error::new_spanned( path, "#[pymethods] cannot be used on trait impl blocks", )) } else if ast.generics != Default::default() { Err(syn::Error::new_spanned( ast.generics.clone(), "#[pymethods] cannot be used with lifetime parameters or generics", )) } else { impl_methods(&ast.self_ty, &mut ast.items) } } pub fn impl_methods(ty: &syn::Type, impls: &mut Vec<syn::ImplItem>) -> syn::Result<TokenStream> { let mut methods = Vec::new(); let mut cfg_attributes = Vec::new(); for iimpl in impls.iter_mut() { match iimpl { syn::ImplItem::Method(meth) => { methods.push(pymethod::gen_py_method(ty, &mut meth.sig, &mut meth.attrs)?); cfg_attributes.push(get_cfg_attributes(&meth.attrs)); } syn::ImplItem::Const(konst) => { if let Some(meth) = pymethod::gen_py_const(ty, &konst.ident, &mut konst.attrs)? { methods.push(meth); } cfg_attributes.push(get_cfg_attributes(&konst.attrs)); } _ => (), } } Ok(quote! { pyo3::inventory::submit! { #![crate = pyo3] { type Inventory = <#ty as pyo3::class::methods::HasMethodsInventory>::Methods; <Inventory as pyo3::class::methods::PyMethodsInventory>::new(vec![#( #(#cfg_attributes)* #methods ),*]) } } }) } fn get_cfg_attributes(attrs: &[syn::Attribute]) -> Vec<&syn::Attribute> { attrs .iter() .filter(|attr| attr.path.is_ident("cfg")) .collect() }
32.901639
97
0.538615
f9955916fbcbe919f8af68b9732d1bb978d872af
4,259
use beserial::{Deserialize, Serialize, ReadBytesExt, WriteBytesExt, SerializingError}; use keys::Address; use transaction::Transaction; use primitives::coin::Coin; use std::collections::HashSet; #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] #[repr(u8)] pub enum SubscriptionType { None = 0, Any = 1, Addresses = 2, MinFee = 3 } #[derive(Clone, Debug)] pub enum Subscription { None, Any, Addresses(HashSet<Address>), MinFee(Coin), // Fee per byte } impl Subscription { pub fn subscription_type(&self) -> SubscriptionType { match self { Subscription::None => SubscriptionType::None, Subscription::Any => SubscriptionType::Any, Subscription::Addresses(_) => SubscriptionType::Addresses, Subscription::MinFee(_) => SubscriptionType::MinFee } } pub fn matches_block(&self) -> bool { match self { Subscription::None => false, _ => true } } pub fn matches_transaction(&self, transaction: &Transaction) -> bool { match self { Subscription::None => false, Subscription::Any => true, Subscription::Addresses(addresses) => addresses.contains(&transaction.sender), Subscription::MinFee(min_fee) => { // TODO: Potential overflow for u64 min_fee.checked_mul(transaction.serialized_size() as u64) .map(|block_fee| transaction.fee >= block_fee) .unwrap_or(true) } } } } impl Deserialize for Subscription { fn deserialize<R: ReadBytesExt>(reader: &mut R) -> Result<Self, SerializingError> { let sub_type: SubscriptionType = Deserialize::deserialize(reader)?; match sub_type { SubscriptionType::Addresses => { // parse number of addresses and cast to usize // FIXME We should check for an overflow let num_addresses: u16 = Deserialize::deserialize(reader)?; let num_addresses = num_addresses as usize; // parse addresses and push them into vector let mut addresses = HashSet::with_capacity(num_addresses); for _ in 0..num_addresses { let address: Address = Deserialize::deserialize(reader)?; addresses.insert(address); } Ok(Subscription::Addresses(addresses)) }, 
SubscriptionType::MinFee => { let min_fee: Coin = Deserialize::deserialize(reader)?; Ok(Subscription::MinFee(min_fee)) }, SubscriptionType::None => Ok(Subscription::None), SubscriptionType::Any => Ok(Subscription::Any) } } } impl Serialize for Subscription { fn serialize<W: WriteBytesExt>(&self, writer: &mut W) -> Result<usize, SerializingError> { let mut size: usize = 0; // Serialize subscription type size += Serialize::serialize(&self.subscription_type(), writer)?; match self { Subscription::Addresses(addresses) => { // Serialize number of addresses if addresses.len() > 0xFFFF { return Err(SerializingError::Overflow) } size += Serialize::serialize(&(addresses.len() as u16), writer)?; // Serialize addresses for address in addresses { size += Serialize::serialize(address, writer)?; } }, Subscription::MinFee(min_fee) => { // Serialize minFee size += Serialize::serialize(min_fee, writer)?; } _ => {} } Ok(size) } fn serialized_size(&self) -> usize { 1 + (match self { // 2 bytes #n addresses, 20 * #n for the addresses // XXX This ignores an overflow of number of addresses Subscription::Addresses(addresses) => 2 + 20 * addresses.len(), // 64 bit minFee value Subscription::MinFee(_) => 8, _ => 0 }) } }
30.421429
94
0.555764
14d8fdee6ff0e9ebe1fd3ad79e4c5d0753ff2b0f
1,128
#[derive(Debug)] pub struct Stack<'a> { first: Option<Box<StackElem<'a>>>, } #[derive(Debug)] struct StackElem<'a> { content: &'a str, next: Option<Box<StackElem<'a>>>, } impl<'a> Stack<'a> { pub fn new() -> Stack<'a> { Stack{ first: None } } pub fn push(&mut self, content: &'a str) { let new_elem = Some(Box::new(StackElem { content, next: self.first.take(), })); self.first = new_elem; } pub fn pop(&mut self) -> Option<&'a str> { match self.first.take() { Some(elem) => { let ret_elem = *elem; self.first = ret_elem.next; Some(ret_elem.content) }, None => None, } //Solution using map and closure to avoid match // self.first.take().map(|elem| { // let ret_elem = *elem; // self.first = ret_elem.next; // ret_elem.content // }) } } impl<'a> Iterator for Stack<'a> { type Item = &'a str; fn next(&mut self) -> Option<Self::Item> { self.pop() } }
22.56
55
0.477837
38be635d83726859cfbc1f4097ab4710a477e443
643
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. pub fn main() { let i = box 100i; assert!(i == box 100i); assert!(i < box 101i); assert!(i <= box 100i); assert!(i > box 99i); assert!(i >= box 99i); }
33.842105
68
0.678072
610b3bdd0901c054fc7ea1f4cc2bff6a8eeea74f
1,565
use datasize::DataSize; use serde::{Deserialize, Serialize}; use crate::{ logging::LoggingConfig, types::NodeConfig, BlockProposerConfig, ConsensusConfig, ContractRuntimeConfig, DeployAcceptorConfig, EventStreamServerConfig, FetcherConfig, GossipConfig, RestServerConfig, RpcServerConfig, SmallNetworkConfig, StorageConfig, }; /// Root configuration. #[derive(DataSize, Debug, Default, Deserialize, Serialize)] // Disallow unknown fields to ensure config files and command-line overrides contain valid keys. #[serde(deny_unknown_fields)] pub struct Config { /// Node configuration. pub node: NodeConfig, /// Logging configuration. pub logging: LoggingConfig, /// Consensus configuration. pub consensus: ConsensusConfig, /// Network configuration. pub network: SmallNetworkConfig, /// Event stream API server configuration. pub event_stream_server: EventStreamServerConfig, /// REST API server configuration. pub rest_server: RestServerConfig, /// RPC API server configuration. pub rpc_server: RpcServerConfig, /// On-disk storage configuration. pub storage: StorageConfig, /// Gossip protocol configuration. pub gossip: GossipConfig, /// Fetcher configuration. pub fetcher: FetcherConfig, /// Contract runtime configuration. pub contract_runtime: ContractRuntimeConfig, /// Deploy acceptor configuration. pub deploy_acceptor: DeployAcceptorConfig, /// Block proposer configuration. #[serde(default)] pub block_proposer: BlockProposerConfig, }
36.395349
96
0.74377
edf530be8b7d13ab745aacdf9110739814dde25e
13,892
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. use super::*; #[tokio::test] async fn csv_query_avg_multi_batch() -> Result<()> { let mut ctx = ExecutionContext::new(); register_aggregate_csv(&mut ctx).await?; let sql = "SELECT avg(c12) FROM aggregate_test_100"; let plan = ctx.create_logical_plan(sql).unwrap(); let plan = ctx.optimize(&plan).unwrap(); let plan = ctx.create_physical_plan(&plan).await.unwrap(); let results = collect(plan).await.unwrap(); let batch = &results[0]; let column = batch.column(0); let array = column.as_any().downcast_ref::<Float64Array>().unwrap(); let actual = array.value(0); let expected = 0.5089725; // Due to float number's accuracy, different batch size will lead to different // answers. 
assert!((expected - actual).abs() < 0.01); Ok(()) } #[tokio::test] async fn csv_query_avg() -> Result<()> { let mut ctx = ExecutionContext::new(); register_aggregate_csv(&mut ctx).await?; let sql = "SELECT avg(c12) FROM aggregate_test_100"; let mut actual = execute(&mut ctx, sql).await; actual.sort(); let expected = vec![vec!["0.5089725099127211"]]; assert_float_eq(&expected, &actual); Ok(()) } #[tokio::test] async fn csv_query_variance_1() -> Result<()> { let mut ctx = ExecutionContext::new(); register_aggregate_csv(&mut ctx).await?; let sql = "SELECT var_pop(c2) FROM aggregate_test_100"; let mut actual = execute(&mut ctx, sql).await; actual.sort(); let expected = vec![vec!["1.8675"]]; assert_float_eq(&expected, &actual); Ok(()) } #[tokio::test] async fn csv_query_variance_2() -> Result<()> { let mut ctx = ExecutionContext::new(); register_aggregate_csv(&mut ctx).await?; let sql = "SELECT var_pop(c6) FROM aggregate_test_100"; let mut actual = execute(&mut ctx, sql).await; actual.sort(); let expected = vec![vec!["26156334342021890000000000000000000000"]]; assert_float_eq(&expected, &actual); Ok(()) } #[tokio::test] async fn csv_query_variance_3() -> Result<()> { let mut ctx = ExecutionContext::new(); register_aggregate_csv(&mut ctx).await?; let sql = "SELECT var_pop(c12) FROM aggregate_test_100"; let mut actual = execute(&mut ctx, sql).await; actual.sort(); let expected = vec![vec!["0.09234223721582163"]]; assert_float_eq(&expected, &actual); Ok(()) } #[tokio::test] async fn csv_query_variance_4() -> Result<()> { let mut ctx = ExecutionContext::new(); register_aggregate_csv(&mut ctx).await?; let sql = "SELECT var(c2) FROM aggregate_test_100"; let mut actual = execute(&mut ctx, sql).await; actual.sort(); let expected = vec![vec!["1.8863636363636365"]]; assert_float_eq(&expected, &actual); Ok(()) } #[tokio::test] async fn csv_query_variance_5() -> Result<()> { let mut ctx = ExecutionContext::new(); register_aggregate_csv(&mut ctx).await?; let sql = "SELECT 
var_samp(c2) FROM aggregate_test_100"; let mut actual = execute(&mut ctx, sql).await; actual.sort(); let expected = vec![vec!["1.8863636363636365"]]; assert_float_eq(&expected, &actual); Ok(()) } #[tokio::test] async fn csv_query_stddev_1() -> Result<()> { let mut ctx = ExecutionContext::new(); register_aggregate_csv(&mut ctx).await?; let sql = "SELECT stddev_pop(c2) FROM aggregate_test_100"; let mut actual = execute(&mut ctx, sql).await; actual.sort(); let expected = vec![vec!["1.3665650368716449"]]; assert_float_eq(&expected, &actual); Ok(()) } #[tokio::test] async fn csv_query_stddev_2() -> Result<()> { let mut ctx = ExecutionContext::new(); register_aggregate_csv(&mut ctx).await?; let sql = "SELECT stddev_pop(c6) FROM aggregate_test_100"; let mut actual = execute(&mut ctx, sql).await; actual.sort(); let expected = vec![vec!["5114326382039172000"]]; assert_float_eq(&expected, &actual); Ok(()) } #[tokio::test] async fn csv_query_stddev_3() -> Result<()> { let mut ctx = ExecutionContext::new(); register_aggregate_csv(&mut ctx).await?; let sql = "SELECT stddev_pop(c12) FROM aggregate_test_100"; let mut actual = execute(&mut ctx, sql).await; actual.sort(); let expected = vec![vec!["0.30387865541334363"]]; assert_float_eq(&expected, &actual); Ok(()) } #[tokio::test] async fn csv_query_stddev_4() -> Result<()> { let mut ctx = ExecutionContext::new(); register_aggregate_csv(&mut ctx).await?; let sql = "SELECT stddev(c12) FROM aggregate_test_100"; let mut actual = execute(&mut ctx, sql).await; actual.sort(); let expected = vec![vec!["0.3054095399405338"]]; assert_float_eq(&expected, &actual); Ok(()) } #[tokio::test] async fn csv_query_stddev_5() -> Result<()> { let mut ctx = ExecutionContext::new(); register_aggregate_csv(&mut ctx).await?; let sql = "SELECT stddev_samp(c12) FROM aggregate_test_100"; let mut actual = execute(&mut ctx, sql).await; actual.sort(); let expected = vec![vec!["0.3054095399405338"]]; assert_float_eq(&expected, &actual); Ok(()) } #[tokio::test] 
async fn csv_query_stddev_6() -> Result<()> { let mut ctx = ExecutionContext::new(); register_aggregate_csv(&mut ctx).await?; let sql = "select stddev(sq.column1) from (values (1.1), (2.0), (3.0)) as sq"; let mut actual = execute(&mut ctx, sql).await; actual.sort(); let expected = vec![vec!["0.9504384952922168"]]; assert_float_eq(&expected, &actual); Ok(()) } #[tokio::test] async fn csv_query_external_table_count() { let mut ctx = ExecutionContext::new(); register_aggregate_csv_by_sql(&mut ctx).await; let sql = "SELECT COUNT(c12) FROM aggregate_test_100"; let actual = execute_to_batches(&mut ctx, sql).await; let expected = vec![ "+-------------------------------+", "| COUNT(aggregate_test_100.c12) |", "+-------------------------------+", "| 100 |", "+-------------------------------+", ]; assert_batches_eq!(expected, &actual); } #[tokio::test] async fn csv_query_external_table_sum() { let mut ctx = ExecutionContext::new(); // cast smallint and int to bigint to avoid overflow during calculation register_aggregate_csv_by_sql(&mut ctx).await; let sql = "SELECT SUM(CAST(c7 AS BIGINT)), SUM(CAST(c8 AS BIGINT)) FROM aggregate_test_100"; let actual = execute_to_batches(&mut ctx, sql).await; let expected = vec![ "+-------------------------------------------+-------------------------------------------+", "| SUM(CAST(aggregate_test_100.c7 AS Int64)) | SUM(CAST(aggregate_test_100.c8 AS Int64)) |", "+-------------------------------------------+-------------------------------------------+", "| 13060 | 3017641 |", "+-------------------------------------------+-------------------------------------------+", ]; assert_batches_eq!(expected, &actual); } #[tokio::test] async fn csv_query_count() -> Result<()> { let mut ctx = ExecutionContext::new(); register_aggregate_csv(&mut ctx).await?; let sql = "SELECT count(c12) FROM aggregate_test_100"; let actual = execute_to_batches(&mut ctx, sql).await; let expected = vec![ "+-------------------------------+", "| COUNT(aggregate_test_100.c12) 
|", "+-------------------------------+", "| 100 |", "+-------------------------------+", ]; assert_batches_eq!(expected, &actual); Ok(()) } #[tokio::test] async fn csv_query_count_distinct() -> Result<()> { let mut ctx = ExecutionContext::new(); register_aggregate_csv(&mut ctx).await?; let sql = "SELECT count(distinct c2) FROM aggregate_test_100"; let actual = execute_to_batches(&mut ctx, sql).await; let expected = vec![ "+---------------------------------------+", "| COUNT(DISTINCT aggregate_test_100.c2) |", "+---------------------------------------+", "| 5 |", "+---------------------------------------+", ]; assert_batches_eq!(expected, &actual); Ok(()) } #[tokio::test] async fn csv_query_count_distinct_expr() -> Result<()> { let mut ctx = ExecutionContext::new(); register_aggregate_csv(&mut ctx).await?; let sql = "SELECT count(distinct c2 % 2) FROM aggregate_test_100"; let actual = execute_to_batches(&mut ctx, sql).await; let expected = vec![ "+--------------------------------------------------+", "| COUNT(DISTINCT aggregate_test_100.c2 % Int64(2)) |", "+--------------------------------------------------+", "| 2 |", "+--------------------------------------------------+", ]; assert_batches_eq!(expected, &actual); Ok(()) } #[tokio::test] async fn csv_query_count_star() { let mut ctx = ExecutionContext::new(); register_aggregate_csv_by_sql(&mut ctx).await; let sql = "SELECT COUNT(*) FROM aggregate_test_100"; let actual = execute_to_batches(&mut ctx, sql).await; let expected = vec![ "+-----------------+", "| COUNT(UInt8(1)) |", "+-----------------+", "| 100 |", "+-----------------+", ]; assert_batches_eq!(expected, &actual); } #[tokio::test] async fn csv_query_count_one() { let mut ctx = ExecutionContext::new(); register_aggregate_csv_by_sql(&mut ctx).await; let sql = "SELECT COUNT(1) FROM aggregate_test_100"; let actual = execute_to_batches(&mut ctx, sql).await; let expected = vec![ "+-----------------+", "| COUNT(UInt8(1)) |", "+-----------------+", "| 100 |", 
"+-----------------+", ]; assert_batches_eq!(expected, &actual); } #[tokio::test] async fn csv_query_approx_count() -> Result<()> { let mut ctx = ExecutionContext::new(); register_aggregate_csv(&mut ctx).await?; let sql = "SELECT approx_distinct(c9) count_c9, approx_distinct(cast(c9 as varchar)) count_c9_str FROM aggregate_test_100"; let actual = execute_to_batches(&mut ctx, sql).await; let expected = vec![ "+----------+--------------+", "| count_c9 | count_c9_str |", "+----------+--------------+", "| 100 | 99 |", "+----------+--------------+", ]; assert_batches_eq!(expected, &actual); Ok(()) } #[tokio::test] async fn query_count_without_from() -> Result<()> { let mut ctx = ExecutionContext::new(); let sql = "SELECT count(1 + 1)"; let actual = execute_to_batches(&mut ctx, sql).await; let expected = vec![ "+----------------------------+", "| COUNT(Int64(1) + Int64(1)) |", "+----------------------------+", "| 1 |", "+----------------------------+", ]; assert_batches_eq!(expected, &actual); Ok(()) } #[tokio::test] async fn csv_query_array_agg() -> Result<()> { let mut ctx = ExecutionContext::new(); register_aggregate_csv(&mut ctx).await?; let sql = "SELECT array_agg(c13) FROM (SELECT * FROM aggregate_test_100 ORDER BY c13 LIMIT 2) test"; let actual = execute_to_batches(&mut ctx, sql).await; let expected = vec![ "+------------------------------------------------------------------+", "| ARRAYAGG(test.c13) |", "+------------------------------------------------------------------+", "| [0VVIHzxWtNOFLtnhjHEKjXaJOSLJfm, 0keZ5G8BffGwgF2RwQD59TFzMStxCB] |", "+------------------------------------------------------------------+", ]; assert_batches_eq!(expected, &actual); Ok(()) } #[tokio::test] async fn csv_query_array_agg_empty() -> Result<()> { let mut ctx = ExecutionContext::new(); register_aggregate_csv(&mut ctx).await?; let sql = "SELECT array_agg(c13) FROM (SELECT * FROM aggregate_test_100 LIMIT 0) test"; let actual = execute_to_batches(&mut ctx, sql).await; let expected = 
vec![ "+--------------------+", "| ARRAYAGG(test.c13) |", "+--------------------+", "| [] |", "+--------------------+", ]; assert_batches_eq!(expected, &actual); Ok(()) } #[tokio::test] async fn csv_query_array_agg_one() -> Result<()> { let mut ctx = ExecutionContext::new(); register_aggregate_csv(&mut ctx).await?; let sql = "SELECT array_agg(c13) FROM (SELECT * FROM aggregate_test_100 ORDER BY c13 LIMIT 1) test"; let actual = execute_to_batches(&mut ctx, sql).await; let expected = vec![ "+----------------------------------+", "| ARRAYAGG(test.c13) |", "+----------------------------------+", "| [0VVIHzxWtNOFLtnhjHEKjXaJOSLJfm] |", "+----------------------------------+", ]; assert_batches_eq!(expected, &actual); Ok(()) }
35.804124
127
0.548661
f91de33fb201143a8ea2bc1993a4f33428bfd6b8
492
use std::path::PathBuf; use strum_macros::Display; #[derive(Display)] pub enum RepositoryState { UpToDate, Fetched, Updated, NoFastForward, LocalChanges, } pub struct RepositoryInfo { pub path: PathBuf, pub state: RepositoryState, pub stashed: usize, } impl RepositoryInfo { pub fn new(path: PathBuf) -> RepositoryInfo { RepositoryInfo { path, state: RepositoryState::UpToDate, stashed: 0, } } }
16.965517
49
0.611789
26bfd1ad4dc1f1605863e36613dd672d21df0d4a
973
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // xfail-fast - check-fast doesn't understand aux-build // aux-build:cci_nested_lib.rs extern mod cci_nested_lib; use cci_nested_lib::*; pub fn main() { let lst = new_int_alist(); alist_add(lst, 22, ~"hi"); alist_add(lst, 44, ~"ho"); assert alist_get(lst, 22) == ~"hi"; assert alist_get(lst, 44) == ~"ho"; let lst = new_int_alist_2(); alist_add(lst, 22, ~"hi"); alist_add(lst, 44, ~"ho"); assert alist_get(lst, 22) == ~"hi"; assert alist_get(lst, 44) == ~"ho"; }
32.433333
68
0.673176
2619b1cef6ee73d8fc3c03dbd1ffb129bf62ca8d
10,692
use super::GuiElement; use crate::{error::GuiError, Font, GameState}; use image::Pixel; /// A struct that is used to create a [GuiElement]. It is constructed by calling `GameState::add_new_element()` /// /// This builder can either load a texture by calling [with_texture], or you can create a custom image by calling [with_canvas]. /// /// [with_texture]: #method.with_texture /// [with_canvas]: #method.with_canvas pub struct GuiElementBuilder<'a> { game_state: &'a mut GameState, dimensions: (i32, i32, u32, u32), } impl<'a> GuiElementBuilder<'a> { pub(crate) fn new(game_state: &'a mut GameState, dimensions: (i32, i32, u32, u32)) -> Self { Self { game_state, dimensions, } } /// Create a gui element with a texture pub fn with_texture<'b>(self, texture_path: &'b str) -> GuiElementTextureBuilder<'a, 'b> { GuiElementTextureBuilder { game_state: self.game_state, dimensions: self.dimensions, texture_path, } } /// Create a gui element with a custom canvas. The returned [GuiElementCanvasBuilder] can be further changed to include background color, text and borders. /// /// The element will be completely transparent by default. Make sure to update e.g. the background color. pub fn canvas(self) -> GuiElementCanvasBuilder<'a> { GuiElementCanvasBuilder { game_state: self.game_state, dimensions: self.dimensions, color: crate::color::TRANSPARENT, text: None, border: None, } } } /// A struct that is used to create a [GuiElement] with a texture. This is created by calling `GameState::create_gui_element().texture("..")`. Currently nothing can be manipulated in this struct. pub struct GuiElementTextureBuilder<'a, 'b> { game_state: &'a mut GameState, dimensions: (i32, i32, u32, u32), texture_path: &'b str, } impl<'a, 'b> GuiElementTextureBuilder<'a, 'b> { /// Finish building the element and return it. /// The returned [GuiElement] has to be stored somewhere, as it will be removed from the engine when dropped. 
/// Starting next frame, the returned GuiElement will be rendered on the screen. pub fn build(self) -> Result<GuiElement, GuiError> { let queue = self.game_state.queue.clone(); let image = image::open(self.texture_path) .map_err(|e| GuiError::CouldNotLoadTexture { path: self.texture_path.to_owned(), inner: e, })? .to_rgba(); let (id, element_ref, element) = GuiElement::new( queue, self.dimensions, (image.width(), image.height(), image.into_raw()), self.game_state.internal_update_sender.clone(), None, )?; self.game_state.gui_elements.insert(id, element_ref); Ok(element) } } /// A struct that is used to render a custom texture for a [GuiElement]. This can be further customized by e.g. `.with_text` and `with_border`. /// Finalize this GuiElement by calling `.build()`. pub struct GuiElementCanvasBuilder<'a> { game_state: &'a mut GameState, dimensions: (i32, i32, u32, u32), color: [u8; 4], text: Option<TextRequest>, border: Option<(u16, [u8; 4])>, } #[derive(Clone)] pub(crate) struct TextRequest { pub font: Font, pub font_size: u16, pub text: String, pub color: [u8; 4], } impl<'a> GuiElementCanvasBuilder<'a> { /// Adds a border to the [GuiElement]. /// This will be subtracted from the size of the element, /// e.g. if you have an element of 100 pixels wide with a border of 10 pixels the resulting outer width will still be 100 pixels, /// while the inner width will be `100 - (left_border + right_border) = 100 - (10 + 10) = 80` pixels. pub fn with_border(mut self, border_width: u16, border_color: [u8; 4]) -> Self { self.border = Some((border_width, border_color)); self } /// Update the dimensions. This will overwrite the value passed to `new_gui_element(dimensions)`. This is mostly useful when calling `GuiElement::update_canvas`. pub fn with_dimensions(mut self, dimensions: (i32, i32, u32, u32)) -> Self { self.dimensions = dimensions; self } /// Update the background color. 
pub fn with_background_color(mut self, color: [u8; 4]) -> Self { self.color = color; self } /// Add a text to the GUI element. This text will be rendered in the center of the element, and does not respect newlines. /// /// An instance of [Font](rusttype::Font) can be obtained by calling `GameState::load_font`. pub fn with_text( mut self, font: Font, font_size: u16, text: impl std::fmt::Display, color: [u8; 4], ) -> Self { self.text = Some(TextRequest { font, font_size, text: text.to_string(), color, }); self } /// Update the text of an element. This has to be called *after* `with_text` is called, or this method will panic. This is mostly useful when calling `GuiElement::update_canvas`. pub fn with_text_content(mut self, text: impl std::fmt::Display) -> Self { self.text.as_mut().unwrap().text = text.to_string(); self } /// Update the text color of an element. This has to be called *after* `with_text` is called, or this method will panic. This is mostly useful when calling `GuiElement::update_canvas`. pub fn with_text_color(mut self, color: [u8; 4]) -> Self { self.text.as_mut().unwrap().color = color; self } /// Finish building the element and return it. /// The returned [GuiElement] has to be stored somewhere, as it will be removed from the engine when dropped. /// Starting next frame, the returned GuiElement will be rendered on the screen. pub fn build(self) -> Result<GuiElement, GuiError> { let queue = self.game_state.queue.clone(); let width = self.dimensions.2; let height = self.dimensions.3; let mut image = image::RgbaImage::from_raw( width, height, vec![0; width as usize * height as usize * 4], ) // only returns `None` if the given buffer isn't big enough for the requested dimensions. // Rgba is 4 bytes, and the dimensions are width * height, so the buffer should always be // big enough. 
.unwrap(); for x in 0..width { for y in 0..height { let ps = if let Some(border_color) = is_border(x, y, width, height, self.border) { border_color } else { self.color }; image.put_pixel(x, y, image::Rgba(ps)); } } if let Some(request) = &self.text { let scale = rusttype::Scale::uniform(request.font_size as f32); let v_metrics = request.font.v_metrics(scale); let glyphs: Vec<_> = request .font .layout( request.text.trim(), scale, rusttype::point(0.0, v_metrics.ascent), ) .collect(); if !glyphs.is_empty() { let total_bounding_box = calc_text_bounding_box(glyphs.iter()); let text_width = total_bounding_box.max.x - total_bounding_box.min.x; let text_height = total_bounding_box.max.y - total_bounding_box.min.y; let position = ( (width as i32 - text_width) / 2, (height as i32 - text_height) / 2, ); let color = request.color; for glyph in glyphs { if let Some(bounding_box) = glyph.pixel_bounding_box() { glyph.draw(|x, y, v| { let x = position.0 + x as i32 + bounding_box.min.x; let y = position.1 + y as i32 + bounding_box.min.y; if x < 0 || y < 0 || x >= image.width() as i32 || y >= image.height() as i32 { return; } image.get_pixel_mut(x as u32, y as u32).blend(&image::Rgba([ color[0], color[1], color[2], (v * 255.) 
as u8, ])); }); } } } } let (id, element_ref, element) = GuiElement::new( queue, self.dimensions, (width, height, image.into_raw()), self.game_state.internal_update_sender.clone(), Some(super::element::CanvasConfig { background: self.color, border: self.border, text: self.text, }), )?; self.game_state.gui_elements.insert(id, element_ref); Ok(element) } } fn calc_text_bounding_box<'a>( glyphs: impl Iterator<Item = &'a rusttype::PositionedGlyph<'a>>, ) -> rusttype::Rect<i32> { let mut total_bounding_box = rusttype::Rect { min: rusttype::Point { x: i32::max_value(), y: i32::max_value(), }, max: rusttype::Point { x: i32::min_value(), y: i32::min_value(), }, }; for glyph in glyphs { if let Some(bounding_box) = glyph.pixel_bounding_box() { total_bounding_box.min.x = total_bounding_box.min.x.min(bounding_box.min.x); total_bounding_box.min.y = total_bounding_box.min.y.min(bounding_box.min.y); total_bounding_box.max.x = total_bounding_box.max.x.max(bounding_box.max.x); total_bounding_box.max.y = total_bounding_box.min.y.max(bounding_box.max.y); } } total_bounding_box } fn is_border( x: u32, y: u32, width: u32, height: u32, maybe_border: Option<(u16, [u8; 4])>, ) -> Option<[u8; 4]> { if let Some((border_width, border_color)) = maybe_border { let border_width = border_width as u32; if x < border_width || x + border_width >= width || y < border_width || y + border_width >= height { return Some(border_color); } } None }
36.99654
195
0.560887
09b6dab6f923e753e251d7068961726fdcd5675d
11,708
use console::consoleintr; use x86::io::inb; // PC keyboard interface constants const KBSTATP: u16 = 0x64; // kbd controller status port(I) const KBS_DIB: u8 = 0x01; // kbd data in buffer const KBDATAP: u16 = 0x60; // kbd data port(I) const NO: u8 = 0; const SHIFT: u8 = (1<<0); const CTL: u8 = (1<<1); const ALT: u8 = (1<<2); const CAPSLOCK: u8 = (1<<3); const NUMLOCK: u8 = (1<<4); const SCROLLLOCK: u8 = (1<<5); const E0ESC: usize = (1<<6); // Special keycodes const KEY_HOME: u8 = 0xE0; const KEY_END: u8 = 0xE1; const KEY_UP: u8 = 0xE2; const KEY_DN: u8 = 0xE3; const KEY_LF: u8 = 0xE4; const KEY_RT: u8 = 0xE5; const KEY_PGUP: u8 = 0xE6; const KEY_PGDN: u8 = 0xE7; const KEY_INS: u8 = 0xE8; const KEY_DEL: u8 = 0xE9; #[deny(const_err)] macro_rules! C { ($c:expr) => { $c as u8 - '@' as u8 } } static NORMALMAP: [u8; 256] = [ NO, 0x1B, b'1', b'2', b'3', b'4', b'5', b'6', // 0x00 b'7', b'8', b'9', b'0', b'-', b'=', 0x08, b'\t', b'q', b'w', b'e', b'r', b't', b'y', b'u', b'i', // 0x10 b'o', b'p', b'[', b']', b'\n', NO, b'a', b's', b'd', b'f', b'g', b'h', b'j', b'k', b'l', b';', // 0x20 b'\'', b'`', NO, b'\\', b'z', b'x', b'c', b'v', b'b', b'n', b'm', b',', b'.', b'/', NO, b'*', // 0x30 NO, ' ' as u8, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, b'7', // 0x40 b'8', b'9', b'-', b'4', b'5', b'6', b'+', b'1', b'2', b'3', b'0', b'.', NO, NO, NO, NO, // 0x50 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x60 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x70 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x80 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, KEY_HOME, // 0x90 NO, NO, NO, NO, b'\n', NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0xA0 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, b'/', NO, NO, // 0xB0 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0xC0 KEY_UP, KEY_PGUP, NO, KEY_LF, NO, NO, NO, KEY_END, KEY_DN, KEY_PGDN, KEY_INS, KEY_DEL, NO, KEY_RT, NO, NO, // 
0xD0 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0xE0 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0xF0 NO, NO, NO, NO, NO, NO, NO, NO, ]; static SHIFTMAP: [u8; 256] = [ NO, 033, b'!', b'@', b'#', b'$', b'%', b'^', // 0x00 b'&', b'*', b'(', b')', b'_', b'+', 0x08, b'\t', b'Q', b'W', b'E', b'R', b'T', b'Y', b'U', b'I', // 0x10 b'O', b'P', b'{', b'}', b'\n', NO, b'A', b'S', b'D', b'F', b'G', b'H', b'J', b'K', b'L', b':', // 0x20 b'"', b'~', NO, b'|', b'Z', b'X', b'C', b'V', b'B', b'N', b'M', b'<', b'>', b'?', NO, b'*', // 0x30 NO, ' ' as u8, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, b'7', // 0x40 b'8', b'9', b'-', b'4', b'5', b'6', b'+', b'1', b'2', b'3', b'0', b'.', NO, NO, NO, NO, // 0x50 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x60 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x70 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x80 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, KEY_HOME, // 0x90 NO, NO, NO, NO, b'\n', NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0xA0 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, b'/', NO, NO, // 0xB0 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0xC0 KEY_UP, KEY_PGUP, NO, KEY_LF, NO, NO, NO, KEY_END, KEY_DN, KEY_PGDN, KEY_INS, KEY_DEL, NO, KEY_RT, NO, NO, // 0xD0 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0xE0 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0xF0 NO, NO, NO, NO, NO, NO, NO, NO, ]; static CTLMAP: [u8; 256] = [ NO, NO, NO, NO, NO, NO, NO, NO, // 0x00 NO, NO, NO, NO, NO, NO, NO, NO, C!('Q'), C!('W'), C!('E'), C!('R'), C!('T'), C!('Y'), C!('U'), C!('I'), // 0x10 C!('O'), C!('P'), NO, NO, b'\r', NO, C!('A'), C!('S'), C!('D'), C!('F'), C!('G'), C!('H'), C!('J'), C!('K'), C!('L'), NO, // 0x20 NO, NO, NO, C!('\\'), C!('Z'), C!('X'), C!('C'), C!('V'), C!('B'), C!('N'), C!('M'), NO, NO, 239, NO, NO, // 0x30 NO, NO, NO, 
NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x40 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x50 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x60 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x70 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x80 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, KEY_HOME, // 0x90 NO, NO, NO, NO, b'\r', NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0xA0 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, 239, NO, NO, // 0xB0 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0xC0 KEY_UP, KEY_PGUP, NO, KEY_LF, NO, NO, NO, KEY_END, KEY_DN, KEY_PGDN, KEY_INS, KEY_DEL, NO, KEY_RT, NO, NO, // 0xD0 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0xE0 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0xF0 NO, NO, NO, NO, NO, NO, NO, NO, ]; static SHIFTCODE: [u8; 256] = [ NO, NO, NO, NO, NO, NO, NO, NO, // 0x00 NO, NO, NO, NO, NO, CTL, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x10 NO, NO, NO, NO, NO, CTL, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x20 NO, NO, SHIFT, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, SHIFT, NO, // 0x30 ALT, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x40 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x50 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x60 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x70 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x80 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x90 NO, NO, NO, NO, NO, CTL, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0xA0 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0xB0 ALT, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0xC0 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0xD0 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, 
NO, NO, NO, NO, NO, NO, // 0xE0 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0xF0 NO, NO, NO, NO, NO, NO, NO, NO, ]; static TOGGLECODE: [u8; 256] = [ NO, NO, NO, NO, NO, NO, NO, NO, // 0x00 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x10 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x20 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x30 NO, NO, CAPSLOCK, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NUMLOCK, SCROLLLOCK, NO, // 0x40 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x50 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x60 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x70 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x80 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0x90 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0xA0 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0xB0 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0xC0 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0xD0 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0xE0 NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, NO, // 0xF0 NO, NO, NO, NO, NO, NO, NO, NO, ]; #[no_mangle] pub extern "C" fn kbdintr() { unsafe { consoleintr(kbdgetc); } } pub extern "C" fn kbdgetc() -> i32 { static mut SHIFT_VAR: usize = 0; static CHARCODE: [&'static [u8]; 4] = [&NORMALMAP, &SHIFTMAP, &CTLMAP, &CTLMAP]; let st = unsafe { inb(KBSTATP as u16) }; if st & KBS_DIB == 0 { return -1; } let mut data = unsafe { inb(KBDATAP as u16) }; if data == 0xE0 { unsafe { SHIFT_VAR |= E0ESC }; return 0; } else if data & 0x80 != 0 { // Key released data = if unsafe { SHIFT_VAR } & E0ESC != 0 { data } else { data & 0x7F }; unsafe { SHIFT_VAR &= !(SHIFTCODE[data as usize] as usize | E0ESC) }; return 0; } else if unsafe { SHIFT_VAR } & E0ESC != 
0 { // Last character was an E0 escape; or with 0x80 data |= 0x80; unsafe { SHIFT_VAR &= !E0ESC }; } unsafe { SHIFT_VAR |= SHIFTCODE[data as usize] as usize; SHIFT_VAR ^= TOGGLECODE[data as usize] as usize; } let mut c = CHARCODE[unsafe { SHIFT_VAR } & (CTL | SHIFT) as usize][data as usize]; if unsafe { SHIFT_VAR } & CAPSLOCK as usize != 0 { if b'a' <= c && c <= b'z' { c += 224 // 'A' - 'a' } else if b'A' <= c && c <= b'Z' { c += b'a' - b'A'; } } c as i32 }
44.687023
90
0.36932
098a76da5acd1ac1d93806851c2939567fa19d37
303
/// A trait for types describing a position pub trait Position<T> { /// Get the x coordinate fn x(&self) -> T; /// Get the y coordinate fn y(&self) -> T; } impl<T: Copy> Position<T> for (T, T) { fn x(&self) -> T { self.0 } fn y(&self) -> T { self.1 } }
16.833333
43
0.491749
1eb3ddf58c837cf44b8012390f786f4233cda363
988
use std::f64::consts::PI; use stdweb::web::CanvasRenderingContext2d; pub fn draw(context: &CanvasRenderingContext2d) { context.set_stroke_style_color("#000"); let rotate = |x: f64, y: f64, a: f64| -> (f64, f64) { (x * a.cos() + y * a.sin(), y * a.cos() - x * a.sin()) }; context.begin_path(); for x in (-170..170).step_by(20) { for y in (-170..170).step_by(20) { let (x, y) = (x as f64, y as f64); let a = x.abs() / 170.0 * PI / 2.0; let mut al = 0.0; while al <= 2.0 * PI { let (x1, y1) = (10.0 * al.cos(), 10.0 * al.sin()); let (x2, y2) = rotate(x1, y1, a); let (x, y) = (x + 320.0 + x2, y + 200.0 - y2); if al == 0.0 { context.move_to(x, y); } else { context.line_to(x, y); } al += PI / 2.0; } } } context.stroke(); }
30.875
66
0.410931
e4123c9a20b196f83f030c0fd981423a7c6894de
61,244
// This is an attempt at an implementation following the ideal // // ``` // struct BTreeMap<K, V> { // height: usize, // root: Option<Box<Node<K, V, height>>> // } // // struct Node<K, V, height: usize> { // keys: [K; 2 * B - 1], // vals: [V; 2 * B - 1], // edges: if height > 0 { // [Box<Node<K, V, height - 1>>; 2 * B] // } else { () }, // parent: *const Node<K, V, height + 1>, // parent_idx: u16, // len: u16, // } // ``` // // Since Rust doesn't actually have dependent types and polymorphic recursion, // we make do with lots of unsafety. // A major goal of this module is to avoid complexity by treating the tree as a generic (if // weirdly shaped) container and avoiding dealing with most of the B-Tree invariants. As such, // this module doesn't care whether the entries are sorted, which nodes can be underfull, or // even what underfull means. However, we do rely on a few invariants: // // - Trees must have uniform depth/height. This means that every path down to a leaf from a // given node has exactly the same length. // - A node of length `n` has `n` keys, `n` values, and (in an internal node) `n + 1` edges. // This implies that even an empty internal node has at least one edge. use core::marker::PhantomData; use core::mem::{self, MaybeUninit}; use core::ptr::{self, NonNull, Unique}; use core::slice; use crate::alloc::{AllocRef, Global, Layout}; use crate::boxed::Box; const B: usize = 6; pub const MIN_LEN: usize = B - 1; pub const CAPACITY: usize = 2 * B - 1; /// The underlying representation of leaf nodes. Note that it is often unsafe to actually store /// these, since only the first `len` keys and values are assumed to be initialized. As such, /// these should always be put behind pointers, and specifically behind `BoxedNode` in the owned /// case. /// /// We have a separate type for the header and rely on it matching the prefix of `LeafNode`, in /// order to statically allocate a single dummy node to avoid allocations. 
This struct is /// `repr(C)` to prevent them from being reordered. `LeafNode` does not just contain a /// `NodeHeader` because we do not want unnecessary padding between `len` and the keys. /// Crucially, `NodeHeader` can be safely transmuted to different K and V. (This is exploited /// by `as_header`.) #[repr(C)] struct NodeHeader<K, V> { /// We use `*const` as opposed to `*mut` so as to be covariant in `K` and `V`. /// This either points to an actual node or is null. parent: *const InternalNode<K, V>, /// This node's index into the parent node's `edges` array. /// `*node.parent.edges[node.parent_idx]` should be the same thing as `node`. /// This is only guaranteed to be initialized when `parent` is non-null. parent_idx: MaybeUninit<u16>, /// The number of keys and values this node stores. /// /// This next to `parent_idx` to encourage the compiler to join `len` and /// `parent_idx` into the same 32-bit word, reducing space overhead. len: u16, } #[repr(C)] struct LeafNode<K, V> { /// We use `*const` as opposed to `*mut` so as to be covariant in `K` and `V`. /// This either points to an actual node or is null. parent: *const InternalNode<K, V>, /// This node's index into the parent node's `edges` array. /// `*node.parent.edges[node.parent_idx]` should be the same thing as `node`. /// This is only guaranteed to be initialized when `parent` is non-null. parent_idx: MaybeUninit<u16>, /// The number of keys and values this node stores. /// /// This next to `parent_idx` to encourage the compiler to join `len` and /// `parent_idx` into the same 32-bit word, reducing space overhead. len: u16, /// The arrays storing the actual data of the node. Only the first `len` elements of each /// array are initialized and valid. keys: [MaybeUninit<K>; CAPACITY], vals: [MaybeUninit<V>; CAPACITY], } impl<K, V> LeafNode<K, V> { /// Creates a new `LeafNode`. 
Unsafe because all nodes should really be hidden behind /// `BoxedNode`, preventing accidental dropping of uninitialized keys and values. unsafe fn new() -> Self { LeafNode { // As a general policy, we leave fields uninitialized if they can be, as this should // be both slightly faster and easier to track in Valgrind. keys: [MaybeUninit::UNINIT; CAPACITY], vals: [MaybeUninit::UNINIT; CAPACITY], parent: ptr::null(), parent_idx: MaybeUninit::uninit(), len: 0, } } } impl<K, V> NodeHeader<K, V> { fn is_shared_root(&self) -> bool { ptr::eq(self, &EMPTY_ROOT_NODE as *const _ as *const _) } } // We need to implement Sync here in order to make a static instance. unsafe impl Sync for NodeHeader<(), ()> {} // An empty node used as a placeholder for the root node, to avoid allocations. // We use just a header in order to save space, since no operation on an empty tree will // ever take a pointer past the first key. static EMPTY_ROOT_NODE: NodeHeader<(), ()> = NodeHeader { parent: ptr::null(), parent_idx: MaybeUninit::uninit(), len: 0 }; /// The underlying representation of internal nodes. As with `LeafNode`s, these should be hidden /// behind `BoxedNode`s to prevent dropping uninitialized keys and values. Any pointer to an /// `InternalNode` can be directly casted to a pointer to the underlying `LeafNode` portion of the /// node, allowing code to act on leaf and internal nodes generically without having to even check /// which of the two a pointer is pointing at. This property is enabled by the use of `repr(C)`. #[repr(C)] struct InternalNode<K, V> { data: LeafNode<K, V>, /// The pointers to the children of this node. `len + 1` of these are considered /// initialized and valid. edges: [MaybeUninit<BoxedNode<K, V>>; 2 * B], } impl<K, V> InternalNode<K, V> { /// Creates a new `InternalNode`. /// /// This is unsafe for two reasons. First, it returns an `InternalNode` by value, risking /// dropping of uninitialized fields. 
Second, an invariant of internal nodes is that `len + 1` /// edges are initialized and valid, meaning that even when the node is empty (having a /// `len` of 0), there must be one initialized and valid edge. This function does not set up /// such an edge. unsafe fn new() -> Self { InternalNode { data: LeafNode::new(), edges: [MaybeUninit::UNINIT; 2 * B] } } } /// An owned pointer to a node. This basically is either `Box<LeafNode<K, V>>` or /// `Box<InternalNode<K, V>>`. However, it contains no information as to which of the two types /// of nodes is actually behind the box, and, partially due to this lack of information, has no /// destructor. struct BoxedNode<K, V> { ptr: Unique<LeafNode<K, V>>, } impl<K, V> BoxedNode<K, V> { fn from_leaf(node: Box<LeafNode<K, V>>) -> Self { BoxedNode { ptr: Box::into_unique(node) } } fn from_internal(node: Box<InternalNode<K, V>>) -> Self { unsafe { BoxedNode { ptr: Unique::new_unchecked(Box::into_raw(node) as *mut LeafNode<K, V>) } } } unsafe fn from_ptr(ptr: NonNull<LeafNode<K, V>>) -> Self { BoxedNode { ptr: Unique::from(ptr) } } fn as_ptr(&self) -> NonNull<LeafNode<K, V>> { NonNull::from(self.ptr) } } /// An owned tree. Note that despite being owned, this does not have a destructor, /// and must be cleaned up manually. 
pub struct Root<K, V> { node: BoxedNode<K, V>, height: usize, } unsafe impl<K: Sync, V: Sync> Sync for Root<K, V> {} unsafe impl<K: Send, V: Send> Send for Root<K, V> {} impl<K, V> Root<K, V> { pub fn is_shared_root(&self) -> bool { self.as_ref().is_shared_root() } pub fn shared_empty_root() -> Self { Root { node: unsafe { BoxedNode::from_ptr(NonNull::new_unchecked( &EMPTY_ROOT_NODE as *const _ as *const LeafNode<K, V> as *mut _, )) }, height: 0, } } pub fn new_leaf() -> Self { Root { node: BoxedNode::from_leaf(Box::new(unsafe { LeafNode::new() })), height: 0 } } pub fn as_ref(&self) -> NodeRef<marker::Immut<'_>, K, V, marker::LeafOrInternal> { NodeRef { height: self.height, node: self.node.as_ptr(), root: self as *const _ as *mut _, _marker: PhantomData, } } pub fn as_mut(&mut self) -> NodeRef<marker::Mut<'_>, K, V, marker::LeafOrInternal> { NodeRef { height: self.height, node: self.node.as_ptr(), root: self as *mut _, _marker: PhantomData, } } pub fn into_ref(self) -> NodeRef<marker::Owned, K, V, marker::LeafOrInternal> { NodeRef { height: self.height, node: self.node.as_ptr(), root: ptr::null_mut(), // FIXME: Is there anything better to do here? _marker: PhantomData, } } /// Adds a new internal node with a single edge, pointing to the previous root, and make that /// new node the root. This increases the height by 1 and is the opposite of `pop_level`. pub fn push_level(&mut self) -> NodeRef<marker::Mut<'_>, K, V, marker::Internal> { debug_assert!(!self.is_shared_root()); let mut new_node = Box::new(unsafe { InternalNode::new() }); new_node.edges[0].write(unsafe { BoxedNode::from_ptr(self.node.as_ptr()) }); self.node = BoxedNode::from_internal(new_node); self.height += 1; let mut ret = NodeRef { height: self.height, node: self.node.as_ptr(), root: self as *mut _, _marker: PhantomData, }; unsafe { ret.reborrow_mut().first_edge().correct_parent_link(); } ret } /// Removes the root node, using its first child as the new root. 
This cannot be called when /// the tree consists only of a leaf node. As it is intended only to be called when the root /// has only one edge, no cleanup is done on any of the other children of the root. /// This decreases the height by 1 and is the opposite of `push_level`. pub fn pop_level(&mut self) { assert!(self.height > 0); let top = self.node.ptr; self.node = unsafe { BoxedNode::from_ptr( self.as_mut().cast_unchecked::<marker::Internal>().first_edge().descend().node, ) }; self.height -= 1; unsafe { (*self.as_mut().as_leaf_mut()).parent = ptr::null(); } unsafe { Global.dealloc(NonNull::from(top).cast(), Layout::new::<InternalNode<K, V>>()); } } } // N.B. `NodeRef` is always covariant in `K` and `V`, even when the `BorrowType` // is `Mut`. This is technically wrong, but cannot result in any unsafety due to // internal use of `NodeRef` because we stay completely generic over `K` and `V`. // However, whenever a public type wraps `NodeRef`, make sure that it has the // correct variance. /// A reference to a node. /// /// This type has a number of parameters that controls how it acts: /// - `BorrowType`: This can be `Immut<'a>` or `Mut<'a>` for some `'a` or `Owned`. /// When this is `Immut<'a>`, the `NodeRef` acts roughly like `&'a Node`, /// when this is `Mut<'a>`, the `NodeRef` acts roughly like `&'a mut Node`, /// and when this is `Owned`, the `NodeRef` acts roughly like `Box<Node>`. /// - `K` and `V`: These control what types of things are stored in the nodes. /// - `Type`: This can be `Leaf`, `Internal`, or `LeafOrInternal`. When this is /// `Leaf`, the `NodeRef` points to a leaf node, when this is `Internal` the /// `NodeRef` points to an internal node, and when this is `LeafOrInternal` the /// `NodeRef` could be pointing to either type of node. /// Note that in case of a leaf node, this might still be the shared root! /// Only turn this into a `LeafNode` reference if you know it is not the shared root! 
/// Shared references must be dereferencable *for the entire size of their pointee*, /// so '&LeafNode` or `&InternalNode` pointing to the shared root is undefined behavior. /// Turning this into a `NodeHeader` reference is always safe. pub struct NodeRef<BorrowType, K, V, Type> { height: usize, node: NonNull<LeafNode<K, V>>, // `root` is null unless the borrow type is `Mut` root: *const Root<K, V>, _marker: PhantomData<(BorrowType, Type)>, } impl<'a, K: 'a, V: 'a, Type> Copy for NodeRef<marker::Immut<'a>, K, V, Type> {} impl<'a, K: 'a, V: 'a, Type> Clone for NodeRef<marker::Immut<'a>, K, V, Type> { fn clone(&self) -> Self { *self } } unsafe impl<BorrowType, K: Sync, V: Sync, Type> Sync for NodeRef<BorrowType, K, V, Type> {} unsafe impl<'a, K: Sync + 'a, V: Sync + 'a, Type> Send for NodeRef<marker::Immut<'a>, K, V, Type> {} unsafe impl<'a, K: Send + 'a, V: Send + 'a, Type> Send for NodeRef<marker::Mut<'a>, K, V, Type> {} unsafe impl<K: Send, V: Send, Type> Send for NodeRef<marker::Owned, K, V, Type> {} impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::Internal> { fn as_internal(&self) -> &InternalNode<K, V> { unsafe { &*(self.node.as_ptr() as *mut InternalNode<K, V>) } } } impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Internal> { fn as_internal_mut(&mut self) -> &mut InternalNode<K, V> { unsafe { &mut *(self.node.as_ptr() as *mut InternalNode<K, V>) } } } impl<BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> { /// Finds the length of the node. This is the number of keys or values. In an /// internal node, the number of edges is `len() + 1`. /// For any node, the number of possible edge handles is also `len() + 1`. /// Note that, despite being safe, calling this function can have the side effect /// of invalidating mutable references that unsafe code has created. pub fn len(&self) -> usize { self.as_header().len as usize } /// Returns the height of this node in the whole tree. Zero height denotes the /// leaf level. 
pub fn height(&self) -> usize { self.height } /// Removes any static information about whether this node is a `Leaf` or an /// `Internal` node. pub fn forget_type(self) -> NodeRef<BorrowType, K, V, marker::LeafOrInternal> { NodeRef { height: self.height, node: self.node, root: self.root, _marker: PhantomData } } /// Temporarily takes out another, immutable reference to the same node. fn reborrow(&self) -> NodeRef<marker::Immut<'_>, K, V, Type> { NodeRef { height: self.height, node: self.node, root: self.root, _marker: PhantomData } } /// Exposes the leaf "portion" of any leaf or internal node that is not the shared root. /// If the node is a leaf, this function simply opens up its data. /// If the node is an internal node, so not a leaf, it does have all the data a leaf has /// (header, keys and values), and this function exposes that. /// Unsafe because the node must not be the shared root. For more information, /// see the `NodeRef` comments. unsafe fn as_leaf(&self) -> &LeafNode<K, V> { debug_assert!(!self.is_shared_root()); self.node.as_ref() } fn as_header(&self) -> &NodeHeader<K, V> { unsafe { &*(self.node.as_ptr() as *const NodeHeader<K, V>) } } /// Returns whether the node is the shared, empty root. pub fn is_shared_root(&self) -> bool { self.as_header().is_shared_root() } /// Borrows a view into the keys stored in the node. /// Unsafe because the caller must ensure that the node is not the shared root. pub unsafe fn keys(&self) -> &[K] { self.reborrow().into_key_slice() } /// Borrows a view into the values stored in the node. /// Unsafe because the caller must ensure that the node is not the shared root. unsafe fn vals(&self) -> &[V] { self.reborrow().into_val_slice() } /// Finds the parent of the current node. Returns `Ok(handle)` if the current /// node actually has a parent, where `handle` points to the edge of the parent /// that points to the current node. Returns `Err(self)` if the current node has /// no parent, giving back the original `NodeRef`. 
/// /// `edge.descend().ascend().unwrap()` and `node.ascend().unwrap().descend()` should /// both, upon success, do nothing. pub fn ascend( self, ) -> Result<Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge>, Self> { let parent_as_leaf = self.as_header().parent as *const LeafNode<K, V>; if let Some(non_zero) = NonNull::new(parent_as_leaf as *mut _) { Ok(Handle { node: NodeRef { height: self.height + 1, node: non_zero, root: self.root, _marker: PhantomData, }, idx: unsafe { usize::from(*self.as_header().parent_idx.as_ptr()) }, _marker: PhantomData, }) } else { Err(self) } } pub fn first_edge(self) -> Handle<Self, marker::Edge> { unsafe { Handle::new_edge(self, 0) } } pub fn last_edge(self) -> Handle<Self, marker::Edge> { let len = self.len(); unsafe { Handle::new_edge(self, len) } } /// Note that `self` must be nonempty. pub fn first_kv(self) -> Handle<Self, marker::KV> { let len = self.len(); assert!(len > 0); unsafe { Handle::new_kv(self, 0) } } /// Note that `self` must be nonempty. pub fn last_kv(self) -> Handle<Self, marker::KV> { let len = self.len(); assert!(len > 0); unsafe { Handle::new_kv(self, len - 1) } } } impl<K, V> NodeRef<marker::Owned, K, V, marker::Leaf> { /// Similar to `ascend`, gets a reference to a node's parent node, but also /// deallocate the current node in the process. This is unsafe because the /// current node will still be accessible despite being deallocated. pub unsafe fn deallocate_and_ascend( self, ) -> Option<Handle<NodeRef<marker::Owned, K, V, marker::Internal>, marker::Edge>> { assert!(!self.is_shared_root()); let node = self.node; let ret = self.ascend().ok(); Global.dealloc(node.cast(), Layout::new::<LeafNode<K, V>>()); ret } } impl<K, V> NodeRef<marker::Owned, K, V, marker::Internal> { /// Similar to `ascend`, gets a reference to a node's parent node, but also /// deallocate the current node in the process. This is unsafe because the /// current node will still be accessible despite being deallocated. 
pub unsafe fn deallocate_and_ascend( self, ) -> Option<Handle<NodeRef<marker::Owned, K, V, marker::Internal>, marker::Edge>> { let node = self.node; let ret = self.ascend().ok(); Global.dealloc(node.cast(), Layout::new::<InternalNode<K, V>>()); ret } } impl<'a, K, V, Type> NodeRef<marker::Mut<'a>, K, V, Type> { /// Unsafely asserts to the compiler some static information about whether this /// node is a `Leaf`. unsafe fn cast_unchecked<NewType>(&mut self) -> NodeRef<marker::Mut<'_>, K, V, NewType> { NodeRef { height: self.height, node: self.node, root: self.root, _marker: PhantomData } } /// Temporarily takes out another, mutable reference to the same node. Beware, as /// this method is very dangerous, doubly so since it may not immediately appear /// dangerous. /// /// Because mutable pointers can roam anywhere around the tree and can even (through /// `into_root_mut`) mess with the root of the tree, the result of `reborrow_mut` /// can easily be used to make the original mutable pointer dangling, or, in the case /// of a reborrowed handle, out of bounds. // FIXME(@gereeter) consider adding yet another type parameter to `NodeRef` that restricts // the use of `ascend` and `into_root_mut` on reborrowed pointers, preventing this unsafety. unsafe fn reborrow_mut(&mut self) -> NodeRef<marker::Mut<'_>, K, V, Type> { NodeRef { height: self.height, node: self.node, root: self.root, _marker: PhantomData } } /// Exposes the leaf "portion" of any leaf or internal node for writing. /// If the node is a leaf, this function simply opens up its data. /// If the node is an internal node, so not a leaf, it does have all the data a leaf has /// (header, keys and values), and this function exposes that. /// /// Returns a raw ptr to avoid asserting exclusive access to the entire node. /// This also implies you can invoke this member on the shared root, but the resulting pointer /// might not be properly aligned and definitely would not allow accessing keys and values. 
fn as_leaf_mut(&mut self) -> *mut LeafNode<K, V> { self.node.as_ptr() } /// Unsafe because the caller must ensure that the node is not the shared root. unsafe fn keys_mut(&mut self) -> &mut [K] { self.reborrow_mut().into_key_slice_mut() } /// Unsafe because the caller must ensure that the node is not the shared root. unsafe fn vals_mut(&mut self) -> &mut [V] { self.reborrow_mut().into_val_slice_mut() } } impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Immut<'a>, K, V, Type> { /// Unsafe because the caller must ensure that the node is not the shared root. unsafe fn into_key_slice(self) -> &'a [K] { debug_assert!(!self.is_shared_root()); // We cannot be the shared root, so `as_leaf` is okay. slice::from_raw_parts(MaybeUninit::first_ptr(&self.as_leaf().keys), self.len()) } /// Unsafe because the caller must ensure that the node is not the shared root. unsafe fn into_val_slice(self) -> &'a [V] { debug_assert!(!self.is_shared_root()); // We cannot be the shared root, so `as_leaf` is okay. slice::from_raw_parts(MaybeUninit::first_ptr(&self.as_leaf().vals), self.len()) } /// Unsafe because the caller must ensure that the node is not the shared root. unsafe fn into_slices(self) -> (&'a [K], &'a [V]) { let k = ptr::read(&self); (k.into_key_slice(), self.into_val_slice()) } } impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Mut<'a>, K, V, Type> { /// Gets a mutable reference to the root itself. This is useful primarily when the /// height of the tree needs to be adjusted. Never call this on a reborrowed pointer. pub fn into_root_mut(self) -> &'a mut Root<K, V> { unsafe { &mut *(self.root as *mut Root<K, V>) } } /// Unsafe because the caller must ensure that the node is not the shared root. unsafe fn into_key_slice_mut(mut self) -> &'a mut [K] { debug_assert!(!self.is_shared_root()); // We cannot be the shared root, so `as_leaf_mut` is okay. 
slice::from_raw_parts_mut( MaybeUninit::first_ptr_mut(&mut (*self.as_leaf_mut()).keys), self.len(), ) } /// Unsafe because the caller must ensure that the node is not the shared root. unsafe fn into_val_slice_mut(mut self) -> &'a mut [V] { debug_assert!(!self.is_shared_root()); slice::from_raw_parts_mut( MaybeUninit::first_ptr_mut(&mut (*self.as_leaf_mut()).vals), self.len(), ) } /// Unsafe because the caller must ensure that the node is not the shared root. unsafe fn into_slices_mut(mut self) -> (&'a mut [K], &'a mut [V]) { debug_assert!(!self.is_shared_root()); // We cannot use the getters here, because calling the second one // invalidates the reference returned by the first. // More precisely, it is the call to `len` that is the culprit, // because that creates a shared reference to the header, which *can* // overlap with the keys (and even the values, for ZST keys). let len = self.len(); let leaf = self.as_leaf_mut(); let keys = slice::from_raw_parts_mut(MaybeUninit::first_ptr_mut(&mut (*leaf).keys), len); let vals = slice::from_raw_parts_mut(MaybeUninit::first_ptr_mut(&mut (*leaf).vals), len); (keys, vals) } } impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Leaf> { /// Adds a key/value pair the end of the node. pub fn push(&mut self, key: K, val: V) { assert!(self.len() < CAPACITY); debug_assert!(!self.is_shared_root()); let idx = self.len(); unsafe { ptr::write(self.keys_mut().get_unchecked_mut(idx), key); ptr::write(self.vals_mut().get_unchecked_mut(idx), val); (*self.as_leaf_mut()).len += 1; } } /// Adds a key/value pair to the beginning of the node. pub fn push_front(&mut self, key: K, val: V) { assert!(self.len() < CAPACITY); debug_assert!(!self.is_shared_root()); unsafe { slice_insert(self.keys_mut(), 0, key); slice_insert(self.vals_mut(), 0, val); (*self.as_leaf_mut()).len += 1; } } } impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Internal> { /// Adds a key/value pair and an edge to go to the right of that pair to /// the end of the node. 
pub fn push(&mut self, key: K, val: V, edge: Root<K, V>) { assert!(edge.height == self.height - 1); assert!(self.len() < CAPACITY); debug_assert!(!self.is_shared_root()); let idx = self.len(); unsafe { ptr::write(self.keys_mut().get_unchecked_mut(idx), key); ptr::write(self.vals_mut().get_unchecked_mut(idx), val); self.as_internal_mut().edges.get_unchecked_mut(idx + 1).write(edge.node); (*self.as_leaf_mut()).len += 1; Handle::new_edge(self.reborrow_mut(), idx + 1).correct_parent_link(); } } // Unsafe because 'first' and 'after_last' must be in range unsafe fn correct_childrens_parent_links(&mut self, first: usize, after_last: usize) { debug_assert!(first <= self.len()); debug_assert!(after_last <= self.len() + 1); for i in first..after_last { Handle::new_edge(self.reborrow_mut(), i).correct_parent_link(); } } fn correct_all_childrens_parent_links(&mut self) { let len = self.len(); unsafe { self.correct_childrens_parent_links(0, len + 1) }; } /// Adds a key/value pair and an edge to go to the left of that pair to /// the beginning of the node. pub fn push_front(&mut self, key: K, val: V, edge: Root<K, V>) { assert!(edge.height == self.height - 1); assert!(self.len() < CAPACITY); debug_assert!(!self.is_shared_root()); unsafe { slice_insert(self.keys_mut(), 0, key); slice_insert(self.vals_mut(), 0, val); slice_insert( slice::from_raw_parts_mut( MaybeUninit::first_ptr_mut(&mut self.as_internal_mut().edges), self.len() + 1, ), 0, edge.node, ); (*self.as_leaf_mut()).len += 1; self.correct_all_childrens_parent_links(); } } } impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> { /// Removes a key/value pair from the end of this node. If this is an internal node, /// also removes the edge that was to the right of that pair. 
pub fn pop(&mut self) -> (K, V, Option<Root<K, V>>) { assert!(self.len() > 0); let idx = self.len() - 1; unsafe { let key = ptr::read(self.keys().get_unchecked(idx)); let val = ptr::read(self.vals().get_unchecked(idx)); let edge = match self.reborrow_mut().force() { ForceResult::Leaf(_) => None, ForceResult::Internal(internal) => { let edge = ptr::read(internal.as_internal().edges.get_unchecked(idx + 1).as_ptr()); let mut new_root = Root { node: edge, height: internal.height - 1 }; (*new_root.as_mut().as_leaf_mut()).parent = ptr::null(); Some(new_root) } }; (*self.as_leaf_mut()).len -= 1; (key, val, edge) } } /// Removes a key/value pair from the beginning of this node. If this is an internal node, /// also removes the edge that was to the left of that pair. pub fn pop_front(&mut self) -> (K, V, Option<Root<K, V>>) { assert!(self.len() > 0); let old_len = self.len(); unsafe { let key = slice_remove(self.keys_mut(), 0); let val = slice_remove(self.vals_mut(), 0); let edge = match self.reborrow_mut().force() { ForceResult::Leaf(_) => None, ForceResult::Internal(mut internal) => { let edge = slice_remove( slice::from_raw_parts_mut( MaybeUninit::first_ptr_mut(&mut internal.as_internal_mut().edges), old_len + 1, ), 0, ); let mut new_root = Root { node: edge, height: internal.height - 1 }; (*new_root.as_mut().as_leaf_mut()).parent = ptr::null(); for i in 0..old_len { Handle::new_edge(internal.reborrow_mut(), i).correct_parent_link(); } Some(new_root) } }; (*self.as_leaf_mut()).len -= 1; (key, val, edge) } } /// Unsafe because the caller must ensure that the node is not the shared root. unsafe fn into_kv_pointers_mut(mut self) -> (*mut K, *mut V) { (self.keys_mut().as_mut_ptr(), self.vals_mut().as_mut_ptr()) } } impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::LeafOrInternal> { /// Checks whether a node is an `Internal` node or a `Leaf` node. 
pub fn force( self, ) -> ForceResult< NodeRef<BorrowType, K, V, marker::Leaf>, NodeRef<BorrowType, K, V, marker::Internal>, > { if self.height == 0 { ForceResult::Leaf(NodeRef { height: self.height, node: self.node, root: self.root, _marker: PhantomData, }) } else { ForceResult::Internal(NodeRef { height: self.height, node: self.node, root: self.root, _marker: PhantomData, }) } } } /// A reference to a specific key/value pair or edge within a node. The `Node` parameter /// must be a `NodeRef`, while the `Type` can either be `KV` (signifying a handle on a key/value /// pair) or `Edge` (signifying a handle on an edge). /// /// Note that even `Leaf` nodes can have `Edge` handles. Instead of representing a pointer to /// a child node, these represent the spaces where child pointers would go between the key/value /// pairs. For example, in a node with length 2, there would be 3 possible edge locations - one /// to the left of the node, one between the two pairs, and one at the right of the node. pub struct Handle<Node, Type> { node: Node, idx: usize, _marker: PhantomData<Type>, } impl<Node: Copy, Type> Copy for Handle<Node, Type> {} // We don't need the full generality of `#[derive(Clone)]`, as the only time `Node` will be // `Clone`able is when it is an immutable reference and therefore `Copy`. impl<Node: Copy, Type> Clone for Handle<Node, Type> { fn clone(&self) -> Self { *self } } impl<Node, Type> Handle<Node, Type> { /// Retrieves the node that contains the edge of key/value pair this handle points to. pub fn into_node(self) -> Node { self.node } } impl<BorrowType, K, V, NodeType> Handle<NodeRef<BorrowType, K, V, NodeType>, marker::KV> { /// Creates a new handle to a key/value pair in `node`. /// Unsafe because the caller must ensure that `idx < node.len()`. 
pub unsafe fn new_kv(node: NodeRef<BorrowType, K, V, NodeType>, idx: usize) -> Self { debug_assert!(idx < node.len()); Handle { node, idx, _marker: PhantomData } } pub fn left_edge(self) -> Handle<NodeRef<BorrowType, K, V, NodeType>, marker::Edge> { unsafe { Handle::new_edge(self.node, self.idx) } } pub fn right_edge(self) -> Handle<NodeRef<BorrowType, K, V, NodeType>, marker::Edge> { unsafe { Handle::new_edge(self.node, self.idx + 1) } } } impl<BorrowType, K, V, NodeType, HandleType> PartialEq for Handle<NodeRef<BorrowType, K, V, NodeType>, HandleType> { fn eq(&self, other: &Self) -> bool { self.node.node == other.node.node && self.idx == other.idx } } impl<BorrowType, K, V, NodeType, HandleType> Handle<NodeRef<BorrowType, K, V, NodeType>, HandleType> { /// Temporarily takes out another, immutable handle on the same location. pub fn reborrow(&self) -> Handle<NodeRef<marker::Immut<'_>, K, V, NodeType>, HandleType> { // We can't use Handle::new_kv or Handle::new_edge because we don't know our type Handle { node: self.node.reborrow(), idx: self.idx, _marker: PhantomData } } } impl<'a, K, V, NodeType, HandleType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, HandleType> { /// Temporarily takes out another, mutable handle on the same location. Beware, as /// this method is very dangerous, doubly so since it may not immediately appear /// dangerous. /// /// Because mutable pointers can roam anywhere around the tree and can even (through /// `into_root_mut`) mess with the root of the tree, the result of `reborrow_mut` /// can easily be used to make the original mutable pointer dangling, or, in the case /// of a reborrowed handle, out of bounds. // FIXME(@gereeter) consider adding yet another type parameter to `NodeRef` that restricts // the use of `ascend` and `into_root_mut` on reborrowed pointers, preventing this unsafety. 
pub unsafe fn reborrow_mut( &mut self, ) -> Handle<NodeRef<marker::Mut<'_>, K, V, NodeType>, HandleType> { // We can't use Handle::new_kv or Handle::new_edge because we don't know our type Handle { node: self.node.reborrow_mut(), idx: self.idx, _marker: PhantomData } } } impl<BorrowType, K, V, NodeType> Handle<NodeRef<BorrowType, K, V, NodeType>, marker::Edge> { /// Creates a new handle to an edge in `node`. /// Unsafe because the caller must ensure that `idx <= node.len()`. pub unsafe fn new_edge(node: NodeRef<BorrowType, K, V, NodeType>, idx: usize) -> Self { debug_assert!(idx <= node.len()); Handle { node, idx, _marker: PhantomData } } pub fn left_kv(self) -> Result<Handle<NodeRef<BorrowType, K, V, NodeType>, marker::KV>, Self> { if self.idx > 0 { Ok(unsafe { Handle::new_kv(self.node, self.idx - 1) }) } else { Err(self) } } pub fn right_kv(self) -> Result<Handle<NodeRef<BorrowType, K, V, NodeType>, marker::KV>, Self> { if self.idx < self.node.len() { Ok(unsafe { Handle::new_kv(self.node, self.idx) }) } else { Err(self) } } } impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge> { /// Inserts a new key/value pair between the key/value pairs to the right and left of /// this edge. This method assumes that there is enough space in the node for the new /// pair to fit. /// /// The returned pointer points to the inserted value. fn insert_fit(&mut self, key: K, val: V) -> *mut V { // Necessary for correctness, but in a private module debug_assert!(self.node.len() < CAPACITY); debug_assert!(!self.node.is_shared_root()); unsafe { slice_insert(self.node.keys_mut(), self.idx, key); slice_insert(self.node.vals_mut(), self.idx, val); (*self.node.as_leaf_mut()).len += 1; self.node.vals_mut().get_unchecked_mut(self.idx) } } /// Inserts a new key/value pair between the key/value pairs to the right and left of /// this edge. This method splits the node if there isn't enough room. /// /// The returned pointer points to the inserted value. 
pub fn insert(mut self, key: K, val: V) -> (InsertResult<'a, K, V, marker::Leaf>, *mut V) { if self.node.len() < CAPACITY { let ptr = self.insert_fit(key, val); let kv = unsafe { Handle::new_kv(self.node, self.idx) }; (InsertResult::Fit(kv), ptr) } else { let middle = unsafe { Handle::new_kv(self.node, B) }; let (mut left, k, v, mut right) = middle.split(); let ptr = if self.idx <= B { unsafe { Handle::new_edge(left.reborrow_mut(), self.idx).insert_fit(key, val) } } else { unsafe { Handle::new_edge( right.as_mut().cast_unchecked::<marker::Leaf>(), self.idx - (B + 1), ) .insert_fit(key, val) } }; (InsertResult::Split(left, k, v, right), ptr) } } } impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::Edge> { /// Fixes the parent pointer and index in the child node below this edge. This is useful /// when the ordering of edges has been changed, such as in the various `insert` methods. fn correct_parent_link(mut self) { let idx = self.idx as u16; let ptr = self.node.as_internal_mut() as *mut _; let mut child = self.descend(); unsafe { (*child.as_leaf_mut()).parent = ptr; (*child.as_leaf_mut()).parent_idx.write(idx); } } /// Unsafely asserts to the compiler some static information about whether the underlying /// node of this handle is a `Leaf`. unsafe fn cast_unchecked<NewType>( &mut self, ) -> Handle<NodeRef<marker::Mut<'_>, K, V, NewType>, marker::Edge> { Handle::new_edge(self.node.cast_unchecked(), self.idx) } /// Inserts a new key/value pair and an edge that will go to the right of that new pair /// between this edge and the key/value pair to the right of this edge. This method assumes /// that there is enough space in the node for the new pair to fit. 
fn insert_fit(&mut self, key: K, val: V, edge: Root<K, V>) { // Necessary for correctness, but in an internal module debug_assert!(self.node.len() < CAPACITY); debug_assert!(edge.height == self.node.height - 1); unsafe { // This cast is a lie, but it allows us to reuse the key/value insertion logic. self.cast_unchecked::<marker::Leaf>().insert_fit(key, val); slice_insert( slice::from_raw_parts_mut( MaybeUninit::first_ptr_mut(&mut self.node.as_internal_mut().edges), self.node.len(), ), self.idx + 1, edge.node, ); for i in (self.idx + 1)..(self.node.len() + 1) { Handle::new_edge(self.node.reborrow_mut(), i).correct_parent_link(); } } } /// Inserts a new key/value pair and an edge that will go to the right of that new pair /// between this edge and the key/value pair to the right of this edge. This method splits /// the node if there isn't enough room. pub fn insert( mut self, key: K, val: V, edge: Root<K, V>, ) -> InsertResult<'a, K, V, marker::Internal> { assert!(edge.height == self.node.height - 1); if self.node.len() < CAPACITY { self.insert_fit(key, val, edge); let kv = unsafe { Handle::new_kv(self.node, self.idx) }; InsertResult::Fit(kv) } else { let middle = unsafe { Handle::new_kv(self.node, B) }; let (mut left, k, v, mut right) = middle.split(); if self.idx <= B { unsafe { Handle::new_edge(left.reborrow_mut(), self.idx).insert_fit(key, val, edge); } } else { unsafe { Handle::new_edge( right.as_mut().cast_unchecked::<marker::Internal>(), self.idx - (B + 1), ) .insert_fit(key, val, edge); } } InsertResult::Split(left, k, v, right) } } } impl<BorrowType, K, V> Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge> { /// Finds the node pointed to by this edge. /// /// `edge.descend().ascend().unwrap()` and `node.ascend().unwrap().descend()` should /// both, upon success, do nothing. 
pub fn descend(self) -> NodeRef<BorrowType, K, V, marker::LeafOrInternal> { NodeRef { height: self.node.height - 1, node: unsafe { (&*self.node.as_internal().edges.get_unchecked(self.idx).as_ptr()).as_ptr() }, root: self.node.root, _marker: PhantomData, } } } impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Immut<'a>, K, V, NodeType>, marker::KV> { pub fn into_kv(self) -> (&'a K, &'a V) { unsafe { let (keys, vals) = self.node.into_slices(); (keys.get_unchecked(self.idx), vals.get_unchecked(self.idx)) } } } impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, marker::KV> { pub fn into_kv_mut(self) -> (&'a mut K, &'a mut V) { unsafe { let (keys, vals) = self.node.into_slices_mut(); (keys.get_unchecked_mut(self.idx), vals.get_unchecked_mut(self.idx)) } } } impl<'a, K, V, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, marker::KV> { pub fn kv_mut(&mut self) -> (&mut K, &mut V) { unsafe { let (keys, vals) = self.node.reborrow_mut().into_slices_mut(); (keys.get_unchecked_mut(self.idx), vals.get_unchecked_mut(self.idx)) } } } impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::KV> { /// Splits the underlying node into three parts: /// /// - The node is truncated to only contain the key/value pairs to the right of /// this handle. /// - The key and value pointed to by this handle and extracted. /// - All the key/value pairs to the right of this handle are put into a newly /// allocated node. 
pub fn split(mut self) -> (NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, K, V, Root<K, V>) { assert!(!self.node.is_shared_root()); unsafe { let mut new_node = Box::new(LeafNode::new()); let k = ptr::read(self.node.keys().get_unchecked(self.idx)); let v = ptr::read(self.node.vals().get_unchecked(self.idx)); let new_len = self.node.len() - self.idx - 1; ptr::copy_nonoverlapping( self.node.keys().as_ptr().add(self.idx + 1), new_node.keys.as_mut_ptr() as *mut K, new_len, ); ptr::copy_nonoverlapping( self.node.vals().as_ptr().add(self.idx + 1), new_node.vals.as_mut_ptr() as *mut V, new_len, ); (*self.node.as_leaf_mut()).len = self.idx as u16; new_node.len = new_len as u16; (self.node, k, v, Root { node: BoxedNode::from_leaf(new_node), height: 0 }) } } /// Removes the key/value pair pointed to by this handle and returns it, along with the edge /// between the now adjacent key/value pairs (if any) to the left and right of this handle. pub fn remove( mut self, ) -> (Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>, K, V) { assert!(!self.node.is_shared_root()); unsafe { let k = slice_remove(self.node.keys_mut(), self.idx); let v = slice_remove(self.node.vals_mut(), self.idx); (*self.node.as_leaf_mut()).len -= 1; (self.left_edge(), k, v) } } } impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::KV> { /// Splits the underlying node into three parts: /// /// - The node is truncated to only contain the edges and key/value pairs to the /// right of this handle. /// - The key and value pointed to by this handle and extracted. /// - All the edges and key/value pairs to the right of this handle are put into /// a newly allocated node. 
pub fn split(mut self) -> (NodeRef<marker::Mut<'a>, K, V, marker::Internal>, K, V, Root<K, V>) { unsafe { let mut new_node = Box::new(InternalNode::new()); let k = ptr::read(self.node.keys().get_unchecked(self.idx)); let v = ptr::read(self.node.vals().get_unchecked(self.idx)); let height = self.node.height; let new_len = self.node.len() - self.idx - 1; ptr::copy_nonoverlapping( self.node.keys().as_ptr().add(self.idx + 1), new_node.data.keys.as_mut_ptr() as *mut K, new_len, ); ptr::copy_nonoverlapping( self.node.vals().as_ptr().add(self.idx + 1), new_node.data.vals.as_mut_ptr() as *mut V, new_len, ); ptr::copy_nonoverlapping( self.node.as_internal().edges.as_ptr().add(self.idx + 1), new_node.edges.as_mut_ptr(), new_len + 1, ); (*self.node.as_leaf_mut()).len = self.idx as u16; new_node.data.len = new_len as u16; let mut new_root = Root { node: BoxedNode::from_internal(new_node), height }; for i in 0..(new_len + 1) { Handle::new_edge(new_root.as_mut().cast_unchecked(), i).correct_parent_link(); } (self.node, k, v, new_root) } } /// Returns `true` if it is valid to call `.merge()`, i.e., whether there is enough room in /// a node to hold the combination of the nodes to the left and right of this handle along /// with the key/value pair at this handle. pub fn can_merge(&self) -> bool { (self.reborrow().left_edge().descend().len() + self.reborrow().right_edge().descend().len() + 1) <= CAPACITY } /// Combines the node immediately to the left of this handle, the key/value pair pointed /// to by this handle, and the node immediately to the right of this handle into one new /// child of the underlying node, returning an edge referencing that new child. /// /// Assumes that this edge `.can_merge()`. 
pub fn merge( mut self, ) -> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::Edge> { let self1 = unsafe { ptr::read(&self) }; let self2 = unsafe { ptr::read(&self) }; let mut left_node = self1.left_edge().descend(); let left_len = left_node.len(); let mut right_node = self2.right_edge().descend(); let right_len = right_node.len(); // necessary for correctness, but in a private module assert!(left_len + right_len + 1 <= CAPACITY); unsafe { ptr::write( left_node.keys_mut().get_unchecked_mut(left_len), slice_remove(self.node.keys_mut(), self.idx), ); ptr::copy_nonoverlapping( right_node.keys().as_ptr(), left_node.keys_mut().as_mut_ptr().add(left_len + 1), right_len, ); ptr::write( left_node.vals_mut().get_unchecked_mut(left_len), slice_remove(self.node.vals_mut(), self.idx), ); ptr::copy_nonoverlapping( right_node.vals().as_ptr(), left_node.vals_mut().as_mut_ptr().add(left_len + 1), right_len, ); slice_remove(&mut self.node.as_internal_mut().edges, self.idx + 1); for i in self.idx + 1..self.node.len() { Handle::new_edge(self.node.reborrow_mut(), i).correct_parent_link(); } (*self.node.as_leaf_mut()).len -= 1; (*left_node.as_leaf_mut()).len += right_len as u16 + 1; if self.node.height > 1 { ptr::copy_nonoverlapping( right_node.cast_unchecked().as_internal().edges.as_ptr(), left_node .cast_unchecked() .as_internal_mut() .edges .as_mut_ptr() .add(left_len + 1), right_len + 1, ); for i in left_len + 1..left_len + right_len + 2 { Handle::new_edge(left_node.cast_unchecked().reborrow_mut(), i) .correct_parent_link(); } Global.dealloc(right_node.node.cast(), Layout::new::<InternalNode<K, V>>()); } else { Global.dealloc(right_node.node.cast(), Layout::new::<LeafNode<K, V>>()); } Handle::new_edge(self.node, self.idx) } } /// This removes a key/value pair from the left child and places it in the key/value storage /// pointed to by this handle while pushing the old key/value pair of this handle into the right /// child. 
pub fn steal_left(&mut self) { unsafe { let (k, v, edge) = self.reborrow_mut().left_edge().descend().pop(); let k = mem::replace(self.reborrow_mut().into_kv_mut().0, k); let v = mem::replace(self.reborrow_mut().into_kv_mut().1, v); match self.reborrow_mut().right_edge().descend().force() { ForceResult::Leaf(mut leaf) => leaf.push_front(k, v), ForceResult::Internal(mut internal) => internal.push_front(k, v, edge.unwrap()), } } } /// This removes a key/value pair from the right child and places it in the key/value storage /// pointed to by this handle while pushing the old key/value pair of this handle into the left /// child. pub fn steal_right(&mut self) { unsafe { let (k, v, edge) = self.reborrow_mut().right_edge().descend().pop_front(); let k = mem::replace(self.reborrow_mut().into_kv_mut().0, k); let v = mem::replace(self.reborrow_mut().into_kv_mut().1, v); match self.reborrow_mut().left_edge().descend().force() { ForceResult::Leaf(mut leaf) => leaf.push(k, v), ForceResult::Internal(mut internal) => internal.push(k, v, edge.unwrap()), } } } /// This does stealing similar to `steal_left` but steals multiple elements at once. pub fn bulk_steal_left(&mut self, count: usize) { unsafe { let mut left_node = ptr::read(self).left_edge().descend(); let left_len = left_node.len(); let mut right_node = ptr::read(self).right_edge().descend(); let right_len = right_node.len(); // Make sure that we may steal safely. assert!(right_len + count <= CAPACITY); assert!(left_len >= count); let new_left_len = left_len - count; // Move data. { let left_kv = left_node.reborrow_mut().into_kv_pointers_mut(); let right_kv = right_node.reborrow_mut().into_kv_pointers_mut(); let parent_kv = { let kv = self.reborrow_mut().into_kv_mut(); (kv.0 as *mut K, kv.1 as *mut V) }; // Make room for stolen elements in the right child. ptr::copy(right_kv.0, right_kv.0.add(count), right_len); ptr::copy(right_kv.1, right_kv.1.add(count), right_len); // Move elements from the left child to the right one. 
move_kv(left_kv, new_left_len + 1, right_kv, 0, count - 1); // Move parent's key/value pair to the right child. move_kv(parent_kv, 0, right_kv, count - 1, 1); // Move the left-most stolen pair to the parent. move_kv(left_kv, new_left_len, parent_kv, 0, 1); } (*left_node.reborrow_mut().as_leaf_mut()).len -= count as u16; (*right_node.reborrow_mut().as_leaf_mut()).len += count as u16; match (left_node.force(), right_node.force()) { (ForceResult::Internal(left), ForceResult::Internal(mut right)) => { // Make room for stolen edges. let right_edges = right.reborrow_mut().as_internal_mut().edges.as_mut_ptr(); ptr::copy(right_edges, right_edges.add(count), right_len + 1); right.correct_childrens_parent_links(count, count + right_len + 1); move_edges(left, new_left_len + 1, right, 0, count); } (ForceResult::Leaf(_), ForceResult::Leaf(_)) => {} _ => { unreachable!(); } } } } /// The symmetric clone of `bulk_steal_left`. pub fn bulk_steal_right(&mut self, count: usize) { unsafe { let mut left_node = ptr::read(self).left_edge().descend(); let left_len = left_node.len(); let mut right_node = ptr::read(self).right_edge().descend(); let right_len = right_node.len(); // Make sure that we may steal safely. assert!(left_len + count <= CAPACITY); assert!(right_len >= count); let new_right_len = right_len - count; // Move data. { let left_kv = left_node.reborrow_mut().into_kv_pointers_mut(); let right_kv = right_node.reborrow_mut().into_kv_pointers_mut(); let parent_kv = { let kv = self.reborrow_mut().into_kv_mut(); (kv.0 as *mut K, kv.1 as *mut V) }; // Move parent's key/value pair to the left child. move_kv(parent_kv, 0, left_kv, left_len, 1); // Move elements from the right child to the left one. move_kv(right_kv, 0, left_kv, left_len + 1, count - 1); // Move the right-most stolen pair to the parent. 
move_kv(right_kv, count - 1, parent_kv, 0, 1); // Fix right indexing ptr::copy(right_kv.0.add(count), right_kv.0, new_right_len); ptr::copy(right_kv.1.add(count), right_kv.1, new_right_len); } (*left_node.reborrow_mut().as_leaf_mut()).len += count as u16; (*right_node.reborrow_mut().as_leaf_mut()).len -= count as u16; match (left_node.force(), right_node.force()) { (ForceResult::Internal(left), ForceResult::Internal(mut right)) => { move_edges(right.reborrow_mut(), 0, left, left_len + 1, count); // Fix right indexing. let right_edges = right.reborrow_mut().as_internal_mut().edges.as_mut_ptr(); ptr::copy(right_edges.add(count), right_edges, new_right_len + 1); right.correct_childrens_parent_links(0, new_right_len + 1); } (ForceResult::Leaf(_), ForceResult::Leaf(_)) => {} _ => { unreachable!(); } } } } } unsafe fn move_kv<K, V>( source: (*mut K, *mut V), source_offset: usize, dest: (*mut K, *mut V), dest_offset: usize, count: usize, ) { ptr::copy_nonoverlapping(source.0.add(source_offset), dest.0.add(dest_offset), count); ptr::copy_nonoverlapping(source.1.add(source_offset), dest.1.add(dest_offset), count); } // Source and destination must have the same height. 
unsafe fn move_edges<K, V>( mut source: NodeRef<marker::Mut<'_>, K, V, marker::Internal>, source_offset: usize, mut dest: NodeRef<marker::Mut<'_>, K, V, marker::Internal>, dest_offset: usize, count: usize, ) { let source_ptr = source.as_internal_mut().edges.as_mut_ptr(); let dest_ptr = dest.as_internal_mut().edges.as_mut_ptr(); ptr::copy_nonoverlapping(source_ptr.add(source_offset), dest_ptr.add(dest_offset), count); dest.correct_childrens_parent_links(dest_offset, dest_offset + count); } impl<BorrowType, K, V> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::KV> { pub fn forget_node_type( self, ) -> Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV> { unsafe { Handle::new_kv(self.node.forget_type(), self.idx) } } } impl<BorrowType, K, V> Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::KV> { pub fn forget_node_type( self, ) -> Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV> { unsafe { Handle::new_kv(self.node.forget_type(), self.idx) } } } impl<BorrowType, K, V, HandleType> Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, HandleType> { /// Checks whether the underlying node is an `Internal` node or a `Leaf` node. pub fn force( self, ) -> ForceResult< Handle<NodeRef<BorrowType, K, V, marker::Leaf>, HandleType>, Handle<NodeRef<BorrowType, K, V, marker::Internal>, HandleType>, > { match self.node.force() { ForceResult::Leaf(node) => { ForceResult::Leaf(Handle { node, idx: self.idx, _marker: PhantomData }) } ForceResult::Internal(node) => { ForceResult::Internal(Handle { node, idx: self.idx, _marker: PhantomData }) } } } } impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::Edge> { /// Move the suffix after `self` from one node to another one. `right` must be empty. /// The first edge of `right` remains unchanged. 
pub fn move_suffix( &mut self, right: &mut NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, ) { unsafe { let left_new_len = self.idx; let mut left_node = self.reborrow_mut().into_node(); let right_new_len = left_node.len() - left_new_len; let mut right_node = right.reborrow_mut(); assert!(right_node.len() == 0); assert!(left_node.height == right_node.height); if right_new_len > 0 { let left_kv = left_node.reborrow_mut().into_kv_pointers_mut(); let right_kv = right_node.reborrow_mut().into_kv_pointers_mut(); move_kv(left_kv, left_new_len, right_kv, 0, right_new_len); (*left_node.reborrow_mut().as_leaf_mut()).len = left_new_len as u16; (*right_node.reborrow_mut().as_leaf_mut()).len = right_new_len as u16; match (left_node.force(), right_node.force()) { (ForceResult::Internal(left), ForceResult::Internal(right)) => { move_edges(left, left_new_len + 1, right, 1, right_new_len); } (ForceResult::Leaf(_), ForceResult::Leaf(_)) => {} _ => { unreachable!(); } } } } } } pub enum ForceResult<Leaf, Internal> { Leaf(Leaf), Internal(Internal), } pub enum InsertResult<'a, K, V, Type> { Fit(Handle<NodeRef<marker::Mut<'a>, K, V, Type>, marker::KV>), Split(NodeRef<marker::Mut<'a>, K, V, Type>, K, V, Root<K, V>), } pub mod marker { use core::marker::PhantomData; pub enum Leaf {} pub enum Internal {} pub enum LeafOrInternal {} pub enum Owned {} pub struct Immut<'a>(PhantomData<&'a ()>); pub struct Mut<'a>(PhantomData<&'a mut ()>); pub enum KV {} pub enum Edge {} } unsafe fn slice_insert<T>(slice: &mut [T], idx: usize, val: T) { ptr::copy(slice.as_ptr().add(idx), slice.as_mut_ptr().add(idx + 1), slice.len() - idx); ptr::write(slice.get_unchecked_mut(idx), val); } unsafe fn slice_remove<T>(slice: &mut [T], idx: usize) -> T { let ret = ptr::read(slice.get_unchecked(idx)); ptr::copy(slice.as_ptr().add(idx + 1), slice.as_mut_ptr().add(idx), slice.len() - idx - 1); ret }
39.950424
100
0.584972
3a0701d252ccb8a4e5faa72e471adde5f036158e
229
// This shopping list program isn't compiling! // Use your knowledge of generics to fix it. // I AM NOT DONE fn main() { let mut shopping_list: Vec<String> = Vec::new(); shopping_list.push("milk".to_string()); }
19.083333
52
0.650655
7a11fc698a615b41f24c9f22e9417f8a49053151
23,969
use std::{ ffi::CString, os::raw::{c_char, c_void}, pin::Pin, sync::{ atomic::{AtomicBool, Ordering::SeqCst}, Arc, Mutex, }, time::Duration, }; use byte_unit::{Byte, ByteUnit}; use futures::{channel::oneshot, future}; use git_version::git_version; use http::Uri; use once_cell::sync::{Lazy, OnceCell}; use snafu::Snafu; use spdk_rs::{ libspdk::{ spdk_app_shutdown_cb, spdk_log_level, spdk_log_open, spdk_log_set_level, spdk_log_set_print_level, spdk_pci_addr, spdk_rpc_set_state, spdk_thread_lib_fini, spdk_thread_send_critical_msg, SPDK_LOG_DEBUG, SPDK_LOG_INFO, SPDK_RPC_RUNTIME, }, spdk_rs_log, }; use structopt::StructOpt; use tokio::runtime::Builder; use crate::{ bdev::{bdev_io_ctx_pool_init, nexus, nvme_io_ctx_pool_init}, core::{ reactor::{Reactor, ReactorState, Reactors}, Cores, MayastorFeatures, Mthread, }, grpc, logger, persistent_store::PersistentStore, subsys::{self, Config, PoolConfig}, }; fn parse_mb(src: &str) -> Result<i32, String> { // For compatibility, we check to see if there are no alphabetic characters // passed in, if, so we interpret the value to be in MiB which is what the // EAL expects it to be in. let has_unit = src.trim_end().chars().any(|c| c.is_alphabetic()); if let Ok(val) = Byte::from_str(src) { let value; if has_unit { value = val.get_adjusted_unit(ByteUnit::MiB).get_value() as i32 } else { value = val.get_bytes() as i32 } Ok(value) } else { Err(format!("Invalid argument {}", src)) } } #[derive(Debug, Clone, StructOpt)] #[structopt( name = "Mayastor", about = "Containerized Attached Storage (CAS) for k8s", version = git_version!(args = ["--tags", "--abbrev=12"], fallback="unkown"), setting(structopt::clap::AppSettings::ColoredHelp) )] pub struct MayastorCliArgs { #[structopt(short = "g", default_value = grpc::default_endpoint_str())] /// IP address and port (optional) for the gRPC server to listen on. 
pub grpc_endpoint: String, #[structopt(short = "R")] /// Registration grpc endpoint pub registration_endpoint: Option<Uri>, #[structopt(short = "L")] /// Enable logging for sub components. pub log_components: Vec<String>, #[structopt(short = "m", default_value = "0x1")] /// The reactor mask to be used for starting up the instance pub reactor_mask: String, #[structopt(short = "N")] /// Name of the node where mayastor is running (ID used by control plane) pub node_name: Option<String>, /// The maximum amount of hugepage memory we are allowed to allocate in /// MiB. A value of 0 means no limit. #[structopt( short = "s", parse(try_from_str = parse_mb), default_value = "0" )] pub mem_size: i32, #[structopt(short = "u")] /// Disable the use of PCIe devices. pub no_pci: bool, #[structopt(short = "r", default_value = "/var/tmp/mayastor.sock")] /// Path to create the rpc socket. pub rpc_address: String, #[structopt(short = "y")] /// Path to mayastor config YAML file. pub mayastor_config: Option<String>, #[structopt(short = "P")] /// Path to pool config file. pub pool_config: Option<String>, #[structopt(long = "huge-dir")] /// Path to hugedir. pub hugedir: Option<String>, #[structopt(long = "env-context")] /// Pass additional arguments to the EAL environment. pub env_context: Option<String>, #[structopt(short = "l")] /// List of cores to run on instead of using the core mask. When specified /// it supersedes the core mask (-m) argument. pub core_list: Option<String>, #[structopt(short = "p")] /// Endpoint of the persistent store. pub persistent_store_endpoint: Option<String>, #[structopt(long = "bdev-pool-size", default_value = "65535")] /// Number of entries in memory pool for bdev I/O contexts pub bdev_io_ctx_pool_size: u64, #[structopt(long = "nvme-ctl-pool-size", default_value = "65535")] /// Number of entries in memory pool for NVMe controller I/O contexts pub nvme_ctl_io_ctx_pool_size: u64, } /// Mayastor features. 
impl MayastorFeatures { fn init_features() -> MayastorFeatures { let ana = match std::env::var("NEXUS_NVMF_ANA_ENABLE") { Ok(s) => s == "1", Err(_) => false, }; MayastorFeatures { asymmetric_namespace_access: ana, } } pub fn get_features() -> Self { MAYASTOR_FEATURES.get_or_init(Self::init_features).clone() } } /// Defaults are redefined here in case of using it during tests impl Default for MayastorCliArgs { fn default() -> Self { Self { grpc_endpoint: grpc::default_endpoint().to_string(), persistent_store_endpoint: None, node_name: None, env_context: None, reactor_mask: "0x1".into(), mem_size: 0, rpc_address: "/var/tmp/mayastor.sock".to_string(), no_pci: true, log_components: vec![], mayastor_config: None, pool_config: None, hugedir: None, core_list: None, bdev_io_ctx_pool_size: 65535, nvme_ctl_io_ctx_pool_size: 65535, registration_endpoint: None, } } } /// Global exit code of the program, initially set to -1 to capture double /// shutdown during test cases pub static GLOBAL_RC: Lazy<Arc<Mutex<i32>>> = Lazy::new(|| Arc::new(Mutex::new(-1))); /// keep track if we have received a signal already pub static SIG_RECEIVED: Lazy<AtomicBool> = Lazy::new(|| AtomicBool::new(false)); /// FFI functions that are needed to initialize the environment extern "C" { pub fn rte_eal_init(argc: i32, argv: *mut *mut libc::c_char) -> i32; pub fn spdk_trace_cleanup(); pub fn spdk_env_dpdk_post_init(legacy_mem: bool) -> i32; pub fn spdk_env_fini(); pub fn spdk_log_close(); pub fn spdk_log_set_flag(name: *const c_char, enable: bool) -> i32; pub fn spdk_rpc_finish(); pub fn spdk_rpc_initialize(listen: *mut libc::c_char); pub fn spdk_subsystem_fini( f: Option<unsafe extern "C" fn(*mut c_void)>, ctx: *mut c_void, ); pub fn spdk_subsystem_init( f: Option<extern "C" fn(i32, *mut c_void)>, ctx: *mut c_void, ); } #[derive(Debug, Snafu)] pub enum EnvError { #[snafu(display("Failed to install signal handler"))] SetSigHdl { source: nix::Error }, #[snafu(display("Failed to initialize logging 
subsystem"))] InitLog, #[snafu(display("Failed to initialize {} target", target))] InitTarget { target: String }, } type Result<T, E = EnvError> = std::result::Result<T, E>; /// Mayastor argument #[derive(Debug, Clone)] #[allow(dead_code)] pub struct MayastorEnvironment { pub node_name: String, pub grpc_endpoint: Option<std::net::SocketAddr>, pub registration_endpoint: Option<Uri>, persistent_store_endpoint: Option<String>, mayastor_config: Option<String>, pool_config: Option<String>, delay_subsystem_init: bool, enable_coredump: bool, env_context: Option<String>, hugedir: Option<String>, hugepage_single_segments: bool, json_config_file: Option<String>, master_core: i32, mem_channel: i32, pub mem_size: i32, pub name: String, no_pci: bool, num_entries: u64, num_pci_addr: usize, pci_blocklist: Vec<spdk_pci_addr>, pci_allowlist: Vec<spdk_pci_addr>, print_level: spdk_log_level, debug_level: spdk_log_level, reactor_mask: String, pub rpc_addr: String, shm_id: i32, shutdown_cb: spdk_app_shutdown_cb, tpoint_group_mask: String, unlink_hugepage: bool, log_component: Vec<String>, core_list: Option<String>, bdev_io_ctx_pool_size: u64, nvme_ctl_io_ctx_pool_size: u64, } impl Default for MayastorEnvironment { fn default() -> Self { Self { node_name: "mayastor-node".into(), grpc_endpoint: None, registration_endpoint: None, persistent_store_endpoint: None, mayastor_config: None, pool_config: None, delay_subsystem_init: false, enable_coredump: true, env_context: None, hugedir: None, hugepage_single_segments: false, json_config_file: None, master_core: -1, mem_channel: -1, mem_size: -1, name: "mayastor".into(), no_pci: false, num_entries: 0, num_pci_addr: 0, pci_blocklist: vec![], pci_allowlist: vec![], print_level: SPDK_LOG_INFO, debug_level: SPDK_LOG_INFO, reactor_mask: "0x1".into(), rpc_addr: "/var/tmp/mayastor.sock".into(), shm_id: -1, shutdown_cb: None, tpoint_group_mask: String::new(), unlink_hugepage: true, log_component: vec![], core_list: None, bdev_io_ctx_pool_size: 65535, 
nvme_ctl_io_ctx_pool_size: 65535, } } } /// The actual routine which does the mayastor shutdown. /// Must be called on the same thread which did the init. async fn do_shutdown(arg: *mut c_void) { // we must enter the init thread explicitly here as this, typically, gets // called by the signal handler // callback for when the subsystems have shutdown extern "C" fn reactors_stop(arg: *mut c_void) { Reactors::iter().for_each(|r| r.shutdown()); *GLOBAL_RC.lock().unwrap() = arg as i32; } let rc = arg as i32; if rc != 0 { warn!("Mayastor stopped non-zero: {}", rc); } nexus::nexus_children_to_destroying_state().await; crate::lvs::Lvs::export_all().await; unsafe { spdk_rpc_finish(); spdk_subsystem_fini(Some(reactors_stop), arg); } } /// main shutdown routine for mayastor pub fn mayastor_env_stop(rc: i32) { let r = Reactors::master(); match r.get_state() { ReactorState::Running | ReactorState::Delayed | ReactorState::Init => { r.send_future(async move { do_shutdown(rc as *const i32 as *mut c_void).await; }); } _ => { panic!("invalid reactor state during shutdown"); } } } #[inline(always)] unsafe extern "C" fn signal_trampoline(_: *mut c_void) { mayastor_env_stop(0); } /// called on SIGINT and SIGTERM extern "C" fn mayastor_signal_handler(signo: i32) { if SIG_RECEIVED.load(SeqCst) { return; } warn!("Received SIGNO: {}", signo); SIG_RECEIVED.store(true, SeqCst); unsafe { spdk_thread_send_critical_msg( Mthread::get_init().into_raw(), Some(signal_trampoline), ); }; } #[derive(Debug)] struct SubsystemCtx { rpc: CString, sender: futures::channel::oneshot::Sender<bool>, } static MAYASTOR_FEATURES: OnceCell<MayastorFeatures> = OnceCell::new(); static MAYASTOR_DEFAULT_ENV: OnceCell<MayastorEnvironment> = OnceCell::new(); impl MayastorEnvironment { pub fn new(args: MayastorCliArgs) -> Self { Self { grpc_endpoint: Some(grpc::endpoint(args.grpc_endpoint)), registration_endpoint: args.registration_endpoint, persistent_store_endpoint: args.persistent_store_endpoint, node_name: 
args.node_name.unwrap_or_else(|| "mayastor-node".into()), mayastor_config: args.mayastor_config, pool_config: args.pool_config, log_component: args.log_components, mem_size: args.mem_size, no_pci: args.no_pci, reactor_mask: args.reactor_mask, rpc_addr: args.rpc_address, hugedir: args.hugedir, env_context: args.env_context, core_list: args.core_list, bdev_io_ctx_pool_size: args.bdev_io_ctx_pool_size, nvme_ctl_io_ctx_pool_size: args.nvme_ctl_io_ctx_pool_size, ..Default::default() } .setup_static() } fn setup_static(self) -> Self { MAYASTOR_DEFAULT_ENV.get_or_init(|| self.clone()); self } /// Get the global environment (first created on new) /// or otherwise the default one (used by the tests) pub fn global_or_default() -> Self { match MAYASTOR_DEFAULT_ENV.get() { Some(env) => env.clone(), None => MayastorEnvironment::default(), } } /// configure signal handling fn install_signal_handlers(&self) { unsafe { signal_hook::low_level::register( signal_hook::consts::SIGTERM, || mayastor_signal_handler(1), ) } .unwrap(); unsafe { signal_hook::low_level::register( signal_hook::consts::SIGINT, || mayastor_signal_handler(1), ) } .unwrap(); } /// construct an array of options to be passed to EAL and start it fn initialize_eal(&self) { let mut args = vec![CString::new(self.name.clone()).unwrap()]; if self.mem_channel > 0 { args.push( CString::new(format!("-n {}", self.mem_channel)).unwrap(), ); } if self.shm_id < 0 { args.push(CString::new("--no-shconf").unwrap()); } if self.mem_size >= 0 { args.push(CString::new(format!("-m {}", self.mem_size)).unwrap()); } if self.master_core > 0 { args.push( CString::new(format!("--master-lcore={}", self.master_core)) .unwrap(), ); } if self.no_pci { args.push(CString::new("--no-pci").unwrap()); } if self.hugepage_single_segments { args.push(CString::new("--single-file-segments").unwrap()); } if self.hugedir.is_some() { args.push( CString::new(format!( "--huge-dir={}", &self.hugedir.as_ref().unwrap().clone() )) .unwrap(), ) } if cfg!(target_os 
= "linux") { // Ref: https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm args.push(CString::new("--base-virtaddr=0x200000000000").unwrap()); } if self.shm_id < 0 { args.push( CString::new(format!("--file-prefix=mayastor_pid{}", unsafe { libc::getpid() })) .unwrap(), ); } else { args.push( CString::new(format!( "--file-prefix=mayastor_pid{}", self.shm_id )) .unwrap(), ); args.push(CString::new("--proc-type=auto").unwrap()); } if self.unlink_hugepage { args.push(CString::new("--huge-unlink".to_string()).unwrap()); } // set the log levels of the DPDK libs, this can be overridden by // setting env_context args.push(CString::new("--log-level=lib.eal:6").unwrap()); args.push(CString::new("--log-level=lib.cryptodev:5").unwrap()); args.push(CString::new("--log-level=user1:6").unwrap()); args.push(CString::new("--match-allocations").unwrap()); // any additional parameters we want to pass down to the eal. These // arguments are not checked or validated. if self.env_context.is_some() { args.extend( self.env_context .as_ref() .unwrap() .split_ascii_whitespace() .map(|s| CString::new(s.to_string()).unwrap()) .collect::<Vec<_>>(), ); } // when -l is specified it overrules the core mask. The core mask still // carries our default of 0x1 such that existing testing code // does not require any changes. 
if let Some(list) = &self.core_list { args.push(CString::new(format!("-l {}", list)).unwrap()); } else { args.push( CString::new(format!("-c {}", self.reactor_mask)).unwrap(), ) } let mut cargs = args .iter() .map(|arg| arg.as_ptr()) .collect::<Vec<*const c_char>>(); cargs.push(std::ptr::null()); debug!("EAL arguments {:?}", args); if unsafe { rte_eal_init( (cargs.len() as libc::c_int) - 1, cargs.as_ptr() as *mut *mut c_char, ) } < 0 { panic!("Failed to init EAL"); } if unsafe { spdk_env_dpdk_post_init(false) } != 0 { panic!("Failed execute post setup"); } } /// initialize the logging subsystem fn init_logger(&mut self) -> Result<()> { // if log flags are specified increase the loglevel and print level. if !self.log_component.is_empty() { warn!("Increasing debug and print level ..."); self.debug_level = SPDK_LOG_DEBUG; self.print_level = SPDK_LOG_DEBUG; } unsafe { for flag in &self.log_component { let cflag = CString::new(flag.as_str()).unwrap(); if spdk_log_set_flag(cflag.as_ptr(), true) != 0 { return Err(EnvError::InitLog); } } spdk_log_set_level(self.debug_level); spdk_log_set_print_level(self.print_level); // open our log implementation which is implemented in the wrapper spdk_log_open(Some(spdk_rs_log)); // our callback called defined in rust called by our wrapper spdk_rs::logfn = Some(logger::log_impl); } Ok(()) } /// start the JSON rpc server which listens only to a local path extern "C" fn start_rpc(rc: i32, arg: *mut c_void) { let ctx = unsafe { Box::from_raw(arg as *mut SubsystemCtx) }; if rc != 0 { ctx.sender.send(false).unwrap(); } else { info!("RPC server listening at: {}", ctx.rpc.to_str().unwrap()); unsafe { spdk_rpc_initialize(ctx.rpc.as_ptr() as *mut c_char); spdk_rpc_set_state(SPDK_RPC_RUNTIME); }; let success = true; ctx.sender.send(success).unwrap(); } } /// load the config and apply it before any subsystems have started. /// there is currently no run time check that enforces this. 
    fn load_yaml_config(&self) {
        let cfg = if let Some(yaml) = &self.mayastor_config {
            info!("loading mayastor config YAML file {}", yaml);
            Config::get_or_init(|| {
                if let Ok(cfg) = Config::read(yaml) {
                    cfg
                } else {
                    // if the configuration is invalid exit early
                    panic!("Failed to load the mayastor configuration")
                }
            })
        } else {
            // no file given: fall back to the built-in defaults
            Config::get_or_init(Config::default)
        };
        cfg.apply();
    }

    /// load the pool config file.
    ///
    /// A missing or unreadable pool config is non-fatal: a warning is logged
    /// and `None` is returned.
    fn load_pool_config(&self) -> Option<PoolConfig> {
        if let Some(file) = &self.pool_config {
            info!("loading pool config file {}", file);
            match PoolConfig::load(file) {
                Ok(config) => {
                    return Some(config);
                }
                Err(error) => {
                    warn!("failed to load pool configuration: {}", error);
                }
            }
        }
        None
    }

    /// initialize the core, call this before all else
    ///
    /// The ordering below is significant: logging first, then configuration,
    /// then the EAL/DPDK bootstrap, then reactors — pools are imported last,
    /// once the subsystems are up.
    pub fn init(mut self) -> Self {
        // setup the logger as soon as possible
        self.init_logger().unwrap();

        self.load_yaml_config();
        let pool_config = self.load_pool_config();

        // bootstrap DPDK and its magic
        self.initialize_eal();

        // initialize memory pool for allocating bdev I/O contexts
        bdev_io_ctx_pool_init(self.bdev_io_ctx_pool_size);
        // initialize memory pool for allocating NVMe controller I/O contexts
        nvme_io_ctx_pool_init(self.nvme_ctl_io_ctx_pool_size);

        info!(
            "Total number of cores available: {}",
            Cores::count().into_iter().count()
        );

        // setup our signal handlers
        self.install_signal_handlers();

        // allocate a Reactor per core
        Reactors::init();

        // launch the remote cores if any. note that during init these have to
        // be running as during setup cross call will take place.
        Cores::count()
            .into_iter()
            .for_each(|c| Reactors::launch_remote(c).unwrap());

        let rpc = CString::new(self.rpc_addr.as_str()).unwrap();

        // wait for all cores to be online, not sure if this is the right way
        // but when using more then 16 cores, I saw some "weirdness"
        // which could be related purely to logging.
        while Reactors::iter().any(|r| {
            r.get_state() == ReactorState::Init && r.core() != Cores::current()
        }) {
            std::thread::sleep(Duration::from_millis(1));
        }

        info!("All cores locked and loaded!");

        // ensure we are within the context of a spdk thread from here
        Mthread::get_init().enter();

        Reactor::block_on(async {
            let (sender, receiver) = oneshot::channel::<bool>();
            unsafe {
                // The context is leaked here and reclaimed by start_rpc via
                // Box::from_raw once subsystem init completes.
                spdk_subsystem_init(
                    Some(Self::start_rpc),
                    Box::into_raw(Box::new(SubsystemCtx {
                        rpc,
                        sender,
                    })) as *mut _,
                );
            }
            assert!(receiver.await.unwrap());
        });

        // load any pools that need to be created
        if let Some(config) = pool_config {
            config.import_pools();
        }

        self
    }

    // finalize our environment
    pub fn fini(&self) {
        // Tear down in reverse order of initialisation.
        unsafe {
            spdk_trace_cleanup();
            spdk_thread_lib_fini();
            spdk_env_fini();
            spdk_log_close();
        }
    }

    /// start mayastor and call f when all is setup.
    pub fn start<F>(self, f: F) -> Result<i32>
    where
        F: FnOnce() + 'static,
    {
        type FutureResult = Result<(), ()>;
        let grpc_endpoint = self.grpc_endpoint;
        let rpc_addr = self.rpc_addr.clone();
        let persistent_store_endpoint = self.persistent_store_endpoint.clone();
        let ms = self.init();

        // A single-threaded tokio runtime drives the gRPC server and the
        // registration loop alongside the master reactor.
        let rt = Builder::new_current_thread().enable_all().build().unwrap();
        rt.block_on(async {
            PersistentStore::init(persistent_store_endpoint).await;

            let master = Reactors::current();
            // schedule the user callback on the master reactor
            master.send_future(async { f() });

            let mut futures: Vec<
                Pin<Box<dyn future::Future<Output = FutureResult>>>,
            > = Vec::new();
            if let Some(grpc_endpoint) = grpc_endpoint {
                futures.push(Box::pin(grpc::MayastorGrpcServer::run(
                    grpc_endpoint,
                    rpc_addr,
                )));
            }
            futures.push(Box::pin(subsys::Registration::run()));
            futures.push(Box::pin(master));

            // run until the first future fails or all complete
            let _out = future::try_join_all(futures).await;
            info!("reactors stopped");
            ms.fini();
        });

        Ok(*GLOBAL_RC.lock().unwrap())
    }
}
31.831341
87
0.567692
c1e132839d226679f3c43f167e298d6b051b97bf
98,229
/*! This private module handles the various image formats in OpenGL. */ use std::fmt; use std::error::Error; use gl; use context::Context; use CapabilitiesSource; use ToGlEnum; use version::{Api, Version}; /// Error that is returned if the format is not supported by OpenGL. #[derive(Copy, Clone, Debug)] pub struct FormatNotSupportedError; impl fmt::Display for FormatNotSupportedError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { write!(fmt, "{}", self.description()) } } impl Error for FormatNotSupportedError { fn description(&self) -> &str { "Format is not supported by OpenGL" } } /// Texture format request. #[derive(Copy, Clone, Debug)] pub enum TextureFormatRequest { /// Request a specific format. Specific(TextureFormat), /// Request any floating-point format, normalized or not. AnyFloatingPoint, // TODO: // /// Request any floating-point format represented with floats. //AnyFloatingPointFloat, /// Request any compressed format. AnyCompressed, /// Request any sRGB format. AnySrgb, /// Request any compressed sRGB format. AnyCompressedSrgb, /// Request any integral format. AnyIntegral, /// Request any unsigned format. AnyUnsigned, /// Request any depth format. AnyDepth, /// Request any stencil format. AnyStencil, /// Request any depth-stencil format. AnyDepthStencil, } /// List of client-side pixel formats. /// /// These are all the possible formats of input data when uploading to a texture. #[allow(missing_docs)] #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ClientFormat { U8, U8U8, U8U8U8, U8U8U8U8, I8, I8I8, I8I8I8, I8I8I8I8, U16, U16U16, U16U16U16, U16U16U16U16, I16, I16I16, I16I16I16, I16I16I16I16, U32, U32U32, U32U32U32, U32U32U32U32, I32, I32I32, I32I32I32, I32I32I32I32, U3U3U2, U5U6U5, U4U4U4U4, U5U5U5U1, U10U10U10U2, F16, F16F16, F16F16F16, F16F16F16F16, F32, F32F32, F32F32F32, F32F32F32F32, } impl ClientFormat { /// Returns the size in bytes of a pixel of this type. 
pub fn get_size(&self) -> usize { use std::mem; match *self { ClientFormat::U8 => 1 * mem::size_of::<u8>(), ClientFormat::U8U8 => 2 * mem::size_of::<u8>(), ClientFormat::U8U8U8 => 3 * mem::size_of::<u8>(), ClientFormat::U8U8U8U8 => 4 * mem::size_of::<u8>(), ClientFormat::I8 => 1 * mem::size_of::<i8>(), ClientFormat::I8I8 => 2 * mem::size_of::<i8>(), ClientFormat::I8I8I8 => 3 * mem::size_of::<i8>(), ClientFormat::I8I8I8I8 => 4 * mem::size_of::<i8>(), ClientFormat::U16 => 1 * mem::size_of::<u16>(), ClientFormat::U16U16 => 2 * mem::size_of::<u16>(), ClientFormat::U16U16U16 => 3 * mem::size_of::<u16>(), ClientFormat::U16U16U16U16 => 4 * mem::size_of::<u16>(), ClientFormat::I16 => 1 * mem::size_of::<i16>(), ClientFormat::I16I16 => 2 * mem::size_of::<i16>(), ClientFormat::I16I16I16 => 3 * mem::size_of::<i16>(), ClientFormat::I16I16I16I16 => 4 * mem::size_of::<i16>(), ClientFormat::U32 => 1 * mem::size_of::<u32>(), ClientFormat::U32U32 => 2 * mem::size_of::<u32>(), ClientFormat::U32U32U32 => 3 * mem::size_of::<u32>(), ClientFormat::U32U32U32U32 => 4 * mem::size_of::<u32>(), ClientFormat::I32 => 1 * mem::size_of::<i32>(), ClientFormat::I32I32 => 2 * mem::size_of::<i32>(), ClientFormat::I32I32I32 => 3 * mem::size_of::<i32>(), ClientFormat::I32I32I32I32 => 4 * mem::size_of::<i32>(), ClientFormat::U3U3U2 => (3 + 3 + 2) / 8, ClientFormat::U5U6U5 => (5 + 6 + 5) / 8, ClientFormat::U4U4U4U4 => (4 + 4 + 4 + 4) / 8, ClientFormat::U5U5U5U1 => (5 + 5 + 5 + 1) / 8, ClientFormat::U10U10U10U2 => (10 + 10 + 10 + 2) / 8, ClientFormat::F16 => 16 / 8, ClientFormat::F16F16 => (16 + 16) / 8, ClientFormat::F16F16F16 => (16 + 16 + 16) / 8, ClientFormat::F16F16F16F16 => (16 + 16 + 16 + 16) / 8, ClientFormat::F32 => 1 * mem::size_of::<f32>(), ClientFormat::F32F32 => 2 * mem::size_of::<f32>(), ClientFormat::F32F32F32 => 3 * mem::size_of::<f32>(), ClientFormat::F32F32F32F32 => 4 * mem::size_of::<f32>(), } } /// Returns the number of components of this client format. 
    pub fn get_num_components(&self) -> u8 {
        // One entry per enum variant; the variant name encodes one component
        // per `U*`/`I*`/`F*` group, and this table simply counts those groups.
        match *self {
            ClientFormat::U8 => 1,
            ClientFormat::U8U8 => 2,
            ClientFormat::U8U8U8 => 3,
            ClientFormat::U8U8U8U8 => 4,
            ClientFormat::I8 => 1,
            ClientFormat::I8I8 => 2,
            ClientFormat::I8I8I8 => 3,
            ClientFormat::I8I8I8I8 => 4,
            ClientFormat::U16 => 1,
            ClientFormat::U16U16 => 2,
            ClientFormat::U16U16U16 => 3,
            ClientFormat::U16U16U16U16 => 4,
            ClientFormat::I16 => 1,
            ClientFormat::I16I16 => 2,
            ClientFormat::I16I16I16 => 3,
            ClientFormat::I16I16I16I16 => 4,
            ClientFormat::U32 => 1,
            ClientFormat::U32U32 => 2,
            ClientFormat::U32U32U32 => 3,
            ClientFormat::U32U32U32U32 => 4,
            ClientFormat::I32 => 1,
            ClientFormat::I32I32 => 2,
            ClientFormat::I32I32I32 => 3,
            ClientFormat::I32I32I32I32 => 4,
            ClientFormat::U3U3U2 => 3,
            ClientFormat::U5U6U5 => 3,
            ClientFormat::U4U4U4U4 => 4,
            ClientFormat::U5U5U5U1 => 4,
            ClientFormat::U10U10U10U2 => 4,
            ClientFormat::F16 => 1,
            ClientFormat::F16F16 => 2,
            ClientFormat::F16F16F16 => 3,
            ClientFormat::F16F16F16F16 => 4,
            ClientFormat::F32 => 1,
            ClientFormat::F32F32 => 2,
            ClientFormat::F32F32F32 => 3,
            ClientFormat::F32F32F32F32 => 4,
        }
    }
}

/// List of uncompressed pixel formats that contain floating-point-like data.
///
/// Some formats are marked as "guaranteed to be supported". What this means is that you are
/// certain that the backend will use exactly these formats. If you try to use a format that
/// is not supported by the backend, it will automatically fall back to a larger format.
// TODO: missing RGB565
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
pub enum UncompressedFloatFormat {
    ///
    ///
    /// Guaranteed to be supported for both textures and renderbuffers.
    U8,
    ///
    ///
    /// Guaranteed to be supported for textures.
    I8,
    ///
    ///
    /// Guaranteed to be supported for both textures and renderbuffers.
    U16,
    ///
    ///
    /// Guaranteed to be supported for textures.
    I16,
    ///
    ///
    /// Guaranteed to be supported for both textures and renderbuffers.
    U8U8,
    ///
    ///
    /// Guaranteed to be supported for textures.
    I8I8,
    ///
    ///
    /// Guaranteed to be supported for both textures and renderbuffers.
    U16U16,
    ///
    ///
    /// Guaranteed to be supported for textures.
    I16I16,
    ///
    U3U3U2,
    ///
    U4U4U4,
    ///
    U5U5U5,
    ///
    ///
    /// Guaranteed to be supported for textures.
    U8U8U8,
    ///
    ///
    /// Guaranteed to be supported for textures.
    I8I8I8,
    ///
    U10U10U10,
    ///
    U12U12U12,
    ///
    ///
    /// Guaranteed to be supported for both textures and renderbuffers.
    U16U16U16,
    ///
    ///
    /// Guaranteed to be supported for textures.
    I16I16I16,
    ///
    U2U2U2U2,
    ///
    U4U4U4U4,
    ///
    U5U5U5U1,
    ///
    ///
    /// Guaranteed to be supported for both textures and renderbuffers.
    U8U8U8U8,
    ///
    ///
    /// Guaranteed to be supported for textures.
    I8I8I8I8,
    ///
    ///
    /// Guaranteed to be supported for both textures and renderbuffers.
    U10U10U10U2,
    ///
    U12U12U12U12,
    ///
    ///
    /// Guaranteed to be supported for both textures and renderbuffers.
    U16U16U16U16,
    ///
    ///
    /// Guaranteed to be supported for both textures and renderbuffers.
    I16I16I16I16,
    ///
    ///
    /// Guaranteed to be supported for both textures and renderbuffers.
    F16,
    ///
    ///
    /// Guaranteed to be supported for both textures and renderbuffers.
    F16F16,
    ///
    ///
    /// Guaranteed to be supported for textures.
    F16F16F16,
    ///
    ///
    /// Guaranteed to be supported for both textures and renderbuffers.
    F16F16F16F16,
    ///
    ///
    /// Guaranteed to be supported for both textures and renderbuffers.
    F32,
    ///
    ///
    /// Guaranteed to be supported for both textures and renderbuffers.
    F32F32,
    ///
    ///
    /// Guaranteed to be supported for textures.
    F32F32F32,
    ///
    ///
    /// Guaranteed to be supported for both textures and renderbuffers.
    F32F32F32F32,
    ///
    ///
    /// Guaranteed to be supported for both textures and renderbuffers.
    F11F11F10,
    /// Uses three components of 9 bits of precision that all share the same exponent.
    ///
    /// Use this format only if all the components are approximately equal.
    ///
    /// Guaranteed to be supported for textures.
    F9F9F9,
}

impl UncompressedFloatFormat {
    /// Returns a list of all the possible values of this enumeration.
    ///
    /// The list is kept in the same order as the enum declaration above.
    #[inline]
    pub fn get_formats_list() -> Vec<UncompressedFloatFormat> {
        vec![
            UncompressedFloatFormat::U8,
            UncompressedFloatFormat::I8,
            UncompressedFloatFormat::U16,
            UncompressedFloatFormat::I16,
            UncompressedFloatFormat::U8U8,
            UncompressedFloatFormat::I8I8,
            UncompressedFloatFormat::U16U16,
            UncompressedFloatFormat::I16I16,
            UncompressedFloatFormat::U3U3U2,
            UncompressedFloatFormat::U4U4U4,
            UncompressedFloatFormat::U5U5U5,
            UncompressedFloatFormat::U8U8U8,
            UncompressedFloatFormat::I8I8I8,
            UncompressedFloatFormat::U10U10U10,
            UncompressedFloatFormat::U12U12U12,
            UncompressedFloatFormat::U16U16U16,
            UncompressedFloatFormat::I16I16I16,
            UncompressedFloatFormat::U2U2U2U2,
            UncompressedFloatFormat::U4U4U4U4,
            UncompressedFloatFormat::U5U5U5U1,
            UncompressedFloatFormat::U8U8U8U8,
            UncompressedFloatFormat::I8I8I8I8,
            UncompressedFloatFormat::U10U10U10U2,
            UncompressedFloatFormat::U12U12U12U12,
            UncompressedFloatFormat::U16U16U16U16,
            UncompressedFloatFormat::I16I16I16I16,
            UncompressedFloatFormat::F16,
            UncompressedFloatFormat::F16F16,
            UncompressedFloatFormat::F16F16F16,
            UncompressedFloatFormat::F16F16F16F16,
            UncompressedFloatFormat::F32,
            UncompressedFloatFormat::F32F32,
            UncompressedFloatFormat::F32F32F32,
            UncompressedFloatFormat::F32F32F32F32,
            UncompressedFloatFormat::F11F11F10,
            UncompressedFloatFormat::F9F9F9,
        ]
    }

    /// Turns this format into a more generic `TextureFormat`.
    #[inline]
    pub fn to_texture_format(self) -> TextureFormat {
        TextureFormat::UncompressedFloat(self)
    }

    /// Returns true if this format is supported by the backend.
    pub fn is_supported<C: ?Sized>(&self, context: &C) -> bool where C: CapabilitiesSource {
        let version = context.get_version();
        let extensions = context.get_extensions();

        // Each arm encodes the minimum GL/GLES core version for the format,
        // plus the extension(s) that provide it on older contexts:
        //   - one/two-component normalized unsigned: ARB_texture_rg
        //   - signed normalized (snorm): EXT_texture_snorm
        //   - half/float: ARB_texture_float (+ ARB_texture_rg for R/RG),
        //     or ATI_texture_float for RGB/RGBA
        match self {
            &UncompressedFloatFormat::U8 => {
                version >= &Version(Api::Gl, 3, 0) ||
                version >= &Version(Api::GlEs, 3, 0) ||
                extensions.gl_arb_texture_rg
            },
            &UncompressedFloatFormat::I8 => {
                version >= &Version(Api::Gl, 3, 2) ||
                version >= &Version(Api::GlEs, 3, 0) ||
                extensions.gl_ext_texture_snorm
            },
            &UncompressedFloatFormat::U16 => {
                version >= &Version(Api::Gl, 3, 0) ||
                version >= &Version(Api::GlEs, 3, 0) ||
                extensions.gl_arb_texture_rg
            },
            &UncompressedFloatFormat::I16 => {
                version >= &Version(Api::Gl, 3, 2) ||
                version >= &Version(Api::GlEs, 3, 0) ||
                extensions.gl_ext_texture_snorm
            },
            &UncompressedFloatFormat::U8U8 => {
                version >= &Version(Api::Gl, 3, 0) ||
                version >= &Version(Api::GlEs, 3, 0) ||
                extensions.gl_arb_texture_rg
            },
            &UncompressedFloatFormat::I8I8 => {
                version >= &Version(Api::Gl, 3, 2) ||
                version >= &Version(Api::GlEs, 3, 0) ||
                extensions.gl_ext_texture_snorm
            },
            &UncompressedFloatFormat::U16U16 => {
                version >= &Version(Api::Gl, 3, 0) ||
                version >= &Version(Api::GlEs, 3, 0) ||
                extensions.gl_arb_texture_rg
            },
            &UncompressedFloatFormat::I16I16 => {
                version >= &Version(Api::Gl, 3, 2) ||
                version >= &Version(Api::GlEs, 3, 0) ||
                extensions.gl_ext_texture_snorm
            },
            &UncompressedFloatFormat::U3U3U2 => {
                version >= &Version(Api::Gl, 1, 1) ||
                version >= &Version(Api::GlEs, 3, 0)
            },
            &UncompressedFloatFormat::U4U4U4 => {
                version >= &Version(Api::Gl, 1, 1) ||
                version >= &Version(Api::GlEs, 3, 0)
            },
            &UncompressedFloatFormat::U5U5U5 => {
                version >= &Version(Api::Gl, 1, 1) ||
                version >= &Version(Api::GlEs, 3, 0)
            },
            &UncompressedFloatFormat::U8U8U8 => {
                version >= &Version(Api::Gl, 1, 1) ||
                version >= &Version(Api::GlEs, 3, 0)
            },
            &UncompressedFloatFormat::I8I8I8 => {
                version >= &Version(Api::Gl, 3, 2) ||
                version >= &Version(Api::GlEs, 3, 0) ||
                extensions.gl_ext_texture_snorm
            },
            &UncompressedFloatFormat::U10U10U10 => {
                version >= &Version(Api::Gl, 1, 1) ||
                version >= &Version(Api::GlEs, 3, 0)
            },
            &UncompressedFloatFormat::U12U12U12 => {
                version >= &Version(Api::Gl, 1, 1) ||
                version >= &Version(Api::GlEs, 3, 0)
            },
            &UncompressedFloatFormat::U16U16U16 => {
                version >= &Version(Api::Gl, 3, 0) ||
                version >= &Version(Api::GlEs, 3, 0)
            },
            &UncompressedFloatFormat::I16I16I16 => {
                version >= &Version(Api::Gl, 3, 2) ||
                version >= &Version(Api::GlEs, 3, 0) ||
                extensions.gl_ext_texture_snorm
            },
            &UncompressedFloatFormat::U2U2U2U2 => {
                version >= &Version(Api::Gl, 1, 1) ||
                version >= &Version(Api::GlEs, 3, 0)
            },
            &UncompressedFloatFormat::U4U4U4U4 => {
                version >= &Version(Api::Gl, 1, 1) ||
                version >= &Version(Api::GlEs, 3, 0)
            },
            &UncompressedFloatFormat::U5U5U5U1 => {
                version >= &Version(Api::Gl, 1, 1) ||
                version >= &Version(Api::GlEs, 3, 0)
            },
            &UncompressedFloatFormat::U8U8U8U8 => {
                version >= &Version(Api::Gl, 1, 1) ||
                version >= &Version(Api::GlEs, 3, 0)
            },
            &UncompressedFloatFormat::I8I8I8I8 => {
                version >= &Version(Api::Gl, 3, 2) ||
                version >= &Version(Api::GlEs, 3, 0) ||
                extensions.gl_ext_texture_snorm
            },
            &UncompressedFloatFormat::U10U10U10U2 => {
                version >= &Version(Api::Gl, 1, 1) ||
                version >= &Version(Api::GlEs, 3, 0)
            },
            &UncompressedFloatFormat::U12U12U12U12 => {
                version >= &Version(Api::Gl, 1, 1) ||
                version >= &Version(Api::GlEs, 3, 0)
            },
            &UncompressedFloatFormat::U16U16U16U16 => {
                version >= &Version(Api::Gl, 1, 1) ||
                version >= &Version(Api::GlEs, 3, 0)
            },
            &UncompressedFloatFormat::I16I16I16I16 => {
                version >= &Version(Api::Gl, 3, 2) ||
                version >= &Version(Api::GlEs, 3, 0) ||
                extensions.gl_ext_texture_snorm
            },
            &UncompressedFloatFormat::F16 => {
                version >= &Version(Api::Gl, 3, 0) ||
                version >= &Version(Api::GlEs, 3, 0) ||
                (extensions.gl_arb_texture_float && extensions.gl_arb_texture_rg)
            },
            &UncompressedFloatFormat::F16F16 => {
                version >= &Version(Api::Gl, 3, 0) ||
                version >= &Version(Api::GlEs, 3, 0) ||
                (extensions.gl_arb_texture_float && extensions.gl_arb_texture_rg)
            },
            &UncompressedFloatFormat::F16F16F16 => {
                version >= &Version(Api::Gl, 3, 0) ||
                version >= &Version(Api::GlEs, 3, 0) ||
                extensions.gl_arb_texture_float ||
                extensions.gl_ati_texture_float
            },
            &UncompressedFloatFormat::F16F16F16F16 => {
                version >= &Version(Api::Gl, 3, 0) ||
                version >= &Version(Api::GlEs, 3, 0) ||
                extensions.gl_arb_texture_float ||
                extensions.gl_ati_texture_float
            },
            &UncompressedFloatFormat::F32 => {
                version >= &Version(Api::Gl, 3, 0) ||
                version >= &Version(Api::GlEs, 3, 0) ||
                (extensions.gl_arb_texture_float && extensions.gl_arb_texture_rg)
            },
            &UncompressedFloatFormat::F32F32 => {
                version >= &Version(Api::Gl, 3, 0) ||
                version >= &Version(Api::GlEs, 3, 0) ||
                (extensions.gl_arb_texture_float && extensions.gl_arb_texture_rg)
            },
            &UncompressedFloatFormat::F32F32F32 => {
                version >= &Version(Api::Gl, 3, 0) ||
                version >= &Version(Api::GlEs, 3, 0) ||
                extensions.gl_arb_texture_float ||
                extensions.gl_ati_texture_float
            },
            &UncompressedFloatFormat::F32F32F32F32 => {
                version >= &Version(Api::Gl, 3, 0) ||
                version >= &Version(Api::GlEs, 3, 0) ||
                extensions.gl_arb_texture_float ||
                extensions.gl_ati_texture_float
            },
            &UncompressedFloatFormat::F11F11F10 => {
                version >= &Version(Api::Gl, 3, 2) ||
                version >= &Version(Api::GlEs, 3, 0) ||
                extensions.gl_ext_packed_float
            },
            &UncompressedFloatFormat::F9F9F9 => {
                version >= &Version(Api::Gl, 3, 0) ||
                version >= &Version(Api::GlEs, 3, 0) ||
                extensions.gl_ext_texture_shared_exponent
            },
        }
    }

    /// Returns true if a texture or renderbuffer with this format can be used as a framebuffer
    /// attachment.
pub fn is_color_renderable<C: ?Sized>(&self, context: &C) -> bool where C: CapabilitiesSource { // this is the only format that is never renderable if let &UncompressedFloatFormat::F9F9F9 = self { return false; } // checking whether it's supported, so that we don't return `true` by accident if !self.is_supported(context) { return false; } let version = context.get_version(); let extensions = context.get_extensions(); // if we have OpenGL, everything here is color-renderable if version >= &Version(Api::Gl, 1, 0) { return true; } // if we have OpenGL ES, it depends // TODO: there are maybe more formats here match self { &UncompressedFloatFormat::U8 => { version >= &Version(Api::GlEs, 3, 0) || extensions.gl_arb_texture_rg }, &UncompressedFloatFormat::U8U8 => { version >= &Version(Api::GlEs, 3, 0) || extensions.gl_arb_texture_rg }, //&UncompressedFloatFormat::U5U6U5 => true, &UncompressedFloatFormat::U8U8U8 => { version >= &Version(Api::GlEs, 3, 0) || extensions.gl_oes_rgb8_rgba8 }, &UncompressedFloatFormat::U4U4U4U4 => true, &UncompressedFloatFormat::U5U5U5U1 => true, &UncompressedFloatFormat::U8U8U8U8 => { version >= &Version(Api::GlEs, 3, 0) || extensions.gl_arm_rgba8 || extensions.gl_oes_rgb8_rgba8 }, &UncompressedFloatFormat::U10U10U10U2 => version >= &Version(Api::GlEs, 3, 0), &UncompressedFloatFormat::F16 => version >= &Version(Api::GlEs, 3, 2), &UncompressedFloatFormat::F16F16 => version >= &Version(Api::GlEs, 3, 2), &UncompressedFloatFormat::F16F16F16F16 => version >= &Version(Api::GlEs, 3, 2), &UncompressedFloatFormat::F32 => version >= &Version(Api::GlEs, 3, 2), &UncompressedFloatFormat::F32F32 => version >= &Version(Api::GlEs, 3, 2), &UncompressedFloatFormat::F32F32F32F32 => version >= &Version(Api::GlEs, 3, 2), &UncompressedFloatFormat::F11F11F10 => version >= &Version(Api::GlEs, 3, 2), _ => false } } fn to_glenum(&self) -> gl::types::GLenum { match self { &UncompressedFloatFormat::U8 => gl::R8, &UncompressedFloatFormat::I8 => gl::R8_SNORM, 
&UncompressedFloatFormat::U16 => gl::R16, &UncompressedFloatFormat::I16 => gl::R16_SNORM, &UncompressedFloatFormat::U8U8 => gl::RG8, &UncompressedFloatFormat::I8I8 => gl::RG8_SNORM, &UncompressedFloatFormat::U16U16 => gl::RG16, &UncompressedFloatFormat::I16I16 => gl::RG16_SNORM, &UncompressedFloatFormat::U3U3U2 => gl::R3_G3_B2, &UncompressedFloatFormat::U4U4U4 => gl::RGB4, &UncompressedFloatFormat::U5U5U5 => gl::RGB5, &UncompressedFloatFormat::U8U8U8 => gl::RGB8, &UncompressedFloatFormat::I8I8I8 => gl::RGB8_SNORM, &UncompressedFloatFormat::U10U10U10 => gl::RGB10, &UncompressedFloatFormat::U12U12U12 => gl::RGB12, &UncompressedFloatFormat::U16U16U16 => gl::RGB16, &UncompressedFloatFormat::I16I16I16 => gl::RGB16_SNORM, &UncompressedFloatFormat::U2U2U2U2 => gl::RGBA2, &UncompressedFloatFormat::U4U4U4U4 => gl::RGBA4, &UncompressedFloatFormat::U5U5U5U1 => gl::RGB5_A1, &UncompressedFloatFormat::U8U8U8U8 => gl::RGBA8, &UncompressedFloatFormat::I8I8I8I8 => gl::RGBA8_SNORM, &UncompressedFloatFormat::U10U10U10U2 => gl::RGB10_A2, &UncompressedFloatFormat::U12U12U12U12 => gl::RGBA12, &UncompressedFloatFormat::U16U16U16U16 => gl::RGBA16, &UncompressedFloatFormat::I16I16I16I16 => gl::RGBA16_SNORM, &UncompressedFloatFormat::F16 => gl::R16F, &UncompressedFloatFormat::F16F16 => gl::RG16F, &UncompressedFloatFormat::F16F16F16 => gl::RGB16F, &UncompressedFloatFormat::F16F16F16F16 => gl::RGBA16F, &UncompressedFloatFormat::F32 => gl::R32F, &UncompressedFloatFormat::F32F32 => gl::RG32F, &UncompressedFloatFormat::F32F32F32 => gl::RGB32F, &UncompressedFloatFormat::F32F32F32F32 => gl::RGBA32F, &UncompressedFloatFormat::F11F11F10 => gl::R11F_G11F_B10F, &UncompressedFloatFormat::F9F9F9 => gl::RGB9_E5, } } } /// List of uncompressed pixel formats that contain floating-point data in the sRGB color space. 
#[allow(missing_docs)] #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] pub enum SrgbFormat { U8U8U8, U8U8U8U8, } impl SrgbFormat { /// Returns a list of all the possible values of this enumeration. #[inline] pub fn get_formats_list() -> Vec<SrgbFormat> { vec![ SrgbFormat::U8U8U8, SrgbFormat::U8U8U8U8, ] } /// Turns this format into a more generic `TextureFormat`. #[inline] pub fn to_texture_format(self) -> TextureFormat { TextureFormat::Srgb(self) } /// Returns true if this format is supported by the backend. pub fn is_supported<C: ?Sized>(&self, context: &C) -> bool where C: CapabilitiesSource { let version = context.get_version(); let extensions = context.get_extensions(); match self { &SrgbFormat::U8U8U8 => { version >= &Version(Api::Gl, 2, 1) || version >= &Version(Api::GlEs, 3, 0) || extensions.gl_ext_texture_srgb }, &SrgbFormat::U8U8U8U8 => { version >= &Version(Api::Gl, 2, 1) || version >= &Version(Api::GlEs, 3, 0) || extensions.gl_ext_texture_srgb }, } } /// Returns true if a texture or renderbuffer with this format can be used as a framebuffer /// attachment. pub fn is_color_renderable<C: ?Sized>(&self, context: &C) -> bool where C: CapabilitiesSource { // checking whether it's supported, so that we don't return `true` by accident if !self.is_supported(context) { return false; } let version = context.get_version(); let extensions = context.get_extensions(); match self { &SrgbFormat::U8U8U8 => version >= &Version(Api::Gl, 1, 0), &SrgbFormat::U8U8U8U8 => version >= &Version(Api::Gl, 1, 0) || version >= &Version(Api::GlEs, 3, 0), } } fn to_glenum(&self) -> gl::types::GLenum { match self { &SrgbFormat::U8U8U8 => gl::SRGB8, &SrgbFormat::U8U8U8U8 => gl::SRGB8_ALPHA8, } } } /// List of uncompressed pixel formats that contain signed integral data. #[allow(missing_docs)] #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] pub enum UncompressedIntFormat { I8, I16, I32, I8I8, I16I16, I32I32, I8I8I8, /// May not be supported by renderbuffers. 
I16I16I16,
    /// May not be supported by renderbuffers.
    I32I32I32,
    /// May not be supported by renderbuffers.
    I8I8I8I8,
    I16I16I16I16,
    I32I32I32I32,
}

impl UncompressedIntFormat {
    /// Returns a list of all the possible values of this enumeration.
    #[inline]
    pub fn get_formats_list() -> Vec<UncompressedIntFormat> {
        vec![
            UncompressedIntFormat::I8,
            UncompressedIntFormat::I16,
            UncompressedIntFormat::I32,
            UncompressedIntFormat::I8I8,
            UncompressedIntFormat::I16I16,
            UncompressedIntFormat::I32I32,
            UncompressedIntFormat::I8I8I8,
            UncompressedIntFormat::I16I16I16,
            UncompressedIntFormat::I32I32I32,
            UncompressedIntFormat::I8I8I8I8,
            UncompressedIntFormat::I16I16I16I16,
            UncompressedIntFormat::I32I32I32I32,
        ]
    }

    /// Turns this format into a more generic `TextureFormat`.
    #[inline]
    pub fn to_texture_format(self) -> TextureFormat {
        TextureFormat::UncompressedIntegral(self)
    }

    /// Returns true if this format is supported by the backend.
    ///
    /// All variants need GL 3.0 or EXT_texture_integer; one- and
    /// two-component variants additionally need ARB_texture_rg on the
    /// extension path.
    pub fn is_supported<C: ?Sized>(&self, context: &C) -> bool where C: CapabilitiesSource {
        let version = context.get_version();
        let extensions = context.get_extensions();

        match self {
            &UncompressedIntFormat::I8 => {
                version >= &Version(Api::Gl, 3, 0) ||
                (extensions.gl_ext_texture_integer && extensions.gl_arb_texture_rg)
            },
            &UncompressedIntFormat::I16 => {
                version >= &Version(Api::Gl, 3, 0) ||
                (extensions.gl_ext_texture_integer && extensions.gl_arb_texture_rg)
            },
            &UncompressedIntFormat::I32 => {
                version >= &Version(Api::Gl, 3, 0) ||
                (extensions.gl_ext_texture_integer && extensions.gl_arb_texture_rg)
            },
            &UncompressedIntFormat::I8I8 => {
                version >= &Version(Api::Gl, 3, 0) ||
                (extensions.gl_ext_texture_integer && extensions.gl_arb_texture_rg)
            },
            &UncompressedIntFormat::I16I16 => {
                version >= &Version(Api::Gl, 3, 0) ||
                (extensions.gl_ext_texture_integer && extensions.gl_arb_texture_rg)
            },
            &UncompressedIntFormat::I32I32 => {
                version >= &Version(Api::Gl, 3, 0) ||
                (extensions.gl_ext_texture_integer && extensions.gl_arb_texture_rg)
            },
            &UncompressedIntFormat::I8I8I8 => {
                version >= &Version(Api::Gl, 3, 0) || extensions.gl_ext_texture_integer
            },
            &UncompressedIntFormat::I16I16I16 => {
                version >= &Version(Api::Gl, 3, 0) || extensions.gl_ext_texture_integer
            },
            &UncompressedIntFormat::I32I32I32 => {
                version >= &Version(Api::Gl, 3, 0) || extensions.gl_ext_texture_integer
            },
            &UncompressedIntFormat::I8I8I8I8 => {
                version >= &Version(Api::Gl, 3, 0) || extensions.gl_ext_texture_integer
            },
            &UncompressedIntFormat::I16I16I16I16 => {
                version >= &Version(Api::Gl, 3, 0) || extensions.gl_ext_texture_integer
            },
            &UncompressedIntFormat::I32I32I32I32 => {
                version >= &Version(Api::Gl, 3, 0) || extensions.gl_ext_texture_integer
            },
        }
    }

    /// Returns true if a texture or renderbuffer with this format can be used as a framebuffer
    /// attachment.
    pub fn is_color_renderable<C: ?Sized>(&self, context: &C) -> bool where C: CapabilitiesSource {
        // checking whether it's supported, so that we don't return `true` by accident
        if !self.is_supported(context) {
            return false;
        }

        let version = context.get_version();

        // if we have OpenGL, everything here is color-renderable
        if version >= &Version(Api::Gl, 1, 0) {
            return true;
        }

        // if we have OpenGL ES, it depends: three-component variants are not
        // renderable, the rest require GLES 3.0
        match self {
            &UncompressedIntFormat::I8 => version >= &Version(Api::GlEs, 3, 0),
            &UncompressedIntFormat::I16 => version >= &Version(Api::GlEs, 3, 0),
            &UncompressedIntFormat::I32 => version >= &Version(Api::GlEs, 3, 0),
            &UncompressedIntFormat::I8I8 => version >= &Version(Api::GlEs, 3, 0),
            &UncompressedIntFormat::I16I16 => version >= &Version(Api::GlEs, 3, 0),
            &UncompressedIntFormat::I32I32 => version >= &Version(Api::GlEs, 3, 0),
            &UncompressedIntFormat::I8I8I8 => false,
            &UncompressedIntFormat::I16I16I16 => false,
            &UncompressedIntFormat::I32I32I32 => false,
            &UncompressedIntFormat::I8I8I8I8 => version >= &Version(Api::GlEs, 3, 0),
            &UncompressedIntFormat::I16I16I16I16 => version >= &Version(Api::GlEs, 3, 0),
            &UncompressedIntFormat::I32I32I32I32 => version >= &Version(Api::GlEs, 3, 0),
        }
    }

    /// Maps this variant to the corresponding OpenGL internal-format enum.
    fn to_glenum(&self) -> gl::types::GLenum {
        match self {
            &UncompressedIntFormat::I8 => gl::R8I,
            &UncompressedIntFormat::I16 => gl::R16I,
            &UncompressedIntFormat::I32 => gl::R32I,
            &UncompressedIntFormat::I8I8 => gl::RG8I,
            &UncompressedIntFormat::I16I16 => gl::RG16I,
            &UncompressedIntFormat::I32I32 => gl::RG32I,
            &UncompressedIntFormat::I8I8I8 => gl::RGB8I,
            &UncompressedIntFormat::I16I16I16 => gl::RGB16I,
            &UncompressedIntFormat::I32I32I32 => gl::RGB32I,
            &UncompressedIntFormat::I8I8I8I8 => gl::RGBA8I,
            &UncompressedIntFormat::I16I16I16I16 => gl::RGBA16I,
            &UncompressedIntFormat::I32I32I32I32 => gl::RGBA32I,
        }
    }
}

/// List of uncompressed pixel formats that contain unsigned integral data.
#[allow(missing_docs)]
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
pub enum UncompressedUintFormat {
    U8,
    U16,
    U32,
    U8U8,
    U16U16,
    U32U32,
    U8U8U8,
    /// May not be supported by renderbuffers.
    U16U16U16,
    /// May not be supported by renderbuffers.
    U32U32U32,
    /// May not be supported by renderbuffers.
    U8U8U8U8,
    U16U16U16U16,
    U32U32U32U32,
    U10U10U10U2,
}

impl UncompressedUintFormat {
    /// Returns a list of all the possible values of this enumeration.
    #[inline]
    pub fn get_formats_list() -> Vec<UncompressedUintFormat> {
        vec![
            UncompressedUintFormat::U8,
            UncompressedUintFormat::U16,
            UncompressedUintFormat::U32,
            UncompressedUintFormat::U8U8,
            UncompressedUintFormat::U16U16,
            UncompressedUintFormat::U32U32,
            UncompressedUintFormat::U8U8U8,
            UncompressedUintFormat::U16U16U16,
            UncompressedUintFormat::U32U32U32,
            UncompressedUintFormat::U8U8U8U8,
            UncompressedUintFormat::U16U16U16U16,
            UncompressedUintFormat::U32U32U32U32,
            UncompressedUintFormat::U10U10U10U2,
        ]
    }

    /// Turns this format into a more generic `TextureFormat`.
    #[inline]
    pub fn to_texture_format(self) -> TextureFormat {
        TextureFormat::UncompressedUnsigned(self)
    }

    /// Returns true if this format is supported by the backend.
pub fn is_supported<C: ?Sized>(&self, context: &C) -> bool where C: CapabilitiesSource { let version = context.get_version(); let extensions = context.get_extensions(); match self { &UncompressedUintFormat::U8 => { version >= &Version(Api::Gl, 3, 0) || (extensions.gl_ext_texture_integer && extensions.gl_arb_texture_rg) }, &UncompressedUintFormat::U16 => { version >= &Version(Api::Gl, 3, 0) || (extensions.gl_ext_texture_integer && extensions.gl_arb_texture_rg) }, &UncompressedUintFormat::U32 => { version >= &Version(Api::Gl, 3, 0) || (extensions.gl_ext_texture_integer && extensions.gl_arb_texture_rg) }, &UncompressedUintFormat::U8U8 => { version >= &Version(Api::Gl, 3, 0) || (extensions.gl_ext_texture_integer && extensions.gl_arb_texture_rg) }, &UncompressedUintFormat::U16U16 => { version >= &Version(Api::Gl, 3, 0) || (extensions.gl_ext_texture_integer && extensions.gl_arb_texture_rg) }, &UncompressedUintFormat::U32U32 => { version >= &Version(Api::Gl, 3, 0) || (extensions.gl_ext_texture_integer && extensions.gl_arb_texture_rg) }, &UncompressedUintFormat::U8U8U8 => { version >= &Version(Api::Gl, 3, 0) || extensions.gl_ext_texture_integer }, &UncompressedUintFormat::U16U16U16 => { version >= &Version(Api::Gl, 3, 0) || extensions.gl_ext_texture_integer }, &UncompressedUintFormat::U32U32U32 => { version >= &Version(Api::Gl, 3, 0) || extensions.gl_ext_texture_integer }, &UncompressedUintFormat::U8U8U8U8 => { version >= &Version(Api::Gl, 3, 0) || extensions.gl_ext_texture_integer }, &UncompressedUintFormat::U16U16U16U16 => { version >= &Version(Api::Gl, 3, 0) || extensions.gl_ext_texture_integer }, &UncompressedUintFormat::U32U32U32U32 => { version >= &Version(Api::Gl, 3, 0) || extensions.gl_ext_texture_integer }, &UncompressedUintFormat::U10U10U10U2 => { version >= &Version(Api::Gl, 3, 3) || extensions.gl_arb_texture_rgb10_a2ui }, } } /// Returns true if a texture or renderbuffer with this format can be used as a framebuffer /// attachment. 
pub fn is_color_renderable<C: ?Sized>(&self, context: &C) -> bool where C: CapabilitiesSource { // checking whether it's supported, so that we don't return `true` by accident if !self.is_supported(context) { return false; } let version = context.get_version(); // if we have OpenGL, everything here is color-renderable if version >= &Version(Api::Gl, 1, 0) { return true; } // if we have OpenGL ES, it depends match self { &UncompressedUintFormat::U8 => version >= &Version(Api::GlEs, 3, 0), &UncompressedUintFormat::U16 => version >= &Version(Api::GlEs, 3, 0), &UncompressedUintFormat::U32 => version >= &Version(Api::GlEs, 3, 0), &UncompressedUintFormat::U8U8 => version >= &Version(Api::GlEs, 3, 0), &UncompressedUintFormat::U16U16 => version >= &Version(Api::GlEs, 3, 0), &UncompressedUintFormat::U32U32 => version >= &Version(Api::GlEs, 3, 0), &UncompressedUintFormat::U8U8U8 => false, &UncompressedUintFormat::U16U16U16 => false, &UncompressedUintFormat::U32U32U32 => false, &UncompressedUintFormat::U8U8U8U8 => version >= &Version(Api::GlEs, 3, 0), &UncompressedUintFormat::U16U16U16U16 => version >= &Version(Api::GlEs, 3, 0), &UncompressedUintFormat::U32U32U32U32 => version >= &Version(Api::GlEs, 3, 0), &UncompressedUintFormat::U10U10U10U2 => version >= &Version(Api::GlEs, 3, 0), } } fn to_glenum(&self) -> gl::types::GLenum { match self { &UncompressedUintFormat::U8 => gl::R8UI, &UncompressedUintFormat::U16 => gl::R16UI, &UncompressedUintFormat::U32 => gl::R32UI, &UncompressedUintFormat::U8U8 => gl::RG8UI, &UncompressedUintFormat::U16U16 => gl::RG16UI, &UncompressedUintFormat::U32U32 => gl::RG32UI, &UncompressedUintFormat::U8U8U8 => gl::RGB8UI, &UncompressedUintFormat::U16U16U16 => gl::RGB16UI, &UncompressedUintFormat::U32U32U32 => gl::RGB32UI, &UncompressedUintFormat::U8U8U8U8 => gl::RGBA8UI, &UncompressedUintFormat::U16U16U16U16 => gl::RGBA16UI, &UncompressedUintFormat::U32U32U32U32 => gl::RGBA32UI, &UncompressedUintFormat::U10U10U10U2 => gl::RGB10_A2UI, } } } /// List of 
compressed texture formats. #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] pub enum CompressedFormat { /// Red/green compressed texture with one unsigned component. RgtcFormatU, /// Red/green compressed texture with one signed component. RgtcFormatI, /// Red/green compressed texture with two unsigned components. RgtcFormatUU, /// Red/green compressed texture with two signed components. RgtcFormatII, /// BPTC format with four components represented as integers. BptcUnorm4, /// BPTC format with three components (no alpha) represented as signed floats. BptcSignedFloat3, /// BPTC format with three components (no alpha) represented as unsigned floats. BptcUnsignedFloat3, /// S3TC DXT1 without alpha, see https://www.opengl.org/wiki/S3_Texture_Compression. S3tcDxt1NoAlpha, /// S3TC DXT1 with 1-bit alpha, see https://www.opengl.org/wiki/S3_Texture_Compression. S3tcDxt1Alpha, /// S3TC DXT3, see https://www.opengl.org/wiki/S3_Texture_Compression. S3tcDxt3Alpha, /// S3TC DXT5, see https://www.opengl.org/wiki/S3_Texture_Compression. S3tcDxt5Alpha, } impl CompressedFormat { /// Returns a list of all the possible values of this enumeration. #[inline] pub fn get_formats_list() -> Vec<CompressedFormat> { vec![ CompressedFormat::RgtcFormatU, CompressedFormat::RgtcFormatI, CompressedFormat::RgtcFormatUU, CompressedFormat::RgtcFormatII, CompressedFormat::BptcUnorm4, CompressedFormat::BptcSignedFloat3, CompressedFormat::BptcUnsignedFloat3, CompressedFormat::S3tcDxt1NoAlpha, CompressedFormat::S3tcDxt1Alpha, CompressedFormat::S3tcDxt3Alpha, CompressedFormat::S3tcDxt5Alpha, ] } /// Turns this format into a more generic `TextureFormat`. #[inline] pub fn to_texture_format(self) -> TextureFormat { TextureFormat::CompressedFormat(self) } /// Returns true if this format is supported by the backend. 
pub fn is_supported<C: ?Sized>(&self, context: &C) -> bool where C: CapabilitiesSource { let version = context.get_version(); let extensions = context.get_extensions(); match self { &CompressedFormat::RgtcFormatU => { version >= &Version(Api::Gl, 3, 0) }, &CompressedFormat::RgtcFormatI => { version >= &Version(Api::Gl, 3, 0) }, &CompressedFormat::RgtcFormatUU => { version >= &Version(Api::Gl, 3, 0) }, &CompressedFormat::RgtcFormatII => { version >= &Version(Api::Gl, 3, 0) }, &CompressedFormat::BptcUnorm4 => { version >= &Version(Api::Gl, 4, 2) || extensions.gl_arb_texture_compression_bptc }, &CompressedFormat::BptcSignedFloat3 => { version >= &Version(Api::Gl, 4, 2) || extensions.gl_arb_texture_compression_bptc }, &CompressedFormat::BptcUnsignedFloat3 => { version >= &Version(Api::Gl, 4, 2) || extensions.gl_arb_texture_compression_bptc }, &CompressedFormat::S3tcDxt1NoAlpha => { extensions.gl_ext_texture_compression_s3tc }, &CompressedFormat::S3tcDxt1Alpha => { extensions.gl_ext_texture_compression_s3tc }, &CompressedFormat::S3tcDxt3Alpha => { extensions.gl_ext_texture_compression_s3tc }, &CompressedFormat::S3tcDxt5Alpha => { extensions.gl_ext_texture_compression_s3tc }, } } fn to_glenum(&self) -> gl::types::GLenum { match self { &CompressedFormat::RgtcFormatU => gl::COMPRESSED_RED_RGTC1, &CompressedFormat::RgtcFormatI => gl::COMPRESSED_SIGNED_RED_RGTC1, &CompressedFormat::RgtcFormatUU => gl::COMPRESSED_RG_RGTC2, &CompressedFormat::RgtcFormatII => gl::COMPRESSED_SIGNED_RG_RGTC2, &CompressedFormat::BptcUnorm4 => gl::COMPRESSED_RGBA_BPTC_UNORM, &CompressedFormat::BptcSignedFloat3 => gl::COMPRESSED_RGB_BPTC_SIGNED_FLOAT, &CompressedFormat::BptcUnsignedFloat3 => gl::COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT, &CompressedFormat::S3tcDxt1NoAlpha => gl::COMPRESSED_RGB_S3TC_DXT1_EXT, &CompressedFormat::S3tcDxt1Alpha => gl::COMPRESSED_RGBA_S3TC_DXT1_EXT, &CompressedFormat::S3tcDxt3Alpha => gl::COMPRESSED_RGBA_S3TC_DXT3_EXT, &CompressedFormat::S3tcDxt5Alpha => 
gl::COMPRESSED_RGBA_S3TC_DXT5_EXT, } } } /// List of compressed pixel formats in the sRGB color space. #[allow(missing_docs)] #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] pub enum CompressedSrgbFormat { /// BPTC format. sRGB with alpha. Also called `BC7` by DirectX. Bptc, S3tcDxt1NoAlpha, S3tcDxt1Alpha, S3tcDxt3Alpha, S3tcDxt5Alpha, } impl CompressedSrgbFormat { /// Returns a list of all the possible values of this enumeration. #[inline] pub fn get_formats_list() -> Vec<CompressedSrgbFormat> { vec![ CompressedSrgbFormat::Bptc, CompressedSrgbFormat::S3tcDxt1NoAlpha, CompressedSrgbFormat::S3tcDxt1Alpha, CompressedSrgbFormat::S3tcDxt3Alpha, CompressedSrgbFormat::S3tcDxt5Alpha, ] } /// Turns this format into a more generic `TextureFormat`. #[inline] pub fn to_texture_format(self) -> TextureFormat { TextureFormat::CompressedSrgbFormat(self) } /// Returns true if this format is supported by the backend. pub fn is_supported<C: ?Sized>(&self, context: &C) -> bool where C: CapabilitiesSource { let version = context.get_version(); let extensions = context.get_extensions(); match self { &CompressedSrgbFormat::Bptc => { version >= &Version(Api::Gl, 4, 2) || extensions.gl_arb_texture_compression_bptc }, &CompressedSrgbFormat::S3tcDxt1NoAlpha => { extensions.gl_ext_texture_compression_s3tc && extensions.gl_ext_texture_srgb }, &CompressedSrgbFormat::S3tcDxt1Alpha => { extensions.gl_ext_texture_compression_s3tc && extensions.gl_ext_texture_srgb }, &CompressedSrgbFormat::S3tcDxt3Alpha => { extensions.gl_ext_texture_compression_s3tc && extensions.gl_ext_texture_srgb }, &CompressedSrgbFormat::S3tcDxt5Alpha => { extensions.gl_ext_texture_compression_s3tc && extensions.gl_ext_texture_srgb }, } } fn to_glenum(&self) -> gl::types::GLenum { match self { &CompressedSrgbFormat::Bptc => gl::COMPRESSED_SRGB_ALPHA_BPTC_UNORM, &CompressedSrgbFormat::S3tcDxt1NoAlpha => gl::COMPRESSED_SRGB_S3TC_DXT1_EXT, &CompressedSrgbFormat::S3tcDxt1Alpha => gl::COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, 
&CompressedSrgbFormat::S3tcDxt3Alpha => gl::COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT, &CompressedSrgbFormat::S3tcDxt5Alpha => gl::COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, } } } /// List of formats available for depth textures. /// /// `I16`, `I24` and `I32` are still treated as if they were floating points. /// Only the internal representation is integral. #[allow(missing_docs)] #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] pub enum DepthFormat { I16, I24, /// May not be supported by all hardware. I32, F32, } impl DepthFormat { /// Returns a list of all the possible values of this enumeration. #[inline] pub fn get_formats_list() -> Vec<DepthFormat> { vec![ DepthFormat::I16, DepthFormat::I24, DepthFormat::I32, DepthFormat::F32, ] } /// Turns this format into a more generic `TextureFormat`. #[inline] pub fn to_texture_format(self) -> TextureFormat { TextureFormat::DepthFormat(self) } /// Returns true if this format is supported by the backend. pub fn is_supported<C: ?Sized>(&self, context: &C) -> bool where C: CapabilitiesSource { let version = context.get_version(); let extensions = context.get_extensions(); match self { &DepthFormat::I16 => { version >= &Version(Api::Gl, 3, 0) || extensions.gl_arb_depth_texture }, &DepthFormat::I24 => { version >= &Version(Api::Gl, 3, 0) || extensions.gl_arb_depth_texture }, &DepthFormat::I32 => { version >= &Version(Api::Gl, 3, 0) || extensions.gl_arb_depth_texture }, &DepthFormat::F32 => { version >= &Version(Api::Gl, 3, 0) }, } } fn to_glenum(&self) -> gl::types::GLenum { match self { &DepthFormat::I16 => gl::DEPTH_COMPONENT16, &DepthFormat::I24 => gl::DEPTH_COMPONENT24, &DepthFormat::I32 => gl::DEPTH_COMPONENT32, &DepthFormat::F32 => gl::DEPTH_COMPONENT32F, } } } /// List of formats available for depth-stencil textures. 
// TODO: If OpenGL 4.3 or ARB_stencil_texturing is not available, then depth/stencil
// textures are treated by samplers exactly like depth-only textures
#[allow(missing_docs)]
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
pub enum DepthStencilFormat {
    I24I8,
    F32I8,
}

impl DepthStencilFormat {
    /// Returns a list of all the possible values of this enumeration.
    #[inline]
    pub fn get_formats_list() -> Vec<DepthStencilFormat> {
        vec![DepthStencilFormat::I24I8, DepthStencilFormat::F32I8]
    }

    /// Turns this format into a more generic `TextureFormat`.
    #[inline]
    pub fn to_texture_format(self) -> TextureFormat {
        TextureFormat::DepthStencilFormat(self)
    }

    /// Returns true if this format is supported by the backend.
    pub fn is_supported<C: ?Sized>(&self, context: &C) -> bool where C: CapabilitiesSource {
        let version = context.get_version();
        let extensions = context.get_extensions();

        match *self {
            // Packed 24/8: core since 3.0, or via the packed-depth-stencil extensions.
            DepthStencilFormat::I24I8 => {
                version >= &Version(Api::Gl, 3, 0) ||
                    extensions.gl_ext_packed_depth_stencil ||
                    extensions.gl_oes_packed_depth_stencil
            },

            // Float depth + stencil is a 3.0 feature.
            DepthStencilFormat::F32I8 => {
                version >= &Version(Api::Gl, 3, 0)
            },
        }
    }

    /// Maps the format to its sized OpenGL internal-format constant.
    fn to_glenum(&self) -> gl::types::GLenum {
        match *self {
            DepthStencilFormat::I24I8 => gl::DEPTH24_STENCIL8,
            DepthStencilFormat::F32I8 => gl::DEPTH32F_STENCIL8,
        }
    }
}

/// List of formats available for stencil textures.
///
/// You are strongly advised to only use `I8`.
///
/// Stencil textures are a very recent OpenGL feature that may not be supported everywhere.
/// Only `I8` is supported for textures. All the other formats can only be used with renderbuffers.
#[allow(missing_docs)]
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
pub enum StencilFormat {
    I1,
    I4,
    I8,
    I16,
}

impl StencilFormat {
    /// Returns a list of all the possible values of this enumeration.
    #[inline]
    pub fn get_formats_list() -> Vec<StencilFormat> {
        use self::StencilFormat::*;
        vec![I1, I4, I8, I16]
    }

    /// Turns this format into a more generic `TextureFormat`.
    #[inline]
    pub fn to_texture_format(self) -> TextureFormat {
        TextureFormat::StencilFormat(self)
    }

    /// Returns true if this format is supported by the backend for textures.
    pub fn is_supported_for_textures<C: ?Sized>(&self, context: &C) -> bool
                                                where C: CapabilitiesSource {
        let version = context.get_version();
        let extensions = context.get_extensions();

        // Only `I8` is ever usable for stencil *textures*.
        match *self {
            StencilFormat::I8 => {
                version >= &Version(Api::Gl, 4, 4) ||
                    version >= &Version(Api::GlEs, 3, 2) ||
                    extensions.gl_arb_texture_stencil8 ||
                    extensions.gl_oes_texture_stencil8
            },
            _ => false,
        }
    }

    /// Returns true if this format is supported by the backend for renderbuffers.
    pub fn is_supported_for_renderbuffers<C: ?Sized>(&self, context: &C) -> bool
                                                     where C: CapabilitiesSource {
        let version = context.get_version();
        let extensions = context.get_extensions();

        match *self {
            StencilFormat::I1 => {
                version >= &Version(Api::Gl, 3, 0) ||
                    extensions.gl_ext_framebuffer_object ||
                    extensions.gl_arb_framebuffer_object ||
                    extensions.gl_oes_stencil1
            },
            StencilFormat::I4 => {
                version >= &Version(Api::Gl, 3, 0) ||
                    extensions.gl_ext_framebuffer_object ||
                    extensions.gl_arb_framebuffer_object ||
                    extensions.gl_oes_stencil4
            },
            StencilFormat::I8 => {
                version >= &Version(Api::Gl, 3, 0) ||
                    extensions.gl_arb_texture_stencil8 ||
                    version >= &Version(Api::GlEs, 2, 0)
            },
            StencilFormat::I16 => {
                version >= &Version(Api::Gl, 3, 0) ||
                    extensions.gl_ext_framebuffer_object ||
                    extensions.gl_arb_framebuffer_object
            },
        }
    }

    /// Maps the format to its sized OpenGL internal-format constant.
    fn to_glenum(&self) -> gl::types::GLenum {
        match *self {
            StencilFormat::I1 => gl::STENCIL_INDEX1,
            StencilFormat::I4 => gl::STENCIL_INDEX4,
            StencilFormat::I8 => gl::STENCIL_INDEX8,
            StencilFormat::I16 => gl::STENCIL_INDEX16,
        }
    }
}

/// Format of the internal representation of a texture.
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
#[allow(missing_docs)]
pub enum TextureFormat {
    UncompressedFloat(UncompressedFloatFormat),
    UncompressedIntegral(UncompressedIntFormat),
    UncompressedUnsigned(UncompressedUintFormat),
    Srgb(SrgbFormat),
    CompressedFormat(CompressedFormat),
    CompressedSrgbFormat(CompressedSrgbFormat),
    DepthFormat(DepthFormat),
    StencilFormat(StencilFormat),
    DepthStencilFormat(DepthStencilFormat),
}

impl TextureFormat {
    /// Returns a list of all the possible values of this enumeration.
    #[inline]
    pub fn get_formats_list() -> Vec<TextureFormat> {
        // Build the list sequentially instead of the previous nine-deep nested
        // `chain` pyramid (flagged by a TODO as unreadable). The order is
        // unchanged: float, integral, unsigned, sRGB, compressed, compressed
        // sRGB, depth, stencil, depth-stencil.
        let mut formats = Vec::new();
        formats.extend(UncompressedFloatFormat::get_formats_list().into_iter().map(|f| f.to_texture_format()));
        formats.extend(UncompressedIntFormat::get_formats_list().into_iter().map(|f| f.to_texture_format()));
        formats.extend(UncompressedUintFormat::get_formats_list().into_iter().map(|f| f.to_texture_format()));
        formats.extend(SrgbFormat::get_formats_list().into_iter().map(|f| f.to_texture_format()));
        formats.extend(CompressedFormat::get_formats_list().into_iter().map(|f| f.to_texture_format()));
        formats.extend(CompressedSrgbFormat::get_formats_list().into_iter().map(|f| f.to_texture_format()));
        formats.extend(DepthFormat::get_formats_list().into_iter().map(|f| f.to_texture_format()));
        formats.extend(StencilFormat::get_formats_list().into_iter().map(|f| f.to_texture_format()));
        formats.extend(DepthStencilFormat::get_formats_list().into_iter().map(|f| f.to_texture_format()));
        formats
    }

    /// Returns true if this format is supported by the backend for textures.
#[inline] pub fn is_supported_for_textures<C: ?Sized>(&self, c: &C) -> bool where C: CapabilitiesSource { match self { &TextureFormat::UncompressedFloat(format) => format.is_supported(c), &TextureFormat::UncompressedIntegral(format) => format.is_supported(c), &TextureFormat::UncompressedUnsigned(format) => format.is_supported(c), &TextureFormat::Srgb(format) => format.is_supported(c), &TextureFormat::CompressedFormat(format) => format.is_supported(c), &TextureFormat::CompressedSrgbFormat(format) => format.is_supported(c), &TextureFormat::DepthFormat(format) => format.is_supported(c), &TextureFormat::StencilFormat(format) => format.is_supported_for_textures(c), &TextureFormat::DepthStencilFormat(format) => format.is_supported(c), } } /// Returns true if this format is supported by the backend for renderbuffers. #[inline] pub fn is_supported_for_renderbuffers<C: ?Sized>(&self, c: &C) -> bool where C: CapabilitiesSource { match self { &TextureFormat::UncompressedFloat(format) => format.is_supported(c), &TextureFormat::UncompressedIntegral(format) => format.is_supported(c), &TextureFormat::UncompressedUnsigned(format) => format.is_supported(c), &TextureFormat::Srgb(format) => format.is_supported(c), &TextureFormat::CompressedFormat(format) => format.is_supported(c), &TextureFormat::CompressedSrgbFormat(format) => format.is_supported(c), &TextureFormat::DepthFormat(format) => format.is_supported(c), &TextureFormat::StencilFormat(format) => format.is_supported_for_renderbuffers(c), &TextureFormat::DepthStencilFormat(format) => format.is_supported(c), } } /// Returns true if the format is color-renderable, depth-renderable, depth-stencil-renderable /// or stencil-renderable. 
#[inline] pub fn is_renderable<C: ?Sized>(&self, c: &C) -> bool where C: CapabilitiesSource { match self { &TextureFormat::UncompressedFloat(format) => format.is_color_renderable(c), &TextureFormat::UncompressedIntegral(format) => format.is_color_renderable(c), &TextureFormat::UncompressedUnsigned(format) => format.is_color_renderable(c), &TextureFormat::Srgb(format) => format.is_color_renderable(c), &TextureFormat::CompressedFormat(_) => false, &TextureFormat::CompressedSrgbFormat(_) => false, &TextureFormat::DepthFormat(_) => true, &TextureFormat::StencilFormat(_) => true, &TextureFormat::DepthStencilFormat(_) => true, } } } impl ToGlEnum for TextureFormat { fn to_glenum(&self) -> gl::types::GLenum { match self { &TextureFormat::UncompressedFloat(f) => f.to_glenum(), &TextureFormat::UncompressedIntegral(f) => f.to_glenum(), &TextureFormat::UncompressedUnsigned(f) => f.to_glenum(), &TextureFormat::Srgb(f) => f.to_glenum(), &TextureFormat::CompressedFormat(f) => f.to_glenum(), &TextureFormat::CompressedSrgbFormat(f) => f.to_glenum(), &TextureFormat::DepthFormat(f) => f.to_glenum(), &TextureFormat::StencilFormat(f) => f.to_glenum(), &TextureFormat::DepthStencilFormat(f) => f.to_glenum(), } } } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ClientFormatAny { ClientFormat(ClientFormat), CompressedFormat(CompressedFormat), CompressedSrgbFormat(CompressedSrgbFormat), } impl ClientFormatAny { /// Checks if this format is a compressed format. #[inline] pub fn is_compressed(&self) -> bool { match *self { ClientFormatAny::ClientFormat(_) => false, ClientFormatAny::CompressedFormat(_) => true, ClientFormatAny::CompressedSrgbFormat(_) => true, } } /// Gets the size in bytes of the buffer required to store a uncompressed image /// of the specified dimensions on this format. /// /// ## Panic /// /// Panics if the dimensions are invalid for this format. 
pub fn get_buffer_size(&self, width: u32, height: Option<u32>, depth: Option<u32>, array_size: Option<u32>) -> usize { match *self { ClientFormatAny::ClientFormat(ref format) => { format.get_size() * width as usize * height.unwrap_or(1) as usize * depth.unwrap_or(1) as usize * array_size.unwrap_or(1) as usize }, // 8 bytes per 4x4 block ClientFormatAny::CompressedFormat(CompressedFormat::S3tcDxt1Alpha) | ClientFormatAny::CompressedSrgbFormat(CompressedSrgbFormat::S3tcDxt1Alpha) | ClientFormatAny::CompressedFormat(CompressedFormat::S3tcDxt1NoAlpha) | ClientFormatAny::CompressedSrgbFormat(CompressedSrgbFormat::S3tcDxt1NoAlpha) | ClientFormatAny::CompressedFormat(CompressedFormat::RgtcFormatU) | ClientFormatAny::CompressedFormat(CompressedFormat::RgtcFormatI) => { let width = if width < 4 { 4 } else { width as usize }; let height = height.map(|height| if height < 4 { 4 } else { height as usize }) .expect("ST3C, RGTC and BPTC textures must have 2 dimensions"); if (width % 4) != 0 || (height % 4) != 0 { panic!("ST3C, RGTC and BPTC textures must have a width and height multiple of 4."); } if depth.is_some() { // allow `array_size` (2D textures arrays) but not depth (3D textures) panic!("ST3C, RGTC and BPTC textures are 2 dimension only.") } let uncompressed_bit_size = 4 * width as usize * height as usize * depth.unwrap_or(1) as usize * array_size.unwrap_or(1) as usize; uncompressed_bit_size / 8 // Apply 8:1 compression ratio }, // 16 bytes per 4x4 block ClientFormatAny::CompressedFormat(CompressedFormat::S3tcDxt3Alpha) | ClientFormatAny::CompressedSrgbFormat(CompressedSrgbFormat::S3tcDxt3Alpha) | ClientFormatAny::CompressedFormat(CompressedFormat::S3tcDxt5Alpha) | ClientFormatAny::CompressedSrgbFormat(CompressedSrgbFormat::S3tcDxt5Alpha) | ClientFormatAny::CompressedFormat(CompressedFormat::BptcUnorm4) | ClientFormatAny::CompressedSrgbFormat(CompressedSrgbFormat::Bptc) | ClientFormatAny::CompressedFormat(CompressedFormat::BptcSignedFloat3) | 
ClientFormatAny::CompressedFormat(CompressedFormat::BptcUnsignedFloat3) | ClientFormatAny::CompressedFormat(CompressedFormat::RgtcFormatUU) | ClientFormatAny::CompressedFormat(CompressedFormat::RgtcFormatII) => { let width = if width < 4 { 4 } else { width as usize }; let height = height.map(|height| if height < 4 { 4 } else { height as usize }) .expect("ST3C, RGTC and BPTC textures must have 2 dimensions"); if (width % 4) != 0 || (height % 4) != 0 { panic!("ST3C, RGTC and BPTC textures must have a width and height multiple of 4."); } if depth.is_some() { // allow `array_size` (2D textures arrays) but not depth (3D textures) panic!("ST3C, RGTC and BPTC textures are 2 dimension only.") } let uncompressed_bit_size = 4 * width as usize * height as usize * depth.unwrap_or(1) as usize * array_size.unwrap_or(1) as usize; uncompressed_bit_size / 4 // Apply 4:1 compression ratio }, } } #[inline] pub fn get_num_components(&self) -> u8 { match *self { ClientFormatAny::ClientFormat(ref format) => format.get_num_components(), _ => unimplemented!(), } } #[doc(hidden)] pub fn from_internal_compressed_format(internal: gl::types::GLenum) -> Option<ClientFormatAny> { match internal { gl::COMPRESSED_RGB_S3TC_DXT1_EXT => Some(ClientFormatAny::CompressedFormat(CompressedFormat::S3tcDxt1NoAlpha)), gl::COMPRESSED_RGBA_S3TC_DXT1_EXT => Some(ClientFormatAny::CompressedFormat(CompressedFormat::S3tcDxt1Alpha)), gl::COMPRESSED_RGBA_S3TC_DXT3_EXT => Some(ClientFormatAny::CompressedFormat(CompressedFormat::S3tcDxt3Alpha)), gl::COMPRESSED_RGBA_S3TC_DXT5_EXT => Some(ClientFormatAny::CompressedFormat(CompressedFormat::S3tcDxt5Alpha)), gl::COMPRESSED_SRGB_S3TC_DXT1_EXT => Some(ClientFormatAny::CompressedSrgbFormat(CompressedSrgbFormat::S3tcDxt1NoAlpha)), gl::COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT => Some(ClientFormatAny::CompressedSrgbFormat(CompressedSrgbFormat::S3tcDxt1Alpha)), gl::COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT => 
Some(ClientFormatAny::CompressedSrgbFormat(CompressedSrgbFormat::S3tcDxt3Alpha)), gl::COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT => Some(ClientFormatAny::CompressedSrgbFormat(CompressedSrgbFormat::S3tcDxt5Alpha)), gl::COMPRESSED_RGBA_BPTC_UNORM => Some(ClientFormatAny::CompressedFormat(CompressedFormat::BptcUnorm4)), gl::COMPRESSED_SRGB_ALPHA_BPTC_UNORM => Some(ClientFormatAny::CompressedSrgbFormat(CompressedSrgbFormat::Bptc)), gl::COMPRESSED_RGB_BPTC_SIGNED_FLOAT => Some(ClientFormatAny::CompressedFormat(CompressedFormat::BptcSignedFloat3)), gl::COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT => Some(ClientFormatAny::CompressedFormat(CompressedFormat::BptcUnsignedFloat3)), gl::COMPRESSED_RED_RGTC1 => Some(ClientFormatAny::CompressedFormat(CompressedFormat::RgtcFormatU)), gl::COMPRESSED_SIGNED_RED_RGTC1 => Some(ClientFormatAny::CompressedFormat(CompressedFormat::RgtcFormatI)), gl::COMPRESSED_RG_RGTC2 => Some(ClientFormatAny::CompressedFormat(CompressedFormat::RgtcFormatUU)), gl::COMPRESSED_SIGNED_RG_RGTC2 => Some(ClientFormatAny::CompressedFormat(CompressedFormat::RgtcFormatII)), _ => None, } } } /// Type of request. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum RequestType { /// A format suitable for `glTexImage#D`. TexImage(Option<ClientFormatAny>), /// A format suitable for `glTexStorage#D`. TexStorage, /// A format suitable for `glRenderbufferStorage`. Renderbuffer, } impl RequestType { /// Returns the client format of the data that will be put in the texture. #[inline] pub fn get_client_format(&self) -> Option<ClientFormatAny> { match self { &RequestType::TexImage(f) => f, &RequestType::TexStorage => None, &RequestType::Renderbuffer => None, } } } /// Checks that the texture format is supported and compatible with the client format. /// /// Returns two `GLenum`s. The first one can be unsized and is suitable for the internal format /// of `glTexImage#D`. The second one is always sized and is suitable for `glTexStorage*D` or /// `glRenderbufferStorage`. 
pub fn format_request_to_glenum(context: &Context, format: TextureFormatRequest, rq_ty: RequestType) -> Result<gl::types::GLenum, FormatNotSupportedError> { let version = context.get_version(); let extensions = context.get_extensions(); let is_client_compressed = match rq_ty.get_client_format() { Some(ref client) => client.is_compressed(), None => false, }; Ok(match format { /*******************************************************************/ /* REGULAR */ /*******************************************************************/ TextureFormatRequest::AnyFloatingPoint => { let size = rq_ty.get_client_format().map(|c| c.get_num_components()); if version >= &Version(Api::Gl, 3, 0) || version >= &Version(Api::GlEs, 3, 0) { match (rq_ty, size) { (RequestType::TexImage(_), Some(1)) => gl::RED, (RequestType::TexImage(_), Some(2)) => gl::RG, (RequestType::TexImage(_), Some(3)) => gl::RGB, (RequestType::TexImage(_), Some(4)) => gl::RGBA, (RequestType::TexImage(_), None) => gl::RGBA, (_, Some(1)) => gl::R8, (_, Some(2)) => gl::RG8, (_, Some(3)) => gl::RGB8, (_, Some(4)) => gl::RGBA8, (_, None) => gl::RGBA8, _ => unreachable!(), } } else if version >= &Version(Api::Gl, 1, 1) { match (rq_ty, size) { (RequestType::TexImage(_), Some(1)) => gl::RED, (RequestType::TexImage(_), Some(2)) => gl::RG, (RequestType::TexImage(_), Some(3)) => gl::RGB, (RequestType::TexImage(_), Some(4)) => gl::RGBA, (RequestType::TexImage(_), None) => gl::RGBA, (_, Some(1)) if extensions.gl_arb_texture_rg => gl::R8, (_, Some(2)) if extensions.gl_arb_texture_rg => gl::RG8, (_, Some(3)) => gl::RGB8, (_, Some(4)) => gl::RGBA8, (_, None) => gl::RGBA8, _ => return Err(FormatNotSupportedError), } } else if version >= &Version(Api::Gl, 1, 0) { match rq_ty { RequestType::TexImage(_) => size.unwrap_or(4) as gl::types::GLenum, _ => return Err(FormatNotSupportedError) } } else if version >= &Version(Api::GlEs, 2, 0) { match (rq_ty, size) { (RequestType::TexImage(_), Some(3)) => gl::RGB, (_, Some(3)) => { if 
extensions.gl_oes_rgb8_rgba8 { gl::RGB8_OES } else if extensions.gl_arm_rgba8 { gl::RGBA8_OES } else { gl::RGB565 } }, (RequestType::TexImage(_), Some(4)) => gl::RGBA, (RequestType::TexImage(_), None) => gl::RGBA, (_, Some(4)) | (_, None) => { if extensions.gl_oes_rgb8_rgba8 || extensions.gl_arm_rgba8 { gl::RGBA8_OES } else { gl::RGB5_A1 } }, _ => return Err(FormatNotSupportedError) } } else { unreachable!(); } }, TextureFormatRequest::Specific(TextureFormat::UncompressedFloat(format)) => { if format.is_supported(context) { format.to_glenum() } else { return Err(FormatNotSupportedError); } }, /*******************************************************************/ /* COMPRESSED */ /*******************************************************************/ TextureFormatRequest::AnyCompressed if is_client_compressed => { // Note: client is always Some here. When refactoring this function it'd be a good idea // to let the client participate on the matching process. let newformat = TextureFormat::CompressedFormat(match rq_ty.get_client_format() { Some(ClientFormatAny::CompressedFormat(format)) => format, _ => unreachable!(), }); format_request_to_glenum(context, TextureFormatRequest::Specific(newformat), rq_ty)? 
}, TextureFormatRequest::AnyCompressed => { match rq_ty { RequestType::TexImage(client) => { let size = client.map(|c| c.get_num_components()); if version >= &Version(Api::Gl, 1, 1) { match size { Some(1) => if version >= &Version(Api::Gl, 3, 0) || extensions.gl_arb_texture_rg { gl::COMPRESSED_RED } else { 1 }, Some(2) => if version >= &Version(Api::Gl, 3, 0) || extensions.gl_arb_texture_rg { gl::COMPRESSED_RG } else { 2 }, Some(3) => gl::COMPRESSED_RGB, Some(4) => gl::COMPRESSED_RGBA, None => gl::COMPRESSED_RGBA, _ => unreachable!(), } } else { // OpenGL 1.0 doesn't support compressed textures, so we use a // regular float format instead size.unwrap_or(4) as gl::types::GLenum } }, RequestType::TexStorage | RequestType::Renderbuffer => { return Err(FormatNotSupportedError) }, } }, TextureFormatRequest::Specific(TextureFormat::CompressedFormat(format)) => { if format.is_supported(context) { format.to_glenum() } else { return Err(FormatNotSupportedError); } }, /*******************************************************************/ /* SRGB */ /*******************************************************************/ TextureFormatRequest::AnySrgb => { let size = rq_ty.get_client_format().map(|c| c.get_num_components()); if version >= &Version(Api::Gl, 2, 1) || version >= &Version(Api::GlEs, 3, 0) || extensions.gl_ext_texture_srgb { match size { Some(1 ..= 3) => gl::SRGB8, Some(4) => gl::SRGB8_ALPHA8, None => if let RequestType::TexImage(_) = rq_ty { gl::SRGB8 } else { gl::SRGB8_ALPHA8 }, _ => unreachable!(), } } else { // no support for sRGB format_request_to_glenum(context, TextureFormatRequest::AnyFloatingPoint, rq_ty)? 
} }, TextureFormatRequest::Specific(TextureFormat::Srgb(format)) => { if format.is_supported(context) { format.to_glenum() } else { return Err(FormatNotSupportedError); } }, /*******************************************************************/ /* COMPRESSED SRGB */ /*******************************************************************/ TextureFormatRequest::AnyCompressedSrgb if is_client_compressed => { let newformat = TextureFormat::CompressedSrgbFormat(match rq_ty.get_client_format() { Some(ClientFormatAny::CompressedSrgbFormat(format)) => format, _ => unreachable!(), }); format_request_to_glenum(context, TextureFormatRequest::Specific(newformat), rq_ty)? }, TextureFormatRequest::AnyCompressedSrgb => { if version >= &Version(Api::Gl, 4, 0) || extensions.gl_ext_texture_srgb { match rq_ty { RequestType::TexImage(client) => { match client.map(|c| c.get_num_components()) { Some(1 ..= 3) => gl::COMPRESSED_SRGB, Some(4) => gl::COMPRESSED_SRGB_ALPHA, None => gl::COMPRESSED_SRGB_ALPHA, _ => unreachable!(), } }, RequestType::TexStorage | RequestType::Renderbuffer => { return Err(FormatNotSupportedError) }, } } else { // no support for compressed srgb textures format_request_to_glenum(context, TextureFormatRequest::AnySrgb, rq_ty)? 
} }, TextureFormatRequest::Specific(TextureFormat::CompressedSrgbFormat(format)) => { if format.is_supported(context) { format.to_glenum() } else { return Err(FormatNotSupportedError); } }, /*******************************************************************/ /* INTEGRAL */ /*******************************************************************/ TextureFormatRequest::AnyIntegral => { let size = rq_ty.get_client_format().map(|c| c.get_num_components()); if version >= &Version(Api::Gl, 3, 0) { match size { // FIXME: choose between 8, 16 and 32 depending on the client format Some(1) => gl::R32I, Some(2) => gl::RG32I, Some(3) => gl::RGB32I, Some(4) => gl::RGBA32I, None => gl::RGBA32I, _ => unreachable!(), } } else { if !extensions.gl_ext_texture_integer { return Err(FormatNotSupportedError); } match size { // FIXME: choose between 8, 16 and 32 depending on the client format Some(1) => if extensions.gl_arb_texture_rg { gl::R32I } else { return Err(FormatNotSupportedError); }, Some(2) => if extensions.gl_arb_texture_rg { gl::RG32I } else { return Err(FormatNotSupportedError); }, Some(3) => gl::RGB32I_EXT, Some(4) => gl::RGBA32I_EXT, None => gl::RGBA32I_EXT, _ => unreachable!(), } } }, TextureFormatRequest::Specific(TextureFormat::UncompressedIntegral(format)) => { if format.is_supported(context) { format.to_glenum() } else { return Err(FormatNotSupportedError); } }, /*******************************************************************/ /* UNSIGNED */ /*******************************************************************/ TextureFormatRequest::AnyUnsigned => { let size = rq_ty.get_client_format().map(|c| c.get_num_components()); if version >= &Version(Api::Gl, 3, 0) { match size { // FIXME: choose between 8, 16 and 32 depending on the client format Some(1) => gl::R32UI, Some(2) => gl::RG32UI, Some(3) => gl::RGB32UI, Some(4) => gl::RGBA32UI, None => gl::RGBA32UI, _ => unreachable!(), } } else { if !extensions.gl_ext_texture_integer { return Err(FormatNotSupportedError); } match 
size { // FIXME: choose between 8, 16 and 32 depending on the client format Some(1) => if extensions.gl_arb_texture_rg { gl::R32UI } else { return Err(FormatNotSupportedError); }, Some(2) => if extensions.gl_arb_texture_rg { gl::RG32UI } else { return Err(FormatNotSupportedError); }, Some(3) => gl::RGB32UI_EXT, Some(4) => gl::RGBA32UI_EXT, None => gl::RGBA32UI_EXT, _ => unreachable!(), } } }, TextureFormatRequest::Specific(TextureFormat::UncompressedUnsigned(format)) => { if format.is_supported(context) { format.to_glenum() } else { return Err(FormatNotSupportedError); } }, /*******************************************************************/ /* DEPTH */ /*******************************************************************/ TextureFormatRequest::AnyDepth => { if version >= &Version(Api::Gl, 2, 0) { match rq_ty { RequestType::TexImage(_) => gl::DEPTH_COMPONENT, RequestType::TexStorage | RequestType::Renderbuffer => gl::DEPTH_COMPONENT24, } } else if version >= &Version(Api::Gl, 1, 4) || extensions.gl_arb_depth_texture || extensions.gl_oes_depth_texture { match rq_ty { RequestType::TexImage(_) => gl::DEPTH_COMPONENT, RequestType::TexStorage | RequestType::Renderbuffer => return Err(FormatNotSupportedError), // TODO: sized format? 
} } else { return Err(FormatNotSupportedError); } }, TextureFormatRequest::Specific(TextureFormat::DepthFormat(format)) => { if format.is_supported(context) { format.to_glenum() } else { return Err(FormatNotSupportedError); } }, /*******************************************************************/ /* STENCIL */ /*******************************************************************/ TextureFormatRequest::AnyStencil => { // TODO: we just request I8, but this could be more flexible return format_request_to_glenum(context, TextureFormatRequest::Specific( TextureFormat::StencilFormat( StencilFormat::I8)), rq_ty); }, TextureFormatRequest::Specific(TextureFormat::StencilFormat(format)) => { match rq_ty { RequestType::TexImage(_) | RequestType::TexStorage => { if format.is_supported_for_textures(context) { format.to_glenum() } else { return Err(FormatNotSupportedError); } }, RequestType::Renderbuffer => { if format.is_supported_for_renderbuffers(context) { format.to_glenum() } else { return Err(FormatNotSupportedError); } }, } }, /*******************************************************************/ /* DEPTH-STENCIL */ /*******************************************************************/ TextureFormatRequest::AnyDepthStencil => { if version >= &Version(Api::Gl, 3, 0) { match rq_ty { RequestType::TexImage(_) => gl::DEPTH_STENCIL, RequestType::TexStorage | RequestType::Renderbuffer => gl::DEPTH24_STENCIL8, } } else if extensions.gl_ext_packed_depth_stencil { match rq_ty { RequestType::TexImage(_) => gl::DEPTH_STENCIL_EXT, RequestType::TexStorage | RequestType::Renderbuffer => gl::DEPTH24_STENCIL8_EXT, } } else if extensions.gl_oes_packed_depth_stencil { match rq_ty { RequestType::TexImage(_) => gl::DEPTH_STENCIL_OES, RequestType::TexStorage | RequestType::Renderbuffer => gl::DEPTH24_STENCIL8_OES, } } else { return Err(FormatNotSupportedError); } }, TextureFormatRequest::Specific(TextureFormat::DepthStencilFormat(format)) => { if format.is_supported(context) { format.to_glenum() 
            } else {
                return Err(FormatNotSupportedError);
            }
        },
    })
}

/// Checks that the client texture format is supported.
///
/// If `inverted` is true, returns a format where the R, G and B components are flipped.
///
/// Returns two GLenums suitable for `glTexImage#D` and `glTexSubImage#D`.
pub fn client_format_to_glenum(context: &Context, client: ClientFormatAny,
                               format: TextureFormatRequest, inverted: bool)
                               -> Result<(gl::types::GLenum, gl::types::GLenum),
                                         FormatNotSupportedError>
{
    // The (format, type) pair is chosen from the requested texture format first;
    // the `inverted` flag is applied at the very end by swapping RGB/RGBA for
    // BGR/BGRA.
    let value = match format {
        // Compressed requests: the client data must itself be compressed, and the
        // same enum is used for both the `format` and `type` slots.
        TextureFormatRequest::AnyCompressed if client.is_compressed() => {
            match client {
                ClientFormatAny::CompressedFormat(client_format) => {
                    if client_format.is_supported(context) {
                        let e = client_format.to_glenum();
                        Ok((e, e))
                    } else {
                        return Err(FormatNotSupportedError);
                    }
                },
                // Guarded by `client.is_compressed()` above.
                _ => unreachable!(),
            }
        },
        TextureFormatRequest::AnyCompressedSrgb if client.is_compressed() => {
            match client {
                ClientFormatAny::CompressedSrgbFormat(client_format) => {
                    if client_format.is_supported(context) {
                        let e = client_format.to_glenum();
                        Ok((e, e))
                    } else {
                        return Err(FormatNotSupportedError);
                    }
                },
                // Guarded by `client.is_compressed()` above.
                _ => unreachable!(),
            }
        },
        TextureFormatRequest::Specific(TextureFormat::CompressedFormat(format))
                                                            if client.is_compressed() => {
            if format.is_supported(context) {
                let e = format.to_glenum();
                Ok((e, e))
            } else {
                return Err(FormatNotSupportedError);
            }
        },
        TextureFormatRequest::Specific(TextureFormat::CompressedSrgbFormat(format))
                                                            if client.is_compressed() => {
            if format.is_supported(context) {
                let e = format.to_glenum();
                Ok((e, e))
            } else {
                return Err(FormatNotSupportedError);
            }
        },

        // Normalized / floating-point / sRGB requests (and compressed requests whose
        // client data is NOT compressed): plain color formats (gl::RED, gl::RG, ...).
        TextureFormatRequest::AnyFloatingPoint |
        TextureFormatRequest::AnyCompressed |
        TextureFormatRequest::AnySrgb |
        TextureFormatRequest::AnyCompressedSrgb |
        TextureFormatRequest::Specific(TextureFormat::UncompressedFloat(_)) |
        TextureFormatRequest::Specific(TextureFormat::Srgb(_)) |
        TextureFormatRequest::Specific(TextureFormat::CompressedFormat(_)) |
        TextureFormatRequest::Specific(TextureFormat::CompressedSrgbFormat(_)) => {
            match client {
                ClientFormatAny::ClientFormat(ClientFormat::U8) => Ok((gl::RED, gl::UNSIGNED_BYTE)),
                ClientFormatAny::ClientFormat(ClientFormat::U8U8) => Ok((gl::RG, gl::UNSIGNED_BYTE)),
                ClientFormatAny::ClientFormat(ClientFormat::U8U8U8) => Ok((gl::RGB, gl::UNSIGNED_BYTE)),
                ClientFormatAny::ClientFormat(ClientFormat::U8U8U8U8) => Ok((gl::RGBA, gl::UNSIGNED_BYTE)),
                ClientFormatAny::ClientFormat(ClientFormat::I8) => Ok((gl::RED, gl::BYTE)),
                ClientFormatAny::ClientFormat(ClientFormat::I8I8) => Ok((gl::RG, gl::BYTE)),
                ClientFormatAny::ClientFormat(ClientFormat::I8I8I8) => Ok((gl::RGB, gl::BYTE)),
                ClientFormatAny::ClientFormat(ClientFormat::I8I8I8I8) => Ok((gl::RGBA, gl::BYTE)),
                ClientFormatAny::ClientFormat(ClientFormat::U16) => Ok((gl::RED, gl::UNSIGNED_SHORT)),
                ClientFormatAny::ClientFormat(ClientFormat::U16U16) => Ok((gl::RG, gl::UNSIGNED_SHORT)),
                ClientFormatAny::ClientFormat(ClientFormat::U16U16U16) => Ok((gl::RGB, gl::UNSIGNED_SHORT)),
                ClientFormatAny::ClientFormat(ClientFormat::U16U16U16U16) => Ok((gl::RGBA, gl::UNSIGNED_SHORT)),
                ClientFormatAny::ClientFormat(ClientFormat::I16) => Ok((gl::RED, gl::SHORT)),
                ClientFormatAny::ClientFormat(ClientFormat::I16I16) => Ok((gl::RG, gl::SHORT)),
                ClientFormatAny::ClientFormat(ClientFormat::I16I16I16) => Ok((gl::RGB, gl::SHORT)),
                ClientFormatAny::ClientFormat(ClientFormat::I16I16I16I16) => Ok((gl::RGBA, gl::SHORT)),
                ClientFormatAny::ClientFormat(ClientFormat::U32) => Ok((gl::RED, gl::UNSIGNED_INT)),
                ClientFormatAny::ClientFormat(ClientFormat::U32U32) => Ok((gl::RG, gl::UNSIGNED_INT)),
                ClientFormatAny::ClientFormat(ClientFormat::U32U32U32) => Ok((gl::RGB, gl::UNSIGNED_INT)),
                ClientFormatAny::ClientFormat(ClientFormat::U32U32U32U32) => Ok((gl::RGBA, gl::UNSIGNED_INT)),
                ClientFormatAny::ClientFormat(ClientFormat::I32) => Ok((gl::RED, gl::INT)),
                ClientFormatAny::ClientFormat(ClientFormat::I32I32) => Ok((gl::RG, gl::INT)),
                ClientFormatAny::ClientFormat(ClientFormat::I32I32I32) => Ok((gl::RGB, gl::INT)),
                ClientFormatAny::ClientFormat(ClientFormat::I32I32I32I32) => Ok((gl::RGBA, gl::INT)),
                ClientFormatAny::ClientFormat(ClientFormat::U3U3U2) => Ok((gl::RGB, gl::UNSIGNED_BYTE_3_3_2)),
                ClientFormatAny::ClientFormat(ClientFormat::U5U6U5) => Ok((gl::RGB, gl::UNSIGNED_SHORT_5_6_5)),
                ClientFormatAny::ClientFormat(ClientFormat::U4U4U4U4) => Ok((gl::RGBA, gl::UNSIGNED_SHORT_4_4_4_4)),
                ClientFormatAny::ClientFormat(ClientFormat::U5U5U5U1) => Ok((gl::RGBA, gl::UNSIGNED_SHORT_5_5_5_1)),
                // NOTE(review): U10U10U10U2 is mapped to UNSIGNED_INT_10_10_10_2 (not the
                // _REV variant) — component ordering not verified here; confirm against
                // the ClientFormat definition.
                ClientFormatAny::ClientFormat(ClientFormat::U10U10U10U2) => Ok((gl::RGBA, gl::UNSIGNED_INT_10_10_10_2)),
                ClientFormatAny::ClientFormat(ClientFormat::F16) => Ok((gl::RED, gl::HALF_FLOAT)),
                ClientFormatAny::ClientFormat(ClientFormat::F16F16) => Ok((gl::RG, gl::HALF_FLOAT)),
                ClientFormatAny::ClientFormat(ClientFormat::F16F16F16) => Ok((gl::RGB, gl::HALF_FLOAT)),
                ClientFormatAny::ClientFormat(ClientFormat::F16F16F16F16) => Ok((gl::RGBA, gl::HALF_FLOAT)),
                ClientFormatAny::ClientFormat(ClientFormat::F32) => Ok((gl::RED, gl::FLOAT)),
                ClientFormatAny::ClientFormat(ClientFormat::F32F32) => Ok((gl::RG, gl::FLOAT)),
                ClientFormatAny::ClientFormat(ClientFormat::F32F32F32) => Ok((gl::RGB, gl::FLOAT)),
                ClientFormatAny::ClientFormat(ClientFormat::F32F32F32F32) => Ok((gl::RGBA, gl::FLOAT)),

                // this kind of situation shouldn't happen, it should have a special handling when
                // client is compressed.
                ClientFormatAny::CompressedFormat(_) => unreachable!(),
                ClientFormatAny::CompressedSrgbFormat(_) => unreachable!(),
            }
        },

        // Integral / unsigned requests: same table as above but with the
        // *_INTEGER client formats so the data is not normalized on upload.
        TextureFormatRequest::AnyIntegral |
        TextureFormatRequest::AnyUnsigned |
        TextureFormatRequest::Specific(TextureFormat::UncompressedIntegral(_)) |
        TextureFormatRequest::Specific(TextureFormat::UncompressedUnsigned(_)) => {
            match client {
                ClientFormatAny::ClientFormat(ClientFormat::U8) => Ok((gl::RED_INTEGER, gl::UNSIGNED_BYTE)),
                ClientFormatAny::ClientFormat(ClientFormat::U8U8) => Ok((gl::RG_INTEGER, gl::UNSIGNED_BYTE)),
                ClientFormatAny::ClientFormat(ClientFormat::U8U8U8) => Ok((gl::RGB_INTEGER, gl::UNSIGNED_BYTE)),
                ClientFormatAny::ClientFormat(ClientFormat::U8U8U8U8) => Ok((gl::RGBA_INTEGER, gl::UNSIGNED_BYTE)),
                ClientFormatAny::ClientFormat(ClientFormat::I8) => Ok((gl::RED_INTEGER, gl::BYTE)),
                ClientFormatAny::ClientFormat(ClientFormat::I8I8) => Ok((gl::RG_INTEGER, gl::BYTE)),
                ClientFormatAny::ClientFormat(ClientFormat::I8I8I8) => Ok((gl::RGB_INTEGER, gl::BYTE)),
                ClientFormatAny::ClientFormat(ClientFormat::I8I8I8I8) => Ok((gl::RGBA_INTEGER, gl::BYTE)),
                ClientFormatAny::ClientFormat(ClientFormat::U16) => Ok((gl::RED_INTEGER, gl::UNSIGNED_SHORT)),
                ClientFormatAny::ClientFormat(ClientFormat::U16U16) => Ok((gl::RG_INTEGER, gl::UNSIGNED_SHORT)),
                ClientFormatAny::ClientFormat(ClientFormat::U16U16U16) => Ok((gl::RGB_INTEGER, gl::UNSIGNED_SHORT)),
                ClientFormatAny::ClientFormat(ClientFormat::U16U16U16U16) => Ok((gl::RGBA_INTEGER, gl::UNSIGNED_SHORT)),
                ClientFormatAny::ClientFormat(ClientFormat::I16) => Ok((gl::RED_INTEGER, gl::SHORT)),
                ClientFormatAny::ClientFormat(ClientFormat::I16I16) => Ok((gl::RG_INTEGER, gl::SHORT)),
                ClientFormatAny::ClientFormat(ClientFormat::I16I16I16) => Ok((gl::RGB_INTEGER, gl::SHORT)),
                ClientFormatAny::ClientFormat(ClientFormat::I16I16I16I16) => Ok((gl::RGBA_INTEGER, gl::SHORT)),
                ClientFormatAny::ClientFormat(ClientFormat::U32) => Ok((gl::RED_INTEGER, gl::UNSIGNED_INT)),
                ClientFormatAny::ClientFormat(ClientFormat::U32U32) => Ok((gl::RG_INTEGER, gl::UNSIGNED_INT)),
                ClientFormatAny::ClientFormat(ClientFormat::U32U32U32) => Ok((gl::RGB_INTEGER, gl::UNSIGNED_INT)),
                ClientFormatAny::ClientFormat(ClientFormat::U32U32U32U32) => Ok((gl::RGBA_INTEGER, gl::UNSIGNED_INT)),
                ClientFormatAny::ClientFormat(ClientFormat::I32) => Ok((gl::RED_INTEGER, gl::INT)),
                ClientFormatAny::ClientFormat(ClientFormat::I32I32) => Ok((gl::RG_INTEGER, gl::INT)),
                ClientFormatAny::ClientFormat(ClientFormat::I32I32I32) => Ok((gl::RGB_INTEGER, gl::INT)),
                ClientFormatAny::ClientFormat(ClientFormat::I32I32I32I32) => Ok((gl::RGBA_INTEGER, gl::INT)),
                ClientFormatAny::ClientFormat(ClientFormat::U3U3U2) => Ok((gl::RGB_INTEGER, gl::UNSIGNED_BYTE_3_3_2)),
                ClientFormatAny::ClientFormat(ClientFormat::U5U6U5) => Ok((gl::RGB_INTEGER, gl::UNSIGNED_SHORT_5_6_5)),
                ClientFormatAny::ClientFormat(ClientFormat::U4U4U4U4) => Ok((gl::RGBA_INTEGER, gl::UNSIGNED_SHORT_4_4_4_4)),
                ClientFormatAny::ClientFormat(ClientFormat::U5U5U5U1) => Ok((gl::RGBA_INTEGER, gl::UNSIGNED_SHORT_5_5_5_1)),
                ClientFormatAny::ClientFormat(ClientFormat::U10U10U10U2) => Ok((gl::RGBA_INTEGER, gl::UNSIGNED_INT_10_10_10_2)),
                ClientFormatAny::ClientFormat(ClientFormat::F16) => Ok((gl::RED_INTEGER, gl::HALF_FLOAT)),
                ClientFormatAny::ClientFormat(ClientFormat::F16F16) => Ok((gl::RG_INTEGER, gl::HALF_FLOAT)),
                ClientFormatAny::ClientFormat(ClientFormat::F16F16F16) => Ok((gl::RGB_INTEGER, gl::HALF_FLOAT)),
                ClientFormatAny::ClientFormat(ClientFormat::F16F16F16F16) => Ok((gl::RGBA_INTEGER, gl::HALF_FLOAT)),
                ClientFormatAny::ClientFormat(ClientFormat::F32) => Ok((gl::RED_INTEGER, gl::FLOAT)),
                ClientFormatAny::ClientFormat(ClientFormat::F32F32) => Ok((gl::RG_INTEGER, gl::FLOAT)),
                ClientFormatAny::ClientFormat(ClientFormat::F32F32F32) => Ok((gl::RGB_INTEGER, gl::FLOAT)),
                ClientFormatAny::ClientFormat(ClientFormat::F32F32F32F32) => Ok((gl::RGBA_INTEGER, gl::FLOAT)),

                // this kind of situation shouldn't happen, it should have a special handling when
                // client is compressed.
                ClientFormatAny::CompressedFormat(_) => unreachable!(),
                ClientFormatAny::CompressedSrgbFormat(_) => unreachable!(),
            }
        },

        // Depth uploads only accept F32 client data; anything else is a caller bug.
        TextureFormatRequest::AnyDepth |
        TextureFormatRequest::Specific(TextureFormat::DepthFormat(_)) => {
            if client != ClientFormatAny::ClientFormat(ClientFormat::F32) {
                panic!("Only ClientFormatAny::ClientFormat(ClientFormat::F32) can be used to upload on a depth texture");
            }
            Ok((gl::DEPTH_COMPONENT, gl::FLOAT))
        }

        // Stencil uploads only accept single-channel client data.
        TextureFormatRequest::AnyStencil |
        TextureFormatRequest::Specific(TextureFormat::StencilFormat(_)) => {
            match client {
                ClientFormatAny::ClientFormat(ClientFormat::U8) => Ok((gl::RED_INTEGER, gl::UNSIGNED_BYTE)),
                ClientFormatAny::ClientFormat(ClientFormat::I8) => Ok((gl::RED_INTEGER, gl::BYTE)),
                ClientFormatAny::ClientFormat(ClientFormat::U16) => Ok((gl::RED_INTEGER, gl::UNSIGNED_SHORT)),
                ClientFormatAny::ClientFormat(ClientFormat::I16) => Ok((gl::RED_INTEGER, gl::SHORT)),
                ClientFormatAny::ClientFormat(ClientFormat::U32) => Ok((gl::RED_INTEGER, gl::UNSIGNED_INT)),
                ClientFormatAny::ClientFormat(ClientFormat::I32) => Ok((gl::RED_INTEGER, gl::INT)),
                ClientFormatAny::ClientFormat(ClientFormat::F16) => Ok((gl::RED_INTEGER, gl::HALF_FLOAT)),
                ClientFormatAny::ClientFormat(ClientFormat::F32) => Ok((gl::RED_INTEGER, gl::FLOAT)),
                _ => panic!("Can't upload to a stencil texture with more than one channel")
            }
        }

        TextureFormatRequest::AnyDepthStencil |
        TextureFormatRequest::Specific(TextureFormat::DepthStencilFormat(_)) => {
            unimplemented!();
        },
    };

    if inverted {
        // Only the three- and four-component normalized formats have a reversed
        // counterpart; everything else is rejected.
        value.and_then(|(format, ty)| {
            let format = match format {
                gl::RGB => gl::BGR,
                gl::RGBA => gl::BGRA,
                // NOTE(review): `f` is bound but unused — renaming to `_` would
                // silence the compiler warning (left as-is: doc-only pass).
                f => return Err(FormatNotSupportedError)
            };
            Ok((format, ty))
        })
    } else {
        value
    }
}
43.215574
138
0.558857
1ab9d0e4458593da02802be7c806bcb45b0066c4
2,437
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT use glib::object::IsA; use glib::translate::*; use glib::GString; use gtk_sys; use std::fmt; use TextBuffer; glib_wrapper! { pub struct TextMark(Object<gtk_sys::GtkTextMark, gtk_sys::GtkTextMarkClass, TextMarkClass>); match fn { get_type => || gtk_sys::gtk_text_mark_get_type(), } } impl TextMark { pub fn new(name: Option<&str>, left_gravity: bool) -> TextMark { assert_initialized_main_thread!(); unsafe { from_glib_full(gtk_sys::gtk_text_mark_new( name.to_glib_none().0, left_gravity.to_glib(), )) } } } pub const NONE_TEXT_MARK: Option<&TextMark> = None; pub trait TextMarkExt: 'static { fn get_buffer(&self) -> Option<TextBuffer>; fn get_deleted(&self) -> bool; fn get_left_gravity(&self) -> bool; fn get_name(&self) -> Option<GString>; fn get_visible(&self) -> bool; fn set_visible(&self, setting: bool); } impl<O: IsA<TextMark>> TextMarkExt for O { fn get_buffer(&self) -> Option<TextBuffer> { unsafe { from_glib_none(gtk_sys::gtk_text_mark_get_buffer( self.as_ref().to_glib_none().0, )) } } fn get_deleted(&self) -> bool { unsafe { from_glib(gtk_sys::gtk_text_mark_get_deleted( self.as_ref().to_glib_none().0, )) } } fn get_left_gravity(&self) -> bool { unsafe { from_glib(gtk_sys::gtk_text_mark_get_left_gravity( self.as_ref().to_glib_none().0, )) } } fn get_name(&self) -> Option<GString> { unsafe { from_glib_none(gtk_sys::gtk_text_mark_get_name( self.as_ref().to_glib_none().0, )) } } fn get_visible(&self) -> bool { unsafe { from_glib(gtk_sys::gtk_text_mark_get_visible( self.as_ref().to_glib_none().0, )) } } fn set_visible(&self, setting: bool) { unsafe { gtk_sys::gtk_text_mark_set_visible(self.as_ref().to_glib_none().0, setting.to_glib()); } } } impl fmt::Display for TextMark { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "TextMark") } }
24.128713
98
0.569553
9cd8e981461d8decc1ff9a5b69a5e1f30f44c1b3
13,239
//! Types which represent a SQL data type. //! //! The structs in this module are *only* used as markers to represent a SQL type. //! They should never be used in your structs. //! If you'd like to know the rust types which can be used for a given SQL type, //! see the documentation for that SQL type. //! Additional types may be provided by other crates. //! //! To see which SQL type can be used with a given Rust type, //! see the "Implementors" section of [`FromSql`]. //! //! [`FromSql`]: ../deserialize/trait.FromSql.html //! //! Any backend specific types are re-exported through this module mod fold; pub mod ops; mod ord; pub use self::fold::Foldable; pub use self::ord::SqlOrd; use crate::query_builder::QueryId; /// The boolean SQL type. /// /// On backends without a native boolean type, /// this is emulated with the smallest supported integer. /// /// ### [`ToSql`](../serialize/trait.ToSql.html) impls /// /// - [`bool`][bool] /// /// ### [`FromSql`](../deserialize/trait.FromSql.html) impls /// /// - [`bool`][bool] /// /// [bool]: https://doc.rust-lang.org/nightly/std/primitive.bool.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "16", array_oid = "1000")] #[sqlite_type = "Integer"] #[mysql_type = "Tiny"] pub struct Bool; /// The tiny integer SQL type. /// /// This is only available on MySQL. /// Keep in mind that `infer_schema!` will see `TINYINT(1)` as `Bool`, /// not `TinyInt`. /// /// ### [`ToSql`](../serialize/trait.ToSql.html) impls /// /// - [`i8`][i8] /// /// ### [`FromSql`](../deserialize/trait.FromSql.html) impls /// /// - [`i8`][i8] /// /// [i8]: https://doc.rust-lang.org/nightly/std/primitive.i8.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[mysql_type = "Tiny"] pub struct TinyInt; #[doc(hidden)] pub type Tinyint = TinyInt; /// The small integer SQL type. 
/// /// ### [`ToSql`](../serialize/trait.ToSql.html) impls /// /// - [`i16`][i16] /// /// ### [`FromSql`](../deserialize/trait.FromSql.html) impls /// /// - [`i16`][i16] /// /// [i16]: https://doc.rust-lang.org/nightly/std/primitive.i16.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "21", array_oid = "1005")] #[sqlite_type = "SmallInt"] #[mysql_type = "Short"] pub struct SmallInt; #[doc(hidden)] pub type Int2 = SmallInt; #[doc(hidden)] pub type Smallint = SmallInt; /// The integer SQL type. /// /// ### [`ToSql`](../serialize/trait.ToSql.html) impls /// /// - [`i32`][i32] /// /// ### [`FromSql`](../deserialize/trait.FromSql.html) impls /// /// - [`i32`][i32] /// /// [i32]: https://doc.rust-lang.org/nightly/std/primitive.i32.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "23", array_oid = "1007")] #[sqlite_type = "Integer"] #[mysql_type = "Long"] pub struct Integer; #[doc(hidden)] pub type Int4 = Integer; /// The big integer SQL type. /// /// ### [`ToSql`](../serialize/trait.ToSql.html) impls /// /// - [`i64`][i64] /// /// ### [`FromSql`](../deserialize/trait.FromSql.html) impls /// /// - [`i64`][i64] /// /// [i64]: https://doc.rust-lang.org/nightly/std/primitive.i64.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "20", array_oid = "1016")] #[sqlite_type = "Long"] #[mysql_type = "LongLong"] pub struct BigInt; #[doc(hidden)] pub type Int8 = BigInt; #[doc(hidden)] pub type Bigint = BigInt; /// The float SQL type. 
/// /// ### [`ToSql`](../serialize/trait.ToSql.html) impls /// /// - [`f32`][f32] /// /// ### [`FromSql`](../deserialize/trait.FromSql.html) impls /// /// - [`f32`][f32] /// /// [f32]: https://doc.rust-lang.org/nightly/std/primitive.f32.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "700", array_oid = "1021")] #[sqlite_type = "Float"] #[mysql_type = "Float"] pub struct Float; #[doc(hidden)] pub type Float4 = Float; /// The double precision float SQL type. /// /// ### [`ToSql`](../serialize/trait.ToSql.html) impls /// /// - [`f64`][f64] /// /// ### [`FromSql`](../deserialize/trait.FromSql.html) impls /// /// - [`f64`][f64] /// /// [f64]: https://doc.rust-lang.org/nightly/std/primitive.f64.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "701", array_oid = "1022")] #[sqlite_type = "Double"] #[mysql_type = "Double"] pub struct Double; #[doc(hidden)] pub type Float8 = Double; /// The arbitrary precision numeric SQL type. /// /// This type is only supported on PostgreSQL and MySQL. /// On SQLite, [`Double`](struct.Double.html) should be used instead. /// /// ### [`ToSql`](../serialize/trait.ToSql.html) impls /// /// - [`bigdecimal::BigDecimal`] with `feature = ["numeric"]` /// /// ### [`FromSql`](../deserialize/trait.FromSql.html) impls /// /// - [`bigdecimal::BigDecimal`] with `feature = ["numeric"]` /// /// [`bigdecimal::BigDecimal`]: /bigdecimal/struct.BigDecimal.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "1700", array_oid = "1231")] #[mysql_type = "String"] #[sqlite_type = "Double"] pub struct Numeric; /// Alias for `Numeric` pub type Decimal = Numeric; /// The text SQL type. /// /// On all backends strings must be valid UTF-8. /// On PostgreSQL strings must not include nul bytes. /// /// Schema inference will treat all variants of `TEXT` as this type (e.g. /// `VARCHAR`, `MEDIUMTEXT`, etc). 
/// /// ### [`ToSql`](../serialize/trait.ToSql.html) impls /// /// - [`String`][String] /// - [`&str`][str] /// /// ### [`FromSql`](../deserialize/trait.FromSql.html) impls /// /// - [`String`][String] /// /// [String]: https://doc.rust-lang.org/nightly/std/string/struct.String.html /// [str]: https://doc.rust-lang.org/nightly/std/primitive.str.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "25", array_oid = "1009")] #[sqlite_type = "Text"] #[mysql_type = "String"] pub struct Text; /// The SQL `VARCHAR` type /// /// This type is generally interchangeable with `TEXT`, so Diesel has this as an /// alias rather than a separate type (Diesel does not currently support /// implicit coercions). /// /// One notable exception to this is with arrays on PG. `TEXT[]` cannot be /// coerced to `VARCHAR[]`. It is recommended that you always use `TEXT[]` if /// you need a string array on PG. pub type VarChar = Text; #[doc(hidden)] pub type Varchar = VarChar; #[doc(hidden)] pub type Char = Text; #[doc(hidden)] pub type Tinytext = Text; #[doc(hidden)] pub type Mediumtext = Text; #[doc(hidden)] pub type Longtext = Text; /// The binary SQL type. /// /// Schema inference will treat all variants of `BLOB` as this type (e.g. /// `VARBINARY`, `MEDIUMBLOB`, etc). 
/// /// ### [`ToSql`](../serialize/trait.ToSql.html) impls /// /// - [`Vec<u8>`][Vec] /// - [`&[u8]`][slice] /// /// ### [`FromSql`](../deserialize/trait.FromSql.html) impls /// /// - [`Vec<u8>`][Vec] /// /// [Vec]: https://doc.rust-lang.org/nightly/std/vec/struct.Vec.html /// [slice]: https://doc.rust-lang.org/nightly/std/primitive.slice.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "17", array_oid = "1001")] #[sqlite_type = "Binary"] #[mysql_type = "Blob"] pub struct Binary; #[doc(hidden)] pub type Tinyblob = Binary; #[doc(hidden)] pub type Blob = Binary; #[doc(hidden)] pub type Mediumblob = Binary; #[doc(hidden)] pub type Longblob = Binary; #[doc(hidden)] pub type Varbinary = Binary; #[doc(hidden)] pub type Bit = Binary; /// The date SQL type. /// /// ### [`ToSql`](../serialize/trait.ToSql.html) impls /// /// - [`chrono::NaiveDate`][NaiveDate] with `feature = "chrono"` /// /// ### [`FromSql`](../deserialize/trait.FromSql.html) impls /// /// - [`chrono::NaiveDate`][NaiveDate] with `feature = "chrono"` /// /// [NaiveDate]: /chrono/naive/date/struct.NaiveDate.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "1082", array_oid = "1182")] #[sqlite_type = "Text"] #[mysql_type = "Date"] pub struct Date; /// The interval SQL type. /// /// This type is currently only implemented for PostgreSQL. /// /// ### [`ToSql`](../serialize/trait.ToSql.html) impls /// /// - [`PgInterval`] which can be constructed using [`IntervalDsl`] /// /// ### [`FromSql`](../deserialize/trait.FromSql.html) impls /// /// - [`PgInterval`] which can be constructed using [`IntervalDsl`] /// /// [`PgInterval`]: ../pg/data_types/struct.PgInterval.html /// [`IntervalDsl`]: ../pg/expression/extensions/trait.IntervalDsl.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "1186", array_oid = "1187")] pub struct Interval; /// The time SQL type. 
/// /// ### [`ToSql`](../serialize/trait.ToSql.html) impls /// /// - [`chrono::NaiveTime`][NaiveTime] with `feature = "chrono"` /// /// ### [`FromSql`](../deserialize/trait.FromSql.html) impls /// /// - [`chrono::NaiveTime`][NaiveTime] with `feature = "chrono"` /// /// [NaiveTime]: /chrono/naive/time/struct.NaiveTime.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "1083", array_oid = "1183")] #[sqlite_type = "Text"] #[mysql_type = "Time"] pub struct Time; /// The timestamp SQL type. /// /// ### [`ToSql`](../serialize/trait.ToSql.html) impls /// /// - [`std::time::SystemTime`][SystemTime] (PG only) /// - [`chrono::NaiveDateTime`][NaiveDateTime] with `feature = "chrono"` /// - [`time::Timespec`][Timespec] with `feature = "deprecated-time"` (PG only) /// /// ### [`FromSql`](../deserialize/trait.FromSql.html) impls /// /// - [`std::time::SystemTime`][SystemTime] (PG only) /// - [`chrono::NaiveDateTime`][NaiveDateTime] with `feature = "chrono"` /// - [`time::Timespec`][Timespec] with `feature = "deprecated-time"` (PG only) /// /// [SystemTime]: https://doc.rust-lang.org/nightly/std/time/struct.SystemTime.html /// [NaiveDateTime]: /chrono/naive/datetime/struct.NaiveDateTime.html /// [Timespec]: /time/struct.Timespec.html #[derive(Debug, Clone, Copy, Default, QueryId, SqlType)] #[postgres(oid = "1114", array_oid = "1115")] #[sqlite_type = "Text"] #[mysql_type = "Timestamp"] pub struct Timestamp; /// The nullable SQL type. /// /// This wraps another SQL type to indicate that it can be null. /// By default all values are assumed to be `NOT NULL`. 
///
/// ### [`ToSql`](../serialize/trait.ToSql.html) impls
///
/// - Any `T` which implements `ToSql<ST>`
/// - `Option<T>` for any `T` which implements `ToSql<ST>`
///
/// ### [`FromSql`](../deserialize/trait.FromSql.html) impls
///
/// - `Option<T>` for any `T` which implements `FromSql<ST>`
#[derive(Debug, Clone, Copy, Default)]
pub struct Nullable<ST: NotNull>(ST);

#[cfg(feature = "postgres")]
pub use crate::pg::types::sql_types::*;

#[cfg(feature = "mysql")]
pub use crate::mysql::types::*;

/// Indicates that a SQL type exists for a backend.
///
/// This trait can be derived using the [`SqlType` derive](derive.SqlType.html)
///
/// # Example
///
/// ```rust
/// #[derive(diesel::sql_types::SqlType)]
/// #[postgres(oid = "23", array_oid = "1007")]
/// #[sqlite_type = "Integer"]
/// #[mysql_type = "Long"]
/// pub struct Integer;
/// ```
pub trait HasSqlType<ST>: TypeMetadata {
    /// Fetch the metadata for the given type
    ///
    /// This method may use `lookup` to do dynamic runtime lookup. Implementors
    /// of this method should not do dynamic lookup unless absolutely necessary
    fn metadata(lookup: &Self::MetadataLookup) -> Self::TypeMetadata;

    #[doc(hidden)]
    #[cfg(feature = "mysql")]
    fn mysql_row_metadata(out: &mut Vec<Self::TypeMetadata>, lookup: &Self::MetadataLookup) {
        out.push(Self::metadata(lookup))
    }
}

/// Information about how a backend stores metadata about given SQL types
pub trait TypeMetadata {
    /// The actual type used to represent metadata.
    ///
    /// On PostgreSQL, this is the type's OID.
    /// On MySQL and SQLite, this is an enum representing all storage classes
    /// they support.
    type TypeMetadata;

    /// The type used for runtime lookup of metadata.
    ///
    /// For most backends, which don't support user defined types, this will
    /// be `()`.
    type MetadataLookup;
}

/// A marker trait indicating that a SQL type is not null.
///
/// All SQL types must implement this trait.
///
/// # Deriving
///
/// This trait is automatically implemented by `#[derive(SqlType)]`
pub trait NotNull {}

/// Converts a type which may or may not be nullable into its nullable
/// representation.
pub trait IntoNullable {
    /// The nullable representation of this type.
    ///
    /// For all types except `Nullable`, this will be `Nullable<Self>`.
    type Nullable;
}

// Any non-null type becomes Nullable<T>.
impl<T: NotNull> IntoNullable for T {
    type Nullable = Nullable<T>;
}

// Nullable<T> is already nullable; wrapping again is a no-op (it stays
// Nullable<T>, not Nullable<Nullable<T>>), making IntoNullable idempotent.
impl<T: NotNull> IntoNullable for Nullable<T> {
    type Nullable = Nullable<T>;
}

/// A marker trait indicating that a SQL type represents a single value, as
/// opposed to a list of values.
///
/// This trait should generally be implemented for all SQL types with the
/// exception of Rust tuples. If a column could have this as its type, this
/// trait should be implemented.
///
/// # Deriving
///
/// This trait is automatically implemented by `#[derive(SqlType)]`
pub trait SingleValue {}

// A nullable wrapper around a single value is still a single value.
impl<T: NotNull + SingleValue> SingleValue for Nullable<T> {}

#[doc(inline)]
pub use diesel_derives::DieselNumericOps;
#[doc(inline)]
pub use diesel_derives::SqlType;
29.289823
93
0.646197
67e3f83de540b20d663d2dac81066889ec829cfa
10,587
//! SkStream and relatives. //! This implementation covers the minimal subset to interface with Rust streams. //! //! Bindings that wrap functions that use Skia stream types, _must_ use Rust streams instead. use crate::prelude::*; use crate::Data; use skia_bindings as sb; use skia_bindings::{SkDynamicMemoryWStream, SkMemoryStream, SkStream, SkStreamAsset, SkWStream}; use std::{ffi, fmt, io, marker::PhantomData, ptr}; /// Trait representing an Skia allocated Stream type with a base class of SkStream. #[repr(transparent)] pub struct Stream<N: NativeStreamBase>(ptr::NonNull<N>); unsafe impl<N: NativeStreamBase> Send for Stream<N> {} pub trait NativeStreamBase { fn as_stream_mut(&mut self) -> &mut SkStream; } impl<T: NativeStreamBase> Drop for Stream<T> { fn drop(&mut self) { unsafe { sb::C_SkStream_delete(self.0.as_ptr() as *mut _); } } } impl<N: NativeStreamBase> Stream<N> { pub fn from_ptr(ptr: *mut N) -> Option<Stream<N>> { ptr::NonNull::new(ptr).map(Stream) } } pub type StreamAsset = Stream<SkStreamAsset>; impl NativeBase<SkStream> for SkStreamAsset {} impl NativeStreamBase for SkStreamAsset { fn as_stream_mut(&mut self) -> &mut SkStream { self.base_mut() } } impl NativeAccess<SkStreamAsset> for StreamAsset { fn native(&self) -> &SkStreamAsset { unsafe { self.0.as_ref() } } fn native_mut(&mut self) -> &mut SkStreamAsset { unsafe { self.0.as_mut() } } } impl fmt::Debug for StreamAsset { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("StreamAsset").finish() } } #[repr(C)] pub struct MemoryStream<'a> { native: ptr::NonNull<SkMemoryStream>, pd: PhantomData<&'a ()>, } unsafe impl Send for MemoryStream<'_> {} impl NativeBase<SkStream> for SkMemoryStream {} impl NativeStreamBase for SkMemoryStream { fn as_stream_mut(&mut self) -> &mut SkStream { self.base_mut() } } impl NativeAccess<SkMemoryStream> for MemoryStream<'_> { fn native(&self) -> &SkMemoryStream { unsafe { self.native.as_ref() } } fn native_mut(&mut self) -> &mut SkMemoryStream { 
unsafe { self.native.as_mut() } } } impl fmt::Debug for MemoryStream<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("MemoryStream") .field("offset", &self.native().fOffset) .finish() } } impl MemoryStream<'_> { // Create a stream asset that refers the bytes provided. pub fn from_bytes(bytes: &[u8]) -> MemoryStream { let ptr = unsafe { sb::C_SkMemoryStream_MakeDirect(bytes.as_ptr() as _, bytes.len()) }; MemoryStream { native: ptr::NonNull::new(ptr).unwrap(), pd: PhantomData, } } } pub type DynamicMemoryWStream = Handle<SkDynamicMemoryWStream>; impl NativeBase<SkWStream> for SkDynamicMemoryWStream {} impl NativeDrop for SkDynamicMemoryWStream { fn drop(&mut self) { unsafe { sb::C_SkWStream_destruct(self.base_mut()); } } } impl fmt::Debug for DynamicMemoryWStream { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("DynamicMemoryWStream") .field( "bytes_written_before_tail", &self.native().fBytesWrittenBeforeTail, ) .finish() } } impl DynamicMemoryWStream { pub fn new() -> Self { Self::construct(|w_stream| unsafe { sb::C_SkDynamicMemoryWStream_Construct(w_stream) }) } pub fn from_bytes(bytes: &[u8]) -> Self { let mut stream = Self::new(); stream.write(bytes); stream } pub fn write(&mut self, bytes: &[u8]) -> bool { unsafe { sb::C_SkWStream_write( self.native_mut().base_mut(), bytes.as_ptr() as _, bytes.len(), ) } } pub fn detach_as_data(&mut self) -> Data { Data::from_ptr(unsafe { sb::C_SkDynamicMemoryWStream_detachAsData(self.native_mut()) }) .unwrap() } pub fn detach_as_stream(&mut self) -> StreamAsset { StreamAsset::from_ptr(unsafe { sb::C_SkDynamicMemoryWStream_detachAsStream(self.native_mut()) }) .unwrap() } } #[test] fn detaching_empty_dynamic_memory_w_stream_leads_to_non_null_data() { let mut stream = DynamicMemoryWStream::new(); let data = stream.detach_as_data(); assert_eq!(0, data.size()) } #[test] fn memory_stream_from_bytes() { let stream = MemoryStream::from_bytes(&[1, 2, 3]); drop(stream); } pub 
struct RustStream<'a> { inner: Handle<sb::RustStream>, _phantom: PhantomData<&'a mut ()>, } impl RustStream<'_> { pub fn stream_mut(&mut self) -> &mut SkStream { self.inner.native_mut().base_mut() } } impl NativeBase<SkStream> for sb::RustStream {} impl NativeDrop for sb::RustStream { fn drop(&mut self) {} } impl<'a> RustStream<'a> { pub fn new<T: io::Read>(val: &'a mut T) -> Self { unsafe extern "C" fn read_trampoline<T>( val: *mut ffi::c_void, buf: *mut ffi::c_void, count: usize, ) -> usize where T: io::Read, { let val: &mut T = &mut *(val as *mut _); if buf.is_null() { const BUF_SIZE: usize = 128; let mut buf = [0; BUF_SIZE]; let mut out_bytes = 0; let mut count = count; // This is OK because we just abort if it panics anyway, we don't try // to continue at all. let val = std::panic::AssertUnwindSafe(val); let out_bytes = match std::panic::catch_unwind(move || { while count > 0 { let bytes = match val.0.read(&mut buf[..count.min(BUF_SIZE)]) { Ok(0) => break, Ok(bytes) => bytes, Err(_) => 0, }; count -= bytes; out_bytes += bytes; } out_bytes }) { Ok(res) => res, Err(_) => { println!("Panic in FFI callback for `SkStream::read`"); std::process::abort(); } }; out_bytes } else { let buf: &mut [u8] = std::slice::from_raw_parts_mut(buf as _, count as _); match val.read(buf) { Ok(bytes) => bytes, Err(_) => 0, } } } let (length, seek_start, seek_current): ( usize, Option<unsafe extern "C" fn(_, _) -> _>, Option<unsafe extern "C" fn(_, _) -> _>, ); #[cfg(feature = "nightly")] { trait MaybeSeek { fn maybe_seek(&mut self, from: io::SeekFrom) -> Option<u64>; } impl<T> MaybeSeek for T { default fn maybe_seek(&mut self, _: io::SeekFrom) -> Option<u64> { None } } impl<T> MaybeSeek for T where T: io::Seek, { fn maybe_seek(&mut self, from: io::SeekFrom) -> Option<u64> { self.seek(from).ok() } } unsafe extern "C" fn seek_start_trampoline<T: MaybeSeek>( val: *mut ffi::c_void, pos: usize, ) -> bool { let val: &mut T = &mut *(val as *mut _); // This is OK because we just abort if it 
panics anyway, we don't try // to continue at all. let val = std::panic::AssertUnwindSafe(val); match std::panic::catch_unwind(move || { val.0.maybe_seek(io::SeekFrom::Start(pos as _)).is_some() }) { Ok(res) => res, Err(_) => { println!("Panic in FFI callback for `SkStream::seek`"); std::process::abort(); } } } unsafe extern "C" fn seek_current_trampoline<T: MaybeSeek>( val: *mut ffi::c_void, offset: libc::c_long, ) -> bool { let val: &mut T = &mut *(val as *mut _); // This is OK because we just abort if it panics anyway, we don't try // to continue at all. let val = std::panic::AssertUnwindSafe(val); match std::panic::catch_unwind(move || { val.0 .maybe_seek(io::SeekFrom::Current(offset as _)) .is_some() }) { Ok(res) => res, Err(_) => { println!("Panic in FFI callback for `SkStream::move`"); std::process::abort(); } } } length = if let Some(cur) = val.maybe_seek(io::SeekFrom::Current(0)) { let length = val.maybe_seek(io::SeekFrom::End(0)).unwrap(); val.maybe_seek(io::SeekFrom::Start(cur)); length as usize } else { std::usize::MAX }; seek_start = Some(seek_start_trampoline::<T>); seek_current = Some(seek_current_trampoline::<T>); } #[cfg(not(feature = "nightly"))] { length = usize::MAX; seek_start = None; seek_current = None; } RustStream { inner: Handle::construct(|ptr| unsafe { sb::C_RustStream_construct( ptr, val as *mut T as *mut ffi::c_void, length, Some(read_trampoline::<T>), seek_start, seek_current, ); }), _phantom: PhantomData, } } }
30.076705
96
0.492207
235e269ec82416cf97e7db163aa504d0abfb1051
6,251
/// BaseAccount defines a base account type. It contains all the necessary fields /// for basic account functionality. Any custom account type should extend this /// type for additional functionality (e.g. vesting). #[derive(Clone, PartialEq, ::prost::Message)] pub struct BaseAccount { #[prost(string, tag = "1")] pub address: ::prost::alloc::string::String, #[prost(message, optional, tag = "2")] pub pub_key: ::core::option::Option<::prost_types::Any>, #[prost(uint64, tag = "3")] pub account_number: u64, #[prost(uint64, tag = "4")] pub sequence: u64, } /// ModuleAccount defines an account for modules that holds coins on a pool. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ModuleAccount { #[prost(message, optional, tag = "1")] pub base_account: ::core::option::Option<BaseAccount>, #[prost(string, tag = "2")] pub name: ::prost::alloc::string::String, #[prost(string, repeated, tag = "3")] pub permissions: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// Params defines the parameters for the auth module. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Params { #[prost(uint64, tag = "1")] pub max_memo_characters: u64, #[prost(uint64, tag = "2")] pub tx_sig_limit: u64, #[prost(uint64, tag = "3")] pub tx_size_cost_per_byte: u64, #[prost(uint64, tag = "4")] pub sig_verify_cost_ed25519: u64, #[prost(uint64, tag = "5")] pub sig_verify_cost_secp256k1: u64, } /// GenesisState defines the auth module's genesis state. #[derive(Clone, PartialEq, ::prost::Message)] pub struct GenesisState { /// params defines all the paramaters of the module. #[prost(message, optional, tag = "1")] pub params: ::core::option::Option<Params>, /// accounts are the accounts present at genesis. #[prost(message, repeated, tag = "2")] pub accounts: ::prost::alloc::vec::Vec<::prost_types::Any>, } /// QueryAccountRequest is the request type for the Query/Account RPC method. 
#[derive(Clone, PartialEq, ::prost::Message)] pub struct QueryAccountRequest { /// address defines the address to query for. #[prost(string, tag = "1")] pub address: ::prost::alloc::string::String, } /// QueryAccountResponse is the response type for the Query/Account RPC method. #[derive(Clone, PartialEq, ::prost::Message)] pub struct QueryAccountResponse { /// account defines the account of the corresponding address. #[prost(message, optional, tag = "1")] pub account: ::core::option::Option<::prost_types::Any>, } /// QueryParamsRequest is the request type for the Query/Params RPC method. #[derive(Clone, PartialEq, ::prost::Message)] pub struct QueryParamsRequest {} /// QueryParamsResponse is the response type for the Query/Params RPC method. #[derive(Clone, PartialEq, ::prost::Message)] pub struct QueryParamsResponse { /// params defines the parameters of the module. #[prost(message, optional, tag = "1")] pub params: ::core::option::Option<Params>, } #[doc = r" Generated client implementations."] pub mod query_client { #![allow(unused_variables, dead_code, missing_docs)] use tonic::codegen::*; #[doc = " Query defines the gRPC querier service."] pub struct QueryClient<T> { inner: tonic::client::Grpc<T>, } impl QueryClient<tonic::transport::Channel> { #[doc = r" Attempt to create a new client by connecting to a given endpoint."] pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error> where D: std::convert::TryInto<tonic::transport::Endpoint>, D::Error: Into<StdError>, { let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; Ok(Self::new(conn)) } } impl<T> QueryClient<T> where T: tonic::client::GrpcService<tonic::body::BoxBody>, T::ResponseBody: Body + HttpBody + Send + 'static, T::Error: Into<StdError>, <T::ResponseBody as HttpBody>::Error: Into<StdError> + Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); Self { inner } } pub fn with_interceptor(inner: T, interceptor: impl Into<tonic::Interceptor>) -> 
Self { let inner = tonic::client::Grpc::with_interceptor(inner, interceptor); Self { inner } } #[doc = " Account returns account details based on address."] pub async fn account( &mut self, request: impl tonic::IntoRequest<super::QueryAccountRequest>, ) -> Result<tonic::Response<super::QueryAccountResponse>, tonic::Status> { self.inner.ready().await.map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static("/cosmos.auth.v1beta1.Query/Account"); self.inner.unary(request.into_request(), path, codec).await } #[doc = " Params queries all parameters."] pub async fn params( &mut self, request: impl tonic::IntoRequest<super::QueryParamsRequest>, ) -> Result<tonic::Response<super::QueryParamsResponse>, tonic::Status> { self.inner.ready().await.map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static("/cosmos.auth.v1beta1.Query/Params"); self.inner.unary(request.into_request(), path, codec).await } } impl<T: Clone> Clone for QueryClient<T> { fn clone(&self) -> Self { Self { inner: self.inner.clone(), } } } impl<T> std::fmt::Debug for QueryClient<T> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "QueryClient {{ ... }}") } } }
41.397351
98
0.609982
16f3d309e9088052af31fdf76fb0f5c2f0fd6d56
117,559
/* automatically generated by rust-bindgen */ pub type va_list = __builtin_va_list; extern "C" { pub fn sqlite3_libversion() -> *const libc::c_char; } extern "C" { pub fn sqlite3_sourceid() -> *const libc::c_char; } extern "C" { pub fn sqlite3_libversion_number() -> libc::c_int; } extern "C" { pub fn sqlite3_compileoption_used(zOptName: *const libc::c_char) -> libc::c_int; } extern "C" { pub fn sqlite3_compileoption_get(N: libc::c_int) -> *const libc::c_char; } extern "C" { pub fn sqlite3_threadsafe() -> libc::c_int; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3 { _unused: [u8; 0], } pub type sqlite_int64 = libc::c_longlong; pub type sqlite_uint64 = libc::c_ulonglong; pub type sqlite3_int64 = sqlite_int64; pub type sqlite3_uint64 = sqlite_uint64; extern "C" { pub fn sqlite3_close(arg1: *mut sqlite3) -> libc::c_int; } extern "C" { pub fn sqlite3_close_v2(arg1: *mut sqlite3) -> libc::c_int; } pub type sqlite3_callback = ::core::option::Option< unsafe extern "C" fn( arg1: *mut libc::c_void, arg2: libc::c_int, arg3: *mut *mut libc::c_char, arg4: *mut *mut libc::c_char, ) -> libc::c_int, >; extern "C" { pub fn sqlite3_exec( arg1: *mut sqlite3, sql: *const libc::c_char, callback: ::core::option::Option< unsafe extern "C" fn( arg1: *mut libc::c_void, arg2: libc::c_int, arg3: *mut *mut libc::c_char, arg4: *mut *mut libc::c_char, ) -> libc::c_int, >, arg2: *mut libc::c_void, errmsg: *mut *mut libc::c_char, ) -> libc::c_int; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_file { pub pMethods: *const sqlite3_io_methods, } #[test] fn bindgen_test_layout_sqlite3_file() { assert_eq!( ::core::mem::size_of::<sqlite3_file>(), 8usize, concat!("Size of: ", stringify!(sqlite3_file)) ); assert_eq!( ::core::mem::align_of::<sqlite3_file>(), 8usize, concat!("Alignment of ", stringify!(sqlite3_file)) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_file>())).pMethods as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(sqlite3_file), 
"::", stringify!(pMethods) ) ); } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_io_methods { pub iVersion: libc::c_int, pub xClose: ::core::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_file) -> libc::c_int>, pub xRead: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_file, arg2: *mut libc::c_void, iAmt: libc::c_int, iOfst: sqlite3_int64, ) -> libc::c_int, >, pub xWrite: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_file, arg2: *const libc::c_void, iAmt: libc::c_int, iOfst: sqlite3_int64, ) -> libc::c_int, >, pub xTruncate: ::core::option::Option< unsafe extern "C" fn(arg1: *mut sqlite3_file, size: sqlite3_int64) -> libc::c_int, >, pub xSync: ::core::option::Option< unsafe extern "C" fn(arg1: *mut sqlite3_file, flags: libc::c_int) -> libc::c_int, >, pub xFileSize: ::core::option::Option< unsafe extern "C" fn(arg1: *mut sqlite3_file, pSize: *mut sqlite3_int64) -> libc::c_int, >, pub xLock: ::core::option::Option< unsafe extern "C" fn(arg1: *mut sqlite3_file, arg2: libc::c_int) -> libc::c_int, >, pub xUnlock: ::core::option::Option< unsafe extern "C" fn(arg1: *mut sqlite3_file, arg2: libc::c_int) -> libc::c_int, >, pub xCheckReservedLock: ::core::option::Option< unsafe extern "C" fn(arg1: *mut sqlite3_file, pResOut: *mut libc::c_int) -> libc::c_int, >, pub xFileControl: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_file, op: libc::c_int, pArg: *mut libc::c_void, ) -> libc::c_int, >, pub xSectorSize: ::core::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_file) -> libc::c_int>, pub xDeviceCharacteristics: ::core::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_file) -> libc::c_int>, pub xShmMap: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_file, iPg: libc::c_int, pgsz: libc::c_int, arg2: libc::c_int, arg3: *mut *mut libc::c_void, ) -> libc::c_int, >, pub xShmLock: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_file, offset: libc::c_int, n: 
libc::c_int, flags: libc::c_int, ) -> libc::c_int, >, pub xShmBarrier: ::core::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_file)>, pub xShmUnmap: ::core::option::Option< unsafe extern "C" fn(arg1: *mut sqlite3_file, deleteFlag: libc::c_int) -> libc::c_int, >, pub xFetch: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_file, iOfst: sqlite3_int64, iAmt: libc::c_int, pp: *mut *mut libc::c_void, ) -> libc::c_int, >, pub xUnfetch: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_file, iOfst: sqlite3_int64, p: *mut libc::c_void, ) -> libc::c_int, >, } #[test] fn bindgen_test_layout_sqlite3_io_methods() { assert_eq!( ::core::mem::size_of::<sqlite3_io_methods>(), 152usize, concat!("Size of: ", stringify!(sqlite3_io_methods)) ); assert_eq!( ::core::mem::align_of::<sqlite3_io_methods>(), 8usize, concat!("Alignment of ", stringify!(sqlite3_io_methods)) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_io_methods>())).iVersion as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(sqlite3_io_methods), "::", stringify!(iVersion) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_io_methods>())).xClose as *const _ as usize }, 8usize, concat!( "Offset of field: ", stringify!(sqlite3_io_methods), "::", stringify!(xClose) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_io_methods>())).xRead as *const _ as usize }, 16usize, concat!( "Offset of field: ", stringify!(sqlite3_io_methods), "::", stringify!(xRead) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_io_methods>())).xWrite as *const _ as usize }, 24usize, concat!( "Offset of field: ", stringify!(sqlite3_io_methods), "::", stringify!(xWrite) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_io_methods>())).xTruncate as *const _ as usize }, 32usize, concat!( "Offset of field: ", stringify!(sqlite3_io_methods), "::", stringify!(xTruncate) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_io_methods>())).xSync as *const _ 
as usize }, 40usize, concat!( "Offset of field: ", stringify!(sqlite3_io_methods), "::", stringify!(xSync) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_io_methods>())).xFileSize as *const _ as usize }, 48usize, concat!( "Offset of field: ", stringify!(sqlite3_io_methods), "::", stringify!(xFileSize) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_io_methods>())).xLock as *const _ as usize }, 56usize, concat!( "Offset of field: ", stringify!(sqlite3_io_methods), "::", stringify!(xLock) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_io_methods>())).xUnlock as *const _ as usize }, 64usize, concat!( "Offset of field: ", stringify!(sqlite3_io_methods), "::", stringify!(xUnlock) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_io_methods>())).xCheckReservedLock as *const _ as usize }, 72usize, concat!( "Offset of field: ", stringify!(sqlite3_io_methods), "::", stringify!(xCheckReservedLock) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_io_methods>())).xFileControl as *const _ as usize }, 80usize, concat!( "Offset of field: ", stringify!(sqlite3_io_methods), "::", stringify!(xFileControl) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_io_methods>())).xSectorSize as *const _ as usize }, 88usize, concat!( "Offset of field: ", stringify!(sqlite3_io_methods), "::", stringify!(xSectorSize) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_io_methods>())).xDeviceCharacteristics as *const _ as usize }, 96usize, concat!( "Offset of field: ", stringify!(sqlite3_io_methods), "::", stringify!(xDeviceCharacteristics) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_io_methods>())).xShmMap as *const _ as usize }, 104usize, concat!( "Offset of field: ", stringify!(sqlite3_io_methods), "::", stringify!(xShmMap) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_io_methods>())).xShmLock as *const _ as usize }, 112usize, concat!( "Offset of field: ", stringify!(sqlite3_io_methods), "::", 
stringify!(xShmLock) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_io_methods>())).xShmBarrier as *const _ as usize }, 120usize, concat!( "Offset of field: ", stringify!(sqlite3_io_methods), "::", stringify!(xShmBarrier) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_io_methods>())).xShmUnmap as *const _ as usize }, 128usize, concat!( "Offset of field: ", stringify!(sqlite3_io_methods), "::", stringify!(xShmUnmap) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_io_methods>())).xFetch as *const _ as usize }, 136usize, concat!( "Offset of field: ", stringify!(sqlite3_io_methods), "::", stringify!(xFetch) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_io_methods>())).xUnfetch as *const _ as usize }, 144usize, concat!( "Offset of field: ", stringify!(sqlite3_io_methods), "::", stringify!(xUnfetch) ) ); } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_mutex { _unused: [u8; 0], } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_api_routines { _unused: [u8; 0], } pub type sqlite3_syscall_ptr = ::core::option::Option<unsafe extern "C" fn()>; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_vfs { pub iVersion: libc::c_int, pub szOsFile: libc::c_int, pub mxPathname: libc::c_int, pub pNext: *mut sqlite3_vfs, pub zName: *const libc::c_char, pub pAppData: *mut libc::c_void, pub xOpen: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, zName: *const libc::c_char, arg2: *mut sqlite3_file, flags: libc::c_int, pOutFlags: *mut libc::c_int, ) -> libc::c_int, >, pub xDelete: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, zName: *const libc::c_char, syncDir: libc::c_int, ) -> libc::c_int, >, pub xAccess: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, zName: *const libc::c_char, flags: libc::c_int, pResOut: *mut libc::c_int, ) -> libc::c_int, >, pub xFullPathname: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, zName: 
*const libc::c_char, nOut: libc::c_int, zOut: *mut libc::c_char, ) -> libc::c_int, >, pub xDlOpen: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, zFilename: *const libc::c_char, ) -> *mut libc::c_void, >, pub xDlError: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, nByte: libc::c_int, zErrMsg: *mut libc::c_char, ), >, pub xDlSym: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, arg2: *mut libc::c_void, zSymbol: *const libc::c_char, ) -> ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, arg2: *mut libc::c_void, zSymbol: *const libc::c_char, ), >, >, pub xDlClose: ::core::option::Option< unsafe extern "C" fn(arg1: *mut sqlite3_vfs, arg2: *mut libc::c_void), >, pub xRandomness: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, nByte: libc::c_int, zOut: *mut libc::c_char, ) -> libc::c_int, >, pub xSleep: ::core::option::Option< unsafe extern "C" fn(arg1: *mut sqlite3_vfs, microseconds: libc::c_int) -> libc::c_int, >, pub xCurrentTime: ::core::option::Option< unsafe extern "C" fn(arg1: *mut sqlite3_vfs, arg2: *mut f64) -> libc::c_int, >, pub xGetLastError: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, arg2: libc::c_int, arg3: *mut libc::c_char, ) -> libc::c_int, >, pub xCurrentTimeInt64: ::core::option::Option< unsafe extern "C" fn(arg1: *mut sqlite3_vfs, arg2: *mut sqlite3_int64) -> libc::c_int, >, pub xSetSystemCall: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, zName: *const libc::c_char, arg2: sqlite3_syscall_ptr, ) -> libc::c_int, >, pub xGetSystemCall: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, zName: *const libc::c_char, ) -> sqlite3_syscall_ptr, >, pub xNextSystemCall: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vfs, zName: *const libc::c_char, ) -> *const libc::c_char, >, } #[test] fn bindgen_test_layout_sqlite3_vfs() { assert_eq!( 
::core::mem::size_of::<sqlite3_vfs>(), 168usize, concat!("Size of: ", stringify!(sqlite3_vfs)) ); assert_eq!( ::core::mem::align_of::<sqlite3_vfs>(), 8usize, concat!("Alignment of ", stringify!(sqlite3_vfs)) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vfs>())).iVersion as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(sqlite3_vfs), "::", stringify!(iVersion) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vfs>())).szOsFile as *const _ as usize }, 4usize, concat!( "Offset of field: ", stringify!(sqlite3_vfs), "::", stringify!(szOsFile) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vfs>())).mxPathname as *const _ as usize }, 8usize, concat!( "Offset of field: ", stringify!(sqlite3_vfs), "::", stringify!(mxPathname) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vfs>())).pNext as *const _ as usize }, 16usize, concat!( "Offset of field: ", stringify!(sqlite3_vfs), "::", stringify!(pNext) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vfs>())).zName as *const _ as usize }, 24usize, concat!( "Offset of field: ", stringify!(sqlite3_vfs), "::", stringify!(zName) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vfs>())).pAppData as *const _ as usize }, 32usize, concat!( "Offset of field: ", stringify!(sqlite3_vfs), "::", stringify!(pAppData) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vfs>())).xOpen as *const _ as usize }, 40usize, concat!( "Offset of field: ", stringify!(sqlite3_vfs), "::", stringify!(xOpen) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vfs>())).xDelete as *const _ as usize }, 48usize, concat!( "Offset of field: ", stringify!(sqlite3_vfs), "::", stringify!(xDelete) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vfs>())).xAccess as *const _ as usize }, 56usize, concat!( "Offset of field: ", stringify!(sqlite3_vfs), "::", stringify!(xAccess) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vfs>())).xFullPathname as *const _ as 
usize }, 64usize, concat!( "Offset of field: ", stringify!(sqlite3_vfs), "::", stringify!(xFullPathname) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vfs>())).xDlOpen as *const _ as usize }, 72usize, concat!( "Offset of field: ", stringify!(sqlite3_vfs), "::", stringify!(xDlOpen) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vfs>())).xDlError as *const _ as usize }, 80usize, concat!( "Offset of field: ", stringify!(sqlite3_vfs), "::", stringify!(xDlError) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vfs>())).xDlSym as *const _ as usize }, 88usize, concat!( "Offset of field: ", stringify!(sqlite3_vfs), "::", stringify!(xDlSym) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vfs>())).xDlClose as *const _ as usize }, 96usize, concat!( "Offset of field: ", stringify!(sqlite3_vfs), "::", stringify!(xDlClose) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vfs>())).xRandomness as *const _ as usize }, 104usize, concat!( "Offset of field: ", stringify!(sqlite3_vfs), "::", stringify!(xRandomness) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vfs>())).xSleep as *const _ as usize }, 112usize, concat!( "Offset of field: ", stringify!(sqlite3_vfs), "::", stringify!(xSleep) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vfs>())).xCurrentTime as *const _ as usize }, 120usize, concat!( "Offset of field: ", stringify!(sqlite3_vfs), "::", stringify!(xCurrentTime) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vfs>())).xGetLastError as *const _ as usize }, 128usize, concat!( "Offset of field: ", stringify!(sqlite3_vfs), "::", stringify!(xGetLastError) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vfs>())).xCurrentTimeInt64 as *const _ as usize }, 136usize, concat!( "Offset of field: ", stringify!(sqlite3_vfs), "::", stringify!(xCurrentTimeInt64) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vfs>())).xSetSystemCall as *const _ as usize }, 144usize, concat!( "Offset of 
field: ", stringify!(sqlite3_vfs), "::", stringify!(xSetSystemCall) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vfs>())).xGetSystemCall as *const _ as usize }, 152usize, concat!( "Offset of field: ", stringify!(sqlite3_vfs), "::", stringify!(xGetSystemCall) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vfs>())).xNextSystemCall as *const _ as usize }, 160usize, concat!( "Offset of field: ", stringify!(sqlite3_vfs), "::", stringify!(xNextSystemCall) ) ); } extern "C" { pub fn sqlite3_initialize() -> libc::c_int; } extern "C" { pub fn sqlite3_shutdown() -> libc::c_int; } extern "C" { pub fn sqlite3_os_init() -> libc::c_int; } extern "C" { pub fn sqlite3_os_end() -> libc::c_int; } extern "C" { pub fn sqlite3_config(arg1: libc::c_int, ...) -> libc::c_int; } extern "C" { pub fn sqlite3_db_config(arg1: *mut sqlite3, op: libc::c_int, ...) -> libc::c_int; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_mem_methods { pub xMalloc: ::core::option::Option<unsafe extern "C" fn(arg1: libc::c_int) -> *mut libc::c_void>, pub xFree: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, pub xRealloc: ::core::option::Option< unsafe extern "C" fn(arg1: *mut libc::c_void, arg2: libc::c_int) -> *mut libc::c_void, >, pub xSize: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void) -> libc::c_int>, pub xRoundup: ::core::option::Option<unsafe extern "C" fn(arg1: libc::c_int) -> libc::c_int>, pub xInit: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void) -> libc::c_int>, pub xShutdown: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, pub pAppData: *mut libc::c_void, } #[test] fn bindgen_test_layout_sqlite3_mem_methods() { assert_eq!( ::core::mem::size_of::<sqlite3_mem_methods>(), 64usize, concat!("Size of: ", stringify!(sqlite3_mem_methods)) ); assert_eq!( ::core::mem::align_of::<sqlite3_mem_methods>(), 8usize, concat!("Alignment of ", stringify!(sqlite3_mem_methods)) ); assert_eq!( 
unsafe { &(*(::core::ptr::null::<sqlite3_mem_methods>())).xMalloc as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(sqlite3_mem_methods), "::", stringify!(xMalloc) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_mem_methods>())).xFree as *const _ as usize }, 8usize, concat!( "Offset of field: ", stringify!(sqlite3_mem_methods), "::", stringify!(xFree) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_mem_methods>())).xRealloc as *const _ as usize }, 16usize, concat!( "Offset of field: ", stringify!(sqlite3_mem_methods), "::", stringify!(xRealloc) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_mem_methods>())).xSize as *const _ as usize }, 24usize, concat!( "Offset of field: ", stringify!(sqlite3_mem_methods), "::", stringify!(xSize) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_mem_methods>())).xRoundup as *const _ as usize }, 32usize, concat!( "Offset of field: ", stringify!(sqlite3_mem_methods), "::", stringify!(xRoundup) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_mem_methods>())).xInit as *const _ as usize }, 40usize, concat!( "Offset of field: ", stringify!(sqlite3_mem_methods), "::", stringify!(xInit) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_mem_methods>())).xShutdown as *const _ as usize }, 48usize, concat!( "Offset of field: ", stringify!(sqlite3_mem_methods), "::", stringify!(xShutdown) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_mem_methods>())).pAppData as *const _ as usize }, 56usize, concat!( "Offset of field: ", stringify!(sqlite3_mem_methods), "::", stringify!(pAppData) ) ); } extern "C" { pub fn sqlite3_extended_result_codes(arg1: *mut sqlite3, onoff: libc::c_int) -> libc::c_int; } extern "C" { pub fn sqlite3_last_insert_rowid(arg1: *mut sqlite3) -> sqlite3_int64; } extern "C" { pub fn sqlite3_set_last_insert_rowid(arg1: *mut sqlite3, arg2: sqlite3_int64); } extern "C" { pub fn sqlite3_changes(arg1: *mut sqlite3) -> libc::c_int; } extern "C" 
{ pub fn sqlite3_total_changes(arg1: *mut sqlite3) -> libc::c_int; } extern "C" { pub fn sqlite3_interrupt(arg1: *mut sqlite3); } extern "C" { pub fn sqlite3_complete(sql: *const libc::c_char) -> libc::c_int; } extern "C" { pub fn sqlite3_complete16(sql: *const libc::c_void) -> libc::c_int; } extern "C" { pub fn sqlite3_busy_handler( arg1: *mut sqlite3, arg2: ::core::option::Option< unsafe extern "C" fn(arg1: *mut libc::c_void, arg2: libc::c_int) -> libc::c_int, >, arg3: *mut libc::c_void, ) -> libc::c_int; } extern "C" { pub fn sqlite3_busy_timeout(arg1: *mut sqlite3, ms: libc::c_int) -> libc::c_int; } extern "C" { pub fn sqlite3_get_table( db: *mut sqlite3, zSql: *const libc::c_char, pazResult: *mut *mut *mut libc::c_char, pnRow: *mut libc::c_int, pnColumn: *mut libc::c_int, pzErrmsg: *mut *mut libc::c_char, ) -> libc::c_int; } extern "C" { pub fn sqlite3_free_table(result: *mut *mut libc::c_char); } extern "C" { pub fn sqlite3_mprintf(arg1: *const libc::c_char, ...) -> *mut libc::c_char; } extern "C" { pub fn sqlite3_vmprintf(arg1: *const libc::c_char, arg2: va_list) -> *mut libc::c_char; } extern "C" { pub fn sqlite3_snprintf( arg1: libc::c_int, arg2: *mut libc::c_char, arg3: *const libc::c_char, ... 
) -> *mut libc::c_char; } extern "C" { pub fn sqlite3_vsnprintf( arg1: libc::c_int, arg2: *mut libc::c_char, arg3: *const libc::c_char, arg4: va_list, ) -> *mut libc::c_char; } extern "C" { pub fn sqlite3_malloc(arg1: libc::c_int) -> *mut libc::c_void; } extern "C" { pub fn sqlite3_malloc64(arg1: sqlite3_uint64) -> *mut libc::c_void; } extern "C" { pub fn sqlite3_realloc(arg1: *mut libc::c_void, arg2: libc::c_int) -> *mut libc::c_void; } extern "C" { pub fn sqlite3_realloc64(arg1: *mut libc::c_void, arg2: sqlite3_uint64) -> *mut libc::c_void; } extern "C" { pub fn sqlite3_free(arg1: *mut libc::c_void); } extern "C" { pub fn sqlite3_msize(arg1: *mut libc::c_void) -> sqlite3_uint64; } extern "C" { pub fn sqlite3_memory_used() -> sqlite3_int64; } extern "C" { pub fn sqlite3_memory_highwater(resetFlag: libc::c_int) -> sqlite3_int64; } extern "C" { pub fn sqlite3_randomness(N: libc::c_int, P: *mut libc::c_void); } extern "C" { pub fn sqlite3_set_authorizer( arg1: *mut sqlite3, xAuth: ::core::option::Option< unsafe extern "C" fn( arg1: *mut libc::c_void, arg2: libc::c_int, arg3: *const libc::c_char, arg4: *const libc::c_char, arg5: *const libc::c_char, arg6: *const libc::c_char, ) -> libc::c_int, >, pUserData: *mut libc::c_void, ) -> libc::c_int; } extern "C" { pub fn sqlite3_trace( arg1: *mut sqlite3, xTrace: ::core::option::Option< unsafe extern "C" fn(arg1: *mut libc::c_void, arg2: *const libc::c_char), >, arg2: *mut libc::c_void, ) -> *mut libc::c_void; } extern "C" { pub fn sqlite3_profile( arg1: *mut sqlite3, xProfile: ::core::option::Option< unsafe extern "C" fn( arg1: *mut libc::c_void, arg2: *const libc::c_char, arg3: sqlite3_uint64, ), >, arg2: *mut libc::c_void, ) -> *mut libc::c_void; } extern "C" { pub fn sqlite3_trace_v2( arg1: *mut sqlite3, uMask: libc::c_uint, xCallback: ::core::option::Option< unsafe extern "C" fn( arg1: libc::c_uint, arg2: *mut libc::c_void, arg3: *mut libc::c_void, arg4: *mut libc::c_void, ) -> libc::c_int, >, pCtx: *mut libc::c_void, 
) -> libc::c_int; } extern "C" { pub fn sqlite3_progress_handler( arg1: *mut sqlite3, arg2: libc::c_int, arg3: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void) -> libc::c_int>, arg4: *mut libc::c_void, ); } extern "C" { pub fn sqlite3_open(filename: *const libc::c_char, ppDb: *mut *mut sqlite3) -> libc::c_int; } extern "C" { pub fn sqlite3_open16(filename: *const libc::c_void, ppDb: *mut *mut sqlite3) -> libc::c_int; } extern "C" { pub fn sqlite3_open_v2( filename: *const libc::c_char, ppDb: *mut *mut sqlite3, flags: libc::c_int, zVfs: *const libc::c_char, ) -> libc::c_int; } extern "C" { pub fn sqlite3_uri_parameter( zFilename: *const libc::c_char, zParam: *const libc::c_char, ) -> *const libc::c_char; } extern "C" { pub fn sqlite3_uri_boolean( zFile: *const libc::c_char, zParam: *const libc::c_char, bDefault: libc::c_int, ) -> libc::c_int; } extern "C" { pub fn sqlite3_uri_int64( arg1: *const libc::c_char, arg2: *const libc::c_char, arg3: sqlite3_int64, ) -> sqlite3_int64; } extern "C" { pub fn sqlite3_errcode(db: *mut sqlite3) -> libc::c_int; } extern "C" { pub fn sqlite3_extended_errcode(db: *mut sqlite3) -> libc::c_int; } extern "C" { pub fn sqlite3_errmsg(arg1: *mut sqlite3) -> *const libc::c_char; } extern "C" { pub fn sqlite3_errmsg16(arg1: *mut sqlite3) -> *const libc::c_void; } extern "C" { pub fn sqlite3_errstr(arg1: libc::c_int) -> *const libc::c_char; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_stmt { _unused: [u8; 0], } extern "C" { pub fn sqlite3_limit(arg1: *mut sqlite3, id: libc::c_int, newVal: libc::c_int) -> libc::c_int; } extern "C" { pub fn sqlite3_prepare( db: *mut sqlite3, zSql: *const libc::c_char, nByte: libc::c_int, ppStmt: *mut *mut sqlite3_stmt, pzTail: *mut *const libc::c_char, ) -> libc::c_int; } extern "C" { pub fn sqlite3_prepare_v2( db: *mut sqlite3, zSql: *const libc::c_char, nByte: libc::c_int, ppStmt: *mut *mut sqlite3_stmt, pzTail: *mut *const libc::c_char, ) -> libc::c_int; } extern "C" { 
pub fn sqlite3_prepare_v3( db: *mut sqlite3, zSql: *const libc::c_char, nByte: libc::c_int, prepFlags: libc::c_uint, ppStmt: *mut *mut sqlite3_stmt, pzTail: *mut *const libc::c_char, ) -> libc::c_int; } extern "C" { pub fn sqlite3_prepare16( db: *mut sqlite3, zSql: *const libc::c_void, nByte: libc::c_int, ppStmt: *mut *mut sqlite3_stmt, pzTail: *mut *const libc::c_void, ) -> libc::c_int; } extern "C" { pub fn sqlite3_prepare16_v2( db: *mut sqlite3, zSql: *const libc::c_void, nByte: libc::c_int, ppStmt: *mut *mut sqlite3_stmt, pzTail: *mut *const libc::c_void, ) -> libc::c_int; } extern "C" { pub fn sqlite3_prepare16_v3( db: *mut sqlite3, zSql: *const libc::c_void, nByte: libc::c_int, prepFlags: libc::c_uint, ppStmt: *mut *mut sqlite3_stmt, pzTail: *mut *const libc::c_void, ) -> libc::c_int; } extern "C" { pub fn sqlite3_sql(pStmt: *mut sqlite3_stmt) -> *const libc::c_char; } extern "C" { pub fn sqlite3_expanded_sql(pStmt: *mut sqlite3_stmt) -> *mut libc::c_char; } extern "C" { pub fn sqlite3_normalized_sql(pStmt: *mut sqlite3_stmt) -> *const libc::c_char; } extern "C" { pub fn sqlite3_stmt_readonly(pStmt: *mut sqlite3_stmt) -> libc::c_int; } extern "C" { pub fn sqlite3_stmt_isexplain(pStmt: *mut sqlite3_stmt) -> libc::c_int; } extern "C" { pub fn sqlite3_stmt_busy(arg1: *mut sqlite3_stmt) -> libc::c_int; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_value { _unused: [u8; 0], } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_context { _unused: [u8; 0], } extern "C" { pub fn sqlite3_bind_blob( arg1: *mut sqlite3_stmt, arg2: libc::c_int, arg3: *const libc::c_void, n: libc::c_int, arg4: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, ) -> libc::c_int; } extern "C" { pub fn sqlite3_bind_blob64( arg1: *mut sqlite3_stmt, arg2: libc::c_int, arg3: *const libc::c_void, arg4: sqlite3_uint64, arg5: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, ) -> libc::c_int; } extern "C" { pub fn 
sqlite3_bind_double( arg1: *mut sqlite3_stmt, arg2: libc::c_int, arg3: f64, ) -> libc::c_int; } extern "C" { pub fn sqlite3_bind_int( arg1: *mut sqlite3_stmt, arg2: libc::c_int, arg3: libc::c_int, ) -> libc::c_int; } extern "C" { pub fn sqlite3_bind_int64( arg1: *mut sqlite3_stmt, arg2: libc::c_int, arg3: sqlite3_int64, ) -> libc::c_int; } extern "C" { pub fn sqlite3_bind_null(arg1: *mut sqlite3_stmt, arg2: libc::c_int) -> libc::c_int; } extern "C" { pub fn sqlite3_bind_text( arg1: *mut sqlite3_stmt, arg2: libc::c_int, arg3: *const libc::c_char, arg4: libc::c_int, arg5: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, ) -> libc::c_int; } extern "C" { pub fn sqlite3_bind_text16( arg1: *mut sqlite3_stmt, arg2: libc::c_int, arg3: *const libc::c_void, arg4: libc::c_int, arg5: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, ) -> libc::c_int; } extern "C" { pub fn sqlite3_bind_text64( arg1: *mut sqlite3_stmt, arg2: libc::c_int, arg3: *const libc::c_char, arg4: sqlite3_uint64, arg5: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, encoding: libc::c_uchar, ) -> libc::c_int; } extern "C" { pub fn sqlite3_bind_value( arg1: *mut sqlite3_stmt, arg2: libc::c_int, arg3: *const sqlite3_value, ) -> libc::c_int; } extern "C" { pub fn sqlite3_bind_pointer( arg1: *mut sqlite3_stmt, arg2: libc::c_int, arg3: *mut libc::c_void, arg4: *const libc::c_char, arg5: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, ) -> libc::c_int; } extern "C" { pub fn sqlite3_bind_zeroblob( arg1: *mut sqlite3_stmt, arg2: libc::c_int, n: libc::c_int, ) -> libc::c_int; } extern "C" { pub fn sqlite3_bind_zeroblob64( arg1: *mut sqlite3_stmt, arg2: libc::c_int, arg3: sqlite3_uint64, ) -> libc::c_int; } extern "C" { pub fn sqlite3_bind_parameter_count(arg1: *mut sqlite3_stmt) -> libc::c_int; } extern "C" { pub fn sqlite3_bind_parameter_name( arg1: *mut sqlite3_stmt, arg2: libc::c_int, ) -> *const libc::c_char; } extern "C" { 
pub fn sqlite3_bind_parameter_index( arg1: *mut sqlite3_stmt, zName: *const libc::c_char, ) -> libc::c_int; } extern "C" { pub fn sqlite3_clear_bindings(arg1: *mut sqlite3_stmt) -> libc::c_int; } extern "C" { pub fn sqlite3_column_count(pStmt: *mut sqlite3_stmt) -> libc::c_int; } extern "C" { pub fn sqlite3_column_name(arg1: *mut sqlite3_stmt, N: libc::c_int) -> *const libc::c_char; } extern "C" { pub fn sqlite3_column_name16(arg1: *mut sqlite3_stmt, N: libc::c_int) -> *const libc::c_void; } extern "C" { pub fn sqlite3_column_database_name( arg1: *mut sqlite3_stmt, arg2: libc::c_int, ) -> *const libc::c_char; } extern "C" { pub fn sqlite3_column_database_name16( arg1: *mut sqlite3_stmt, arg2: libc::c_int, ) -> *const libc::c_void; } extern "C" { pub fn sqlite3_column_table_name( arg1: *mut sqlite3_stmt, arg2: libc::c_int, ) -> *const libc::c_char; } extern "C" { pub fn sqlite3_column_table_name16( arg1: *mut sqlite3_stmt, arg2: libc::c_int, ) -> *const libc::c_void; } extern "C" { pub fn sqlite3_column_origin_name( arg1: *mut sqlite3_stmt, arg2: libc::c_int, ) -> *const libc::c_char; } extern "C" { pub fn sqlite3_column_origin_name16( arg1: *mut sqlite3_stmt, arg2: libc::c_int, ) -> *const libc::c_void; } extern "C" { pub fn sqlite3_column_decltype( arg1: *mut sqlite3_stmt, arg2: libc::c_int, ) -> *const libc::c_char; } extern "C" { pub fn sqlite3_column_decltype16( arg1: *mut sqlite3_stmt, arg2: libc::c_int, ) -> *const libc::c_void; } extern "C" { pub fn sqlite3_step(arg1: *mut sqlite3_stmt) -> libc::c_int; } extern "C" { pub fn sqlite3_data_count(pStmt: *mut sqlite3_stmt) -> libc::c_int; } extern "C" { pub fn sqlite3_column_blob(arg1: *mut sqlite3_stmt, iCol: libc::c_int) -> *const libc::c_void; } extern "C" { pub fn sqlite3_column_double(arg1: *mut sqlite3_stmt, iCol: libc::c_int) -> f64; } extern "C" { pub fn sqlite3_column_int(arg1: *mut sqlite3_stmt, iCol: libc::c_int) -> libc::c_int; } extern "C" { pub fn sqlite3_column_int64(arg1: *mut sqlite3_stmt, iCol: 
libc::c_int) -> sqlite3_int64; } extern "C" { pub fn sqlite3_column_text(arg1: *mut sqlite3_stmt, iCol: libc::c_int) -> *const libc::c_uchar; } extern "C" { pub fn sqlite3_column_text16(arg1: *mut sqlite3_stmt, iCol: libc::c_int) -> *const libc::c_void; } extern "C" { pub fn sqlite3_column_value(arg1: *mut sqlite3_stmt, iCol: libc::c_int) -> *mut sqlite3_value; } extern "C" { pub fn sqlite3_column_bytes(arg1: *mut sqlite3_stmt, iCol: libc::c_int) -> libc::c_int; } extern "C" { pub fn sqlite3_column_bytes16(arg1: *mut sqlite3_stmt, iCol: libc::c_int) -> libc::c_int; } extern "C" { pub fn sqlite3_column_type(arg1: *mut sqlite3_stmt, iCol: libc::c_int) -> libc::c_int; } extern "C" { pub fn sqlite3_finalize(pStmt: *mut sqlite3_stmt) -> libc::c_int; } extern "C" { pub fn sqlite3_reset(pStmt: *mut sqlite3_stmt) -> libc::c_int; } extern "C" { pub fn sqlite3_create_function( db: *mut sqlite3, zFunctionName: *const libc::c_char, nArg: libc::c_int, eTextRep: libc::c_int, pApp: *mut libc::c_void, xFunc: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_context, arg2: libc::c_int, arg3: *mut *mut sqlite3_value, ), >, xStep: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_context, arg2: libc::c_int, arg3: *mut *mut sqlite3_value, ), >, xFinal: ::core::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_context)>, ) -> libc::c_int; } extern "C" { pub fn sqlite3_create_function16( db: *mut sqlite3, zFunctionName: *const libc::c_void, nArg: libc::c_int, eTextRep: libc::c_int, pApp: *mut libc::c_void, xFunc: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_context, arg2: libc::c_int, arg3: *mut *mut sqlite3_value, ), >, xStep: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_context, arg2: libc::c_int, arg3: *mut *mut sqlite3_value, ), >, xFinal: ::core::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_context)>, ) -> libc::c_int; } extern "C" { pub fn sqlite3_create_function_v2( db: *mut sqlite3, 
zFunctionName: *const libc::c_char, nArg: libc::c_int, eTextRep: libc::c_int, pApp: *mut libc::c_void, xFunc: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_context, arg2: libc::c_int, arg3: *mut *mut sqlite3_value, ), >, xStep: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_context, arg2: libc::c_int, arg3: *mut *mut sqlite3_value, ), >, xFinal: ::core::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_context)>, xDestroy: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, ) -> libc::c_int; } extern "C" { pub fn sqlite3_create_window_function( db: *mut sqlite3, zFunctionName: *const libc::c_char, nArg: libc::c_int, eTextRep: libc::c_int, pApp: *mut libc::c_void, xStep: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_context, arg2: libc::c_int, arg3: *mut *mut sqlite3_value, ), >, xFinal: ::core::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_context)>, xValue: ::core::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_context)>, xInverse: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_context, arg2: libc::c_int, arg3: *mut *mut sqlite3_value, ), >, xDestroy: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, ) -> libc::c_int; } extern "C" { pub fn sqlite3_aggregate_count(arg1: *mut sqlite3_context) -> libc::c_int; } extern "C" { pub fn sqlite3_expired(arg1: *mut sqlite3_stmt) -> libc::c_int; } extern "C" { pub fn sqlite3_transfer_bindings( arg1: *mut sqlite3_stmt, arg2: *mut sqlite3_stmt, ) -> libc::c_int; } extern "C" { pub fn sqlite3_global_recover() -> libc::c_int; } extern "C" { pub fn sqlite3_thread_cleanup(); } extern "C" { pub fn sqlite3_memory_alarm( arg1: ::core::option::Option< unsafe extern "C" fn(arg1: *mut libc::c_void, arg2: sqlite3_int64, arg3: libc::c_int), >, arg2: *mut libc::c_void, arg3: sqlite3_int64, ) -> libc::c_int; } extern "C" { pub fn sqlite3_value_blob(arg1: *mut sqlite3_value) -> *const libc::c_void; } extern 
"C" { pub fn sqlite3_value_double(arg1: *mut sqlite3_value) -> f64; } extern "C" { pub fn sqlite3_value_int(arg1: *mut sqlite3_value) -> libc::c_int; } extern "C" { pub fn sqlite3_value_int64(arg1: *mut sqlite3_value) -> sqlite3_int64; } extern "C" { pub fn sqlite3_value_pointer( arg1: *mut sqlite3_value, arg2: *const libc::c_char, ) -> *mut libc::c_void; } extern "C" { pub fn sqlite3_value_text(arg1: *mut sqlite3_value) -> *const libc::c_uchar; } extern "C" { pub fn sqlite3_value_text16(arg1: *mut sqlite3_value) -> *const libc::c_void; } extern "C" { pub fn sqlite3_value_text16le(arg1: *mut sqlite3_value) -> *const libc::c_void; } extern "C" { pub fn sqlite3_value_text16be(arg1: *mut sqlite3_value) -> *const libc::c_void; } extern "C" { pub fn sqlite3_value_bytes(arg1: *mut sqlite3_value) -> libc::c_int; } extern "C" { pub fn sqlite3_value_bytes16(arg1: *mut sqlite3_value) -> libc::c_int; } extern "C" { pub fn sqlite3_value_type(arg1: *mut sqlite3_value) -> libc::c_int; } extern "C" { pub fn sqlite3_value_numeric_type(arg1: *mut sqlite3_value) -> libc::c_int; } extern "C" { pub fn sqlite3_value_nochange(arg1: *mut sqlite3_value) -> libc::c_int; } extern "C" { pub fn sqlite3_value_frombind(arg1: *mut sqlite3_value) -> libc::c_int; } extern "C" { pub fn sqlite3_value_subtype(arg1: *mut sqlite3_value) -> libc::c_uint; } extern "C" { pub fn sqlite3_value_dup(arg1: *const sqlite3_value) -> *mut sqlite3_value; } extern "C" { pub fn sqlite3_value_free(arg1: *mut sqlite3_value); } extern "C" { pub fn sqlite3_aggregate_context( arg1: *mut sqlite3_context, nBytes: libc::c_int, ) -> *mut libc::c_void; } extern "C" { pub fn sqlite3_user_data(arg1: *mut sqlite3_context) -> *mut libc::c_void; } extern "C" { pub fn sqlite3_context_db_handle(arg1: *mut sqlite3_context) -> *mut sqlite3; } extern "C" { pub fn sqlite3_get_auxdata(arg1: *mut sqlite3_context, N: libc::c_int) -> *mut libc::c_void; } extern "C" { pub fn sqlite3_set_auxdata( arg1: *mut sqlite3_context, N: libc::c_int, 
arg2: *mut libc::c_void, arg3: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, ); } pub type sqlite3_destructor_type = ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>; extern "C" { pub fn sqlite3_result_blob( arg1: *mut sqlite3_context, arg2: *const libc::c_void, arg3: libc::c_int, arg4: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, ); } extern "C" { pub fn sqlite3_result_blob64( arg1: *mut sqlite3_context, arg2: *const libc::c_void, arg3: sqlite3_uint64, arg4: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, ); } extern "C" { pub fn sqlite3_result_double(arg1: *mut sqlite3_context, arg2: f64); } extern "C" { pub fn sqlite3_result_error( arg1: *mut sqlite3_context, arg2: *const libc::c_char, arg3: libc::c_int, ); } extern "C" { pub fn sqlite3_result_error16( arg1: *mut sqlite3_context, arg2: *const libc::c_void, arg3: libc::c_int, ); } extern "C" { pub fn sqlite3_result_error_toobig(arg1: *mut sqlite3_context); } extern "C" { pub fn sqlite3_result_error_nomem(arg1: *mut sqlite3_context); } extern "C" { pub fn sqlite3_result_error_code(arg1: *mut sqlite3_context, arg2: libc::c_int); } extern "C" { pub fn sqlite3_result_int(arg1: *mut sqlite3_context, arg2: libc::c_int); } extern "C" { pub fn sqlite3_result_int64(arg1: *mut sqlite3_context, arg2: sqlite3_int64); } extern "C" { pub fn sqlite3_result_null(arg1: *mut sqlite3_context); } extern "C" { pub fn sqlite3_result_text( arg1: *mut sqlite3_context, arg2: *const libc::c_char, arg3: libc::c_int, arg4: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, ); } extern "C" { pub fn sqlite3_result_text64( arg1: *mut sqlite3_context, arg2: *const libc::c_char, arg3: sqlite3_uint64, arg4: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, encoding: libc::c_uchar, ); } extern "C" { pub fn sqlite3_result_text16( arg1: *mut sqlite3_context, arg2: *const libc::c_void, arg3: libc::c_int, arg4: 
::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, ); } extern "C" { pub fn sqlite3_result_text16le( arg1: *mut sqlite3_context, arg2: *const libc::c_void, arg3: libc::c_int, arg4: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, ); } extern "C" { pub fn sqlite3_result_text16be( arg1: *mut sqlite3_context, arg2: *const libc::c_void, arg3: libc::c_int, arg4: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, ); } extern "C" { pub fn sqlite3_result_value(arg1: *mut sqlite3_context, arg2: *mut sqlite3_value); } extern "C" { pub fn sqlite3_result_pointer( arg1: *mut sqlite3_context, arg2: *mut libc::c_void, arg3: *const libc::c_char, arg4: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, ); } extern "C" { pub fn sqlite3_result_zeroblob(arg1: *mut sqlite3_context, n: libc::c_int); } extern "C" { pub fn sqlite3_result_zeroblob64(arg1: *mut sqlite3_context, n: sqlite3_uint64) -> libc::c_int; } extern "C" { pub fn sqlite3_result_subtype(arg1: *mut sqlite3_context, arg2: libc::c_uint); } extern "C" { pub fn sqlite3_create_collation( arg1: *mut sqlite3, zName: *const libc::c_char, eTextRep: libc::c_int, pArg: *mut libc::c_void, xCompare: ::core::option::Option< unsafe extern "C" fn( arg1: *mut libc::c_void, arg2: libc::c_int, arg3: *const libc::c_void, arg4: libc::c_int, arg5: *const libc::c_void, ) -> libc::c_int, >, ) -> libc::c_int; } extern "C" { pub fn sqlite3_create_collation_v2( arg1: *mut sqlite3, zName: *const libc::c_char, eTextRep: libc::c_int, pArg: *mut libc::c_void, xCompare: ::core::option::Option< unsafe extern "C" fn( arg1: *mut libc::c_void, arg2: libc::c_int, arg3: *const libc::c_void, arg4: libc::c_int, arg5: *const libc::c_void, ) -> libc::c_int, >, xDestroy: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, ) -> libc::c_int; } extern "C" { pub fn sqlite3_create_collation16( arg1: *mut sqlite3, zName: *const libc::c_void, eTextRep: libc::c_int, pArg: 
*mut libc::c_void, xCompare: ::core::option::Option< unsafe extern "C" fn( arg1: *mut libc::c_void, arg2: libc::c_int, arg3: *const libc::c_void, arg4: libc::c_int, arg5: *const libc::c_void, ) -> libc::c_int, >, ) -> libc::c_int; } extern "C" { pub fn sqlite3_collation_needed( arg1: *mut sqlite3, arg2: *mut libc::c_void, arg3: ::core::option::Option< unsafe extern "C" fn( arg1: *mut libc::c_void, arg2: *mut sqlite3, eTextRep: libc::c_int, arg3: *const libc::c_char, ), >, ) -> libc::c_int; } extern "C" { pub fn sqlite3_collation_needed16( arg1: *mut sqlite3, arg2: *mut libc::c_void, arg3: ::core::option::Option< unsafe extern "C" fn( arg1: *mut libc::c_void, arg2: *mut sqlite3, eTextRep: libc::c_int, arg3: *const libc::c_void, ), >, ) -> libc::c_int; } extern "C" { pub fn sqlite3_sleep(arg1: libc::c_int) -> libc::c_int; } extern "C" { pub fn sqlite3_win32_set_directory( type_: libc::c_ulong, zValue: *mut libc::c_void, ) -> libc::c_int; } extern "C" { pub fn sqlite3_win32_set_directory8( type_: libc::c_ulong, zValue: *const libc::c_char, ) -> libc::c_int; } extern "C" { pub fn sqlite3_win32_set_directory16( type_: libc::c_ulong, zValue: *const libc::c_void, ) -> libc::c_int; } extern "C" { pub fn sqlite3_get_autocommit(arg1: *mut sqlite3) -> libc::c_int; } extern "C" { pub fn sqlite3_db_handle(arg1: *mut sqlite3_stmt) -> *mut sqlite3; } extern "C" { pub fn sqlite3_db_filename( db: *mut sqlite3, zDbName: *const libc::c_char, ) -> *const libc::c_char; } extern "C" { pub fn sqlite3_db_readonly(db: *mut sqlite3, zDbName: *const libc::c_char) -> libc::c_int; } extern "C" { pub fn sqlite3_next_stmt(pDb: *mut sqlite3, pStmt: *mut sqlite3_stmt) -> *mut sqlite3_stmt; } extern "C" { pub fn sqlite3_commit_hook( arg1: *mut sqlite3, arg2: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void) -> libc::c_int>, arg3: *mut libc::c_void, ) -> *mut libc::c_void; } extern "C" { pub fn sqlite3_rollback_hook( arg1: *mut sqlite3, arg2: ::core::option::Option<unsafe extern 
"C" fn(arg1: *mut libc::c_void)>, arg3: *mut libc::c_void, ) -> *mut libc::c_void; } extern "C" { pub fn sqlite3_update_hook( arg1: *mut sqlite3, arg2: ::core::option::Option< unsafe extern "C" fn( arg1: *mut libc::c_void, arg2: libc::c_int, arg3: *const libc::c_char, arg4: *const libc::c_char, arg5: sqlite3_int64, ), >, arg3: *mut libc::c_void, ) -> *mut libc::c_void; } extern "C" { pub fn sqlite3_enable_shared_cache(arg1: libc::c_int) -> libc::c_int; } extern "C" { pub fn sqlite3_release_memory(arg1: libc::c_int) -> libc::c_int; } extern "C" { pub fn sqlite3_db_release_memory(arg1: *mut sqlite3) -> libc::c_int; } extern "C" { pub fn sqlite3_soft_heap_limit64(N: sqlite3_int64) -> sqlite3_int64; } extern "C" { pub fn sqlite3_soft_heap_limit(N: libc::c_int); } extern "C" { pub fn sqlite3_table_column_metadata( db: *mut sqlite3, zDbName: *const libc::c_char, zTableName: *const libc::c_char, zColumnName: *const libc::c_char, pzDataType: *mut *const libc::c_char, pzCollSeq: *mut *const libc::c_char, pNotNull: *mut libc::c_int, pPrimaryKey: *mut libc::c_int, pAutoinc: *mut libc::c_int, ) -> libc::c_int; } extern "C" { pub fn sqlite3_load_extension( db: *mut sqlite3, zFile: *const libc::c_char, zProc: *const libc::c_char, pzErrMsg: *mut *mut libc::c_char, ) -> libc::c_int; } extern "C" { pub fn sqlite3_enable_load_extension(db: *mut sqlite3, onoff: libc::c_int) -> libc::c_int; } extern "C" { pub fn sqlite3_auto_extension( xEntryPoint: ::core::option::Option<unsafe extern "C" fn()>, ) -> libc::c_int; } extern "C" { pub fn sqlite3_cancel_auto_extension( xEntryPoint: ::core::option::Option<unsafe extern "C" fn()>, ) -> libc::c_int; } extern "C" { pub fn sqlite3_reset_auto_extension(); } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_module { pub iVersion: libc::c_int, pub xCreate: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3, pAux: *mut libc::c_void, argc: libc::c_int, argv: *const *const libc::c_char, ppVTab: *mut *mut sqlite3_vtab, arg2: 
*mut *mut libc::c_char, ) -> libc::c_int, >, pub xConnect: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3, pAux: *mut libc::c_void, argc: libc::c_int, argv: *const *const libc::c_char, ppVTab: *mut *mut sqlite3_vtab, arg2: *mut *mut libc::c_char, ) -> libc::c_int, >, pub xBestIndex: ::core::option::Option< unsafe extern "C" fn( pVTab: *mut sqlite3_vtab, arg1: *mut sqlite3_index_info, ) -> libc::c_int, >, pub xDisconnect: ::core::option::Option<unsafe extern "C" fn(pVTab: *mut sqlite3_vtab) -> libc::c_int>, pub xDestroy: ::core::option::Option<unsafe extern "C" fn(pVTab: *mut sqlite3_vtab) -> libc::c_int>, pub xOpen: ::core::option::Option< unsafe extern "C" fn( pVTab: *mut sqlite3_vtab, ppCursor: *mut *mut sqlite3_vtab_cursor, ) -> libc::c_int, >, pub xClose: ::core::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_vtab_cursor) -> libc::c_int>, pub xFilter: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vtab_cursor, idxNum: libc::c_int, idxStr: *const libc::c_char, argc: libc::c_int, argv: *mut *mut sqlite3_value, ) -> libc::c_int, >, pub xNext: ::core::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_vtab_cursor) -> libc::c_int>, pub xEof: ::core::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_vtab_cursor) -> libc::c_int>, pub xColumn: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vtab_cursor, arg2: *mut sqlite3_context, arg3: libc::c_int, ) -> libc::c_int, >, pub xRowid: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vtab_cursor, pRowid: *mut sqlite3_int64, ) -> libc::c_int, >, pub xUpdate: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_vtab, arg2: libc::c_int, arg3: *mut *mut sqlite3_value, arg4: *mut sqlite3_int64, ) -> libc::c_int, >, pub xBegin: ::core::option::Option<unsafe extern "C" fn(pVTab: *mut sqlite3_vtab) -> libc::c_int>, pub xSync: ::core::option::Option<unsafe extern "C" fn(pVTab: *mut sqlite3_vtab) -> libc::c_int>, pub xCommit: 
::core::option::Option<unsafe extern "C" fn(pVTab: *mut sqlite3_vtab) -> libc::c_int>, pub xRollback: ::core::option::Option<unsafe extern "C" fn(pVTab: *mut sqlite3_vtab) -> libc::c_int>, pub xFindFunction: ::core::option::Option< unsafe extern "C" fn( pVtab: *mut sqlite3_vtab, nArg: libc::c_int, zName: *const libc::c_char, pxFunc: *mut ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_context, arg2: libc::c_int, arg3: *mut *mut sqlite3_value, ), >, ppArg: *mut *mut libc::c_void, ) -> libc::c_int, >, pub xRename: ::core::option::Option< unsafe extern "C" fn(pVtab: *mut sqlite3_vtab, zNew: *const libc::c_char) -> libc::c_int, >, pub xSavepoint: ::core::option::Option< unsafe extern "C" fn(pVTab: *mut sqlite3_vtab, arg1: libc::c_int) -> libc::c_int, >, pub xRelease: ::core::option::Option< unsafe extern "C" fn(pVTab: *mut sqlite3_vtab, arg1: libc::c_int) -> libc::c_int, >, pub xRollbackTo: ::core::option::Option< unsafe extern "C" fn(pVTab: *mut sqlite3_vtab, arg1: libc::c_int) -> libc::c_int, >, pub xShadowName: ::core::option::Option<unsafe extern "C" fn(arg1: *const libc::c_char) -> libc::c_int>, } #[test] fn bindgen_test_layout_sqlite3_module() { assert_eq!( ::core::mem::size_of::<sqlite3_module>(), 192usize, concat!("Size of: ", stringify!(sqlite3_module)) ); assert_eq!( ::core::mem::align_of::<sqlite3_module>(), 8usize, concat!("Alignment of ", stringify!(sqlite3_module)) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_module>())).iVersion as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(iVersion) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_module>())).xCreate as *const _ as usize }, 8usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(xCreate) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_module>())).xConnect as *const _ as usize }, 16usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(xConnect) 
) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_module>())).xBestIndex as *const _ as usize }, 24usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(xBestIndex) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_module>())).xDisconnect as *const _ as usize }, 32usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(xDisconnect) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_module>())).xDestroy as *const _ as usize }, 40usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(xDestroy) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_module>())).xOpen as *const _ as usize }, 48usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(xOpen) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_module>())).xClose as *const _ as usize }, 56usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(xClose) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_module>())).xFilter as *const _ as usize }, 64usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(xFilter) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_module>())).xNext as *const _ as usize }, 72usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(xNext) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_module>())).xEof as *const _ as usize }, 80usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(xEof) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_module>())).xColumn as *const _ as usize }, 88usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(xColumn) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_module>())).xRowid as *const _ as usize }, 96usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(xRowid) ) ); assert_eq!( unsafe { 
&(*(::core::ptr::null::<sqlite3_module>())).xUpdate as *const _ as usize }, 104usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(xUpdate) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_module>())).xBegin as *const _ as usize }, 112usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(xBegin) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_module>())).xSync as *const _ as usize }, 120usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(xSync) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_module>())).xCommit as *const _ as usize }, 128usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(xCommit) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_module>())).xRollback as *const _ as usize }, 136usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(xRollback) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_module>())).xFindFunction as *const _ as usize }, 144usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(xFindFunction) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_module>())).xRename as *const _ as usize }, 152usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(xRename) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_module>())).xSavepoint as *const _ as usize }, 160usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(xSavepoint) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_module>())).xRelease as *const _ as usize }, 168usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(xRelease) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_module>())).xRollbackTo as *const _ as usize }, 176usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(xRollbackTo) ) ); assert_eq!( unsafe { 
&(*(::core::ptr::null::<sqlite3_module>())).xShadowName as *const _ as usize }, 184usize, concat!( "Offset of field: ", stringify!(sqlite3_module), "::", stringify!(xShadowName) ) ); } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_index_info { pub nConstraint: libc::c_int, pub aConstraint: *mut sqlite3_index_info_sqlite3_index_constraint, pub nOrderBy: libc::c_int, pub aOrderBy: *mut sqlite3_index_info_sqlite3_index_orderby, pub aConstraintUsage: *mut sqlite3_index_info_sqlite3_index_constraint_usage, pub idxNum: libc::c_int, pub idxStr: *mut libc::c_char, pub needToFreeIdxStr: libc::c_int, pub orderByConsumed: libc::c_int, pub estimatedCost: f64, pub estimatedRows: sqlite3_int64, pub idxFlags: libc::c_int, pub colUsed: sqlite3_uint64, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_index_info_sqlite3_index_constraint { pub iColumn: libc::c_int, pub op: libc::c_uchar, pub usable: libc::c_uchar, pub iTermOffset: libc::c_int, } #[test] fn bindgen_test_layout_sqlite3_index_info_sqlite3_index_constraint() { assert_eq!( ::core::mem::size_of::<sqlite3_index_info_sqlite3_index_constraint>(), 12usize, concat!( "Size of: ", stringify!(sqlite3_index_info_sqlite3_index_constraint) ) ); assert_eq!( ::core::mem::align_of::<sqlite3_index_info_sqlite3_index_constraint>(), 4usize, concat!( "Alignment of ", stringify!(sqlite3_index_info_sqlite3_index_constraint) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_index_info_sqlite3_index_constraint>())).iColumn as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(sqlite3_index_info_sqlite3_index_constraint), "::", stringify!(iColumn) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_index_info_sqlite3_index_constraint>())).op as *const _ as usize }, 4usize, concat!( "Offset of field: ", stringify!(sqlite3_index_info_sqlite3_index_constraint), "::", stringify!(op) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_index_info_sqlite3_index_constraint>())).usable as 
*const _ as usize }, 5usize, concat!( "Offset of field: ", stringify!(sqlite3_index_info_sqlite3_index_constraint), "::", stringify!(usable) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_index_info_sqlite3_index_constraint>())).iTermOffset as *const _ as usize }, 8usize, concat!( "Offset of field: ", stringify!(sqlite3_index_info_sqlite3_index_constraint), "::", stringify!(iTermOffset) ) ); } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_index_info_sqlite3_index_orderby { pub iColumn: libc::c_int, pub desc: libc::c_uchar, } #[test] fn bindgen_test_layout_sqlite3_index_info_sqlite3_index_orderby() { assert_eq!( ::core::mem::size_of::<sqlite3_index_info_sqlite3_index_orderby>(), 8usize, concat!( "Size of: ", stringify!(sqlite3_index_info_sqlite3_index_orderby) ) ); assert_eq!( ::core::mem::align_of::<sqlite3_index_info_sqlite3_index_orderby>(), 4usize, concat!( "Alignment of ", stringify!(sqlite3_index_info_sqlite3_index_orderby) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_index_info_sqlite3_index_orderby>())).iColumn as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(sqlite3_index_info_sqlite3_index_orderby), "::", stringify!(iColumn) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_index_info_sqlite3_index_orderby>())).desc as *const _ as usize }, 4usize, concat!( "Offset of field: ", stringify!(sqlite3_index_info_sqlite3_index_orderby), "::", stringify!(desc) ) ); } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_index_info_sqlite3_index_constraint_usage { pub argvIndex: libc::c_int, pub omit: libc::c_uchar, } #[test] fn bindgen_test_layout_sqlite3_index_info_sqlite3_index_constraint_usage() { assert_eq!( ::core::mem::size_of::<sqlite3_index_info_sqlite3_index_constraint_usage>(), 8usize, concat!( "Size of: ", stringify!(sqlite3_index_info_sqlite3_index_constraint_usage) ) ); assert_eq!( ::core::mem::align_of::<sqlite3_index_info_sqlite3_index_constraint_usage>(), 4usize, concat!( 
"Alignment of ", stringify!(sqlite3_index_info_sqlite3_index_constraint_usage) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_index_info_sqlite3_index_constraint_usage>())).argvIndex as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(sqlite3_index_info_sqlite3_index_constraint_usage), "::", stringify!(argvIndex) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_index_info_sqlite3_index_constraint_usage>())).omit as *const _ as usize }, 4usize, concat!( "Offset of field: ", stringify!(sqlite3_index_info_sqlite3_index_constraint_usage), "::", stringify!(omit) ) ); } #[test] fn bindgen_test_layout_sqlite3_index_info() { assert_eq!( ::core::mem::size_of::<sqlite3_index_info>(), 96usize, concat!("Size of: ", stringify!(sqlite3_index_info)) ); assert_eq!( ::core::mem::align_of::<sqlite3_index_info>(), 8usize, concat!("Alignment of ", stringify!(sqlite3_index_info)) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_index_info>())).nConstraint as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(sqlite3_index_info), "::", stringify!(nConstraint) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_index_info>())).aConstraint as *const _ as usize }, 8usize, concat!( "Offset of field: ", stringify!(sqlite3_index_info), "::", stringify!(aConstraint) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_index_info>())).nOrderBy as *const _ as usize }, 16usize, concat!( "Offset of field: ", stringify!(sqlite3_index_info), "::", stringify!(nOrderBy) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_index_info>())).aOrderBy as *const _ as usize }, 24usize, concat!( "Offset of field: ", stringify!(sqlite3_index_info), "::", stringify!(aOrderBy) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_index_info>())).aConstraintUsage as *const _ as usize }, 32usize, concat!( "Offset of field: ", stringify!(sqlite3_index_info), "::", stringify!(aConstraintUsage) ) ); assert_eq!( unsafe { 
&(*(::core::ptr::null::<sqlite3_index_info>())).idxNum as *const _ as usize }, 40usize, concat!( "Offset of field: ", stringify!(sqlite3_index_info), "::", stringify!(idxNum) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_index_info>())).idxStr as *const _ as usize }, 48usize, concat!( "Offset of field: ", stringify!(sqlite3_index_info), "::", stringify!(idxStr) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_index_info>())).needToFreeIdxStr as *const _ as usize }, 56usize, concat!( "Offset of field: ", stringify!(sqlite3_index_info), "::", stringify!(needToFreeIdxStr) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_index_info>())).orderByConsumed as *const _ as usize }, 60usize, concat!( "Offset of field: ", stringify!(sqlite3_index_info), "::", stringify!(orderByConsumed) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_index_info>())).estimatedCost as *const _ as usize }, 64usize, concat!( "Offset of field: ", stringify!(sqlite3_index_info), "::", stringify!(estimatedCost) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_index_info>())).estimatedRows as *const _ as usize }, 72usize, concat!( "Offset of field: ", stringify!(sqlite3_index_info), "::", stringify!(estimatedRows) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_index_info>())).idxFlags as *const _ as usize }, 80usize, concat!( "Offset of field: ", stringify!(sqlite3_index_info), "::", stringify!(idxFlags) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_index_info>())).colUsed as *const _ as usize }, 88usize, concat!( "Offset of field: ", stringify!(sqlite3_index_info), "::", stringify!(colUsed) ) ); } extern "C" { pub fn sqlite3_create_module( db: *mut sqlite3, zName: *const libc::c_char, p: *const sqlite3_module, pClientData: *mut libc::c_void, ) -> libc::c_int; } extern "C" { pub fn sqlite3_create_module_v2( db: *mut sqlite3, zName: *const libc::c_char, p: *const sqlite3_module, pClientData: *mut libc::c_void, xDestroy: 
::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, ) -> libc::c_int; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_vtab { pub pModule: *const sqlite3_module, pub nRef: libc::c_int, pub zErrMsg: *mut libc::c_char, } #[test] fn bindgen_test_layout_sqlite3_vtab() { assert_eq!( ::core::mem::size_of::<sqlite3_vtab>(), 24usize, concat!("Size of: ", stringify!(sqlite3_vtab)) ); assert_eq!( ::core::mem::align_of::<sqlite3_vtab>(), 8usize, concat!("Alignment of ", stringify!(sqlite3_vtab)) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vtab>())).pModule as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(sqlite3_vtab), "::", stringify!(pModule) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vtab>())).nRef as *const _ as usize }, 8usize, concat!( "Offset of field: ", stringify!(sqlite3_vtab), "::", stringify!(nRef) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vtab>())).zErrMsg as *const _ as usize }, 16usize, concat!( "Offset of field: ", stringify!(sqlite3_vtab), "::", stringify!(zErrMsg) ) ); } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_vtab_cursor { pub pVtab: *mut sqlite3_vtab, } #[test] fn bindgen_test_layout_sqlite3_vtab_cursor() { assert_eq!( ::core::mem::size_of::<sqlite3_vtab_cursor>(), 8usize, concat!("Size of: ", stringify!(sqlite3_vtab_cursor)) ); assert_eq!( ::core::mem::align_of::<sqlite3_vtab_cursor>(), 8usize, concat!("Alignment of ", stringify!(sqlite3_vtab_cursor)) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_vtab_cursor>())).pVtab as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(sqlite3_vtab_cursor), "::", stringify!(pVtab) ) ); } extern "C" { pub fn sqlite3_declare_vtab(arg1: *mut sqlite3, zSQL: *const libc::c_char) -> libc::c_int; } extern "C" { pub fn sqlite3_overload_function( arg1: *mut sqlite3, zFuncName: *const libc::c_char, nArg: libc::c_int, ) -> libc::c_int; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub 
struct sqlite3_blob { _unused: [u8; 0], } extern "C" { pub fn sqlite3_blob_open( arg1: *mut sqlite3, zDb: *const libc::c_char, zTable: *const libc::c_char, zColumn: *const libc::c_char, iRow: sqlite3_int64, flags: libc::c_int, ppBlob: *mut *mut sqlite3_blob, ) -> libc::c_int; } extern "C" { pub fn sqlite3_blob_reopen(arg1: *mut sqlite3_blob, arg2: sqlite3_int64) -> libc::c_int; } extern "C" { pub fn sqlite3_blob_close(arg1: *mut sqlite3_blob) -> libc::c_int; } extern "C" { pub fn sqlite3_blob_bytes(arg1: *mut sqlite3_blob) -> libc::c_int; } extern "C" { pub fn sqlite3_blob_read( arg1: *mut sqlite3_blob, Z: *mut libc::c_void, N: libc::c_int, iOffset: libc::c_int, ) -> libc::c_int; } extern "C" { pub fn sqlite3_blob_write( arg1: *mut sqlite3_blob, z: *const libc::c_void, n: libc::c_int, iOffset: libc::c_int, ) -> libc::c_int; } extern "C" { pub fn sqlite3_vfs_find(zVfsName: *const libc::c_char) -> *mut sqlite3_vfs; } extern "C" { pub fn sqlite3_vfs_register(arg1: *mut sqlite3_vfs, makeDflt: libc::c_int) -> libc::c_int; } extern "C" { pub fn sqlite3_vfs_unregister(arg1: *mut sqlite3_vfs) -> libc::c_int; } extern "C" { pub fn sqlite3_mutex_alloc(arg1: libc::c_int) -> *mut sqlite3_mutex; } extern "C" { pub fn sqlite3_mutex_free(arg1: *mut sqlite3_mutex); } extern "C" { pub fn sqlite3_mutex_enter(arg1: *mut sqlite3_mutex); } extern "C" { pub fn sqlite3_mutex_try(arg1: *mut sqlite3_mutex) -> libc::c_int; } extern "C" { pub fn sqlite3_mutex_leave(arg1: *mut sqlite3_mutex); } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_mutex_methods { pub xMutexInit: ::core::option::Option<unsafe extern "C" fn() -> libc::c_int>, pub xMutexEnd: ::core::option::Option<unsafe extern "C" fn() -> libc::c_int>, pub xMutexAlloc: ::core::option::Option<unsafe extern "C" fn(arg1: libc::c_int) -> *mut sqlite3_mutex>, pub xMutexFree: ::core::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_mutex)>, pub xMutexEnter: ::core::option::Option<unsafe extern "C" fn(arg1: *mut 
sqlite3_mutex)>, pub xMutexTry: ::core::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_mutex) -> libc::c_int>, pub xMutexLeave: ::core::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_mutex)>, pub xMutexHeld: ::core::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_mutex) -> libc::c_int>, pub xMutexNotheld: ::core::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_mutex) -> libc::c_int>, } #[test] fn bindgen_test_layout_sqlite3_mutex_methods() { assert_eq!( ::core::mem::size_of::<sqlite3_mutex_methods>(), 72usize, concat!("Size of: ", stringify!(sqlite3_mutex_methods)) ); assert_eq!( ::core::mem::align_of::<sqlite3_mutex_methods>(), 8usize, concat!("Alignment of ", stringify!(sqlite3_mutex_methods)) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_mutex_methods>())).xMutexInit as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(sqlite3_mutex_methods), "::", stringify!(xMutexInit) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_mutex_methods>())).xMutexEnd as *const _ as usize }, 8usize, concat!( "Offset of field: ", stringify!(sqlite3_mutex_methods), "::", stringify!(xMutexEnd) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_mutex_methods>())).xMutexAlloc as *const _ as usize }, 16usize, concat!( "Offset of field: ", stringify!(sqlite3_mutex_methods), "::", stringify!(xMutexAlloc) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_mutex_methods>())).xMutexFree as *const _ as usize }, 24usize, concat!( "Offset of field: ", stringify!(sqlite3_mutex_methods), "::", stringify!(xMutexFree) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_mutex_methods>())).xMutexEnter as *const _ as usize }, 32usize, concat!( "Offset of field: ", stringify!(sqlite3_mutex_methods), "::", stringify!(xMutexEnter) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_mutex_methods>())).xMutexTry as *const _ as usize }, 40usize, concat!( "Offset of field: ", stringify!(sqlite3_mutex_methods), "::", 
stringify!(xMutexTry) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_mutex_methods>())).xMutexLeave as *const _ as usize }, 48usize, concat!( "Offset of field: ", stringify!(sqlite3_mutex_methods), "::", stringify!(xMutexLeave) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_mutex_methods>())).xMutexHeld as *const _ as usize }, 56usize, concat!( "Offset of field: ", stringify!(sqlite3_mutex_methods), "::", stringify!(xMutexHeld) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_mutex_methods>())).xMutexNotheld as *const _ as usize }, 64usize, concat!( "Offset of field: ", stringify!(sqlite3_mutex_methods), "::", stringify!(xMutexNotheld) ) ); } extern "C" { pub fn sqlite3_mutex_held(arg1: *mut sqlite3_mutex) -> libc::c_int; } extern "C" { pub fn sqlite3_mutex_notheld(arg1: *mut sqlite3_mutex) -> libc::c_int; } extern "C" { pub fn sqlite3_db_mutex(arg1: *mut sqlite3) -> *mut sqlite3_mutex; } extern "C" { pub fn sqlite3_file_control( arg1: *mut sqlite3, zDbName: *const libc::c_char, op: libc::c_int, arg2: *mut libc::c_void, ) -> libc::c_int; } extern "C" { pub fn sqlite3_test_control(op: libc::c_int, ...) 
-> libc::c_int; } extern "C" { pub fn sqlite3_keyword_count() -> libc::c_int; } extern "C" { pub fn sqlite3_keyword_name( arg1: libc::c_int, arg2: *mut *const libc::c_char, arg3: *mut libc::c_int, ) -> libc::c_int; } extern "C" { pub fn sqlite3_keyword_check(arg1: *const libc::c_char, arg2: libc::c_int) -> libc::c_int; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_str { _unused: [u8; 0], } extern "C" { pub fn sqlite3_str_new(arg1: *mut sqlite3) -> *mut sqlite3_str; } extern "C" { pub fn sqlite3_str_finish(arg1: *mut sqlite3_str) -> *mut libc::c_char; } extern "C" { pub fn sqlite3_str_appendf(arg1: *mut sqlite3_str, zFormat: *const libc::c_char, ...); } extern "C" { pub fn sqlite3_str_vappendf( arg1: *mut sqlite3_str, zFormat: *const libc::c_char, arg2: va_list, ); } extern "C" { pub fn sqlite3_str_append(arg1: *mut sqlite3_str, zIn: *const libc::c_char, N: libc::c_int); } extern "C" { pub fn sqlite3_str_appendall(arg1: *mut sqlite3_str, zIn: *const libc::c_char); } extern "C" { pub fn sqlite3_str_appendchar(arg1: *mut sqlite3_str, N: libc::c_int, C: libc::c_char); } extern "C" { pub fn sqlite3_str_reset(arg1: *mut sqlite3_str); } extern "C" { pub fn sqlite3_str_errcode(arg1: *mut sqlite3_str) -> libc::c_int; } extern "C" { pub fn sqlite3_str_length(arg1: *mut sqlite3_str) -> libc::c_int; } extern "C" { pub fn sqlite3_str_value(arg1: *mut sqlite3_str) -> *mut libc::c_char; } extern "C" { pub fn sqlite3_status( op: libc::c_int, pCurrent: *mut libc::c_int, pHighwater: *mut libc::c_int, resetFlag: libc::c_int, ) -> libc::c_int; } extern "C" { pub fn sqlite3_status64( op: libc::c_int, pCurrent: *mut sqlite3_int64, pHighwater: *mut sqlite3_int64, resetFlag: libc::c_int, ) -> libc::c_int; } extern "C" { pub fn sqlite3_db_status( arg1: *mut sqlite3, op: libc::c_int, pCur: *mut libc::c_int, pHiwtr: *mut libc::c_int, resetFlg: libc::c_int, ) -> libc::c_int; } extern "C" { pub fn sqlite3_stmt_status( arg1: *mut sqlite3_stmt, op: libc::c_int, resetFlg: 
libc::c_int, ) -> libc::c_int; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_pcache { _unused: [u8; 0], } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_pcache_page { pub pBuf: *mut libc::c_void, pub pExtra: *mut libc::c_void, } #[test] fn bindgen_test_layout_sqlite3_pcache_page() { assert_eq!( ::core::mem::size_of::<sqlite3_pcache_page>(), 16usize, concat!("Size of: ", stringify!(sqlite3_pcache_page)) ); assert_eq!( ::core::mem::align_of::<sqlite3_pcache_page>(), 8usize, concat!("Alignment of ", stringify!(sqlite3_pcache_page)) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_page>())).pBuf as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_page), "::", stringify!(pBuf) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_page>())).pExtra as *const _ as usize }, 8usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_page), "::", stringify!(pExtra) ) ); } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_pcache_methods2 { pub iVersion: libc::c_int, pub pArg: *mut libc::c_void, pub xInit: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void) -> libc::c_int>, pub xShutdown: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, pub xCreate: ::core::option::Option< unsafe extern "C" fn( szPage: libc::c_int, szExtra: libc::c_int, bPurgeable: libc::c_int, ) -> *mut sqlite3_pcache, >, pub xCachesize: ::core::option::Option< unsafe extern "C" fn(arg1: *mut sqlite3_pcache, nCachesize: libc::c_int), >, pub xPagecount: ::core::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_pcache) -> libc::c_int>, pub xFetch: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_pcache, key: libc::c_uint, createFlag: libc::c_int, ) -> *mut sqlite3_pcache_page, >, pub xUnpin: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_pcache, arg2: *mut sqlite3_pcache_page, discard: libc::c_int, ), >, pub xRekey: 
::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_pcache, arg2: *mut sqlite3_pcache_page, oldKey: libc::c_uint, newKey: libc::c_uint, ), >, pub xTruncate: ::core::option::Option< unsafe extern "C" fn(arg1: *mut sqlite3_pcache, iLimit: libc::c_uint), >, pub xDestroy: ::core::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_pcache)>, pub xShrink: ::core::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_pcache)>, } #[test] fn bindgen_test_layout_sqlite3_pcache_methods2() { assert_eq!( ::core::mem::size_of::<sqlite3_pcache_methods2>(), 104usize, concat!("Size of: ", stringify!(sqlite3_pcache_methods2)) ); assert_eq!( ::core::mem::align_of::<sqlite3_pcache_methods2>(), 8usize, concat!("Alignment of ", stringify!(sqlite3_pcache_methods2)) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods2>())).iVersion as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_methods2), "::", stringify!(iVersion) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods2>())).pArg as *const _ as usize }, 8usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_methods2), "::", stringify!(pArg) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods2>())).xInit as *const _ as usize }, 16usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_methods2), "::", stringify!(xInit) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods2>())).xShutdown as *const _ as usize }, 24usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_methods2), "::", stringify!(xShutdown) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods2>())).xCreate as *const _ as usize }, 32usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_methods2), "::", stringify!(xCreate) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods2>())).xCachesize as *const _ as usize }, 40usize, concat!( "Offset of field: ", 
stringify!(sqlite3_pcache_methods2), "::", stringify!(xCachesize) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods2>())).xPagecount as *const _ as usize }, 48usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_methods2), "::", stringify!(xPagecount) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods2>())).xFetch as *const _ as usize }, 56usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_methods2), "::", stringify!(xFetch) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods2>())).xUnpin as *const _ as usize }, 64usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_methods2), "::", stringify!(xUnpin) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods2>())).xRekey as *const _ as usize }, 72usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_methods2), "::", stringify!(xRekey) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods2>())).xTruncate as *const _ as usize }, 80usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_methods2), "::", stringify!(xTruncate) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods2>())).xDestroy as *const _ as usize }, 88usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_methods2), "::", stringify!(xDestroy) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods2>())).xShrink as *const _ as usize }, 96usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_methods2), "::", stringify!(xShrink) ) ); } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_pcache_methods { pub pArg: *mut libc::c_void, pub xInit: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void) -> libc::c_int>, pub xShutdown: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, pub xCreate: ::core::option::Option< unsafe extern "C" fn(szPage: libc::c_int, bPurgeable: libc::c_int) -> *mut sqlite3_pcache, >, pub 
xCachesize: ::core::option::Option< unsafe extern "C" fn(arg1: *mut sqlite3_pcache, nCachesize: libc::c_int), >, pub xPagecount: ::core::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_pcache) -> libc::c_int>, pub xFetch: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_pcache, key: libc::c_uint, createFlag: libc::c_int, ) -> *mut libc::c_void, >, pub xUnpin: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_pcache, arg2: *mut libc::c_void, discard: libc::c_int, ), >, pub xRekey: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_pcache, arg2: *mut libc::c_void, oldKey: libc::c_uint, newKey: libc::c_uint, ), >, pub xTruncate: ::core::option::Option< unsafe extern "C" fn(arg1: *mut sqlite3_pcache, iLimit: libc::c_uint), >, pub xDestroy: ::core::option::Option<unsafe extern "C" fn(arg1: *mut sqlite3_pcache)>, } #[test] fn bindgen_test_layout_sqlite3_pcache_methods() { assert_eq!( ::core::mem::size_of::<sqlite3_pcache_methods>(), 88usize, concat!("Size of: ", stringify!(sqlite3_pcache_methods)) ); assert_eq!( ::core::mem::align_of::<sqlite3_pcache_methods>(), 8usize, concat!("Alignment of ", stringify!(sqlite3_pcache_methods)) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods>())).pArg as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_methods), "::", stringify!(pArg) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods>())).xInit as *const _ as usize }, 8usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_methods), "::", stringify!(xInit) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods>())).xShutdown as *const _ as usize }, 16usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_methods), "::", stringify!(xShutdown) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods>())).xCreate as *const _ as usize }, 24usize, concat!( "Offset of field: ", 
stringify!(sqlite3_pcache_methods), "::", stringify!(xCreate) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods>())).xCachesize as *const _ as usize }, 32usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_methods), "::", stringify!(xCachesize) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods>())).xPagecount as *const _ as usize }, 40usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_methods), "::", stringify!(xPagecount) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods>())).xFetch as *const _ as usize }, 48usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_methods), "::", stringify!(xFetch) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods>())).xUnpin as *const _ as usize }, 56usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_methods), "::", stringify!(xUnpin) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods>())).xRekey as *const _ as usize }, 64usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_methods), "::", stringify!(xRekey) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods>())).xTruncate as *const _ as usize }, 72usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_methods), "::", stringify!(xTruncate) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_pcache_methods>())).xDestroy as *const _ as usize }, 80usize, concat!( "Offset of field: ", stringify!(sqlite3_pcache_methods), "::", stringify!(xDestroy) ) ); } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_backup { _unused: [u8; 0], } extern "C" { pub fn sqlite3_backup_init( pDest: *mut sqlite3, zDestName: *const libc::c_char, pSource: *mut sqlite3, zSourceName: *const libc::c_char, ) -> *mut sqlite3_backup; } extern "C" { pub fn sqlite3_backup_step(p: *mut sqlite3_backup, nPage: libc::c_int) -> libc::c_int; } extern "C" { pub fn sqlite3_backup_finish(p: *mut sqlite3_backup) -> 
libc::c_int; } extern "C" { pub fn sqlite3_backup_remaining(p: *mut sqlite3_backup) -> libc::c_int; } extern "C" { pub fn sqlite3_backup_pagecount(p: *mut sqlite3_backup) -> libc::c_int; } extern "C" { pub fn sqlite3_unlock_notify( pBlocked: *mut sqlite3, xNotify: ::core::option::Option< unsafe extern "C" fn(apArg: *mut *mut libc::c_void, nArg: libc::c_int), >, pNotifyArg: *mut libc::c_void, ) -> libc::c_int; } extern "C" { pub fn sqlite3_stricmp(arg1: *const libc::c_char, arg2: *const libc::c_char) -> libc::c_int; } extern "C" { pub fn sqlite3_strnicmp( arg1: *const libc::c_char, arg2: *const libc::c_char, arg3: libc::c_int, ) -> libc::c_int; } extern "C" { pub fn sqlite3_strglob(zGlob: *const libc::c_char, zStr: *const libc::c_char) -> libc::c_int; } extern "C" { pub fn sqlite3_strlike( zGlob: *const libc::c_char, zStr: *const libc::c_char, cEsc: libc::c_uint, ) -> libc::c_int; } extern "C" { pub fn sqlite3_log(iErrCode: libc::c_int, zFormat: *const libc::c_char, ...); } extern "C" { pub fn sqlite3_wal_hook( arg1: *mut sqlite3, arg2: ::core::option::Option< unsafe extern "C" fn( arg1: *mut libc::c_void, arg2: *mut sqlite3, arg3: *const libc::c_char, arg4: libc::c_int, ) -> libc::c_int, >, arg3: *mut libc::c_void, ) -> *mut libc::c_void; } extern "C" { pub fn sqlite3_wal_autocheckpoint(db: *mut sqlite3, N: libc::c_int) -> libc::c_int; } extern "C" { pub fn sqlite3_wal_checkpoint(db: *mut sqlite3, zDb: *const libc::c_char) -> libc::c_int; } extern "C" { pub fn sqlite3_wal_checkpoint_v2( db: *mut sqlite3, zDb: *const libc::c_char, eMode: libc::c_int, pnLog: *mut libc::c_int, pnCkpt: *mut libc::c_int, ) -> libc::c_int; } extern "C" { pub fn sqlite3_vtab_config(arg1: *mut sqlite3, op: libc::c_int, ...) 
-> libc::c_int; } extern "C" { pub fn sqlite3_vtab_on_conflict(arg1: *mut sqlite3) -> libc::c_int; } extern "C" { pub fn sqlite3_vtab_nochange(arg1: *mut sqlite3_context) -> libc::c_int; } extern "C" { pub fn sqlite3_vtab_collation( arg1: *mut sqlite3_index_info, arg2: libc::c_int, ) -> *const libc::c_char; } extern "C" { pub fn sqlite3_stmt_scanstatus( pStmt: *mut sqlite3_stmt, idx: libc::c_int, iScanStatusOp: libc::c_int, pOut: *mut libc::c_void, ) -> libc::c_int; } extern "C" { pub fn sqlite3_stmt_scanstatus_reset(arg1: *mut sqlite3_stmt); } extern "C" { pub fn sqlite3_db_cacheflush(arg1: *mut sqlite3) -> libc::c_int; } extern "C" { pub fn sqlite3_system_errno(arg1: *mut sqlite3) -> libc::c_int; } #[repr(C)] #[derive(Copy, Clone)] pub struct sqlite3_snapshot { pub hidden: [libc::c_uchar; 48usize], } #[test] fn bindgen_test_layout_sqlite3_snapshot() { assert_eq!( ::core::mem::size_of::<sqlite3_snapshot>(), 48usize, concat!("Size of: ", stringify!(sqlite3_snapshot)) ); assert_eq!( ::core::mem::align_of::<sqlite3_snapshot>(), 1usize, concat!("Alignment of ", stringify!(sqlite3_snapshot)) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_snapshot>())).hidden as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(sqlite3_snapshot), "::", stringify!(hidden) ) ); } extern "C" { pub fn sqlite3_snapshot_get( db: *mut sqlite3, zSchema: *const libc::c_char, ppSnapshot: *mut *mut sqlite3_snapshot, ) -> libc::c_int; } extern "C" { pub fn sqlite3_snapshot_open( db: *mut sqlite3, zSchema: *const libc::c_char, pSnapshot: *mut sqlite3_snapshot, ) -> libc::c_int; } extern "C" { pub fn sqlite3_snapshot_free(arg1: *mut sqlite3_snapshot); } extern "C" { pub fn sqlite3_snapshot_cmp( p1: *mut sqlite3_snapshot, p2: *mut sqlite3_snapshot, ) -> libc::c_int; } extern "C" { pub fn sqlite3_snapshot_recover(db: *mut sqlite3, zDb: *const libc::c_char) -> libc::c_int; } extern "C" { pub fn sqlite3_wal_info( db: *mut sqlite3, zDb: *const libc::c_char, pnPrior: *mut 
libc::c_uint, pnFrame: *mut libc::c_uint, ) -> libc::c_int; } extern "C" { pub fn sqlite3_serialize( db: *mut sqlite3, zSchema: *const libc::c_char, piSize: *mut sqlite3_int64, mFlags: libc::c_uint, ) -> *mut libc::c_uchar; } extern "C" { pub fn sqlite3_deserialize( db: *mut sqlite3, zSchema: *const libc::c_char, pData: *mut libc::c_uchar, szDb: sqlite3_int64, szBuf: sqlite3_int64, mFlags: libc::c_uint, ) -> libc::c_int; } pub type sqlite3_rtree_dbl = f64; extern "C" { pub fn sqlite3_rtree_geometry_callback( db: *mut sqlite3, zGeom: *const libc::c_char, xGeom: ::core::option::Option< unsafe extern "C" fn( arg1: *mut sqlite3_rtree_geometry, arg2: libc::c_int, arg3: *mut sqlite3_rtree_dbl, arg4: *mut libc::c_int, ) -> libc::c_int, >, pContext: *mut libc::c_void, ) -> libc::c_int; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_rtree_geometry { pub pContext: *mut libc::c_void, pub nParam: libc::c_int, pub aParam: *mut sqlite3_rtree_dbl, pub pUser: *mut libc::c_void, pub xDelUser: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, } #[test] fn bindgen_test_layout_sqlite3_rtree_geometry() { assert_eq!( ::core::mem::size_of::<sqlite3_rtree_geometry>(), 40usize, concat!("Size of: ", stringify!(sqlite3_rtree_geometry)) ); assert_eq!( ::core::mem::align_of::<sqlite3_rtree_geometry>(), 8usize, concat!("Alignment of ", stringify!(sqlite3_rtree_geometry)) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_rtree_geometry>())).pContext as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(sqlite3_rtree_geometry), "::", stringify!(pContext) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_rtree_geometry>())).nParam as *const _ as usize }, 8usize, concat!( "Offset of field: ", stringify!(sqlite3_rtree_geometry), "::", stringify!(nParam) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_rtree_geometry>())).aParam as *const _ as usize }, 16usize, concat!( "Offset of field: ", 
stringify!(sqlite3_rtree_geometry), "::", stringify!(aParam) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_rtree_geometry>())).pUser as *const _ as usize }, 24usize, concat!( "Offset of field: ", stringify!(sqlite3_rtree_geometry), "::", stringify!(pUser) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_rtree_geometry>())).xDelUser as *const _ as usize }, 32usize, concat!( "Offset of field: ", stringify!(sqlite3_rtree_geometry), "::", stringify!(xDelUser) ) ); } extern "C" { pub fn sqlite3_rtree_query_callback( db: *mut sqlite3, zQueryFunc: *const libc::c_char, xQueryFunc: ::core::option::Option< unsafe extern "C" fn(arg1: *mut sqlite3_rtree_query_info) -> libc::c_int, >, pContext: *mut libc::c_void, xDestructor: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, ) -> libc::c_int; } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct sqlite3_rtree_query_info { pub pContext: *mut libc::c_void, pub nParam: libc::c_int, pub aParam: *mut sqlite3_rtree_dbl, pub pUser: *mut libc::c_void, pub xDelUser: ::core::option::Option<unsafe extern "C" fn(arg1: *mut libc::c_void)>, pub aCoord: *mut sqlite3_rtree_dbl, pub anQueue: *mut libc::c_uint, pub nCoord: libc::c_int, pub iLevel: libc::c_int, pub mxLevel: libc::c_int, pub iRowid: sqlite3_int64, pub rParentScore: sqlite3_rtree_dbl, pub eParentWithin: libc::c_int, pub eWithin: libc::c_int, pub rScore: sqlite3_rtree_dbl, pub apSqlParam: *mut *mut sqlite3_value, } #[test] fn bindgen_test_layout_sqlite3_rtree_query_info() { assert_eq!( ::core::mem::size_of::<sqlite3_rtree_query_info>(), 112usize, concat!("Size of: ", stringify!(sqlite3_rtree_query_info)) ); assert_eq!( ::core::mem::align_of::<sqlite3_rtree_query_info>(), 8usize, concat!("Alignment of ", stringify!(sqlite3_rtree_query_info)) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_rtree_query_info>())).pContext as *const _ as usize }, 0usize, concat!( "Offset of field: ", stringify!(sqlite3_rtree_query_info), "::", 
stringify!(pContext) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_rtree_query_info>())).nParam as *const _ as usize }, 8usize, concat!( "Offset of field: ", stringify!(sqlite3_rtree_query_info), "::", stringify!(nParam) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_rtree_query_info>())).aParam as *const _ as usize }, 16usize, concat!( "Offset of field: ", stringify!(sqlite3_rtree_query_info), "::", stringify!(aParam) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_rtree_query_info>())).pUser as *const _ as usize }, 24usize, concat!( "Offset of field: ", stringify!(sqlite3_rtree_query_info), "::", stringify!(pUser) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_rtree_query_info>())).xDelUser as *const _ as usize }, 32usize, concat!( "Offset of field: ", stringify!(sqlite3_rtree_query_info), "::", stringify!(xDelUser) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_rtree_query_info>())).aCoord as *const _ as usize }, 40usize, concat!( "Offset of field: ", stringify!(sqlite3_rtree_query_info), "::", stringify!(aCoord) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_rtree_query_info>())).anQueue as *const _ as usize }, 48usize, concat!( "Offset of field: ", stringify!(sqlite3_rtree_query_info), "::", stringify!(anQueue) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_rtree_query_info>())).nCoord as *const _ as usize }, 56usize, concat!( "Offset of field: ", stringify!(sqlite3_rtree_query_info), "::", stringify!(nCoord) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_rtree_query_info>())).iLevel as *const _ as usize }, 60usize, concat!( "Offset of field: ", stringify!(sqlite3_rtree_query_info), "::", stringify!(iLevel) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_rtree_query_info>())).mxLevel as *const _ as usize }, 64usize, concat!( "Offset of field: ", stringify!(sqlite3_rtree_query_info), "::", stringify!(mxLevel) ) ); assert_eq!( unsafe { 
&(*(::core::ptr::null::<sqlite3_rtree_query_info>())).iRowid as *const _ as usize }, 72usize, concat!( "Offset of field: ", stringify!(sqlite3_rtree_query_info), "::", stringify!(iRowid) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_rtree_query_info>())).rParentScore as *const _ as usize }, 80usize, concat!( "Offset of field: ", stringify!(sqlite3_rtree_query_info), "::", stringify!(rParentScore) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_rtree_query_info>())).eParentWithin as *const _ as usize }, 88usize, concat!( "Offset of field: ", stringify!(sqlite3_rtree_query_info), "::", stringify!(eParentWithin) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_rtree_query_info>())).eWithin as *const _ as usize }, 92usize, concat!( "Offset of field: ", stringify!(sqlite3_rtree_query_info), "::", stringify!(eWithin) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_rtree_query_info>())).rScore as *const _ as usize }, 96usize, concat!( "Offset of field: ", stringify!(sqlite3_rtree_query_info), "::", stringify!(rScore) ) ); assert_eq!( unsafe { &(*(::core::ptr::null::<sqlite3_rtree_query_info>())).apSqlParam as *const _ as usize }, 104usize, concat!( "Offset of field: ", stringify!(sqlite3_rtree_query_info), "::", stringify!(apSqlParam) ) ); } pub type __builtin_va_list = *mut libc::c_char;
29.463409
100
0.537024
ab0b09b63bed0281c5e6a02870239ce6c257ad62
36,660
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::Ahb1enr { #[doc = r" Modifies the contents of the register"] #[inline(always)] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline(always)] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline(always)] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } } #[doc = "Possible values of the field `OTGHSULPIEN`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum OtghsulpienR { #[doc = "Enabled."] Enabled, #[doc = "Disabled."] Disabled, } impl OtghsulpienR { #[doc = r" Value of the field as raw bits"] #[inline(always)] pub fn bits(&self) -> u8 { match *self { OtghsulpienR::Enabled => 1, OtghsulpienR::Disabled => 0, } } #[allow(missing_docs)] #[doc(hidden)] #[inline(always)] pub fn _from(bits: u8) -> OtghsulpienR { match bits { 1 => OtghsulpienR::Enabled, 0 => OtghsulpienR::Disabled, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `Enabled`"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == OtghsulpienR::Enabled } #[doc = "Checks if the value of the field is `Disabled`"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == OtghsulpienR::Disabled } } #[doc = "Possible values of the field `OTGHSEN`"] pub type OtghsenR = OtghsulpienR; #[doc = "Possible values of the field `ETHMACPTPEN`"] pub type EthmacptpenR = OtghsulpienR; #[doc = "Possible values of the field `ETHMACRXEN`"] pub type EthmacrxenR = OtghsulpienR; #[doc = "Possible values of the field `ETHMACTXEN`"] pub type EthmactxenR = OtghsulpienR; #[doc = "Possible values 
of the field `ETHMACEN`"] pub type EthmacenR = OtghsulpienR; #[doc = "Possible values of the field `DMA2DEN`"] pub type Dma2denR = OtghsulpienR; #[doc = "Possible values of the field `DMA2EN`"] pub type Dma2enR = OtghsulpienR; #[doc = "Possible values of the field `DMA1EN`"] pub type Dma1enR = OtghsulpienR; #[doc = "Possible values of the field `CCMDATARAMEN`"] pub type CcmdataramenR = OtghsulpienR; #[doc = "Possible values of the field `BKPSRAMEN`"] pub type BkpsramenR = OtghsulpienR; #[doc = "Possible values of the field `CRCEN`"] pub type CrcenR = OtghsulpienR; #[doc = "Possible values of the field `GPIOKEN`"] pub type GpiokenR = OtghsulpienR; #[doc = "Possible values of the field `GPIOJEN`"] pub type GpiojenR = OtghsulpienR; #[doc = "Possible values of the field `GPIOIEN`"] pub type GpioienR = OtghsulpienR; #[doc = "Possible values of the field `GPIOHEN`"] pub type GpiohenR = OtghsulpienR; #[doc = "Possible values of the field `GPIOGEN`"] pub type GpiogenR = OtghsulpienR; #[doc = "Possible values of the field `GPIOFEN`"] pub type GpiofenR = OtghsulpienR; #[doc = "Possible values of the field `GPIOEEN`"] pub type GpioeenR = OtghsulpienR; #[doc = "Possible values of the field `GPIODEN`"] pub type GpiodenR = OtghsulpienR; #[doc = "Possible values of the field `GPIOCEN`"] pub type GpiocenR = OtghsulpienR; #[doc = "Possible values of the field `GPIOBEN`"] pub type GpiobenR = OtghsulpienR; #[doc = "Possible values of the field `GPIOAEN`"] pub type GpioaenR = OtghsulpienR; #[doc = "Values that can be written to the field `OTGHSULPIEN`"] pub enum OtghsulpienW { #[doc = "Enabled."] Enabled, #[doc = "Disabled."] Disabled, } impl OtghsulpienW { #[allow(missing_docs)] #[doc(hidden)] #[inline(always)] pub fn _bits(&self) -> u8 { match *self { OtghsulpienW::Enabled => 1, OtghsulpienW::Disabled => 0, } } } #[doc = r" Proxy"] pub struct _OtghsulpienW<'a> { w: &'a mut W, } impl<'a> _OtghsulpienW<'a> { #[doc = r" Writes `variant` to the field"] #[inline(always)] pub fn 
variant(self, variant: OtghsulpienW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Enabled."] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(OtghsulpienW::Enabled) } #[doc = "Disabled."] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(OtghsulpienW::Disabled) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 1; const OFFSET: u8 = 30; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `OTGHSEN`"] pub type OtghsenW = OtghsulpienW; #[doc = r" Proxy"] pub struct _OtghsenW<'a> { w: &'a mut W, } impl<'a> _OtghsenW<'a> { #[doc = r" Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: OtghsenW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Enabled."] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(OtghsulpienW::Enabled) } #[doc = "Disabled."] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(OtghsulpienW::Disabled) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 1; const OFFSET: u8 = 29; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `ETHMACPTPEN`"] pub type EthmacptpenW = OtghsulpienW; #[doc = r" Proxy"] pub struct _EthmacptpenW<'a> { w: &'a mut W, } impl<'a> _EthmacptpenW<'a> { #[doc = r" Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: EthmacptpenW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Enabled."] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(OtghsulpienW::Enabled) } #[doc = "Disabled."] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(OtghsulpienW::Disabled) } #[doc = r" Writes raw bits to the field"] 
#[inline(always)] pub fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 1; const OFFSET: u8 = 28; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `ETHMACRXEN`"] pub type EthmacrxenW = OtghsulpienW; #[doc = r" Proxy"] pub struct _EthmacrxenW<'a> { w: &'a mut W, } impl<'a> _EthmacrxenW<'a> { #[doc = r" Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: EthmacrxenW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Enabled."] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(OtghsulpienW::Enabled) } #[doc = "Disabled."] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(OtghsulpienW::Disabled) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 1; const OFFSET: u8 = 27; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `ETHMACTXEN`"] pub type EthmactxenW = OtghsulpienW; #[doc = r" Proxy"] pub struct _EthmactxenW<'a> { w: &'a mut W, } impl<'a> _EthmactxenW<'a> { #[doc = r" Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: EthmactxenW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Enabled."] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(OtghsulpienW::Enabled) } #[doc = "Disabled."] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(OtghsulpienW::Disabled) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 1; const OFFSET: u8 = 26; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `ETHMACEN`"] pub type EthmacenW = OtghsulpienW; #[doc = r" Proxy"] pub struct _EthmacenW<'a> { w: 
&'a mut W, } impl<'a> _EthmacenW<'a> { #[doc = r" Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: EthmacenW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Enabled."] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(OtghsulpienW::Enabled) } #[doc = "Disabled."] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(OtghsulpienW::Disabled) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 1; const OFFSET: u8 = 25; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `DMA2DEN`"] pub type Dma2denW = OtghsulpienW; #[doc = r" Proxy"] pub struct _Dma2denW<'a> { w: &'a mut W, } impl<'a> _Dma2denW<'a> { #[doc = r" Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: Dma2denW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Enabled."] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(OtghsulpienW::Enabled) } #[doc = "Disabled."] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(OtghsulpienW::Disabled) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 1; const OFFSET: u8 = 23; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `DMA2EN`"] pub type Dma2enW = OtghsulpienW; #[doc = r" Proxy"] pub struct _Dma2enW<'a> { w: &'a mut W, } impl<'a> _Dma2enW<'a> { #[doc = r" Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: Dma2enW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Enabled."] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(OtghsulpienW::Enabled) } #[doc = "Disabled."] #[inline(always)] pub fn disabled(self) -> &'a mut W { 
self.variant(OtghsulpienW::Disabled) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 1; const OFFSET: u8 = 22; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `DMA1EN`"] pub type Dma1enW = OtghsulpienW; #[doc = r" Proxy"] pub struct _Dma1enW<'a> { w: &'a mut W, } impl<'a> _Dma1enW<'a> { #[doc = r" Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: Dma1enW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Enabled."] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(OtghsulpienW::Enabled) } #[doc = "Disabled."] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(OtghsulpienW::Disabled) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 1; const OFFSET: u8 = 21; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `CCMDATARAMEN`"] pub type CcmdataramenW = OtghsulpienW; #[doc = r" Proxy"] pub struct _CcmdataramenW<'a> { w: &'a mut W, } impl<'a> _CcmdataramenW<'a> { #[doc = r" Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: CcmdataramenW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Enabled."] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(OtghsulpienW::Enabled) } #[doc = "Disabled."] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(OtghsulpienW::Disabled) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 1; const OFFSET: u8 = 20; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `BKPSRAMEN`"] pub type 
BkpsramenW = OtghsulpienW; #[doc = r" Proxy"] pub struct _BkpsramenW<'a> { w: &'a mut W, } impl<'a> _BkpsramenW<'a> { #[doc = r" Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: BkpsramenW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Enabled."] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(OtghsulpienW::Enabled) } #[doc = "Disabled."] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(OtghsulpienW::Disabled) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 1; const OFFSET: u8 = 18; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `CRCEN`"] pub type CrcenW = OtghsulpienW; #[doc = r" Proxy"] pub struct _CrcenW<'a> { w: &'a mut W, } impl<'a> _CrcenW<'a> { #[doc = r" Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: CrcenW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Enabled."] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(OtghsulpienW::Enabled) } #[doc = "Disabled."] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(OtghsulpienW::Disabled) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 1; const OFFSET: u8 = 12; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `GPIOKEN`"] pub type GpiokenW = OtghsulpienW; #[doc = r" Proxy"] pub struct _GpiokenW<'a> { w: &'a mut W, } impl<'a> _GpiokenW<'a> { #[doc = r" Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: GpiokenW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Enabled."] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(OtghsulpienW::Enabled) } #[doc = 
"Disabled."] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(OtghsulpienW::Disabled) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 1; const OFFSET: u8 = 10; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `GPIOJEN`"] pub type GpiojenW = OtghsulpienW; #[doc = r" Proxy"] pub struct _GpiojenW<'a> { w: &'a mut W, } impl<'a> _GpiojenW<'a> { #[doc = r" Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: GpiojenW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Enabled."] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(OtghsulpienW::Enabled) } #[doc = "Disabled."] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(OtghsulpienW::Disabled) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 1; const OFFSET: u8 = 9; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `GPIOIEN`"] pub type GpioienW = OtghsulpienW; #[doc = r" Proxy"] pub struct _GpioienW<'a> { w: &'a mut W, } impl<'a> _GpioienW<'a> { #[doc = r" Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: GpioienW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Enabled."] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(OtghsulpienW::Enabled) } #[doc = "Disabled."] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(OtghsulpienW::Disabled) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 1; const OFFSET: u8 = 8; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be 
written to the field `GPIOHEN`"] pub type GpiohenW = OtghsulpienW; #[doc = r" Proxy"] pub struct _GpiohenW<'a> { w: &'a mut W, } impl<'a> _GpiohenW<'a> { #[doc = r" Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: GpiohenW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Enabled."] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(OtghsulpienW::Enabled) } #[doc = "Disabled."] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(OtghsulpienW::Disabled) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 1; const OFFSET: u8 = 7; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `GPIOGEN`"] pub type GpiogenW = OtghsulpienW; #[doc = r" Proxy"] pub struct _GpiogenW<'a> { w: &'a mut W, } impl<'a> _GpiogenW<'a> { #[doc = r" Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: GpiogenW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Enabled."] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(OtghsulpienW::Enabled) } #[doc = "Disabled."] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(OtghsulpienW::Disabled) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 1; const OFFSET: u8 = 6; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `GPIOFEN`"] pub type GpiofenW = OtghsulpienW; #[doc = r" Proxy"] pub struct _GpiofenW<'a> { w: &'a mut W, } impl<'a> _GpiofenW<'a> { #[doc = r" Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: GpiofenW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Enabled."] #[inline(always)] pub fn enabled(self) -> &'a mut W { 
self.variant(OtghsulpienW::Enabled) } #[doc = "Disabled."] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(OtghsulpienW::Disabled) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 1; const OFFSET: u8 = 5; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `GPIOEEN`"] pub type GpioeenW = OtghsulpienW; #[doc = r" Proxy"] pub struct _GpioeenW<'a> { w: &'a mut W, } impl<'a> _GpioeenW<'a> { #[doc = r" Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: GpioeenW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Enabled."] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(OtghsulpienW::Enabled) } #[doc = "Disabled."] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(OtghsulpienW::Disabled) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 1; const OFFSET: u8 = 4; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `GPIODEN`"] pub type GpiodenW = OtghsulpienW; #[doc = r" Proxy"] pub struct _GpiodenW<'a> { w: &'a mut W, } impl<'a> _GpiodenW<'a> { #[doc = r" Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: GpiodenW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Enabled."] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(OtghsulpienW::Enabled) } #[doc = "Disabled."] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(OtghsulpienW::Disabled) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 1; const OFFSET: u8 = 3; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << 
OFFSET; self.w } } #[doc = "Values that can be written to the field `GPIOCEN`"] pub type GpiocenW = OtghsulpienW; #[doc = r" Proxy"] pub struct _GpiocenW<'a> { w: &'a mut W, } impl<'a> _GpiocenW<'a> { #[doc = r" Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: GpiocenW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Enabled."] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(OtghsulpienW::Enabled) } #[doc = "Disabled."] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(OtghsulpienW::Disabled) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 1; const OFFSET: u8 = 2; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `GPIOBEN`"] pub type GpiobenW = OtghsulpienW; #[doc = r" Proxy"] pub struct _GpiobenW<'a> { w: &'a mut W, } impl<'a> _GpiobenW<'a> { #[doc = r" Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: GpiobenW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Enabled."] #[inline(always)] pub fn enabled(self) -> &'a mut W { self.variant(OtghsulpienW::Enabled) } #[doc = "Disabled."] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(OtghsulpienW::Disabled) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 1; const OFFSET: u8 = 1; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `GPIOAEN`"] pub type GpioaenW = OtghsulpienW; #[doc = r" Proxy"] pub struct _GpioaenW<'a> { w: &'a mut W, } impl<'a> _GpioaenW<'a> { #[doc = r" Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: GpioaenW) -> &'a mut W { { self.bits(variant._bits()) } } #[doc = "Enabled."] #[inline(always)] 
pub fn enabled(self) -> &'a mut W { self.variant(OtghsulpienW::Enabled) } #[doc = "Disabled."] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(OtghsulpienW::Disabled) } #[doc = r" Writes raw bits to the field"] #[inline(always)] pub fn bits(self, bits: u8) -> &'a mut W { const MASK: u8 = 1; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((bits & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline(always)] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bit 30 - USB OTG HSULPI clock enable"] #[inline(always)] pub fn otghsulpien(&self) -> OtghsulpienR { OtghsulpienR::_from({ const MASK: u8 = 1; const OFFSET: u8 = 30; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 29 - USB OTG HS clock enable"] #[inline(always)] pub fn otghsen(&self) -> OtghsenR { OtghsenR::_from({ const MASK: u8 = 1; const OFFSET: u8 = 29; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 28 - Ethernet PTP clock enable"] #[inline(always)] pub fn ethmacptpen(&self) -> EthmacptpenR { EthmacptpenR::_from({ const MASK: u8 = 1; const OFFSET: u8 = 28; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 27 - Ethernet Reception clock enable"] #[inline(always)] pub fn ethmacrxen(&self) -> EthmacrxenR { EthmacrxenR::_from({ const MASK: u8 = 1; const OFFSET: u8 = 27; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 26 - Ethernet Transmission clock enable"] #[inline(always)] pub fn ethmactxen(&self) -> EthmactxenR { EthmactxenR::_from({ const MASK: u8 = 1; const OFFSET: u8 = 26; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 25 - Ethernet MAC clock enable"] #[inline(always)] pub fn ethmacen(&self) -> EthmacenR { EthmacenR::_from({ const MASK: u8 = 1; const OFFSET: u8 = 25; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 23 - DMA2D clock enable"] #[inline(always)] pub fn dma2den(&self) -> Dma2denR { Dma2denR::_from({ const MASK: u8 = 
1; const OFFSET: u8 = 23; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 22 - DMA2 clock enable"] #[inline(always)] pub fn dma2en(&self) -> Dma2enR { Dma2enR::_from({ const MASK: u8 = 1; const OFFSET: u8 = 22; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 21 - DMA1 clock enable"] #[inline(always)] pub fn dma1en(&self) -> Dma1enR { Dma1enR::_from({ const MASK: u8 = 1; const OFFSET: u8 = 21; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 20 - CCM data RAM clock enable"] #[inline(always)] pub fn ccmdataramen(&self) -> CcmdataramenR { CcmdataramenR::_from({ const MASK: u8 = 1; const OFFSET: u8 = 20; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 18 - Backup SRAM interface clock enable"] #[inline(always)] pub fn bkpsramen(&self) -> BkpsramenR { BkpsramenR::_from({ const MASK: u8 = 1; const OFFSET: u8 = 18; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 12 - CRC clock enable"] #[inline(always)] pub fn crcen(&self) -> CrcenR { CrcenR::_from({ const MASK: u8 = 1; const OFFSET: u8 = 12; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 10 - IO port K clock enable"] #[inline(always)] pub fn gpioken(&self) -> GpiokenR { GpiokenR::_from({ const MASK: u8 = 1; const OFFSET: u8 = 10; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 9 - IO port J clock enable"] #[inline(always)] pub fn gpiojen(&self) -> GpiojenR { GpiojenR::_from({ const MASK: u8 = 1; const OFFSET: u8 = 9; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 8 - IO port I clock enable"] #[inline(always)] pub fn gpioien(&self) -> GpioienR { GpioienR::_from({ const MASK: u8 = 1; const OFFSET: u8 = 8; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 7 - IO port H clock enable"] #[inline(always)] pub fn gpiohen(&self) -> GpiohenR { GpiohenR::_from({ const MASK: u8 = 1; const OFFSET: u8 = 7; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 6 - IO port G clock enable"] #[inline(always)] 
pub fn gpiogen(&self) -> GpiogenR { GpiogenR::_from({ const MASK: u8 = 1; const OFFSET: u8 = 6; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 5 - IO port F clock enable"] #[inline(always)] pub fn gpiofen(&self) -> GpiofenR { GpiofenR::_from({ const MASK: u8 = 1; const OFFSET: u8 = 5; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 4 - IO port E clock enable"] #[inline(always)] pub fn gpioeen(&self) -> GpioeenR { GpioeenR::_from({ const MASK: u8 = 1; const OFFSET: u8 = 4; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 3 - IO port D clock enable"] #[inline(always)] pub fn gpioden(&self) -> GpiodenR { GpiodenR::_from({ const MASK: u8 = 1; const OFFSET: u8 = 3; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 2 - IO port C clock enable"] #[inline(always)] pub fn gpiocen(&self) -> GpiocenR { GpiocenR::_from({ const MASK: u8 = 1; const OFFSET: u8 = 2; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 1 - IO port B clock enable"] #[inline(always)] pub fn gpioben(&self) -> GpiobenR { GpiobenR::_from({ const MASK: u8 = 1; const OFFSET: u8 = 1; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 0 - IO port A clock enable"] #[inline(always)] pub fn gpioaen(&self) -> GpioaenR { GpioaenR::_from({ const MASK: u8 = 1; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } } impl W { #[doc = r" Reset value of the register"] #[inline(always)] pub fn reset_value() -> W { W { bits: 1048576 } } #[doc = r" Writes raw bits to the register"] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bit 30 - USB OTG HSULPI clock enable"] #[inline(always)] pub fn otghsulpien(&mut self) -> _OtghsulpienW { _OtghsulpienW { w: self } } #[doc = "Bit 29 - USB OTG HS clock enable"] #[inline(always)] pub fn otghsen(&mut self) -> _OtghsenW { _OtghsenW { w: self } } #[doc = "Bit 28 - Ethernet PTP clock enable"] #[inline(always)] pub fn ethmacptpen(&mut 
self) -> _EthmacptpenW { _EthmacptpenW { w: self } } #[doc = "Bit 27 - Ethernet Reception clock enable"] #[inline(always)] pub fn ethmacrxen(&mut self) -> _EthmacrxenW { _EthmacrxenW { w: self } } #[doc = "Bit 26 - Ethernet Transmission clock enable"] #[inline(always)] pub fn ethmactxen(&mut self) -> _EthmactxenW { _EthmactxenW { w: self } } #[doc = "Bit 25 - Ethernet MAC clock enable"] #[inline(always)] pub fn ethmacen(&mut self) -> _EthmacenW { _EthmacenW { w: self } } #[doc = "Bit 23 - DMA2D clock enable"] #[inline(always)] pub fn dma2den(&mut self) -> _Dma2denW { _Dma2denW { w: self } } #[doc = "Bit 22 - DMA2 clock enable"] #[inline(always)] pub fn dma2en(&mut self) -> _Dma2enW { _Dma2enW { w: self } } #[doc = "Bit 21 - DMA1 clock enable"] #[inline(always)] pub fn dma1en(&mut self) -> _Dma1enW { _Dma1enW { w: self } } #[doc = "Bit 20 - CCM data RAM clock enable"] #[inline(always)] pub fn ccmdataramen(&mut self) -> _CcmdataramenW { _CcmdataramenW { w: self } } #[doc = "Bit 18 - Backup SRAM interface clock enable"] #[inline(always)] pub fn bkpsramen(&mut self) -> _BkpsramenW { _BkpsramenW { w: self } } #[doc = "Bit 12 - CRC clock enable"] #[inline(always)] pub fn crcen(&mut self) -> _CrcenW { _CrcenW { w: self } } #[doc = "Bit 10 - IO port K clock enable"] #[inline(always)] pub fn gpioken(&mut self) -> _GpiokenW { _GpiokenW { w: self } } #[doc = "Bit 9 - IO port J clock enable"] #[inline(always)] pub fn gpiojen(&mut self) -> _GpiojenW { _GpiojenW { w: self } } #[doc = "Bit 8 - IO port I clock enable"] #[inline(always)] pub fn gpioien(&mut self) -> _GpioienW { _GpioienW { w: self } } #[doc = "Bit 7 - IO port H clock enable"] #[inline(always)] pub fn gpiohen(&mut self) -> _GpiohenW { _GpiohenW { w: self } } #[doc = "Bit 6 - IO port G clock enable"] #[inline(always)] pub fn gpiogen(&mut self) -> _GpiogenW { _GpiogenW { w: self } } #[doc = "Bit 5 - IO port F clock enable"] #[inline(always)] pub fn gpiofen(&mut self) -> _GpiofenW { _GpiofenW { w: self } } #[doc = "Bit 
4 - IO port E clock enable"] #[inline(always)] pub fn gpioeen(&mut self) -> _GpioeenW { _GpioeenW { w: self } } #[doc = "Bit 3 - IO port D clock enable"] #[inline(always)] pub fn gpioden(&mut self) -> _GpiodenW { _GpiodenW { w: self } } #[doc = "Bit 2 - IO port C clock enable"] #[inline(always)] pub fn gpiocen(&mut self) -> _GpiocenW { _GpiocenW { w: self } } #[doc = "Bit 1 - IO port B clock enable"] #[inline(always)] pub fn gpioben(&mut self) -> _GpiobenW { _GpiobenW { w: self } } #[doc = "Bit 0 - IO port A clock enable"] #[inline(always)] pub fn gpioaen(&mut self) -> _GpioaenW { _GpioaenW { w: self } } }
29.141494
65
0.533088
916ed1009955987268df7dbbfcb2c58fce2e4595
15,071
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::FUNC_SEL_P7 { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = "Possible values of the field `pin0`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PIN0R { #[doc = r" Reserved"] _Reserved(u8), } impl PIN0R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { match *self { PIN0R::_Reserved(bits) => bits, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: u8) -> PIN0R { match value { i => PIN0R::_Reserved(i), } } } #[doc = "Possible values of the field `pin1`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PIN1R { #[doc = r" Reserved"] _Reserved(u8), } impl PIN1R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { match *self { PIN1R::_Reserved(bits) => bits, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: u8) -> PIN1R { match value { i => PIN1R::_Reserved(i), } } } #[doc = "Possible values of the field `pin2`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PIN2R { #[doc = r" Reserved"] _Reserved(u8), } impl PIN2R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { match *self { PIN2R::_Reserved(bits) => bits, } } #[allow(missing_docs)] 
#[doc(hidden)] #[inline] pub fn _from(value: u8) -> PIN2R { match value { i => PIN2R::_Reserved(i), } } } #[doc = "Possible values of the field `pin3`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PIN3R { #[doc = r" Reserved"] _Reserved(u8), } impl PIN3R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { match *self { PIN3R::_Reserved(bits) => bits, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: u8) -> PIN3R { match value { i => PIN3R::_Reserved(i), } } } #[doc = "Possible values of the field `pin4`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PIN4R { #[doc = r" Reserved"] _Reserved(u8), } impl PIN4R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { match *self { PIN4R::_Reserved(bits) => bits, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: u8) -> PIN4R { match value { i => PIN4R::_Reserved(i), } } } #[doc = "Possible values of the field `pin5`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PIN5R { #[doc = r" Reserved"] _Reserved(u8), } impl PIN5R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { match *self { PIN5R::_Reserved(bits) => bits, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: u8) -> PIN5R { match value { i => PIN5R::_Reserved(i), } } } #[doc = "Possible values of the field `pin6`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PIN6R { #[doc = r" Reserved"] _Reserved(u8), } impl PIN6R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { match *self { PIN6R::_Reserved(bits) => bits, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: u8) -> PIN6R { match value { i => PIN6R::_Reserved(i), } } } #[doc = "Possible values of the field `pin7`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PIN7R { #[doc = r" Reserved"] _Reserved(u8), } impl PIN7R { #[doc = r" Value of the field as raw bits"] #[inline] pub fn 
bits(&self) -> u8 { match *self { PIN7R::_Reserved(bits) => bits, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: u8) -> PIN7R { match value { i => PIN7R::_Reserved(i), } } } #[doc = "Values that can be written to the field `pin0`"] pub enum PIN0W {} impl PIN0W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> u8 { match *self {} } } #[doc = r" Proxy"] pub struct _PIN0W<'a> { w: &'a mut W, } impl<'a> _PIN0W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: PIN0W) -> &'a mut W { unsafe { self.bits(variant._bits()) } } #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 15; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `pin1`"] pub enum PIN1W {} impl PIN1W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> u8 { match *self {} } } #[doc = r" Proxy"] pub struct _PIN1W<'a> { w: &'a mut W, } impl<'a> _PIN1W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: PIN1W) -> &'a mut W { unsafe { self.bits(variant._bits()) } } #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 15; const OFFSET: u8 = 4; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `pin2`"] pub enum PIN2W {} impl PIN2W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> u8 { match *self {} } } #[doc = r" Proxy"] pub struct _PIN2W<'a> { w: &'a mut W, } impl<'a> _PIN2W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: PIN2W) -> &'a mut W { unsafe { self.bits(variant._bits()) } } #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn 
bits(self, value: u8) -> &'a mut W { const MASK: u8 = 15; const OFFSET: u8 = 8; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `pin3`"] pub enum PIN3W {} impl PIN3W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> u8 { match *self {} } } #[doc = r" Proxy"] pub struct _PIN3W<'a> { w: &'a mut W, } impl<'a> _PIN3W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: PIN3W) -> &'a mut W { unsafe { self.bits(variant._bits()) } } #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 15; const OFFSET: u8 = 12; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `pin4`"] pub enum PIN4W {} impl PIN4W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> u8 { match *self {} } } #[doc = r" Proxy"] pub struct _PIN4W<'a> { w: &'a mut W, } impl<'a> _PIN4W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: PIN4W) -> &'a mut W { unsafe { self.bits(variant._bits()) } } #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 15; const OFFSET: u8 = 16; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `pin5`"] pub enum PIN5W {} impl PIN5W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> u8 { match *self {} } } #[doc = r" Proxy"] pub struct _PIN5W<'a> { w: &'a mut W, } impl<'a> _PIN5W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: PIN5W) -> &'a mut W { unsafe { self.bits(variant._bits()) } } #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: 
u8) -> &'a mut W { const MASK: u8 = 15; const OFFSET: u8 = 20; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `pin6`"] pub enum PIN6W {} impl PIN6W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> u8 { match *self {} } } #[doc = r" Proxy"] pub struct _PIN6W<'a> { w: &'a mut W, } impl<'a> _PIN6W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: PIN6W) -> &'a mut W { unsafe { self.bits(variant._bits()) } } #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 15; const OFFSET: u8 = 24; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `pin7`"] pub enum PIN7W {} impl PIN7W { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> u8 { match *self {} } } #[doc = r" Proxy"] pub struct _PIN7W<'a> { w: &'a mut W, } impl<'a> _PIN7W<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: PIN7W) -> &'a mut W { unsafe { self.bits(variant._bits()) } } #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 15; const OFFSET: u8 = 28; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bits 0:3 - P7.0 Output Function Select"] #[inline] pub fn pin0(&self) -> PIN0R { PIN0R::_from({ const MASK: u8 = 15; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bits 4:7 - P7.1 Output Function Select"] #[inline] pub fn pin1(&self) -> PIN1R { PIN1R::_from({ const MASK: u8 = 15; const OFFSET: u8 = 4; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = 
"Bits 8:11 - P7.2 Output Function Select"] #[inline] pub fn pin2(&self) -> PIN2R { PIN2R::_from({ const MASK: u8 = 15; const OFFSET: u8 = 8; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bits 12:15 - P7.3 Output Function Select"] #[inline] pub fn pin3(&self) -> PIN3R { PIN3R::_from({ const MASK: u8 = 15; const OFFSET: u8 = 12; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bits 16:19 - P7.4 Output Function Select"] #[inline] pub fn pin4(&self) -> PIN4R { PIN4R::_from({ const MASK: u8 = 15; const OFFSET: u8 = 16; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bits 20:23 - P7.5 Output Function Select"] #[inline] pub fn pin5(&self) -> PIN5R { PIN5R::_from({ const MASK: u8 = 15; const OFFSET: u8 = 20; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bits 24:27 - P7.6 Output Function Select"] #[inline] pub fn pin6(&self) -> PIN6R { PIN6R::_from({ const MASK: u8 = 15; const OFFSET: u8 = 24; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bits 28:31 - P7.7 Output Function Select"] #[inline] pub fn pin7(&self) -> PIN7R { PIN7R::_from({ const MASK: u8 = 15; const OFFSET: u8 = 28; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bits 0:3 - P7.0 Output Function Select"] #[inline] pub fn pin0(&mut self) -> _PIN0W { _PIN0W { w: self } } #[doc = "Bits 4:7 - P7.1 Output Function Select"] #[inline] pub fn pin1(&mut self) -> _PIN1W { _PIN1W { w: self } } #[doc = "Bits 8:11 - P7.2 Output Function Select"] #[inline] pub fn pin2(&mut self) -> _PIN2W { _PIN2W { w: self } } #[doc = "Bits 12:15 - P7.3 Output Function Select"] #[inline] pub fn pin3(&mut self) -> _PIN3W { _PIN3W { w: self } } #[doc = "Bits 16:19 - P7.4 Output Function Select"] #[inline] pub fn pin4(&mut self) -> _PIN4W { 
_PIN4W { w: self } } #[doc = "Bits 20:23 - P7.5 Output Function Select"] #[inline] pub fn pin5(&mut self) -> _PIN5W { _PIN5W { w: self } } #[doc = "Bits 24:27 - P7.6 Output Function Select"] #[inline] pub fn pin6(&mut self) -> _PIN6W { _PIN6W { w: self } } #[doc = "Bits 28:31 - P7.7 Output Function Select"] #[inline] pub fn pin7(&mut self) -> _PIN7W { _PIN7W { w: self } } }
25.076539
59
0.505607
1a6d13be123c4ea7315085228aeaece1ffaa21a2
2,031
// Copyright 2019 Twitter, Inc // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use datastructures::Counter; use libc::uint64_t; /// Create a new `Counter` #[no_mangle] pub extern "C" fn counter_new() -> *mut Counter<u64> { Box::into_raw(Box::new(Counter::<u64>::default())) } /// Clear the count stored in the `Counter` #[no_mangle] pub unsafe extern "C" fn counter_reset(ptr: *mut Counter<u64>) { let counter = { assert!(!ptr.is_null()); &mut *ptr }; counter.reset(); } /// Get the count stored in the `Counter` #[no_mangle] pub unsafe extern "C" fn counter_count(ptr: *mut Counter<u64>) -> uint64_t { let counter = { assert!(!ptr.is_null()); &mut *ptr }; counter.get() } /// Decrement the value of the `Counter` by count #[no_mangle] pub unsafe extern "C" fn counter_decr(ptr: *mut Counter<u64>, count: uint64_t) { let counter = { assert!(!ptr.is_null()); &mut *ptr }; counter.decrement(count); } /// Free the `Counter` #[no_mangle] pub unsafe extern "C" fn counter_free(ptr: *mut Counter<u64>) { if ptr.is_null() { return; } Box::from_raw(ptr); } /// Increment the value of the `Counter` by count #[no_mangle] pub unsafe extern "C" fn counter_incr(ptr: *mut Counter<u64>, count: uint64_t) { let counter = { assert!(!ptr.is_null()); &mut *ptr }; counter.increment(count); } #[allow(dead_code)] pub extern "C" fn fix_linking_when_not_using_stdlib() { panic!() }
26.376623
80
0.649434
229929483b0545caeade7cac53661bcd471f069a
255
// resources/settings/camera.ron use crate::components::prelude::Size; #[derive(Deserialize, Clone)] #[serde(deny_unknown_fields)] pub struct CameraSettings { pub z: f32, pub size: Size, pub follow_offset: (f32, f32), }
21.25
37
0.643137
7299c2e3f5964fe2ee701773a8b8f286def8e0d6
18,002
use crate::actions::{self, Action, ActionsCtx}; use crate::db::{Database, QueryUtils}; use crate::experiments::{CapLints, CrateSelect, Experiment, GitHubIssue, Mode, Status}; use crate::prelude::*; use crate::server::github::{GitHub, Issue, Repository}; use crate::server::messages::{Label, Message}; use crate::server::routes::webhooks::args::{ AbortArgs, CheckArgs, EditArgs, RetryArgs, RetryReportArgs, RunArgs, }; use crate::server::{Data, GithubData}; use crate::toolchain::Toolchain; use rustwide::Toolchain as RustwideToolchain; pub fn ping(data: &Data, github_data: &GithubData, issue: &Issue) -> Fallible<()> { Message::new() .line("ping_pong", "**Pong!**") .send(&issue.url, data, github_data)?; Ok(()) } pub fn check( host: &str, data: &Data, github_data: &GithubData, repo: &Repository, issue: &Issue, args: CheckArgs, ) -> Fallible<()> { run( host, data, github_data, repo, issue, RunArgs { mode: Some(Mode::CheckOnly), name: args.name, start: args.start, end: args.end, crates: args.crates, cap_lints: args.cap_lints, priority: args.priority, ignore_blacklist: args.ignore_blacklist, assign: args.assign, requirement: args.requirement, }, ) } pub fn run( host: &str, data: &Data, github_data: &GithubData, repo: &Repository, issue: &Issue, args: RunArgs, ) -> Fallible<()> { let name = setup_run_name(&data.db, issue, args.name)?; let mut message = Message::new().line( "ok_hand", format!("Experiment **`{}`** created and queued.", name), ); // Autodetect toolchains only if none of them was specified let (mut detected_start, mut detected_end) = (None, None); if args.start.is_none() && args.end.is_none() { if let Some(build) = crate::server::try_builds::get_sha(&data.db, &repo.full_name, issue.number)? 
{ detected_start = Some(Toolchain { source: RustwideToolchain::ci(&build.base_sha, false), rustflags: None, ci_try: false, patches: Vec::new(), }); detected_end = Some(Toolchain { source: RustwideToolchain::ci(&build.merge_sha, false), rustflags: None, ci_try: true, patches: Vec::new(), }); message = message.line( "robot", format!("Automatically detected try build {}", build.merge_sha), ); let pr_head = github_data .api .get_pr_head_sha(&repo.full_name, issue.number)?; let mut merge_commit = github_data .api .get_commit(&repo.full_name, &build.merge_sha)?; if merge_commit.parents.len() == 2 { // The first parent is the rust-lang/rust commit, and the second // parent (index 1) is the PR commit let old_pr_head = merge_commit.parents.remove(1).sha; if pr_head != old_pr_head { message = message.line( "warning", format!( "Try build based on commit {}, but latest commit is {}. Did you forget to make a new try build?", old_pr_head, pr_head ), ); } } else { message = message.line( "warning", format!("Unexpected parents for merge commit {}", build.merge_sha), ); } } } // Make crater runs created via webhook require linux by default. 
let requirement = args.requirement.unwrap_or_else(|| "linux".to_string()); let crates = args .crates .map(|c| c.resolve()) .transpose() .map_err(|e| e.context("Failed to resolve crate list"))?; actions::CreateExperiment { name: name.clone(), toolchains: [ args.start .or(detected_start) .ok_or_else(|| err_msg("missing start toolchain"))?, args.end .or(detected_end) .ok_or_else(|| err_msg("missing end toolchain"))?, ], mode: args.mode.unwrap_or(Mode::BuildAndTest), crates: crates.unwrap_or(CrateSelect::Full), cap_lints: args.cap_lints.unwrap_or(CapLints::Forbid), priority: args.priority.unwrap_or(0), github_issue: Some(GitHubIssue { api_url: issue.url.clone(), html_url: issue.html_url.clone(), number: issue.number, }), ignore_blacklist: args.ignore_blacklist.unwrap_or(false), assign: args.assign, requirement: Some(requirement), } .apply(&ActionsCtx::new(&data.db, &data.config))?; message .line( "mag", format!( "You can check out [the queue](https://{}) and [this experiment's details](https://{0}/ex/{1}).", host, name ), ).set_label(Label::ExperimentQueued) .send(&issue.url, data,github_data)?; Ok(()) } pub fn edit(data: &Data, github_data: &GithubData, issue: &Issue, args: EditArgs) -> Fallible<()> { let name = get_name(&data.db, issue, args.name)?; let crates = args .crates .map(|c| c.resolve()) .transpose() .map_err(|e| e.context("Failed to resolve crate list"))?; actions::EditExperiment { name: name.clone(), toolchains: [args.start, args.end], crates, mode: args.mode, cap_lints: args.cap_lints, priority: args.priority, ignore_blacklist: args.ignore_blacklist, assign: args.assign, requirement: args.requirement, } .apply(&ActionsCtx::new(&data.db, &data.config))?; Message::new() .line( "memo", format!("Configuration of the **`{}`** experiment changed.", name), ) .send(&issue.url, data, github_data)?; Ok(()) } pub fn retry_report( data: &Data, github_data: &GithubData, issue: &Issue, args: RetryReportArgs, ) -> Fallible<()> { let name = get_name(&data.db, issue, 
args.name)?; if let Some(mut experiment) = Experiment::get(&data.db, &name)? { if experiment.status != Status::ReportFailed && experiment.status != Status::GeneratingReport { bail!( "generation of the report of the **`{}`** experiment didn't fail!", name ); } experiment.set_status(&data.db, Status::NeedsReport)?; data.reports_worker.wake(); Message::new() .line( "hammer_and_wrench", format!("Generation of the report for **`{}`** queued again.", name), ) .set_label(Label::ExperimentQueued) .send(&issue.url, data, github_data)?; Ok(()) } else { bail!("an experiment named **`{}`** doesn't exist!", name); } } pub fn retry( data: &Data, github_data: &GithubData, issue: &Issue, args: RetryArgs, ) -> Fallible<()> { let name = get_name(&data.db, issue, args.name)?; if let Some(mut experiment) = Experiment::get(&data.db, &name)? { if experiment.status != Status::Failed { bail!("Experiment **`{}`** didn't fail!", name); } experiment.set_status(&data.db, Status::Queued)?; data.reports_worker.wake(); Message::new() .line( "hammer_and_wrench", format!("Experiment **`{}`** queued again.", name), ) .set_label(Label::ExperimentQueued) .send(&issue.url, data, github_data)?; Ok(()) } else { bail!("an experiment named **`{}`** doesn't exist!", name); } } pub fn abort( data: &Data, github_data: &GithubData, issue: &Issue, args: AbortArgs, ) -> Fallible<()> { let name = get_name(&data.db, issue, args.name)?; actions::DeleteExperiment { name: name.clone() } .apply(&ActionsCtx::new(&data.db, &data.config))?; Message::new() .line("wastebasket", format!("Experiment **`{}`** deleted!", name)) .set_label(Label::ExperimentCompleted) .send(&issue.url, data, github_data)?; Ok(()) } pub fn reload_acl(data: &Data, github_data: &GithubData, issue: &Issue) -> Fallible<()> { data.acl.refresh_cache(&github_data.api)?; Message::new() .line("hammer_and_wrench", "List of authorized users reloaded!") .send(&issue.url, data, github_data)?; Ok(()) } fn get_name(db: &Database, issue: &Issue, name: 
Option<String>) -> Fallible<String> { if let Some(name) = name { store_experiment_name(db, issue, &name)?; Ok(name) } else if let Some(default) = default_experiment_name(db, issue)? { Ok(default) } else { bail!("missing experiment name"); } } fn store_experiment_name(db: &Database, issue: &Issue, name: &str) -> Fallible<()> { // Store the provided experiment name to provide it automatically on next command // We don't have to worry about conflicts here since the table is defined with // ON CONFLICT IGNORE. db.execute( "INSERT INTO saved_names (issue, experiment) VALUES (?1, ?2);", &[&issue.number, &name], )?; Ok(()) } fn default_experiment_name(db: &Database, issue: &Issue) -> Fallible<Option<String>> { let name = db.get_row( "SELECT experiment FROM saved_names WHERE issue = ?1", &[&issue.number], |r| r.get(0), )?; Ok(if let Some(name) = name { Some(name) } else if issue.pull_request.is_some() { Some(format!("pr-{}", issue.number)) } else { None }) } /// Set up the name for a new run's experiment, including auto-incrementing generated names and /// storing experiment names in the database. fn setup_run_name(db: &Database, issue: &Issue, name: Option<String>) -> Fallible<String> { let name = if let Some(name) = name { name } else { generate_new_experiment_name(db, issue)? }; store_experiment_name(db, issue, &name)?; Ok(name) } /// Automatically generate experiment name, auto-incrementing to the first one which does not /// exist. E.g. if this function is passed the an issue `12345`, and experiment `pr-12345` /// exists, then this command returns Ok("pr-12345-1"). Does not store the result in the database. fn generate_new_experiment_name(db: &Database, issue: &Issue) -> Fallible<String> { let mut name = format!("pr-{}", issue.number); let mut idx = 1u16; while Experiment::exists(db, &name)? 
{ name = format!("pr-{}-{}", issue.number, idx); idx = idx .checked_add(1) .ok_or_else(|| err_msg("too many similarly-named pull requests"))?; } Ok(name) } #[cfg(test)] mod tests { use super::{ default_experiment_name, generate_new_experiment_name, get_name, setup_run_name, store_experiment_name, }; use crate::actions::{self, Action, ActionsCtx}; use crate::config::Config; use crate::db::Database; use crate::prelude::*; use crate::server::github; /// Simulate to the `run` command, and return experiment name fn dummy_run(db: &Database, issue: &github::Issue, name: Option<String>) -> Fallible<String> { let config = Config::default(); let ctx = ActionsCtx::new(db, &config); let name = setup_run_name(db, issue, name)?; actions::CreateExperiment::dummy(&name).apply(&ctx)?; Ok(name) } /// Simulate to the `edit` command, and return experiment name fn dummy_edit(db: &Database, issue: &github::Issue, name: Option<String>) -> Fallible<String> { let config = Config::default(); let ctx = ActionsCtx::new(db, &config); let name = get_name(db, issue, name)?; actions::EditExperiment::dummy(&name).apply(&ctx)?; Ok(name) } #[test] fn test_default_experiment_name() { let db = Database::temp().unwrap(); // With simple issues no default should be used let issue = github::Issue { number: 1, url: String::new(), html_url: String::new(), labels: Vec::new(), pull_request: None, }; assert!(default_experiment_name(&db, &issue).unwrap().is_none()); // With pull requests pr-{number} should be used let pr = github::Issue { number: 2, url: String::new(), html_url: String::new(), labels: Vec::new(), pull_request: Some(github::PullRequest { html_url: String::new(), }), }; assert_eq!( default_experiment_name(&db, &pr).unwrap().unwrap().as_str(), "pr-2" ); // With a saved experiment name that name should be returned store_experiment_name(&db, &pr, "foo").unwrap(); assert_eq!( default_experiment_name(&db, &pr).unwrap().unwrap().as_str(), "foo" ); } #[test] fn test_run() { let db = 
Database::temp().unwrap(); let pr1 = github::Issue { number: 1, url: String::new(), html_url: String::new(), labels: Vec::new(), pull_request: Some(github::PullRequest { html_url: String::new(), }), }; // test with supplied name assert_eq!( dummy_run(&db, &pr1, Some("pr-1".to_owned())).expect("dummy run failed"), "pr-1" ); // make sure it fails the second time assert!(dummy_run(&db, &pr1, Some("pr-1".to_owned())).is_err(),); let pr2 = github::Issue { number: 2, url: String::new(), html_url: String::new(), labels: Vec::new(), pull_request: Some(github::PullRequest { html_url: String::new(), }), }; // test with default-generated name assert_eq!( dummy_run(&db, &pr2, None).expect("dummy run failed"), "pr-2" ); // make sure it increments correctly assert_eq!( dummy_run(&db, &pr2, None).expect("dummy run failed"), "pr-2-1" ); // make sure we don't get e.g. pr-2-1-1 assert_eq!( dummy_run(&db, &pr2, None).expect("dummy run failed"), "pr-2-2" ); // make sure we can manually supply name and then continue incrementing assert_eq!( dummy_run(&db, &pr1, Some("pr-2-custom".to_owned())).expect("dummy run failed"), "pr-2-custom" ); assert_eq!( dummy_run(&db, &pr2, None).expect("dummy run failed"), "pr-2-3" ); } #[test] fn test_edit() { let db = Database::temp().unwrap(); // test retrieval of name generated in a supplied-name run let pr1 = github::Issue { number: 1, url: String::new(), html_url: String::new(), labels: Vec::new(), pull_request: Some(github::PullRequest { html_url: String::new(), }), }; assert_eq!( dummy_run(&db, &pr1, Some("pr-1-custom".to_owned())).expect("dummy run failed"), "pr-1-custom" ); assert_eq!( dummy_edit(&db, &pr1, None).expect("dummy edit failed"), "pr-1-custom" ); // test retrieval of name generated in an auto-generated run let pr2 = github::Issue { number: 2, url: String::new(), html_url: String::new(), labels: Vec::new(), pull_request: Some(github::PullRequest { html_url: String::new(), }), }; assert_eq!( dummy_run(&db, &pr2, None).expect("dummy run 
failed"), "pr-2" ); // make sure edit doesn't change name assert_eq!( dummy_edit(&db, &pr2, None).expect("dummy edit failed"), "pr-2" ); // test idempotence assert_eq!( dummy_edit(&db, &pr2, None).expect("dummy edit failed"), "pr-2" ); // test that name incrementing is reflected here assert_eq!( dummy_run(&db, &pr2, None).expect("dummy run failed"), "pr-2-1" ); assert_eq!( dummy_edit(&db, &pr2, None).expect("dummy edit failed"), "pr-2-1" ); } #[test] fn test_generate_new_experiment_name() { let db = Database::temp().unwrap(); let config = Config::default(); let ctx = ActionsCtx::new(&db, &config); let pr = github::Issue { number: 12345, url: String::new(), html_url: String::new(), labels: Vec::new(), pull_request: Some(github::PullRequest { html_url: String::new(), }), }; actions::CreateExperiment::dummy("pr-12345") .apply(&ctx) .expect("could not store dummy experiment"); let new_name = generate_new_experiment_name(&db, &pr).unwrap(); assert_eq!(new_name, "pr-12345-1"); actions::CreateExperiment::dummy("pr-12345-1") .apply(&ctx) .expect("could not store dummy experiment"); assert_eq!( &generate_new_experiment_name(&db, &pr).unwrap(), "pr-12345-2" ); } }
31.975133
125
0.537996
163e4da7275f5a24ea60aa9f1ba7674a06a432ea
7,705
use crate::hash_path::path::Component; use crate::hash_path::path::Path; use crate::prelude::*; use holochain_wasmer_guest::*; /// This is the root of the [ `Path` ] tree. /// /// Forms the entry point to all anchors so that agents can navigate down the tree from here. /// /// The string "hdkanchor". pub const ROOT: &str = "hdkanchor"; #[derive(PartialEq, SerializedBytes, serde::Serialize, serde::Deserialize, Debug, Clone)] /// An anchor can only be 1 or 2 levels deep as "type" and "text". /// /// The second level is optional and the Strings use the standard [ `TryInto` ] for path [ `Component` ] internally. /// /// __Anchors are required to be included in an application's [ `entry_defs` ]__ callback and so implement all the standard methods. /// Technically the [ `Anchor` ] entry definition is the [ `Path` ] definition. /// /// e.g. `entry_defs![Anchor::entry_def()]` /// /// The methods implemented on anchor follow the patterns that predate the Path module but `Path::from(&anchor)` is always possible to use the newer APIs. pub struct Anchor { pub anchor_type: String, pub anchor_text: Option<String>, } // Provide all the default entry conventions for anchors. entry_def!(Anchor Path::entry_def()); /// Anchors are just a special case of path, so we can move from anchor to path losslessly. /// We simply format the anchor structure into a string that works with the path string handling. impl From<&Anchor> for Path { fn from(anchor: &Anchor) -> Self { Self::from(&format!( "{1}{0}{2}{0}{3}", crate::hash_path::path::DELIMITER, ROOT, anchor.anchor_type, anchor.anchor_text.as_ref().unwrap_or(&String::default()) )) } } /// Paths are more general than anchors so a path could be represented that is not a valid anchor. /// The obvious example would be a path of binary data that is not valid utf-8 strings or a path /// that is more than 2 levels deep. 
impl TryFrom<&Path> for Anchor { type Error = SerializedBytesError; fn try_from(path: &Path) -> Result<Self, Self::Error> { let components: Vec<Component> = path.as_ref().to_owned(); if components.len() == 2 || components.len() == 3 { if components[0] == Component::from(ROOT) { Ok(Anchor { anchor_type: (&components[1]).try_into()?, anchor_text: { match components.get(2) { Some(component) => Some(component.try_into()?), None => None, } }, }) } else { Err(SerializedBytesError::Deserialize(format!( "Bad anchor path root {:0?} should be {:1?}", components[0].as_ref(), ROOT.as_bytes(), ))) } } else { Err(SerializedBytesError::Deserialize(format!( "Bad anchor path length {}", components.len() ))) } } } /// Simple string interface to simple string based paths. /// a.k.a "the anchor pattern" that predates paths by a few years. pub fn anchor(anchor_type: String, anchor_text: String) -> ExternResult<holo_hash::EntryHash> { let path: Path = (&Anchor { anchor_type, anchor_text: Some(anchor_text), }) .into(); path.ensure()?; path.hash() } /// Attempt to get an anchor by its hash. /// Returns None if the hash doesn't point to an anchor. /// We can't do anything fancy like ensure the anchor if not exists because we only have a hash. pub fn get_anchor(anchor_address: EntryHash) -> ExternResult<Option<Anchor>> { Ok( match crate::prelude::get(anchor_address, GetOptions::content())?.and_then(|el| el.into()) { Some(Entry::App(eb)) => { let path = Path::try_from(SerializedBytes::from(eb))?; Some(Anchor::try_from(&path)?) } _ => None, }, ) } /// Returns every entry hash in a vector from the root of an anchor. /// Hashes are sorted in the same way that paths sort children. pub fn list_anchor_type_addresses() -> ExternResult<Vec<EntryHash>> { let links = Path::from(ROOT) .children()? .into_iter() .map(|link| link.target) .collect(); Ok(links) } /// Returns every entry hash in a vector from the second level of an anchor. /// Uses the string argument to build the path from the root. 
/// Hashes are sorted in the same way that paths sort children. pub fn list_anchor_addresses(anchor_type: String) -> ExternResult<Vec<EntryHash>> { let path: Path = (&Anchor { anchor_type, anchor_text: None, }) .into(); path.ensure()?; let links = path .children()? .into_iter() .map(|link| link.target) .collect(); Ok(links) } /// Old version of holochain that anchors was designed for had two part link tags but now link /// tags are a single array of bytes, so to get an external interface that is somewhat backwards /// compatible we need to rebuild the anchors from the paths serialized into the links and then /// return them. pub fn list_anchor_tags(anchor_type: String) -> ExternResult<Vec<String>> { let path: Path = (&Anchor { anchor_type, anchor_text: None, }) .into(); path.ensure()?; let hopefully_anchor_tags: Result<Vec<String>, SerializedBytesError> = path .children()? .into_iter() .map(|link| match Path::try_from(&link.tag) { Ok(path) => match Anchor::try_from(&path) { Ok(anchor) => match anchor.anchor_text { Some(text) => Ok(text), None => Err(SerializedBytesError::Deserialize( "missing anchor text".into(), )), }, Err(e) => Err(e), }, Err(e) => Err(e), }) .collect(); let mut anchor_tags = hopefully_anchor_tags?; anchor_tags.sort(); anchor_tags.dedup(); Ok(anchor_tags) } #[cfg(test)] #[test] fn hash_path_root() { assert_eq!(ROOT, "hdkanchor"); } #[cfg(test)] #[test] fn hash_path_anchor_path() { for (atype, text, path_string) in vec![ ("foo", None, "hdkanchor.foo"), ("foo", Some("bar".to_string()), "hdkanchor.foo.bar"), ] { assert_eq!( Path::from(path_string), (&Anchor { anchor_type: atype.to_string(), anchor_text: text, }) .into(), ); } } #[cfg(test)] #[test] fn hash_path_anchor_entry_def() { assert_eq!(Path::entry_def_id(), Anchor::entry_def_id(),); assert_eq!(Path::crdt_type(), Anchor::crdt_type(),); assert_eq!(Path::required_validations(), Anchor::required_validations(),); assert_eq!(Path::entry_visibility(), Anchor::entry_visibility(),); 
assert_eq!(Path::entry_def(), Anchor::entry_def(),); } #[cfg(test)] #[test] fn hash_path_anchor_from_path() { let path = Path::from(vec![ Component::from(vec![ 104, 0, 0, 0, 100, 0, 0, 0, 107, 0, 0, 0, 97, 0, 0, 0, 110, 0, 0, 0, 99, 0, 0, 0, 104, 0, 0, 0, 111, 0, 0, 0, 114, 0, 0, 0, ]), Component::from(vec![102, 0, 0, 0, 111, 0, 0, 0, 111, 0, 0, 0]), Component::from(vec![98, 0, 0, 0, 97, 0, 0, 0, 114, 0, 0, 0]), ]); assert_eq!( Anchor::try_from(&path).unwrap(), Anchor { anchor_type: "foo".into(), anchor_text: Some("bar".into()), }, ); }
33.942731
154
0.578715
d541c9d46d9c077b026c02a8e3e119a475447c4e
358
use std::io; use crate::buffer::Buffer; #[derive(Debug)] pub enum State { Handshake, Play, Status, Login, } pub trait Packet: Sized { fn deserialize(buffer: &mut Buffer, state: &State) -> io::Result<Self>; fn serialize(&self, buffer: &mut Buffer) -> io::Result<()>; fn get_id(&self) -> i32; fn get_state(&self) -> State; }
18.842105
75
0.606145
69468fc9b7901679212b1e35b82054513a0cd319
2,375
//! Requests and Responses, communicated between `tab-cli` and `tab-daemon`. use crate::chunk::OutputChunk; use crate::{ chunk::InputChunk, tab::{CreateTabMetadata, TabId, TabMetadata}, }; use serde::{Deserialize, Serialize}; use std::collections::HashMap; /// A request, sent from a CLI connection to the daemon process. #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] pub enum Request { /// Subscribes to stdout/stderr on the given tab /// The WebSocket will produce a series of Chunk messages, /// The messages will have incrementing (but not sequential) indices. /// The messages may begin with data from the scrollback buffer Subscribe(TabId), /// Deactivates the subscription for the given tab. Unsubscribe(TabId), /// Sends the stdin data to the given tab Input(TabId, InputChunk), /// Terminates the shell on the given tab CreateTab(CreateTabMetadata), /// Resizes the given tab, to the provided (cols, rows) ResizeTab(TabId, (u16, u16)), /// Re-tasks clients with the tabid selected to the given tab Retask(TabId, TabId), /// Terminates the shell on the given tab CloseTab(TabId), /// Disconnects any sessions for the given tab DisconnectTab(TabId), /// Shuts down all tab processes, including the daemon and all ptys GlobalShutdown, } /// A response, sent from the daemon process to a connected CLI #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] pub enum Response { /// An initial 'hello' message with introductory state, including a full list of running tabs. Init(InitResponse), /// A raw output chunk, identified by a `TabId` and an index. Output(TabId, OutputChunk), /// A notification that metadata about a running tab has changed. TabUpdate(TabMetadata), /// A notification that the client is being re-tasks, and will now be serving the user on another tab. 
Retask(TabId), /// A notification that the active tab has been terminated TabTerminated(TabId), /// A notification that the client should disconnect Disconnect, } /// An initialization message sent to CLI connections. #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] pub struct InitResponse { /// A complete set of active tabs, identified by TabId values. pub tabs: HashMap<TabId, TabMetadata>, }
35.447761
106
0.706526
1d4241595eae3ef0af288c7f7853ea1310ac881b
2,402
use amethyst::{ core::{ArcThreadPool, SystemBundle}, ecs::prelude::{Dispatcher, DispatcherBuilder, System, World}, DataInit, Error, Result, }; pub struct CustomGameData<'a, 'b> { pub base: Dispatcher<'a, 'b>, pub running: Dispatcher<'a, 'b>, } impl<'a, 'b> CustomGameData<'a, 'b> { /// Update game data pub fn update(&mut self, world: &World, running: bool) { if running { self.running.dispatch(&world.res); } self.base.dispatch(&world.res); } } pub struct CustomGameDataBuilder<'a, 'b> { pub base: DispatcherBuilder<'a, 'b>, pub running: DispatcherBuilder<'a, 'b>, } impl<'a, 'b> Default for CustomGameDataBuilder<'a, 'b> { fn default() -> Self { CustomGameDataBuilder::new() } } impl<'a, 'b> CustomGameDataBuilder<'a, 'b> { pub fn new() -> Self { CustomGameDataBuilder { base: DispatcherBuilder::new(), running: DispatcherBuilder::new(), } } pub fn with_base<S>(mut self, system: S, name: &str, dependencies: &[&str]) -> Self where for<'c> S: System<'c> + Send + 'a, { self.base.add(system, name, dependencies); self } pub fn with_base_bundle<B>(mut self, bundle: B) -> Result<Self> where B: SystemBundle<'a, 'b>, { bundle .build(&mut self.base) .map_err(|err| Error::Core(err))?; Ok(self) } pub fn with_running<S>(mut self, system: S, name: &str, dependencies: &[&str]) -> Self where for<'c> S: System<'c> + Send + 'a, { self.running.add(system, name, dependencies); self } } impl<'a, 'b> DataInit<CustomGameData<'a, 'b>> for CustomGameDataBuilder<'a, 'b> { fn build(self, world: &mut World) -> CustomGameData<'a, 'b> { #[cfg(not(no_threading))] let pool = world.read_resource::<ArcThreadPool>().clone(); #[cfg(not(no_threading))] let mut base = self.base.with_pool(pool.clone()).build(); #[cfg(no_threading)] let mut base = self.base.build(); base.setup(&mut world.res); #[cfg(not(no_threading))] let mut running = self.running.with_pool(pool.clone()).build(); #[cfg(no_threading)] let mut running = self.running.build(); running.setup(&mut world.res); CustomGameData { base, running } } }
27.295455
90
0.568276
799ae131cd26b071a04bfe0e7fa9ab43268d3a62
2,272
/* Hello, Rust! This file aims to explore the Rust programming language. Just to play around, of course, and have a simple compiled list of the syntax of Rust! */ /* The main() function is the main entry point of every Rust program; this shouldn't be too surprising since many programming languages do this too. */ fn main() { example_function(); hello_world(); constants_and_variables(); } // =================== // FUNCTIONS // =================== /* Declaring a function is as simple as using the `fn` keyword followed by the name of the function. Interestingly, the compiler warns you if your function name is non-snake case. I assume that the de facto standard of Rust function names is the snake case convention! */ fn example_function() { // Enter code here } fn hello_world() { /* println!() is the equivalent of outputting content. Since you're familiar with Python, it's the equivalent of Python's print() command! */ println!("Hello, world!"); } fn constants_and_variables() { /* In Rust, both constants and variables are defined with the `let` keyword. A little similar to Swift, but different later on. Alternatively, the `const` keyword can be used to define a constant. However, the type of the constant must be explicitly mentioned and the constant is strictly immutable. */ let language_name = "Rust"; const LANGUAGE_MASCOT: &str = "Ferris"; /* language_name = ""; <- ⚠ error languge_mascot = "Gopher"; <- ⚠ error Rust will raise an error when you attempt to change the value of a constant. Constants are immutable, and hence their values cannot be changed. */ println!("language_name: {}", language_name); println!("language_mascot: {}", LANGUAGE_MASCOT); /* On the other hand, variables are defined with the `let mut` keywords. As probably guessable, the `mut` here stands for mutable; the value of mutable variables can change. 
*/ let mut greeting = "Good morning!"; println!("greeting (at declaration): {}", greeting); greeting = "Good afternoon!"; println!("greeting (after reassignment): {}", greeting); }
31.555556
97
0.651849
de0f337ea08f8895b653912ddb3ccae8635472a5
4,942
extern crate alloc; use alloc::string::String; use log::*; use esp_idf_hal::mutex; use esp_idf_sys::*; use crate::private::cstr::CString; const SNTP_SERVER_NUM: usize = SNTP_MAX_SERVERS as usize; #[derive(Copy, Clone, Debug, PartialEq)] #[cfg_attr(feature = "std", derive(Hash))] #[cfg_attr(feature = "use_serde", derive(Serialize, Deserialize))] pub enum OperatingMode { Poll, ListenOnly, } impl From<u8_t> for OperatingMode { fn from(from: u8_t) -> Self { match from as u32 { SNTP_OPMODE_POLL => OperatingMode::Poll, SNTP_OPMODE_LISTENONLY => OperatingMode::ListenOnly, _ => unreachable!(), } } } impl From<OperatingMode> for u8_t { fn from(from: OperatingMode) -> Self { match from { OperatingMode::Poll => SNTP_OPMODE_POLL as u8_t, OperatingMode::ListenOnly => SNTP_OPMODE_LISTENONLY as u8_t, } } } #[derive(Copy, Clone, Debug, PartialEq)] #[cfg_attr(feature = "std", derive(Hash))] #[cfg_attr(feature = "use_serde", derive(Serialize, Deserialize))] pub enum SyncMode { Smooth, Immediate, } impl From<sntp_sync_mode_t> for SyncMode { #[allow(non_upper_case_globals)] fn from(from: sntp_sync_mode_t) -> Self { match from { sntp_sync_mode_t_SNTP_SYNC_MODE_SMOOTH => SyncMode::Smooth, sntp_sync_mode_t_SNTP_SYNC_MODE_IMMED => SyncMode::Immediate, _ => unreachable!(), } } } impl From<SyncMode> for sntp_sync_mode_t { fn from(from: SyncMode) -> Self { match from { SyncMode::Smooth => sntp_sync_mode_t_SNTP_SYNC_MODE_SMOOTH, SyncMode::Immediate => sntp_sync_mode_t_SNTP_SYNC_MODE_IMMED, } } } #[derive(Copy, Clone, Debug, PartialEq)] #[cfg_attr(feature = "std", derive(Hash))] #[cfg_attr(feature = "use_serde", derive(Serialize, Deserialize))] pub enum SyncStatus { Reset, Completed, InProgress, } impl From<sntp_sync_status_t> for SyncStatus { #[allow(non_upper_case_globals)] fn from(from: sntp_sync_status_t) -> Self { match from { sntp_sync_status_t_SNTP_SYNC_STATUS_RESET => SyncStatus::Reset, sntp_sync_status_t_SNTP_SYNC_STATUS_COMPLETED => SyncStatus::Completed, 
sntp_sync_status_t_SNTP_SYNC_STATUS_IN_PROGRESS => SyncStatus::InProgress, _ => unreachable!(), } } } pub struct SntpConf { pub servers: [String; SNTP_SERVER_NUM], pub operating_mode: OperatingMode, pub sync_mode: SyncMode, } impl Default for SntpConf { fn default() -> Self { let mut servers: [String; SNTP_SERVER_NUM] = Default::default(); // Only 0-3 are valid ntp pool domain names for (i, item) in servers.iter_mut().enumerate().take(SNTP_SERVER_NUM.min(4)) { *item = format!("{}.pool.ntp.org", i); } Self { servers, operating_mode: OperatingMode::Poll, sync_mode: SyncMode::Immediate, } } } static TAKEN: mutex::Mutex<bool> = mutex::Mutex::new(false); pub struct EspSntp { // Needs to be kept around because the C bindings only have a pointer. _sntp_servers: [CString; SNTP_SERVER_NUM], } impl EspSntp { pub fn new_default() -> Result<Self, EspError> { Self::new(&Default::default()) } pub fn new(conf: &SntpConf) -> Result<Self, EspError> { let mut taken = TAKEN.lock(); if *taken { esp!(ESP_ERR_INVALID_STATE as i32)?; } let sntp = Self::init(conf)?; *taken = true; Ok(sntp) } fn init(conf: &SntpConf) -> Result<Self, EspError> { info!("Initializing"); unsafe { sntp_setoperatingmode(u8_t::from(conf.operating_mode)) }; unsafe { sntp_set_sync_mode(sntp_sync_mode_t::from(conf.sync_mode)) }; let mut c_servers: [CString; SNTP_SERVER_NUM] = Default::default(); for (i, s) in conf.servers.iter().enumerate() { let c_server = CString::new(s.as_str()).unwrap(); unsafe { sntp_setservername(i as u8, c_server.as_ptr()) }; c_servers[i] = c_server; } unsafe { sntp_set_time_sync_notification_cb(Some(Self::sync_cb)); sntp_init(); }; info!("Initialization complete"); Ok(Self { _sntp_servers: c_servers, }) } pub fn get_sync_status(&self) -> SyncStatus { SyncStatus::from(unsafe { sntp_get_sync_status() }) } unsafe extern "C" fn sync_cb(tv: *mut esp_idf_sys::timeval) { debug!( " Sync cb called: sec: {}, usec: {}", (*tv).tv_sec, (*tv).tv_usec, ); } } impl Drop for EspSntp { fn drop(&mut self) { { 
let mut taken = TAKEN.lock(); unsafe { sntp_stop() }; *taken = false; } info!("Dropped"); } }
26.427807
86
0.603197
8ac85de08087aca92accda5e0ac869a7d400e067
914
use std::collections::BTreeMap; #[derive(Clone, Debug, Deserialize, Serialize, PartialEq)] #[serde(rename_all = "lowercase")] pub struct Schema { #[serde(skip_serializing_if = "Option::is_none", rename = "$ref")] ref_path: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] description: Option<String>, #[serde(skip_serializing_if = "Option::is_none", rename = "type")] schema_type: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] format: Option<String>, #[serde(skip_serializing_if = "Option::is_none", rename = "enum")] enum_values: Option<Vec<String>>, #[serde(skip_serializing_if = "Option::is_none")] required: Option<Vec<String>>, #[serde(skip_serializing_if = "Option::is_none")] items: Option<Box<Schema>>, #[serde(skip_serializing_if = "Option::is_none")] properties: Option<BTreeMap<String, Schema>>, }
35.153846
70
0.681619
01f7027a5e37920cc2a812fddd0ac9f664eb6994
16,482
use std::fmt; use std::sync::Arc; use std::time::Duration; use std::thread; use futures::{Future, Stream}; use futures::future::{self, Either}; use futures::sync::{mpsc, oneshot}; use request::{Request, RequestBuilder}; use response::Response; use {async_impl, header, Certificate, Identity, Method, IntoUrl, Proxy, RedirectPolicy, wait}; /// A `Client` to make Requests with. /// /// The Client has various configuration values to tweak, but the defaults /// are set to what is usually the most commonly desired value. /// /// The `Client` holds a connection pool internally, so it is advised that /// you create one and **reuse** it. /// /// # Examples /// /// ```rust /// # use reqwest::{Error, Client}; /// # /// # fn run() -> Result<(), Error> { /// let client = Client::new(); /// let resp = client.get("http://httpbin.org/").send()?; /// # drop(resp); /// # Ok(()) /// # } /// /// ``` #[derive(Clone)] pub struct Client { inner: ClientHandle, } /// A `ClientBuilder` can be used to create a `Client` with custom configuration. /// /// # Example /// /// ``` /// # fn run() -> Result<(), reqwest::Error> { /// use std::time::Duration; /// /// let client = reqwest::Client::builder() /// .gzip(true) /// .timeout(Duration::from_secs(10)) /// .build()?; /// # Ok(()) /// # } /// ``` pub struct ClientBuilder { inner: async_impl::ClientBuilder, timeout: Timeout, } impl ClientBuilder { /// Constructs a new `ClientBuilder` pub fn new() -> ClientBuilder { ClientBuilder { inner: async_impl::ClientBuilder::new(), timeout: Timeout::default(), } } /// Returns a `Client` that uses this `ClientBuilder` configuration. /// /// # Errors /// /// This method fails if native TLS backend cannot be initialized. pub fn build(self) -> ::Result<Client> { ClientHandle::new(self).map(|handle| Client { inner: handle, }) } /// Add a custom root certificate. /// /// This can be used to connect to a server that has a self-signed /// certificate for example. 
/// /// # Example /// ``` /// # use std::fs::File; /// # use std::io::Read; /// # fn build_client() -> Result<(), Box<std::error::Error>> { /// // read a local binary DER encoded certificate /// let mut buf = Vec::new(); /// File::open("my-cert.der")?.read_to_end(&mut buf)?; /// /// // create a certificate /// let cert = reqwest::Certificate::from_der(&buf)?; /// /// // get a client builder /// let client = reqwest::Client::builder() /// .add_root_certificate(cert) /// .build()?; /// # drop(client); /// # Ok(()) /// # } /// ``` /// /// # Errors /// /// This method fails if adding root certificate was unsuccessful. pub fn add_root_certificate(self, cert: Certificate) -> ClientBuilder { self.with_inner(move |inner| inner.add_root_certificate(cert)) } /// Sets the identity to be used for client certificate authentication. /// /// # Example /// /// ``` /// # use std::fs::File; /// # use std::io::Read; /// # fn build_client() -> Result<(), Box<std::error::Error>> { /// // read a local PKCS12 bundle /// let mut buf = Vec::new(); /// File::open("my-ident.pfx")?.read_to_end(&mut buf)?; /// /// // create an Identity from the PKCS#12 archive /// let pkcs12 = reqwest::Identity::from_pkcs12_der(&buf, "my-privkey-password")?; /// /// // get a client builder /// let client = reqwest::Client::builder() /// .identity(pkcs12) /// .build()?; /// # drop(client); /// # Ok(()) /// # } /// ``` pub fn identity(self, identity: Identity) -> ClientBuilder { self.with_inner(move |inner| inner.identity(identity)) } /// Controls the use of hostname verification. /// /// Defaults to `false`. /// /// # Warning /// /// You should think very carefully before you use this method. If /// hostname verification is not used, any valid certificate for any /// site will be trusted for use from any other. This introduces a /// significant vulnerability to man-in-the-middle attacks. 
pub fn danger_accept_invalid_hostnames(self, accept_invalid_hostname: bool) -> ClientBuilder { self.with_inner(|inner| inner.danger_accept_invalid_hostnames(accept_invalid_hostname)) } /// Controls the use of certificate validation. /// /// Defaults to `false`. /// /// # Warning /// /// You should think very carefully before using this method. If /// invalid certificates are trusted, *any* certificate for *any* site /// will be trusted for use. This includes expired certificates. This /// introduces significant vulnerabilities, and should only be used /// as a last resort. pub fn danger_accept_invalid_certs(self, accept_invalid_certs: bool) -> ClientBuilder { self.with_inner(|inner| inner.danger_accept_invalid_certs(accept_invalid_certs)) } /// Sets the default headers for every request. /// /// # Example /// /// ```rust /// use reqwest::header; /// # fn build_client() -> Result<(), Box<std::error::Error>> { /// let mut headers = header::HeaderMap::new(); /// headers.insert(header::AUTHORIZATION, header::HeaderValue::from_static("secret")); /// /// // get a client builder /// let client = reqwest::Client::builder() /// .default_headers(headers) /// .build()?; /// let res = client.get("https://www.rust-lang.org").send()?; /// # Ok(()) /// # } /// ``` /// /// Override the default headers: /// /// ```rust /// use reqwest::header; /// # fn build_client() -> Result<(), Box<std::error::Error>> { /// let mut headers = header::HeaderMap::new(); /// headers.insert(header::AUTHORIZATION, header::HeaderValue::from_static("secret")); /// /// // get a client builder /// let client = reqwest::Client::builder() /// .default_headers(headers) /// .build()?; /// let res = client /// .get("https://www.rust-lang.org") /// .header(header::AUTHORIZATION, "token") /// .send()?; /// # Ok(()) /// # } /// ``` pub fn default_headers(self, headers: header::HeaderMap) -> ClientBuilder { self.with_inner(move |inner| inner.default_headers(headers)) } /// Enable auto gzip decompression by 
checking the ContentEncoding response header. /// /// If auto gzip decompresson is turned on: /// - When sending a request and if the request's headers do not already contain /// an `Accept-Encoding` **and** `Range` values, the `Accept-Encoding` header is set to `gzip`. /// The body is **not** automatically inflated. /// - When receiving a response, if it's headers contain a `Content-Encoding` value that /// equals to `gzip`, both values `Content-Encoding` and `Content-Length` are removed from the /// headers' set. The body is automatically deinflated. /// /// Default is enabled. pub fn gzip(self, enable: bool) -> ClientBuilder { self.with_inner(|inner| inner.gzip(enable)) } /// Add a `Proxy` to the list of proxies the `Client` will use. pub fn proxy(self, proxy: Proxy) -> ClientBuilder { self.with_inner(move |inner| inner.proxy(proxy)) } /// Set a `RedirectPolicy` for this client. /// /// Default will follow redirects up to a maximum of 10. pub fn redirect(self, policy: RedirectPolicy) -> ClientBuilder { self.with_inner(move |inner| inner.redirect(policy)) } /// Enable or disable automatic setting of the `Referer` header. /// /// Default is `true`. pub fn referer(self, enable: bool) -> ClientBuilder { self.with_inner(|inner| inner.referer(enable)) } /// Set a timeout for connect, read and write operations of a `Client`. /// /// Default is 30 seconds. /// /// Pass `None` to disable timeout. pub fn timeout<T>(mut self, timeout: T) -> ClientBuilder where T: Into<Option<Duration>>, { self.timeout = Timeout(timeout.into()); self } fn with_inner<F>(mut self, func: F) -> ClientBuilder where F: FnOnce(async_impl::ClientBuilder) -> async_impl::ClientBuilder, { self.inner = func(self.inner); self } } impl Client { /// Constructs a new `Client`. /// /// # Panic /// /// This method panics if native TLS backend cannot be created or /// initialized. Use `Client::builder()` if you wish to handle the failure /// as an `Error` instead of panicking. 
pub fn new() -> Client { ClientBuilder::new() .build() .expect("Client failed to initialize") } /// Creates a `ClientBuilder` to configure a `Client`. pub fn builder() -> ClientBuilder { ClientBuilder::new() } /// Convenience method to make a `GET` request to a URL. /// /// # Errors /// /// This method fails whenever supplied `Url` cannot be parsed. pub fn get<U: IntoUrl>(&self, url: U) -> RequestBuilder { self.request(Method::GET, url) } /// Convenience method to make a `POST` request to a URL. /// /// # Errors /// /// This method fails whenever supplied `Url` cannot be parsed. pub fn post<U: IntoUrl>(&self, url: U) -> RequestBuilder { self.request(Method::POST, url) } /// Convenience method to make a `PUT` request to a URL. /// /// # Errors /// /// This method fails whenever supplied `Url` cannot be parsed. pub fn put<U: IntoUrl>(&self, url: U) -> RequestBuilder { self.request(Method::PUT, url) } /// Convenience method to make a `PATCH` request to a URL. /// /// # Errors /// /// This method fails whenever supplied `Url` cannot be parsed. pub fn patch<U: IntoUrl>(&self, url: U) -> RequestBuilder { self.request(Method::PATCH, url) } /// Convenience method to make a `DELETE` request to a URL. /// /// # Errors /// /// This method fails whenever supplied `Url` cannot be parsed. pub fn delete<U: IntoUrl>(&self, url: U) -> RequestBuilder { self.request(Method::DELETE, url) } /// Convenience method to make a `HEAD` request to a URL. /// /// # Errors /// /// This method fails whenever supplied `Url` cannot be parsed. pub fn head<U: IntoUrl>(&self, url: U) -> RequestBuilder { self.request(Method::HEAD, url) } /// Start building a `Request` with the `Method` and `Url`. /// /// Returns a `RequestBuilder`, which will allow setting headers and /// request body before sending. /// /// # Errors /// /// This method fails whenever supplied `Url` cannot be parsed. 
pub fn request<U: IntoUrl>(&self, method: Method, url: U) -> RequestBuilder { let req = url .into_url() .map(move |url| Request::new(method, url)); RequestBuilder::new(self.clone(), req) } /// Executes a `Request`. /// /// A `Request` can be built manually with `Request::new()` or obtained /// from a RequestBuilder with `RequestBuilder::build()`. /// /// You should prefer to use the `RequestBuilder` and /// `RequestBuilder::send()`. /// /// # Errors /// /// This method fails if there was an error while sending request, /// redirect loop was detected or redirect limit was exhausted. pub fn execute(&self, request: Request) -> ::Result<Response> { self.inner.execute_request(request) } } impl fmt::Debug for Client { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Client") //.field("gzip", &self.inner.gzip) //.field("redirect_policy", &self.inner.redirect_policy) //.field("referer", &self.inner.referer) .finish() } } impl fmt::Debug for ClientBuilder { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("ClientBuilder") .finish() } } #[derive(Clone)] struct ClientHandle { timeout: Timeout, inner: Arc<InnerClientHandle> } type ThreadSender = mpsc::UnboundedSender<(async_impl::Request, oneshot::Sender<::Result<async_impl::Response>>)>; struct InnerClientHandle { tx: Option<ThreadSender>, thread: Option<thread::JoinHandle<()>> } impl Drop for InnerClientHandle { fn drop(&mut self) { self.tx.take(); self.thread.take().map(|h| h.join()); } } impl ClientHandle { fn new(builder: ClientBuilder) -> ::Result<ClientHandle> { let timeout = builder.timeout; let builder = builder.inner; let (tx, rx) = mpsc::unbounded(); let (spawn_tx, spawn_rx) = oneshot::channel::<::Result<()>>(); let handle = try_!(thread::Builder::new().name("reqwest-internal-sync-runtime".into()).spawn(move || { use tokio::runtime::current_thread::Runtime; let built = (|| { let rt = try_!(Runtime::new()); let client = builder.build()?; Ok((rt, client)) })(); let (mut rt, 
client) = match built { Ok((rt, c)) => { if let Err(_) = spawn_tx.send(Ok(())) { return; } (rt, c) }, Err(e) => { let _ = spawn_tx.send(Err(e)); return; } }; let work = rx.for_each(move |(req, tx)| { let tx: oneshot::Sender<::Result<async_impl::Response>> = tx; let task = client.execute(req) .then(move |x| tx.send(x).map_err(|_| ())); ::tokio::spawn(task); Ok(()) }); // work is Future<(), ()>, and our closure will never return Err rt.block_on(work) .expect("runtime unexpected error"); })); wait::timeout(spawn_rx, timeout.0).expect("runtime thread cancelled")?; let inner_handle = Arc::new(InnerClientHandle { tx: Some(tx), thread: Some(handle) }); Ok(ClientHandle { timeout: timeout, inner: inner_handle, }) } fn execute_request(&self, req: Request) -> ::Result<Response> { let (tx, rx) = oneshot::channel(); let (req, body) = req.into_async(); let url = req.url().clone(); self.inner.tx .as_ref() .expect("core thread exited early") .unbounded_send((req, tx)) .expect("core thread panicked"); let write = if let Some(body) = body { Either::A(body.send()) //try_!(body.send(self.timeout.0), &url); } else { Either::B(future::ok(())) }; let rx = rx.map_err(|_canceled| { // The only possible reason there would be a Canceled error // is if the thread running the event loop panicked. We could return // an Err here, like a BrokenPipe, but the Client is not // recoverable. Additionally, the panic in the other thread // is not normal, and should likely be propagated. 
panic!("event loop thread panicked"); }); let fut = write.join(rx).map(|((), res)| res); let res = match wait::timeout(fut, self.timeout.0) { Ok(res) => res, Err(wait::Waited::TimedOut) => return Err(::error::timedout(Some(url))), Err(wait::Waited::Err(err)) => { return Err(err.with_url(url)); } }; res.map(|res| { Response::new(res, self.timeout.0, KeepCoreThreadAlive(Some(self.inner.clone()))) }) } } #[derive(Clone, Copy)] struct Timeout(Option<Duration>); impl Default for Timeout { fn default() -> Timeout { // default mentioned in ClientBuilder::timeout() doc comment Timeout(Some(Duration::from_secs(30))) } } pub(crate) struct KeepCoreThreadAlive(Option<Arc<InnerClientHandle>>); impl KeepCoreThreadAlive { pub(crate) fn empty() -> KeepCoreThreadAlive { KeepCoreThreadAlive(None) } }
31.394286
114
0.569955
895d78318284f8530103f23d9c41ee0fd3b9ecf6
1,332
use gtk4::{TextView, Align, TextBuffer}; use std::{rc::Rc}; use crate::manes_cpu; thread_local!( static MANES_CPU_REGS_TEXTVIEW: Rc<TextView> = Rc::new({ TextView::builder() .name("cpuregstextview") .editable(false) .accepts_tab(false) .halign(Align::Fill) .valign(Align::Fill) .monospace(true) .focusable(false) .can_target(false) .buffer(&TextBuffer::builder().text(cpu_register_curr_state().as_str()).build()) .build() }); ); pub fn manes_cpu_regs_textview() -> Rc<TextView> { MANES_CPU_REGS_TEXTVIEW.with(|x| x.clone()) } pub fn cpu_register_curr_state() -> String { let rc_cpu = manes_cpu(); let cpu = rc_cpu.as_ref().borrow(); let mut content = String::new(); content.push_str("[CPU Registers]\n\n"); content.push_str(format!(" A: {0:02X} [{0:03}] \n", cpu.a).as_str()); content.push_str(format!(" X: {0:02X} [{0:03}] \n", cpu.x).as_str()); content.push_str(format!(" Y: {0:02X} [{0:03}] \n", cpu.y).as_str()); content.push_str(format!("PC: {0:02X} [{0:03}] \n", cpu.pc).as_str()); content.push_str(format!("SP: {0:02X} [{0:03}] \n", cpu.sp).as_str()); content.push_str(format!("FL: {0:02X} [{0:03}] \n", cpu.flags).as_str()); content }
33.3
92
0.577327
148d200a22ed302be9a63416fc6ffc59b86a523b
5,417
//! An immutable map constructed at compile time. use std::prelude::v1::*; use core::borrow::Borrow; use core::ops::Index; use core::slice; use core::fmt; use core::iter::IntoIterator; use phf_shared::{self, PhfHash, HashKey}; use crate::Slice; /// An immutable map constructed at compile time. /// /// ## Note /// /// The fields of this struct are public so that they may be initialized by the /// `phf_map!` macro and code generation. They are subject to change at any /// time and should never be accessed directly. pub struct Map<K: 'static, V: 'static> { #[doc(hidden)] pub key: HashKey, #[doc(hidden)] pub disps: Slice<(u32, u32)>, #[doc(hidden)] pub entries: Slice<(K, V)>, } impl<K, V> fmt::Debug for Map<K, V> where K: fmt::Debug, V: fmt::Debug { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fmt.debug_map().entries(self.entries()).finish() } } impl<'a, K, V, T: ?Sized> Index<&'a T> for Map<K, V> where T: Eq + PhfHash, K: Borrow<T> { type Output = V; fn index(&self, k: &'a T) -> &V { self.get(k).expect("invalid key") } } impl<K, V> Map<K, V> { /// Returns true if the `Map` is empty. pub fn is_empty(&self) -> bool { self.len() == 0 } /// Returns the number of entries in the `Map`. pub fn len(&self) -> usize { self.entries.len() } /// Determines if `key` is in the `Map`. pub fn contains_key<T: ?Sized>(&self, key: &T) -> bool where T: Eq + PhfHash, K: Borrow<T> { self.get(key).is_some() } /// Returns a reference to the value that `key` maps to. pub fn get<T: ?Sized>(&self, key: &T) -> Option<&V> where T: Eq + PhfHash, K: Borrow<T> { self.get_entry(key).map(|e| e.1) } /// Returns a reference to the map's internal static instance of the given /// key. /// /// This can be useful for interning schemes. pub fn get_key<T: ?Sized>(&self, key: &T) -> Option<&K> where T: Eq + PhfHash, K: Borrow<T> { self.get_entry(key).map(|e| e.0) } /// Like `get`, but returns both the key and the value. 
pub fn get_entry<T: ?Sized>(&self, key: &T) -> Option<(&K, &V)> where T: Eq + PhfHash, K: Borrow<T> { if self.disps.len() == 0 { return None; } //Prevent panic on empty map let hashes = phf_shared::hash(key, &self.key); let index = phf_shared::get_index(&hashes, &*self.disps, self.entries.len()); let entry = &self.entries[index as usize]; let b: &T = entry.0.borrow(); if b == key { Some((&entry.0, &entry.1)) } else { None } } /// Returns an iterator over the key/value pairs in the map. /// /// Entries are returned in an arbitrary but fixed order. pub fn entries<'a>(&'a self) -> Entries<'a, K, V> { Entries { iter: self.entries.iter() } } /// Returns an iterator over the keys in the map. /// /// Keys are returned in an arbitrary but fixed order. pub fn keys<'a>(&'a self) -> Keys<'a, K, V> { Keys { iter: self.entries() } } /// Returns an iterator over the values in the map. /// /// Values are returned in an arbitrary but fixed order. pub fn values<'a>(&'a self) -> Values<'a, K, V> { Values { iter: self.entries() } } } impl<'a, K, V> IntoIterator for &'a Map<K, V> { type Item = (&'a K, &'a V); type IntoIter = Entries<'a, K, V>; fn into_iter(self) -> Entries<'a, K, V> { self.entries() } } /// An iterator over the key/value pairs in a `Map`. pub struct Entries<'a, K: 'a, V: 'a> { iter: slice::Iter<'a, (K, V)>, } impl<'a, K, V> Iterator for Entries<'a, K, V> { type Item = (&'a K, &'a V); fn next(&mut self) -> Option<(&'a K, &'a V)> { self.iter.next().map(|&(ref k, ref v)| (k, v)) } fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() } } impl<'a, K, V> DoubleEndedIterator for Entries<'a, K, V> { fn next_back(&mut self) -> Option<(&'a K, &'a V)> { self.iter.next_back().map(|e| (&e.0, &e.1)) } } impl<'a, K, V> ExactSizeIterator for Entries<'a, K, V> {} /// An iterator over the keys in a `Map`. 
pub struct Keys<'a, K: 'a, V: 'a> { iter: Entries<'a, K, V>, } impl<'a, K, V> Iterator for Keys<'a, K, V> { type Item = &'a K; fn next(&mut self) -> Option<&'a K> { self.iter.next().map(|e| e.0) } fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() } } impl<'a, K, V> DoubleEndedIterator for Keys<'a, K, V> { fn next_back(&mut self) -> Option<&'a K> { self.iter.next_back().map(|e| e.0) } } impl<'a, K, V> ExactSizeIterator for Keys<'a, K, V> {} /// An iterator over the values in a `Map`. pub struct Values<'a, K: 'a, V: 'a> { iter: Entries<'a, K, V>, } impl<'a, K, V> Iterator for Values<'a, K, V> { type Item = &'a V; fn next(&mut self) -> Option<&'a V> { self.iter.next().map(|e| e.1) } fn size_hint(&self) -> (usize, Option<usize>) { self.iter.size_hint() } } impl<'a, K, V> DoubleEndedIterator for Values<'a, K, V> { fn next_back(&mut self) -> Option<&'a V> { self.iter.next_back().map(|e| e.1) } } impl<'a, K, V> ExactSizeIterator for Values<'a, K, V> {}
26.950249
90
0.54532
7ab10b2aa66b867dd7edaf1a59adcdbd5f976410
598
// aux-build:coherence_inherent_cc_lib.rs // Tests that methods that implement a trait cannot be invoked // unless the trait is imported. extern crate coherence_inherent_cc_lib; mod Import { // Trait is in scope here: use coherence_inherent_cc_lib::TheStruct; use coherence_inherent_cc_lib::TheTrait; fn call_the_fn(s: &TheStruct) { s.the_fn(); } } mod NoImport { // Trait is not in scope here: use coherence_inherent_cc_lib::TheStruct; fn call_the_fn(s: &TheStruct) { s.the_fn(); //~ ERROR no method named `the_fn` found } } fn main() {}
21.357143
62
0.683946
792016902aebe925b42f2e11bd7682f9c59263c3
117,002
//! Atomic types //! //! Atomic types provide primitive shared-memory communication between //! threads, and are the building blocks of other concurrent //! types. //! //! This module defines atomic versions of a select number of primitive //! types, including [`AtomicBool`], [`AtomicIsize`], [`AtomicUsize`], //! [`AtomicI8`], [`AtomicU16`], etc. //! Atomic types present operations that, when used correctly, synchronize //! updates between threads. //! //! Each method takes an [`Ordering`] which represents the strength of //! the memory barrier for that operation. These orderings are the //! same as the [C++20 atomic orderings][1]. For more information see the [nomicon][2]. //! //! [1]: https://en.cppreference.com/w/cpp/atomic/memory_order //! [2]: ../../../nomicon/atomics.html //! //! Atomic variables are safe to share between threads (they implement [`Sync`]) //! but they do not themselves provide the mechanism for sharing and follow the //! [threading model](../../../std/thread/index.html#the-threading-model) of Rust. //! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an //! atomically-reference-counted shared pointer). //! //! [arc]: ../../../std/sync/struct.Arc.html //! //! Atomic types may be stored in static variables, initialized using //! the constant initializers like [`AtomicBool::new`]. Atomic statics //! are often used for lazy global initialization. //! //! # Portability //! //! All atomic types in this module are guaranteed to be [lock-free] if they're //! available. This means they don't internally acquire a global mutex. Atomic //! types and operations are not guaranteed to be wait-free. This means that //! operations like `fetch_or` may be implemented with a compare-and-swap loop. //! //! Atomic operations may be implemented at the instruction layer with //! larger-size atomics. For example some platforms use 4-byte atomic //! instructions to implement `AtomicI8`. Note that this emulation should not //! 
have an impact on correctness of code, it's just something to be aware of. //! //! The atomic types in this module might not be available on all platforms. The //! atomic types here are all widely available, however, and can generally be //! relied upon existing. Some notable exceptions are: //! //! * PowerPC and MIPS platforms with 32-bit pointers do not have `AtomicU64` or //! `AtomicI64` types. //! * ARM platforms like `armv5te` that aren't for Linux only provide `load` //! and `store` operations, and do not support Compare and Swap (CAS) //! operations, such as `swap`, `fetch_add`, etc. Additionally on Linux, //! these CAS operations are implemented via [operating system support], which //! may come with a performance penalty. //! * ARM targets with `thumbv6m` only provide `load` and `store` operations, //! and do not support Compare and Swap (CAS) operations, such as `swap`, //! `fetch_add`, etc. //! //! [operating system support]: https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt //! //! Note that future platforms may be added that also do not have support for //! some atomic operations. Maximally portable code will want to be careful //! about which atomic types are used. `AtomicUsize` and `AtomicIsize` are //! generally the most portable, but even then they're not available everywhere. //! For reference, the `std` library requires `AtomicBool`s and pointer-sized atomics, although //! `core` does not. //! //! Currently you'll need to use `#[cfg(target_arch)]` primarily to //! conditionally compile in code with atomics. There is an unstable //! `#[cfg(target_has_atomic)]` as well which may be stabilized in the future. //! //! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm //! //! # Examples //! //! A simple spinlock: //! //! ``` //! use std::sync::Arc; //! use std::sync::atomic::{AtomicUsize, Ordering}; //! use std::{hint, thread}; //! //! fn main() { //! let spinlock = Arc::new(AtomicUsize::new(1)); //! //! 
let spinlock_clone = Arc::clone(&spinlock); //! let thread = thread::spawn(move|| { //! spinlock_clone.store(0, Ordering::SeqCst); //! }); //! //! // Wait for the other thread to release the lock //! while spinlock.load(Ordering::SeqCst) != 0 { //! hint::spin_loop(); //! } //! //! if let Err(panic) = thread.join() { //! println!("Thread had an error: {:?}", panic); //! } //! } //! ``` //! //! Keep a global count of live threads: //! //! ``` //! use std::sync::atomic::{AtomicUsize, Ordering}; //! //! static GLOBAL_THREAD_COUNT: AtomicUsize = AtomicUsize::new(0); //! //! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst); //! println!("live threads: {}", old_thread_count + 1); //! ``` #![stable(feature = "rust1", since = "1.0.0")] #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))] #![cfg_attr(not(target_has_atomic_load_store = "8"), allow(unused_imports))] #![rustc_diagnostic_item = "atomic_mod"] use self::Ordering::*; use crate::cell::UnsafeCell; use crate::fmt; use crate::intrinsics; use crate::hint::spin_loop; /// A boolean type which can be safely shared between threads. /// /// This type has the same in-memory representation as a [`bool`]. /// /// **Note**: This type is only available on platforms that support atomic /// loads and stores of `u8`. #[cfg(target_has_atomic_load_store = "8")] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_diagnostic_item = "AtomicBool"] #[repr(C, align(1))] pub struct AtomicBool { v: UnsafeCell<u8>, } #[cfg(target_has_atomic_load_store = "8")] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_const_unstable(feature = "const_default_impls", issue = "87864")] impl const Default for AtomicBool { /// Creates an `AtomicBool` initialized to `false`. #[inline] fn default() -> Self { Self::new(false) } } // Send is implicitly implemented for AtomicBool. 
#[cfg(target_has_atomic_load_store = "8")] #[stable(feature = "rust1", since = "1.0.0")] unsafe impl Sync for AtomicBool {} /// A raw pointer type which can be safely shared between threads. /// /// This type has the same in-memory representation as a `*mut T`. /// /// **Note**: This type is only available on platforms that support atomic /// loads and stores of pointers. Its size depends on the target pointer's size. #[cfg(target_has_atomic_load_store = "ptr")] #[stable(feature = "rust1", since = "1.0.0")] #[cfg_attr(target_pointer_width = "16", repr(C, align(2)))] #[cfg_attr(target_pointer_width = "32", repr(C, align(4)))] #[cfg_attr(target_pointer_width = "64", repr(C, align(8)))] pub struct AtomicPtr<T> { p: UnsafeCell<*mut T>, } #[cfg(target_has_atomic_load_store = "ptr")] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_const_unstable(feature = "const_default_impls", issue = "87864")] impl<T> const Default for AtomicPtr<T> { /// Creates a null `AtomicPtr<T>`. fn default() -> AtomicPtr<T> { AtomicPtr::new(crate::ptr::null_mut()) } } #[cfg(target_has_atomic_load_store = "ptr")] #[stable(feature = "rust1", since = "1.0.0")] unsafe impl<T> Send for AtomicPtr<T> {} #[cfg(target_has_atomic_load_store = "ptr")] #[stable(feature = "rust1", since = "1.0.0")] unsafe impl<T> Sync for AtomicPtr<T> {} /// Atomic memory orderings /// /// Memory orderings specify the way atomic operations synchronize memory. /// In its weakest [`Ordering::Relaxed`], only the memory directly touched by the /// operation is synchronized. On the other hand, a store-load pair of [`Ordering::SeqCst`] /// operations synchronize other memory while additionally preserving a total order of such /// operations across all threads. /// /// Rust's memory orderings are [the same as those of /// C++20](https://en.cppreference.com/w/cpp/atomic/memory_order). /// /// For more information see the [nomicon]. 
/// /// [nomicon]: ../../../nomicon/atomics.html #[stable(feature = "rust1", since = "1.0.0")] #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] #[non_exhaustive] #[rustc_diagnostic_item = "Ordering"] pub enum Ordering { /// No ordering constraints, only atomic operations. /// /// Corresponds to [`memory_order_relaxed`] in C++20. /// /// [`memory_order_relaxed`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Relaxed_ordering #[stable(feature = "rust1", since = "1.0.0")] Relaxed, /// When coupled with a store, all previous operations become ordered /// before any load of this value with [`Acquire`] (or stronger) ordering. /// In particular, all previous writes become visible to all threads /// that perform an [`Acquire`] (or stronger) load of this value. /// /// Notice that using this ordering for an operation that combines loads /// and stores leads to a [`Relaxed`] load operation! /// /// This ordering is only applicable for operations that can perform a store. /// /// Corresponds to [`memory_order_release`] in C++20. /// /// [`memory_order_release`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering #[stable(feature = "rust1", since = "1.0.0")] Release, /// When coupled with a load, if the loaded value was written by a store operation with /// [`Release`] (or stronger) ordering, then all subsequent operations /// become ordered after that store. In particular, all subsequent loads will see data /// written before the store. /// /// Notice that using this ordering for an operation that combines loads /// and stores leads to a [`Relaxed`] store operation! /// /// This ordering is only applicable for operations that can perform a load. /// /// Corresponds to [`memory_order_acquire`] in C++20. 
/// /// [`memory_order_acquire`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering #[stable(feature = "rust1", since = "1.0.0")] Acquire, /// Has the effects of both [`Acquire`] and [`Release`] together: /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering. /// /// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up /// not performing any store and hence it has just [`Acquire`] ordering. However, /// `AcqRel` will never perform [`Relaxed`] accesses. /// /// This ordering is only applicable for operations that combine both loads and stores. /// /// Corresponds to [`memory_order_acq_rel`] in C++20. /// /// [`memory_order_acq_rel`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering #[stable(feature = "rust1", since = "1.0.0")] AcqRel, /// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store /// operations, respectively) with the additional guarantee that all threads see all /// sequentially consistent operations in the same order. /// /// Corresponds to [`memory_order_seq_cst`] in C++20. /// /// [`memory_order_seq_cst`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering #[stable(feature = "rust1", since = "1.0.0")] SeqCst, } /// An [`AtomicBool`] initialized to `false`. #[cfg(target_has_atomic_load_store = "8")] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_deprecated( since = "1.34.0", reason = "the `new` function is now preferred", suggestion = "AtomicBool::new(false)" )] pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false); #[cfg(target_has_atomic_load_store = "8")] impl AtomicBool { /// Creates a new `AtomicBool`. 
/// /// # Examples /// /// ``` /// use std::sync::atomic::AtomicBool; /// /// let atomic_true = AtomicBool::new(true); /// let atomic_false = AtomicBool::new(false); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_const_stable(feature = "const_atomic_new", since = "1.24.0")] #[must_use] pub const fn new(v: bool) -> AtomicBool { AtomicBool { v: UnsafeCell::new(v as u8) } } /// Returns a mutable reference to the underlying [`bool`]. /// /// This is safe because the mutable reference guarantees that no other threads are /// concurrently accessing the atomic data. /// /// # Examples /// /// ``` /// use std::sync::atomic::{AtomicBool, Ordering}; /// /// let mut some_bool = AtomicBool::new(true); /// assert_eq!(*some_bool.get_mut(), true); /// *some_bool.get_mut() = false; /// assert_eq!(some_bool.load(Ordering::SeqCst), false); /// ``` #[inline] #[stable(feature = "atomic_access", since = "1.15.0")] pub fn get_mut(&mut self) -> &mut bool { // SAFETY: the mutable reference guarantees unique ownership. unsafe { &mut *(self.v.get() as *mut bool) } } /// Get atomic access to a `&mut bool`. /// /// # Examples /// /// ``` /// #![feature(atomic_from_mut)] /// use std::sync::atomic::{AtomicBool, Ordering}; /// /// let mut some_bool = true; /// let a = AtomicBool::from_mut(&mut some_bool); /// a.store(false, Ordering::Relaxed); /// assert_eq!(some_bool, false); /// ``` #[inline] #[cfg(target_has_atomic_equal_alignment = "8")] #[unstable(feature = "atomic_from_mut", issue = "76314")] pub fn from_mut(v: &mut bool) -> &mut Self { // SAFETY: the mutable reference guarantees unique ownership, and // alignment of both `bool` and `Self` is 1. unsafe { &mut *(v as *mut bool as *mut Self) } } /// Consumes the atomic and returns the contained value. /// /// This is safe because passing `self` by value guarantees that no other threads are /// concurrently accessing the atomic data. 
/// /// # Examples /// /// ``` /// use std::sync::atomic::AtomicBool; /// /// let some_bool = AtomicBool::new(true); /// assert_eq!(some_bool.into_inner(), true); /// ``` #[inline] #[stable(feature = "atomic_access", since = "1.15.0")] #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")] pub const fn into_inner(self) -> bool { self.v.into_inner() != 0 } /// Loads a value from the bool. /// /// `load` takes an [`Ordering`] argument which describes the memory ordering /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`]. /// /// # Panics /// /// Panics if `order` is [`Release`] or [`AcqRel`]. /// /// # Examples /// /// ``` /// use std::sync::atomic::{AtomicBool, Ordering}; /// /// let some_bool = AtomicBool::new(true); /// /// assert_eq!(some_bool.load(Ordering::Relaxed), true); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn load(&self, order: Ordering) -> bool { // SAFETY: any data races are prevented by atomic intrinsics and the raw // pointer passed in is valid because we got it from a reference. unsafe { atomic_load(self.v.get(), order) != 0 } } /// Stores a value into the bool. /// /// `store` takes an [`Ordering`] argument which describes the memory ordering /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`]. /// /// # Panics /// /// Panics if `order` is [`Acquire`] or [`AcqRel`]. /// /// # Examples /// /// ``` /// use std::sync::atomic::{AtomicBool, Ordering}; /// /// let some_bool = AtomicBool::new(true); /// /// some_bool.store(false, Ordering::Relaxed); /// assert_eq!(some_bool.load(Ordering::Relaxed), false); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn store(&self, val: bool, order: Ordering) { // SAFETY: any data races are prevented by atomic intrinsics and the raw // pointer passed in is valid because we got it from a reference. 
unsafe { atomic_store(self.v.get(), val as u8, order); } } /// Stores a value into the bool, returning the previous value. /// /// `swap` takes an [`Ordering`] argument which describes the memory ordering /// of this operation. All ordering modes are possible. Note that using /// [`Acquire`] makes the store part of this operation [`Relaxed`], and /// using [`Release`] makes the load part [`Relaxed`]. /// /// **Note:** This method is only available on platforms that support atomic /// operations on `u8`. /// /// # Examples /// /// ``` /// use std::sync::atomic::{AtomicBool, Ordering}; /// /// let some_bool = AtomicBool::new(true); /// /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true); /// assert_eq!(some_bool.load(Ordering::Relaxed), false); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] #[cfg(target_has_atomic = "8")] pub fn swap(&self, val: bool, order: Ordering) -> bool { // SAFETY: data races are prevented by atomic intrinsics. unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 } } /// Stores a value into the [`bool`] if the current value is the same as the `current` value. /// /// The return value is always the previous value. If it is equal to `current`, then the value /// was updated. /// /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory /// ordering of this operation. Notice that even when using [`AcqRel`], the operation /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics. /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it /// happens, and using [`Release`] makes the load part [`Relaxed`]. /// /// **Note:** This method is only available on platforms that support atomic /// operations on `u8`. 
/// /// # Migrating to `compare_exchange` and `compare_exchange_weak` /// /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for /// memory orderings: /// /// Original | Success | Failure /// -------- | ------- | ------- /// Relaxed | Relaxed | Relaxed /// Acquire | Acquire | Acquire /// Release | Release | Relaxed /// AcqRel | AcqRel | Acquire /// SeqCst | SeqCst | SeqCst /// /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds, /// which allows the compiler to generate better assembly code when the compare and swap /// is used in a loop. /// /// # Examples /// /// ``` /// use std::sync::atomic::{AtomicBool, Ordering}; /// /// let some_bool = AtomicBool::new(true); /// /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true); /// assert_eq!(some_bool.load(Ordering::Relaxed), false); /// /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false); /// assert_eq!(some_bool.load(Ordering::Relaxed), false); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_deprecated( since = "1.50.0", reason = "Use `compare_exchange` or `compare_exchange_weak` instead" )] #[cfg(target_has_atomic = "8")] pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool { match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) { Ok(x) => x, Err(x) => x, } } /// Stores a value into the [`bool`] if the current value is the same as the `current` value. /// /// The return value is a result indicating whether the new value was written and containing /// the previous value. On success this value is guaranteed to be equal to `current`. /// /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory /// ordering of this operation. `success` describes the required ordering for the /// read-modify-write operation that takes place if the comparison with `current` succeeds. 
/// `failure` describes the required ordering for the load operation that takes place when /// the comparison fails. Using [`Acquire`] as success ordering makes the store part /// of this operation [`Relaxed`], and using [`Release`] makes the successful load /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`] /// and must be equivalent to or weaker than the success ordering. /// /// **Note:** This method is only available on platforms that support atomic /// operations on `u8`. /// /// # Examples /// /// ``` /// use std::sync::atomic::{AtomicBool, Ordering}; /// /// let some_bool = AtomicBool::new(true); /// /// assert_eq!(some_bool.compare_exchange(true, /// false, /// Ordering::Acquire, /// Ordering::Relaxed), /// Ok(true)); /// assert_eq!(some_bool.load(Ordering::Relaxed), false); /// /// assert_eq!(some_bool.compare_exchange(true, true, /// Ordering::SeqCst, /// Ordering::Acquire), /// Err(false)); /// assert_eq!(some_bool.load(Ordering::Relaxed), false); /// ``` #[inline] #[stable(feature = "extended_compare_and_swap", since = "1.10.0")] #[doc(alias = "compare_and_swap")] #[cfg(target_has_atomic = "8")] pub fn compare_exchange( &self, current: bool, new: bool, success: Ordering, failure: Ordering, ) -> Result<bool, bool> { // SAFETY: data races are prevented by atomic intrinsics. match unsafe { atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure) } { Ok(x) => Ok(x != 0), Err(x) => Err(x != 0), } } /// Stores a value into the [`bool`] if the current value is the same as the `current` value. /// /// Unlike [`AtomicBool::compare_exchange`], this function is allowed to spuriously fail even when the /// comparison succeeds, which can result in more efficient code on some platforms. The /// return value is a result indicating whether the new value was written and containing the /// previous value. 
/// /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory /// ordering of this operation. `success` describes the required ordering for the /// read-modify-write operation that takes place if the comparison with `current` succeeds. /// `failure` describes the required ordering for the load operation that takes place when /// the comparison fails. Using [`Acquire`] as success ordering makes the store part /// of this operation [`Relaxed`], and using [`Release`] makes the successful load /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`] /// and must be equivalent to or weaker than the success ordering. /// /// **Note:** This method is only available on platforms that support atomic /// operations on `u8`. /// /// # Examples /// /// ``` /// use std::sync::atomic::{AtomicBool, Ordering}; /// /// let val = AtomicBool::new(false); /// /// let new = true; /// let mut old = val.load(Ordering::Relaxed); /// loop { /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) { /// Ok(_) => break, /// Err(x) => old = x, /// } /// } /// ``` #[inline] #[stable(feature = "extended_compare_and_swap", since = "1.10.0")] #[doc(alias = "compare_and_swap")] #[cfg(target_has_atomic = "8")] pub fn compare_exchange_weak( &self, current: bool, new: bool, success: Ordering, failure: Ordering, ) -> Result<bool, bool> { // SAFETY: data races are prevented by atomic intrinsics. match unsafe { atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure) } { Ok(x) => Ok(x != 0), Err(x) => Err(x != 0), } } /// Logical "and" with a boolean value. /// /// Performs a logical "and" operation on the current value and the argument `val`, and sets /// the new value to the result. /// /// Returns the previous value. /// /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering /// of this operation. All ordering modes are possible. 
Note that using /// [`Acquire`] makes the store part of this operation [`Relaxed`], and /// using [`Release`] makes the load part [`Relaxed`]. /// /// **Note:** This method is only available on platforms that support atomic /// operations on `u8`. /// /// # Examples /// /// ``` /// use std::sync::atomic::{AtomicBool, Ordering}; /// /// let foo = AtomicBool::new(true); /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true); /// assert_eq!(foo.load(Ordering::SeqCst), false); /// /// let foo = AtomicBool::new(true); /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true); /// assert_eq!(foo.load(Ordering::SeqCst), true); /// /// let foo = AtomicBool::new(false); /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false); /// assert_eq!(foo.load(Ordering::SeqCst), false); /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] #[cfg(target_has_atomic = "8")] pub fn fetch_and(&self, val: bool, order: Ordering) -> bool { // SAFETY: data races are prevented by atomic intrinsics. unsafe { atomic_and(self.v.get(), val as u8, order) != 0 } } /// Logical "nand" with a boolean value. /// /// Performs a logical "nand" operation on the current value and the argument `val`, and sets /// the new value to the result. /// /// Returns the previous value. /// /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering /// of this operation. All ordering modes are possible. Note that using /// [`Acquire`] makes the store part of this operation [`Relaxed`], and /// using [`Release`] makes the load part [`Relaxed`]. /// /// **Note:** This method is only available on platforms that support atomic /// operations on `u8`. 
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg(target_has_atomic = "8")]
    pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
        // We can't use atomic_nand here because it can result in a bool with
        // an invalid value. This happens because the atomic operation is done
        // with an 8-bit integer internally, which would set the upper 7 bits.
        // So we just use fetch_xor or swap instead.
        if val {
            // !(x & true) == !x
            // We must invert the bool.
            self.fetch_xor(true, order)
        } else {
            // !(x & false) == true
            // We must set the bool to true.
            self.swap(true, order)
        }
    }

    /// Logical "or" with a boolean value.
    ///
    /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
    /// new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. All ordering modes are possible. Note that using
    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
    /// using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg(target_has_atomic = "8")]
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        // SAFETY: data races are prevented by atomic intrinsics.
        // `val as u8` maps `true`/`false` to 1/0; any non-zero previous byte
        // reads back as `true`.
        unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
    }

    /// Logical "xor" with a boolean value.
    ///
    /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. All ordering modes are possible. Note that using
    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
    /// using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg(target_has_atomic = "8")]
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
    }

    /// Returns a mutable pointer to the underlying [`bool`].
    ///
    /// Doing non-atomic reads and writes on the resulting integer can be a data race.
    /// This method is mostly useful for FFI, where the function signature may use
    /// `*mut bool` instead of `&AtomicBool`.
    ///
    /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
    /// atomic types work with interior mutability. All modifications of an atomic change the value
    /// through a shared reference, and can do so safely as long as they use atomic operations. Any
    /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
    /// restriction: operations on it must be atomic.
    /// # Examples
    ///
    /// ```ignore (extern-declaration)
    /// # fn main() {
    /// use std::sync::atomic::AtomicBool;
    /// extern "C" {
    ///     fn my_atomic_op(arg: *mut bool);
    /// }
    ///
    /// let mut atomic = AtomicBool::new(true);
    /// unsafe {
    ///     my_atomic_op(atomic.as_mut_ptr());
    /// }
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")]
    pub fn as_mut_ptr(&self) -> *mut bool {
        // The cast is sound because `AtomicBool` stores its value in a
        // `UnsafeCell<u8>` with the same layout as `bool`.
        self.v.get() as *mut bool
    }

    /// Fetches the value, and applies a function to it that returns an optional
    /// new value. Returns a `Result` of `Ok(previous_value)` if the function
    /// returned `Some(_)`, else `Err(previous_value)`.
    ///
    /// Note: This may call the function multiple times if the value has been
    /// changed from other threads in the meantime, as long as the function
    /// returns `Some(_)`, but the function will have been applied only once to
    /// the stored value.
    ///
    /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering for
    /// when the operation finally succeeds while the second describes the
    /// required ordering for loads. These correspond to the success and failure
    /// orderings of [`AtomicBool::compare_exchange`] respectively.
    ///
    /// Using [`Acquire`] as success ordering makes the store part of this
    /// operation [`Relaxed`], and using [`Release`] makes the final successful
    /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
    /// [`Acquire`] or [`Relaxed`] and must be equivalent to or weaker than the
    /// success ordering.
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let x = AtomicBool::new(false);
    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(false));
    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(false));
    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(true));
    /// assert_eq!(x.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "atomic_fetch_update", since = "1.53.0")]
    #[cfg(target_has_atomic = "8")]
    pub fn fetch_update<F>(
        &self,
        set_order: Ordering,
        fetch_order: Ordering,
        mut f: F,
    ) -> Result<bool, bool>
    where
        F: FnMut(bool) -> Option<bool>,
    {
        // Classic CAS loop: re-run `f` on the freshly observed value whenever
        // the weak compare-exchange fails (contention or spurious failure).
        let mut prev = self.load(fetch_order);
        while let Some(next) = f(prev) {
            match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
                x @ Ok(_) => return x,
                Err(next_prev) => prev = next_prev,
            }
        }
        // `f` declined to produce a new value; report the last observed one.
        Err(prev)
    }
}

#[cfg(target_has_atomic_load_store = "ptr")]
impl<T> AtomicPtr<T> {
    /// Creates a new `AtomicPtr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let ptr = &mut 5;
    /// let atomic_ptr = AtomicPtr::new(ptr);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_stable(feature = "const_atomic_new", since = "1.24.0")]
    pub const fn new(p: *mut T) -> AtomicPtr<T> {
        AtomicPtr { p: UnsafeCell::new(p) }
    }

    /// Returns a mutable reference to the underlying pointer.
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let mut data = 10;
    /// let mut atomic_ptr = AtomicPtr::new(&mut data);
    /// let mut other_data = 5;
    /// *atomic_ptr.get_mut() = &mut other_data;
    /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn get_mut(&mut self) -> &mut *mut T {
        self.p.get_mut()
    }

    /// Get atomic access to a pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(atomic_from_mut)]
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let mut data = 123;
    /// let mut some_ptr = &mut data as *mut i32;
    /// let a = AtomicPtr::from_mut(&mut some_ptr);
    /// let mut other_data = 456;
    /// a.store(&mut other_data, Ordering::Relaxed);
    /// assert_eq!(unsafe { *some_ptr }, 456);
    /// ```
    #[inline]
    #[cfg(target_has_atomic_equal_alignment = "ptr")]
    #[unstable(feature = "atomic_from_mut", issue = "76314")]
    pub fn from_mut(v: &mut *mut T) -> &mut Self {
        use crate::mem::align_of;
        // Compile-time alignment check: the array length underflows (and the
        // build fails) unless the two alignments are equal.
        let [] = [(); align_of::<AtomicPtr<()>>() - align_of::<*mut ()>()];
        // SAFETY:
        //  - the mutable reference guarantees unique ownership.
        //  - the alignment of `*mut T` and `Self` is the same on all platforms
        //    supported by rust, as verified above.
        unsafe { &mut *(v as *mut *mut T as *mut Self) }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicPtr;
    ///
    /// let mut data = 5;
    /// let atomic_ptr = AtomicPtr::new(&mut data);
    /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")]
    pub const fn into_inner(self) -> *mut T {
        self.p.into_inner()
    }

    /// Loads a value from the pointer.
    ///
    /// `load` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Release`] or [`AcqRel`].
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let value = some_ptr.load(Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn load(&self, order: Ordering) -> *mut T {
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe { atomic_load(self.p.get(), order) }
    }

    /// Stores a value into the pointer.
    ///
    /// `store` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Acquire`] or [`AcqRel`].
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// some_ptr.store(other_ptr, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn store(&self, ptr: *mut T, order: Ordering) {
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe {
            atomic_store(self.p.get(), ptr, order);
        }
    }

    /// Stores a value into the pointer, returning the previous value.
    ///
    /// `swap` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. All ordering modes are possible. Note that using
    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
    /// using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on pointers.
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg(target_has_atomic = "ptr")]
    pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe { atomic_swap(self.p.get(), ptr, order) }
    }

    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
    /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
    /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
    /// happens, and using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on pointers.
    ///
    /// # Migrating to `compare_exchange` and `compare_exchange_weak`
    ///
    /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
    /// memory orderings:
    ///
    /// Original | Success | Failure
    /// -------- | ------- | -------
    /// Relaxed  | Relaxed | Relaxed
    /// Acquire  | Acquire | Acquire
    /// Release  | Release | Relaxed
    /// AcqRel   | AcqRel  | Acquire
    /// SeqCst   | SeqCst  | SeqCst
    ///
    /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
    /// which allows the compiler to generate better assembly code when the compare and swap
    /// is used in a loop.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// let value = some_ptr.compare_and_swap(ptr, other_ptr, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_deprecated(
        since = "1.50.0",
        reason = "Use `compare_exchange` or `compare_exchange_weak` instead"
    )]
    #[cfg(target_has_atomic = "ptr")]
    pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
        // Delegates to `compare_exchange`, deriving the failure ordering from
        // `order` (see the migration table above), and flattens the Result
        // back into the legacy "always return the previous value" shape.
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }

    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
    /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
    /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
    /// and must be equivalent to or weaker than the success ordering.
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on pointers.
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let other_ptr = &mut 10;
    ///
    /// let value = some_ptr.compare_exchange(ptr, other_ptr,
    ///                                       Ordering::SeqCst, Ordering::Relaxed);
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    #[cfg(target_has_atomic = "ptr")]
    pub fn compare_exchange(
        &self,
        current: *mut T,
        new: *mut T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<*mut T, *mut T> {
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe { atomic_compare_exchange(self.p.get(), current, new, success, failure) }
    }

    /// Stores a value into the pointer if the current value is the same as the `current` value.
    ///
    /// Unlike [`AtomicPtr::compare_exchange`], this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
    /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
    /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`]
    /// and must be equivalent to or weaker than the success ordering.
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on pointers.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let some_ptr = AtomicPtr::new(&mut 5);
    ///
    /// let new = &mut 10;
    /// let mut old = some_ptr.load(Ordering::Relaxed);
    /// loop {
    ///     match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    #[cfg(target_has_atomic = "ptr")]
    pub fn compare_exchange_weak(
        &self,
        current: *mut T,
        new: *mut T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<*mut T, *mut T> {
        // SAFETY: This intrinsic is unsafe because it operates on a raw pointer
        // but we know for sure that the pointer is valid (we just got it from
        // an `UnsafeCell` that we have by reference) and the atomic operation
        // itself allows us to safely mutate the `UnsafeCell` contents.
        unsafe { atomic_compare_exchange_weak(self.p.get(), current, new, success, failure) }
    }

    /// Fetches the value, and applies a function to it that returns an optional
    /// new value. Returns a `Result` of `Ok(previous_value)` if the function
    /// returned `Some(_)`, else `Err(previous_value)`.
    ///
    /// Note: This may call the function multiple times if the value has been
    /// changed from other threads in the meantime, as long as the function
    /// returns `Some(_)`, but the function will have been applied only once to
    /// the stored value.
    ///
    /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering for
    /// when the operation finally succeeds while the second describes the
    /// required ordering for loads. These correspond to the success and failure
    /// orderings of [`AtomicPtr::compare_exchange`] respectively.
    ///
    /// Using [`Acquire`] as success ordering makes the store part of this
    /// operation [`Relaxed`], and using [`Release`] makes the final successful
    /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
    /// [`Acquire`] or [`Relaxed`] and must be equivalent to or weaker than the
    /// success ordering.
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on pointers.
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::sync::atomic::{AtomicPtr, Ordering};
    ///
    /// let ptr: *mut _ = &mut 5;
    /// let some_ptr = AtomicPtr::new(ptr);
    ///
    /// let new: *mut _ = &mut 10;
    /// assert_eq!(some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(ptr));
    /// let result = some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| {
    ///     if x == ptr {
    ///         Some(new)
    ///     } else {
    ///         None
    ///     }
    /// });
    /// assert_eq!(result, Ok(ptr));
    /// assert_eq!(some_ptr.load(Ordering::SeqCst), new);
    /// ```
    #[inline]
    #[stable(feature = "atomic_fetch_update", since = "1.53.0")]
    #[cfg(target_has_atomic = "ptr")]
    pub fn fetch_update<F>(
        &self,
        set_order: Ordering,
        fetch_order: Ordering,
        mut f: F,
    ) -> Result<*mut T, *mut T>
    where
        F: FnMut(*mut T) -> Option<*mut T>,
    {
        // Classic CAS loop: re-run `f` on the freshly observed pointer whenever
        // the weak compare-exchange fails (contention or spurious failure).
        let mut prev = self.load(fetch_order);
        while let Some(next) = f(prev) {
            match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
                x @ Ok(_) => return x,
                Err(next_prev) => prev = next_prev,
            }
        }
        // `f` declined to produce a new value; report the last observed one.
        Err(prev)
    }
}

#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "atomic_bool_from", since = "1.24.0")]
#[rustc_const_unstable(feature = "const_convert", issue = "88674")]
impl const From<bool> for AtomicBool {
    /// Converts a `bool` into an `AtomicBool`.
/// /// # Examples /// /// ``` /// use std::sync::atomic::AtomicBool; /// let atomic_bool = AtomicBool::from(true); /// assert_eq!(format!("{:?}", atomic_bool), "true") /// ``` #[inline] fn from(b: bool) -> Self { Self::new(b) } } #[cfg(target_has_atomic_load_store = "ptr")] #[stable(feature = "atomic_from", since = "1.23.0")] #[rustc_const_unstable(feature = "const_convert", issue = "88674")] impl<T> const From<*mut T> for AtomicPtr<T> { #[inline] fn from(p: *mut T) -> Self { Self::new(p) } } #[allow(unused_macros)] // This macro ends up being unused on some architectures. macro_rules! if_not_8_bit { (u8, $($tt:tt)*) => { "" }; (i8, $($tt:tt)*) => { "" }; ($_:ident, $($tt:tt)*) => { $($tt)* }; } #[cfg(target_has_atomic_load_store = "8")] macro_rules! atomic_int { ($cfg_cas:meta, $cfg_align:meta, $stable:meta, $stable_cxchg:meta, $stable_debug:meta, $stable_access:meta, $stable_from:meta, $stable_nand:meta, $const_stable:meta, $stable_init_const:meta, $s_int_type:literal, $extra_feature:expr, $min_fn:ident, $max_fn:ident, $align:expr, $atomic_new:expr, $int_type:ident $atomic_type:ident $atomic_init:ident) => { /// An integer type which can be safely shared between threads. /// /// This type has the same in-memory representation as the underlying /// integer type, [` #[doc = $s_int_type] /// `]. For more about the differences between atomic types and /// non-atomic types as well as information about the portability of /// this type, please see the [module-level documentation]. /// /// **Note:** This type is only available on platforms that support /// atomic loads and stores of [` #[doc = $s_int_type] /// `]. /// /// [module-level documentation]: crate::sync::atomic #[$stable] #[repr(C, align($align))] pub struct $atomic_type { v: UnsafeCell<$int_type>, } /// An atomic integer initialized to `0`. 
#[$stable_init_const] #[rustc_deprecated( since = "1.34.0", reason = "the `new` function is now preferred", suggestion = $atomic_new, )] pub const $atomic_init: $atomic_type = $atomic_type::new(0); #[$stable] #[rustc_const_unstable(feature = "const_default_impls", issue = "87864")] impl const Default for $atomic_type { #[inline] fn default() -> Self { Self::new(Default::default()) } } #[$stable_from] #[rustc_const_unstable(feature = "const_num_from_num", issue = "87852")] impl const From<$int_type> for $atomic_type { #[doc = concat!("Converts an `", stringify!($int_type), "` into an `", stringify!($atomic_type), "`.")] #[inline] fn from(v: $int_type) -> Self { Self::new(v) } } #[$stable_debug] impl fmt::Debug for $atomic_type { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&self.load(Ordering::SeqCst), f) } } // Send is implicitly implemented. #[$stable] unsafe impl Sync for $atomic_type {} impl $atomic_type { /// Creates a new atomic integer. /// /// # Examples /// /// ``` #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")] /// #[doc = concat!("let atomic_forty_two = ", stringify!($atomic_type), "::new(42);")] /// ``` #[inline] #[$stable] #[$const_stable] #[must_use] pub const fn new(v: $int_type) -> Self { Self {v: UnsafeCell::new(v)} } /// Returns a mutable reference to the underlying integer. /// /// This is safe because the mutable reference guarantees that no other threads are /// concurrently accessing the atomic data. 
/// /// # Examples /// /// ``` #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")] /// #[doc = concat!("let mut some_var = ", stringify!($atomic_type), "::new(10);")] /// assert_eq!(*some_var.get_mut(), 10); /// *some_var.get_mut() = 5; /// assert_eq!(some_var.load(Ordering::SeqCst), 5); /// ``` #[inline] #[$stable_access] pub fn get_mut(&mut self) -> &mut $int_type { self.v.get_mut() } #[doc = concat!("Get atomic access to a `&mut ", stringify!($int_type), "`.")] /// #[doc = if_not_8_bit! { $int_type, concat!( "**Note:** This function is only available on targets where `", stringify!($int_type), "` has an alignment of ", $align, " bytes." ) }] /// /// # Examples /// /// ``` /// #![feature(atomic_from_mut)] #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")] /// /// let mut some_int = 123; #[doc = concat!("let a = ", stringify!($atomic_type), "::from_mut(&mut some_int);")] /// a.store(100, Ordering::Relaxed); /// assert_eq!(some_int, 100); /// ``` /// #[inline] #[$cfg_align] #[unstable(feature = "atomic_from_mut", issue = "76314")] pub fn from_mut(v: &mut $int_type) -> &mut Self { use crate::mem::align_of; let [] = [(); align_of::<Self>() - align_of::<$int_type>()]; // SAFETY: // - the mutable reference guarantees unique ownership. // - the alignment of `$int_type` and `Self` is the // same, as promised by $cfg_align and verified above. unsafe { &mut *(v as *mut $int_type as *mut Self) } } /// Consumes the atomic and returns the contained value. /// /// This is safe because passing `self` by value guarantees that no other threads are /// concurrently accessing the atomic data. 
/// /// # Examples /// /// ``` #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")] /// #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")] /// assert_eq!(some_var.into_inner(), 5); /// ``` #[inline] #[$stable_access] #[rustc_const_unstable(feature = "const_cell_into_inner", issue = "78729")] pub const fn into_inner(self) -> $int_type { self.v.into_inner() } /// Loads a value from the atomic integer. /// /// `load` takes an [`Ordering`] argument which describes the memory ordering of this operation. /// Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`]. /// /// # Panics /// /// Panics if `order` is [`Release`] or [`AcqRel`]. /// /// # Examples /// /// ``` #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")] /// #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")] /// /// assert_eq!(some_var.load(Ordering::Relaxed), 5); /// ``` #[inline] #[$stable] pub fn load(&self, order: Ordering) -> $int_type { // SAFETY: data races are prevented by atomic intrinsics. unsafe { atomic_load(self.v.get(), order) } } /// Stores a value into the atomic integer. /// /// `store` takes an [`Ordering`] argument which describes the memory ordering of this operation. /// Possible values are [`SeqCst`], [`Release`] and [`Relaxed`]. /// /// # Panics /// /// Panics if `order` is [`Acquire`] or [`AcqRel`]. /// /// # Examples /// /// ``` #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")] /// #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")] /// /// some_var.store(10, Ordering::Relaxed); /// assert_eq!(some_var.load(Ordering::Relaxed), 10); /// ``` #[inline] #[$stable] pub fn store(&self, val: $int_type, order: Ordering) { // SAFETY: data races are prevented by atomic intrinsics. 
unsafe { atomic_store(self.v.get(), val, order); } } /// Stores a value into the atomic integer, returning the previous value. /// /// `swap` takes an [`Ordering`] argument which describes the memory ordering /// of this operation. All ordering modes are possible. Note that using /// [`Acquire`] makes the store part of this operation [`Relaxed`], and /// using [`Release`] makes the load part [`Relaxed`]. /// /// **Note**: This method is only available on platforms that support atomic operations on #[doc = concat!("[`", $s_int_type, "`].")] /// /// # Examples /// /// ``` #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")] /// #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")] /// /// assert_eq!(some_var.swap(10, Ordering::Relaxed), 5); /// ``` #[inline] #[$stable] #[$cfg_cas] pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type { // SAFETY: data races are prevented by atomic intrinsics. unsafe { atomic_swap(self.v.get(), val, order) } } /// Stores a value into the atomic integer if the current value is the same as /// the `current` value. /// /// The return value is always the previous value. If it is equal to `current`, then the /// value was updated. /// /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory /// ordering of this operation. Notice that even when using [`AcqRel`], the operation /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics. /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it /// happens, and using [`Release`] makes the load part [`Relaxed`]. 
/// /// **Note**: This method is only available on platforms that support atomic operations on #[doc = concat!("[`", $s_int_type, "`].")] /// /// # Migrating to `compare_exchange` and `compare_exchange_weak` /// /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for /// memory orderings: /// /// Original | Success | Failure /// -------- | ------- | ------- /// Relaxed | Relaxed | Relaxed /// Acquire | Acquire | Acquire /// Release | Release | Relaxed /// AcqRel | AcqRel | Acquire /// SeqCst | SeqCst | SeqCst /// /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds, /// which allows the compiler to generate better assembly code when the compare and swap /// is used in a loop. /// /// # Examples /// /// ``` #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")] /// #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")] /// /// assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5); /// assert_eq!(some_var.load(Ordering::Relaxed), 10); /// /// assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10); /// assert_eq!(some_var.load(Ordering::Relaxed), 10); /// ``` #[inline] #[$stable] #[rustc_deprecated( since = "1.50.0", reason = "Use `compare_exchange` or `compare_exchange_weak` instead") ] #[$cfg_cas] pub fn compare_and_swap(&self, current: $int_type, new: $int_type, order: Ordering) -> $int_type { match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) { Ok(x) => x, Err(x) => x, } } /// Stores a value into the atomic integer if the current value is the same as /// the `current` value. /// /// The return value is a result indicating whether the new value was written and /// containing the previous value. On success this value is guaranteed to be equal to /// `current`. 
/// /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory /// ordering of this operation. `success` describes the required ordering for the /// read-modify-write operation that takes place if the comparison with `current` succeeds. /// `failure` describes the required ordering for the load operation that takes place when /// the comparison fails. Using [`Acquire`] as success ordering makes the store part /// of this operation [`Relaxed`], and using [`Release`] makes the successful load /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`] /// and must be equivalent to or weaker than the success ordering. /// /// **Note**: This method is only available on platforms that support atomic operations on #[doc = concat!("[`", $s_int_type, "`].")] /// /// # Examples /// /// ``` #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")] /// #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")] /// /// assert_eq!(some_var.compare_exchange(5, 10, /// Ordering::Acquire, /// Ordering::Relaxed), /// Ok(5)); /// assert_eq!(some_var.load(Ordering::Relaxed), 10); /// /// assert_eq!(some_var.compare_exchange(6, 12, /// Ordering::SeqCst, /// Ordering::Acquire), /// Err(10)); /// assert_eq!(some_var.load(Ordering::Relaxed), 10); /// ``` #[inline] #[$stable_cxchg] #[$cfg_cas] pub fn compare_exchange(&self, current: $int_type, new: $int_type, success: Ordering, failure: Ordering) -> Result<$int_type, $int_type> { // SAFETY: data races are prevented by atomic intrinsics. unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) } } /// Stores a value into the atomic integer if the current value is the same as /// the `current` value. 
/// #[doc = concat!("Unlike [`", stringify!($atomic_type), "::compare_exchange`],")] /// this function is allowed to spuriously fail even /// when the comparison succeeds, which can result in more efficient code on some /// platforms. The return value is a result indicating whether the new value was /// written and containing the previous value. /// /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory /// ordering of this operation. `success` describes the required ordering for the /// read-modify-write operation that takes place if the comparison with `current` succeeds. /// `failure` describes the required ordering for the load operation that takes place when /// the comparison fails. Using [`Acquire`] as success ordering makes the store part /// of this operation [`Relaxed`], and using [`Release`] makes the successful load /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`] /// and must be equivalent to or weaker than the success ordering. /// /// **Note**: This method is only available on platforms that support atomic operations on #[doc = concat!("[`", $s_int_type, "`].")] /// /// # Examples /// /// ``` #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")] /// #[doc = concat!("let val = ", stringify!($atomic_type), "::new(4);")] /// /// let mut old = val.load(Ordering::Relaxed); /// loop { /// let new = old * 2; /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) { /// Ok(_) => break, /// Err(x) => old = x, /// } /// } /// ``` #[inline] #[$stable_cxchg] #[$cfg_cas] pub fn compare_exchange_weak(&self, current: $int_type, new: $int_type, success: Ordering, failure: Ordering) -> Result<$int_type, $int_type> { // SAFETY: data races are prevented by atomic intrinsics. unsafe { atomic_compare_exchange_weak(self.v.get(), current, new, success, failure) } } /// Adds to the current value, returning the previous value. 
/// /// This operation wraps around on overflow. /// /// `fetch_add` takes an [`Ordering`] argument which describes the memory ordering /// of this operation. All ordering modes are possible. Note that using /// [`Acquire`] makes the store part of this operation [`Relaxed`], and /// using [`Release`] makes the load part [`Relaxed`]. /// /// **Note**: This method is only available on platforms that support atomic operations on #[doc = concat!("[`", $s_int_type, "`].")] /// /// # Examples /// /// ``` #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")] /// #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0);")] /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0); /// assert_eq!(foo.load(Ordering::SeqCst), 10); /// ``` #[inline] #[$stable] #[$cfg_cas] pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type { // SAFETY: data races are prevented by atomic intrinsics. unsafe { atomic_add(self.v.get(), val, order) } } /// Subtracts from the current value, returning the previous value. /// /// This operation wraps around on overflow. /// /// `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering /// of this operation. All ordering modes are possible. Note that using /// [`Acquire`] makes the store part of this operation [`Relaxed`], and /// using [`Release`] makes the load part [`Relaxed`]. 
/// /// **Note**: This method is only available on platforms that support atomic operations on #[doc = concat!("[`", $s_int_type, "`].")] /// /// # Examples /// /// ``` #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")] /// #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(20);")] /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20); /// assert_eq!(foo.load(Ordering::SeqCst), 10); /// ``` #[inline] #[$stable] #[$cfg_cas] pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type { // SAFETY: data races are prevented by atomic intrinsics. unsafe { atomic_sub(self.v.get(), val, order) } } /// Bitwise "and" with the current value. /// /// Performs a bitwise "and" operation on the current value and the argument `val`, and /// sets the new value to the result. /// /// Returns the previous value. /// /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering /// of this operation. All ordering modes are possible. Note that using /// [`Acquire`] makes the store part of this operation [`Relaxed`], and /// using [`Release`] makes the load part [`Relaxed`]. /// /// **Note**: This method is only available on platforms that support atomic operations on #[doc = concat!("[`", $s_int_type, "`].")] /// /// # Examples /// /// ``` #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")] /// #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")] /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101); /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001); /// ``` #[inline] #[$stable] #[$cfg_cas] pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type { // SAFETY: data races are prevented by atomic intrinsics. unsafe { atomic_and(self.v.get(), val, order) } } /// Bitwise "nand" with the current value. 
/// /// Performs a bitwise "nand" operation on the current value and the argument `val`, and /// sets the new value to the result. /// /// Returns the previous value. /// /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering /// of this operation. All ordering modes are possible. Note that using /// [`Acquire`] makes the store part of this operation [`Relaxed`], and /// using [`Release`] makes the load part [`Relaxed`]. /// /// **Note**: This method is only available on platforms that support atomic operations on #[doc = concat!("[`", $s_int_type, "`].")] /// /// # Examples /// /// ``` #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")] /// #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0x13);")] /// assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13); /// assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31)); /// ``` #[inline] #[$stable_nand] #[$cfg_cas] pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type { // SAFETY: data races are prevented by atomic intrinsics. unsafe { atomic_nand(self.v.get(), val, order) } } /// Bitwise "or" with the current value. /// /// Performs a bitwise "or" operation on the current value and the argument `val`, and /// sets the new value to the result. /// /// Returns the previous value. /// /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering /// of this operation. All ordering modes are possible. Note that using /// [`Acquire`] makes the store part of this operation [`Relaxed`], and /// using [`Release`] makes the load part [`Relaxed`]. 
/// /// **Note**: This method is only available on platforms that support atomic operations on #[doc = concat!("[`", $s_int_type, "`].")] /// /// # Examples /// /// ``` #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")] /// #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")] /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101); /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111); /// ``` #[inline] #[$stable] #[$cfg_cas] pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type { // SAFETY: data races are prevented by atomic intrinsics. unsafe { atomic_or(self.v.get(), val, order) } } /// Bitwise "xor" with the current value. /// /// Performs a bitwise "xor" operation on the current value and the argument `val`, and /// sets the new value to the result. /// /// Returns the previous value. /// /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering /// of this operation. All ordering modes are possible. Note that using /// [`Acquire`] makes the store part of this operation [`Relaxed`], and /// using [`Release`] makes the load part [`Relaxed`]. /// /// **Note**: This method is only available on platforms that support atomic operations on #[doc = concat!("[`", $s_int_type, "`].")] /// /// # Examples /// /// ``` #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")] /// #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")] /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101); /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110); /// ``` #[inline] #[$stable] #[$cfg_cas] pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type { // SAFETY: data races are prevented by atomic intrinsics. unsafe { atomic_xor(self.v.get(), val, order) } } /// Fetches the value, and applies a function to it that returns an optional /// new value. 
Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else /// `Err(previous_value)`. /// /// Note: This may call the function multiple times if the value has been changed from other threads in /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied /// only once to the stored value. /// /// `fetch_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation. /// The first describes the required ordering for when the operation finally succeeds while the second /// describes the required ordering for loads. These correspond to the success and failure orderings of #[doc = concat!("[`", stringify!($atomic_type), "::compare_exchange`]")] /// respectively. /// /// Using [`Acquire`] as success ordering makes the store part /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`] /// and must be equivalent to or weaker than the success ordering. 
/// /// **Note**: This method is only available on platforms that support atomic operations on #[doc = concat!("[`", $s_int_type, "`].")] /// /// # Examples /// /// ```rust #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")] /// #[doc = concat!("let x = ", stringify!($atomic_type), "::new(7);")] /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(7)); /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(7)); /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(8)); /// assert_eq!(x.load(Ordering::SeqCst), 9); /// ``` #[inline] #[stable(feature = "no_more_cas", since = "1.45.0")] #[$cfg_cas] pub fn fetch_update<F>(&self, set_order: Ordering, fetch_order: Ordering, mut f: F) -> Result<$int_type, $int_type> where F: FnMut($int_type) -> Option<$int_type> { let mut prev = self.load(fetch_order); while let Some(next) = f(prev) { match self.compare_exchange_weak(prev, next, set_order, fetch_order) { x @ Ok(_) => return x, Err(next_prev) => prev = next_prev } } Err(prev) } /// Maximum with the current value. /// /// Finds the maximum of the current value and the argument `val`, and /// sets the new value to the result. /// /// Returns the previous value. /// /// `fetch_max` takes an [`Ordering`] argument which describes the memory ordering /// of this operation. All ordering modes are possible. Note that using /// [`Acquire`] makes the store part of this operation [`Relaxed`], and /// using [`Release`] makes the load part [`Relaxed`]. 
/// /// **Note**: This method is only available on platforms that support atomic operations on #[doc = concat!("[`", $s_int_type, "`].")] /// /// # Examples /// /// ``` #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")] /// #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")] /// assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23); /// assert_eq!(foo.load(Ordering::SeqCst), 42); /// ``` /// /// If you want to obtain the maximum value in one step, you can use the following: /// /// ``` #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")] /// #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")] /// let bar = 42; /// let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar); /// assert!(max_foo == 42); /// ``` #[inline] #[stable(feature = "atomic_min_max", since = "1.45.0")] #[$cfg_cas] pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type { // SAFETY: data races are prevented by atomic intrinsics. unsafe { $max_fn(self.v.get(), val, order) } } /// Minimum with the current value. /// /// Finds the minimum of the current value and the argument `val`, and /// sets the new value to the result. /// /// Returns the previous value. /// /// `fetch_min` takes an [`Ordering`] argument which describes the memory ordering /// of this operation. All ordering modes are possible. Note that using /// [`Acquire`] makes the store part of this operation [`Relaxed`], and /// using [`Release`] makes the load part [`Relaxed`]. 
/// /// **Note**: This method is only available on platforms that support atomic operations on #[doc = concat!("[`", $s_int_type, "`].")] /// /// # Examples /// /// ``` #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")] /// #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")] /// assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23); /// assert_eq!(foo.load(Ordering::Relaxed), 23); /// assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23); /// assert_eq!(foo.load(Ordering::Relaxed), 22); /// ``` /// /// If you want to obtain the minimum value in one step, you can use the following: /// /// ``` #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")] /// #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")] /// let bar = 12; /// let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar); /// assert_eq!(min_foo, 12); /// ``` #[inline] #[stable(feature = "atomic_min_max", since = "1.45.0")] #[$cfg_cas] pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type { // SAFETY: data races are prevented by atomic intrinsics. unsafe { $min_fn(self.v.get(), val, order) } } /// Returns a mutable pointer to the underlying integer. /// /// Doing non-atomic reads and writes on the resulting integer can be a data race. /// This method is mostly useful for FFI, where the function signature may use #[doc = concat!("`*mut ", stringify!($int_type), "` instead of `&", stringify!($atomic_type), "`.")] /// /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the /// atomic types work with interior mutability. All modifications of an atomic change the value /// through a shared reference, and can do so safely as long as they use atomic operations. Any /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same /// restriction: operations on it must be atomic. 
/// /// # Examples /// /// ```ignore (extern-declaration) /// # fn main() { #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")] /// /// extern "C" { #[doc = concat!(" fn my_atomic_op(arg: *mut ", stringify!($int_type), ");")] /// } /// #[doc = concat!("let mut atomic = ", stringify!($atomic_type), "::new(1);")] /// // SAFETY: Safe as long as `my_atomic_op` is atomic. /// unsafe { /// my_atomic_op(atomic.as_mut_ptr()); /// } /// # } /// ``` #[inline] #[unstable(feature = "atomic_mut_ptr", reason = "recently added", issue = "66893")] pub fn as_mut_ptr(&self) -> *mut $int_type { self.v.get() } } } } #[cfg(target_has_atomic_load_store = "8")] atomic_int! { cfg(target_has_atomic = "8"), cfg(target_has_atomic_equal_alignment = "8"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"), unstable(feature = "integer_atomics", issue = "32976"), "i8", "", atomic_min, atomic_max, 1, "AtomicI8::new(0)", i8 AtomicI8 ATOMIC_I8_INIT } #[cfg(target_has_atomic_load_store = "8")] atomic_int! 
{ cfg(target_has_atomic = "8"), cfg(target_has_atomic_equal_alignment = "8"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"), unstable(feature = "integer_atomics", issue = "32976"), "u8", "", atomic_umin, atomic_umax, 1, "AtomicU8::new(0)", u8 AtomicU8 ATOMIC_U8_INIT } #[cfg(target_has_atomic_load_store = "16")] atomic_int! { cfg(target_has_atomic = "16"), cfg(target_has_atomic_equal_alignment = "16"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"), unstable(feature = "integer_atomics", issue = "32976"), "i16", "", atomic_min, atomic_max, 2, "AtomicI16::new(0)", i16 AtomicI16 ATOMIC_I16_INIT } #[cfg(target_has_atomic_load_store = "16")] atomic_int! 
{ cfg(target_has_atomic = "16"), cfg(target_has_atomic_equal_alignment = "16"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"), unstable(feature = "integer_atomics", issue = "32976"), "u16", "", atomic_umin, atomic_umax, 2, "AtomicU16::new(0)", u16 AtomicU16 ATOMIC_U16_INIT } #[cfg(target_has_atomic_load_store = "32")] atomic_int! { cfg(target_has_atomic = "32"), cfg(target_has_atomic_equal_alignment = "32"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"), unstable(feature = "integer_atomics", issue = "32976"), "i32", "", atomic_min, atomic_max, 4, "AtomicI32::new(0)", i32 AtomicI32 ATOMIC_I32_INIT } #[cfg(target_has_atomic_load_store = "32")] atomic_int! 
{ cfg(target_has_atomic = "32"), cfg(target_has_atomic_equal_alignment = "32"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"), unstable(feature = "integer_atomics", issue = "32976"), "u32", "", atomic_umin, atomic_umax, 4, "AtomicU32::new(0)", u32 AtomicU32 ATOMIC_U32_INIT } #[cfg(target_has_atomic_load_store = "64")] atomic_int! { cfg(target_has_atomic = "64"), cfg(target_has_atomic_equal_alignment = "64"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"), unstable(feature = "integer_atomics", issue = "32976"), "i64", "", atomic_min, atomic_max, 8, "AtomicI64::new(0)", i64 AtomicI64 ATOMIC_I64_INIT } #[cfg(target_has_atomic_load_store = "64")] atomic_int! 
{ cfg(target_has_atomic = "64"), cfg(target_has_atomic_equal_alignment = "64"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), stable(feature = "integer_atomics_stable", since = "1.34.0"), rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"), unstable(feature = "integer_atomics", issue = "32976"), "u64", "", atomic_umin, atomic_umax, 8, "AtomicU64::new(0)", u64 AtomicU64 ATOMIC_U64_INIT } #[cfg(target_has_atomic_load_store = "128")] atomic_int! { cfg(target_has_atomic = "128"), cfg(target_has_atomic_equal_alignment = "128"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"), unstable(feature = "integer_atomics", issue = "32976"), "i128", "#![feature(integer_atomics)]\n\n", atomic_min, atomic_max, 16, "AtomicI128::new(0)", i128 AtomicI128 ATOMIC_I128_INIT } #[cfg(target_has_atomic_load_store = "128")] atomic_int! 
{ cfg(target_has_atomic = "128"), cfg(target_has_atomic_equal_alignment = "128"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"), unstable(feature = "integer_atomics", issue = "32976"), "u128", "#![feature(integer_atomics)]\n\n", atomic_umin, atomic_umax, 16, "AtomicU128::new(0)", u128 AtomicU128 ATOMIC_U128_INIT } macro_rules! atomic_int_ptr_sized { ( $($target_pointer_width:literal $align:literal)* ) => { $( #[cfg(target_has_atomic_load_store = "ptr")] #[cfg(target_pointer_width = $target_pointer_width)] atomic_int! { cfg(target_has_atomic = "ptr"), cfg(target_has_atomic_equal_alignment = "ptr"), stable(feature = "rust1", since = "1.0.0"), stable(feature = "extended_compare_and_swap", since = "1.10.0"), stable(feature = "atomic_debug", since = "1.3.0"), stable(feature = "atomic_access", since = "1.15.0"), stable(feature = "atomic_from", since = "1.23.0"), stable(feature = "atomic_nand", since = "1.27.0"), rustc_const_stable(feature = "const_integer_atomics", since = "1.24.0"), stable(feature = "rust1", since = "1.0.0"), "isize", "", atomic_min, atomic_max, $align, "AtomicIsize::new(0)", isize AtomicIsize ATOMIC_ISIZE_INIT } #[cfg(target_has_atomic_load_store = "ptr")] #[cfg(target_pointer_width = $target_pointer_width)] atomic_int! 
{ cfg(target_has_atomic = "ptr"), cfg(target_has_atomic_equal_alignment = "ptr"), stable(feature = "rust1", since = "1.0.0"), stable(feature = "extended_compare_and_swap", since = "1.10.0"), stable(feature = "atomic_debug", since = "1.3.0"), stable(feature = "atomic_access", since = "1.15.0"), stable(feature = "atomic_from", since = "1.23.0"), stable(feature = "atomic_nand", since = "1.27.0"), rustc_const_stable(feature = "const_integer_atomics", since = "1.24.0"), stable(feature = "rust1", since = "1.0.0"), "usize", "", atomic_umin, atomic_umax, $align, "AtomicUsize::new(0)", usize AtomicUsize ATOMIC_USIZE_INIT } )* }; } atomic_int_ptr_sized! { "16" 2 "32" 4 "64" 8 } #[inline] #[cfg(target_has_atomic = "8")] fn strongest_failure_ordering(order: Ordering) -> Ordering { match order { Release => Relaxed, Relaxed => Relaxed, SeqCst => SeqCst, Acquire => Acquire, AcqRel => Acquire, } } #[inline] unsafe fn atomic_store<T: Copy>(dst: *mut T, val: T, order: Ordering) { // SAFETY: the caller must uphold the safety contract for `atomic_store`. unsafe { match order { Release => intrinsics::atomic_store_rel(dst, val), Relaxed => intrinsics::atomic_store_relaxed(dst, val), SeqCst => intrinsics::atomic_store(dst, val), Acquire => panic!("there is no such thing as an acquire store"), AcqRel => panic!("there is no such thing as an acquire/release store"), } } } #[inline] unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_load`. 
unsafe { match order { Acquire => intrinsics::atomic_load_acq(dst), Relaxed => intrinsics::atomic_load_relaxed(dst), SeqCst => intrinsics::atomic_load(dst), Release => panic!("there is no such thing as a release load"), AcqRel => panic!("there is no such thing as an acquire/release load"), } } } #[inline] #[cfg(target_has_atomic = "8")] unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_swap`. unsafe { match order { Acquire => intrinsics::atomic_xchg_acq(dst, val), Release => intrinsics::atomic_xchg_rel(dst, val), AcqRel => intrinsics::atomic_xchg_acqrel(dst, val), Relaxed => intrinsics::atomic_xchg_relaxed(dst, val), SeqCst => intrinsics::atomic_xchg(dst, val), } } } /// Returns the previous value (like __sync_fetch_and_add). #[inline] #[cfg(target_has_atomic = "8")] unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_add`. unsafe { match order { Acquire => intrinsics::atomic_xadd_acq(dst, val), Release => intrinsics::atomic_xadd_rel(dst, val), AcqRel => intrinsics::atomic_xadd_acqrel(dst, val), Relaxed => intrinsics::atomic_xadd_relaxed(dst, val), SeqCst => intrinsics::atomic_xadd(dst, val), } } } /// Returns the previous value (like __sync_fetch_and_sub). #[inline] #[cfg(target_has_atomic = "8")] unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_sub`. 
unsafe { match order { Acquire => intrinsics::atomic_xsub_acq(dst, val), Release => intrinsics::atomic_xsub_rel(dst, val), AcqRel => intrinsics::atomic_xsub_acqrel(dst, val), Relaxed => intrinsics::atomic_xsub_relaxed(dst, val), SeqCst => intrinsics::atomic_xsub(dst, val), } } } #[inline] #[cfg(target_has_atomic = "8")] unsafe fn atomic_compare_exchange<T: Copy>( dst: *mut T, old: T, new: T, success: Ordering, failure: Ordering, ) -> Result<T, T> { // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange`. let (val, ok) = unsafe { match (success, failure) { (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new), (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new), (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new), (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new), (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new), (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new), (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new), (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new), (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new), (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"), (_, Release) => panic!("there is no such thing as a release failure ordering"), _ => panic!("a failure ordering can't be stronger than a success ordering"), } }; if ok { Ok(val) } else { Err(val) } } #[inline] #[cfg(target_has_atomic = "8")] unsafe fn atomic_compare_exchange_weak<T: Copy>( dst: *mut T, old: T, new: T, success: Ordering, failure: Ordering, ) -> Result<T, T> { // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange_weak`. 
let (val, ok) = unsafe { match (success, failure) { (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new), (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new), (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new), (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new), (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new), (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new), (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new), (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new), (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new), (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"), (_, Release) => panic!("there is no such thing as a release failure ordering"), _ => panic!("a failure ordering can't be stronger than a success ordering"), } }; if ok { Ok(val) } else { Err(val) } } #[inline] #[cfg(target_has_atomic = "8")] unsafe fn atomic_and<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_and` unsafe { match order { Acquire => intrinsics::atomic_and_acq(dst, val), Release => intrinsics::atomic_and_rel(dst, val), AcqRel => intrinsics::atomic_and_acqrel(dst, val), Relaxed => intrinsics::atomic_and_relaxed(dst, val), SeqCst => intrinsics::atomic_and(dst, val), } } } #[inline] #[cfg(target_has_atomic = "8")] unsafe fn atomic_nand<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_nand` unsafe { match order { Acquire => intrinsics::atomic_nand_acq(dst, val), Release => intrinsics::atomic_nand_rel(dst, val), AcqRel => intrinsics::atomic_nand_acqrel(dst, val), Relaxed => intrinsics::atomic_nand_relaxed(dst, val), SeqCst => intrinsics::atomic_nand(dst, val), } } } #[inline] #[cfg(target_has_atomic = "8")] unsafe fn 
atomic_or<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_or` unsafe { match order { Acquire => intrinsics::atomic_or_acq(dst, val), Release => intrinsics::atomic_or_rel(dst, val), AcqRel => intrinsics::atomic_or_acqrel(dst, val), Relaxed => intrinsics::atomic_or_relaxed(dst, val), SeqCst => intrinsics::atomic_or(dst, val), } } } #[inline] #[cfg(target_has_atomic = "8")] unsafe fn atomic_xor<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_xor` unsafe { match order { Acquire => intrinsics::atomic_xor_acq(dst, val), Release => intrinsics::atomic_xor_rel(dst, val), AcqRel => intrinsics::atomic_xor_acqrel(dst, val), Relaxed => intrinsics::atomic_xor_relaxed(dst, val), SeqCst => intrinsics::atomic_xor(dst, val), } } } /// returns the max value (signed comparison) #[inline] #[cfg(target_has_atomic = "8")] unsafe fn atomic_max<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_max` unsafe { match order { Acquire => intrinsics::atomic_max_acq(dst, val), Release => intrinsics::atomic_max_rel(dst, val), AcqRel => intrinsics::atomic_max_acqrel(dst, val), Relaxed => intrinsics::atomic_max_relaxed(dst, val), SeqCst => intrinsics::atomic_max(dst, val), } } } /// returns the min value (signed comparison) #[inline] #[cfg(target_has_atomic = "8")] unsafe fn atomic_min<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_min` unsafe { match order { Acquire => intrinsics::atomic_min_acq(dst, val), Release => intrinsics::atomic_min_rel(dst, val), AcqRel => intrinsics::atomic_min_acqrel(dst, val), Relaxed => intrinsics::atomic_min_relaxed(dst, val), SeqCst => intrinsics::atomic_min(dst, val), } } } /// returns the max value (unsigned comparison) #[inline] #[cfg(target_has_atomic = "8")] unsafe fn atomic_umax<T: 
Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_umax` unsafe { match order { Acquire => intrinsics::atomic_umax_acq(dst, val), Release => intrinsics::atomic_umax_rel(dst, val), AcqRel => intrinsics::atomic_umax_acqrel(dst, val), Relaxed => intrinsics::atomic_umax_relaxed(dst, val), SeqCst => intrinsics::atomic_umax(dst, val), } } } /// returns the min value (unsigned comparison) #[inline] #[cfg(target_has_atomic = "8")] unsafe fn atomic_umin<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T { // SAFETY: the caller must uphold the safety contract for `atomic_umin` unsafe { match order { Acquire => intrinsics::atomic_umin_acq(dst, val), Release => intrinsics::atomic_umin_rel(dst, val), AcqRel => intrinsics::atomic_umin_acqrel(dst, val), Relaxed => intrinsics::atomic_umin_relaxed(dst, val), SeqCst => intrinsics::atomic_umin(dst, val), } } } /// An atomic fence. /// /// Depending on the specified order, a fence prevents the compiler and CPU from /// reordering certain types of memory operations around it. /// That creates synchronizes-with relationships between it and atomic operations /// or fences in other threads. /// /// A fence 'A' which has (at least) [`Release`] ordering semantics, synchronizes /// with a fence 'B' with (at least) [`Acquire`] semantics, if and only if there /// exist operations X and Y, both operating on some atomic object 'M' such /// that A is sequenced before X, Y is synchronized before B and Y observes /// the change to M. This provides a happens-before dependence between A and B. /// /// ```text /// Thread 1 Thread 2 /// /// fence(Release); A -------------- /// x.store(3, Relaxed); X --------- | /// | | /// | | /// -------------> Y if x.load(Relaxed) == 3 { /// |-------> B fence(Acquire); /// ... /// } /// ``` /// /// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize /// with a fence. 
/// /// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`] /// and [`Release`] semantics, participates in the global program order of the /// other [`SeqCst`] operations and/or fences. /// /// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings. /// /// # Panics /// /// Panics if `order` is [`Relaxed`]. /// /// # Examples /// /// ``` /// use std::sync::atomic::AtomicBool; /// use std::sync::atomic::fence; /// use std::sync::atomic::Ordering; /// /// // A mutual exclusion primitive based on spinlock. /// pub struct Mutex { /// flag: AtomicBool, /// } /// /// impl Mutex { /// pub fn new() -> Mutex { /// Mutex { /// flag: AtomicBool::new(false), /// } /// } /// /// pub fn lock(&self) { /// // Wait until the old value is `false`. /// while self /// .flag /// .compare_exchange_weak(false, true, Ordering::Relaxed, Ordering::Relaxed) /// .is_err() /// {} /// // This fence synchronizes-with store in `unlock`. /// fence(Ordering::Acquire); /// } /// /// pub fn unlock(&self) { /// self.flag.store(false, Ordering::Release); /// } /// } /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] #[rustc_diagnostic_item = "fence"] pub fn fence(order: Ordering) { // SAFETY: using an atomic fence is safe. unsafe { match order { Acquire => intrinsics::atomic_fence_acq(), Release => intrinsics::atomic_fence_rel(), AcqRel => intrinsics::atomic_fence_acqrel(), SeqCst => intrinsics::atomic_fence(), Relaxed => panic!("there is no such thing as a relaxed fence"), } } } /// A compiler memory fence. /// /// `compiler_fence` does not emit any machine code, but restricts the kinds /// of memory re-ordering the compiler is allowed to do. Specifically, depending on /// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads /// or writes from before or after the call to the other side of the call to /// `compiler_fence`. Note that it does **not** prevent the *hardware* /// from doing such re-ordering. 
This is not a problem in a single-threaded, /// execution context, but when other threads may modify memory at the same /// time, stronger synchronization primitives such as [`fence`] are required. /// /// The re-ordering prevented by the different ordering semantics are: /// /// - with [`SeqCst`], no re-ordering of reads and writes across this point is allowed. /// - with [`Release`], preceding reads and writes cannot be moved past subsequent writes. /// - with [`Acquire`], subsequent reads and writes cannot be moved ahead of preceding reads. /// - with [`AcqRel`], both of the above rules are enforced. /// /// `compiler_fence` is generally only useful for preventing a thread from /// racing *with itself*. That is, if a given thread is executing one piece /// of code, and is then interrupted, and starts executing code elsewhere /// (while still in the same thread, and conceptually still on the same /// core). In traditional programs, this can only occur when a signal /// handler is registered. In more low-level code, such situations can also /// arise when handling interrupts, when implementing green threads with /// pre-emption, etc. Curious readers are encouraged to read the Linux kernel's /// discussion of [memory barriers]. /// /// # Panics /// /// Panics if `order` is [`Relaxed`]. /// /// # Examples /// /// Without `compiler_fence`, the `assert_eq!` in following code /// is *not* guaranteed to succeed, despite everything happening in a single thread. /// To see why, remember that the compiler is free to swap the stores to /// `IMPORTANT_VARIABLE` and `IS_READY` since they are both /// `Ordering::Relaxed`. If it does, and the signal handler is invoked right /// after `IS_READY` is updated, then the signal handler will see /// `IS_READY=1`, but `IMPORTANT_VARIABLE=0`. /// Using a `compiler_fence` remedies this situation. 
///
/// ```
/// use std::sync::atomic::{AtomicBool, AtomicUsize};
/// use std::sync::atomic::Ordering;
/// use std::sync::atomic::compiler_fence;
///
/// static IMPORTANT_VARIABLE: AtomicUsize = AtomicUsize::new(0);
/// static IS_READY: AtomicBool = AtomicBool::new(false);
///
/// fn main() {
///     IMPORTANT_VARIABLE.store(42, Ordering::Relaxed);
///     // prevent earlier writes from being moved beyond this point
///     compiler_fence(Ordering::Release);
///     IS_READY.store(true, Ordering::Relaxed);
/// }
///
/// fn signal_handler() {
///     if IS_READY.load(Ordering::Relaxed) {
///         assert_eq!(IMPORTANT_VARIABLE.load(Ordering::Relaxed), 42);
///     }
/// }
/// ```
///
/// [memory barriers]: https://www.kernel.org/doc/Documentation/memory-barriers.txt
#[inline]
#[stable(feature = "compiler_fences", since = "1.21.0")]
#[rustc_diagnostic_item = "compiler_fence"]
pub fn compiler_fence(order: Ordering) {
    // SAFETY: using an atomic fence is safe.
    unsafe {
        // Map each supported ordering onto the matching single-threaded
        // fence intrinsic. A "relaxed" fence has no ordering semantics at
        // all, so it is rejected at runtime rather than silently accepted.
        match order {
            Acquire => intrinsics::atomic_singlethreadfence_acq(),
            Release => intrinsics::atomic_singlethreadfence_rel(),
            AcqRel => intrinsics::atomic_singlethreadfence_acqrel(),
            SeqCst => intrinsics::atomic_singlethreadfence(),
            Relaxed => panic!("there is no such thing as a relaxed compiler fence"),
        }
    }
}

// Formatting an atomic prints its *current* value, which is read with a
// SeqCst load — so `Debug`/`Pointer` output is itself an atomic operation.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
impl fmt::Debug for AtomicBool {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
    }
}

#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
impl<T> fmt::Debug for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
    }
}

#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "atomic_pointer", since = "1.24.0")]
impl<T> fmt::Pointer for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&self.load(Ordering::SeqCst), f)
    }
}

/// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
///
/// This function is deprecated in favor of [`hint::spin_loop`].
///
/// [`hint::spin_loop`]: crate::hint::spin_loop
#[inline]
#[stable(feature = "spin_loop_hint", since = "1.24.0")]
#[rustc_deprecated(since = "1.51.0", reason = "use hint::spin_loop instead")]
pub fn spin_loop_hint() {
    // Kept as a thin forwarding shim so existing callers keep working.
    spin_loop()
}
41.652545
120
0.573588
0a68bd6f8e49b0ef10ceb7b0bc501f02a46ebb88
1,013
//! Utilities for interacting with Cosmos DB's permissions system. //! //! You can learn more about how the system works [here](https://docs.microsoft.com/rest/api/cosmos-db/permissions). mod authorization_token; mod permission; mod permission_token; pub use authorization_token::AuthorizationToken; pub use authorization_token::AuthorizationTokenParsingError; pub use permission::{Permission, PermissionMode}; pub use permission_token::PermissionToken; pub use permission_token::PermissionTokenParsingError; use crate::headers; use azure_core::AddAsHeader; use http::request::Builder; /// The amount of time before authorization expires #[derive(Debug, Clone, Copy)] pub struct ExpirySeconds(u64); impl ExpirySeconds { /// Create an `ExpirySeconds` from a `u64` pub fn new(secs: u64) -> Self { Self(secs) } } impl AddAsHeader for ExpirySeconds { fn add_as_header(&self, builder: Builder) -> Builder { builder.header(headers::HEADER_DOCUMENTDB_EXPIRY_SECONDS, self.0) } }
29.794118
116
0.75617
d98d243b61431c7f67a416183f6bd22a254ab10f
1,735
//! Endpoint binding for `following/requests/list`.

use crate::model::following::FollowRequest;
use serde::Serialize;

/// Request for the `following/requests/list` endpoint.
///
/// The endpoint takes no parameters, so the struct has no fields;
/// `Request::default()` is the only value you need.
#[derive(Serialize, Default, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Request {}

impl misskey_core::Request for Request {
    // The server replies with the list of pending follow requests.
    type Response = Vec<FollowRequest>;
    const ENDPOINT: &'static str = "following/requests/list";
}

#[cfg(test)]
mod tests {
    use super::Request;
    use crate::test::{ClientExt, TestClient};

    #[tokio::test]
    async fn request() {
        let client = TestClient::new();
        // Create a fresh account to receive a follow request.
        let (new_user, new_client) = client.admin.create_user().await;
        // Lock the new account (`is_locked: Some(true)`) so that following it
        // produces a *pending* follow request instead of an immediate follow.
        // The cfg-gated fields track which Misskey versions expose them.
        new_client
            .test(crate::endpoint::i::update::Request {
                name: None,
                description: None,
                lang: None,
                location: None,
                birthday: None,
                avatar_id: None,
                banner_id: None,
                fields: None,
                is_locked: Some(true),
                #[cfg(feature = "12-63-0")]
                is_explorable: None,
                careful_bot: None,
                auto_accept_followed: None,
                is_bot: None,
                is_cat: None,
                #[cfg(not(feature = "12-55-0"))]
                auto_watch: None,
                inject_featured_note: None,
                always_mark_nsfw: None,
                pinned_page_id: None,
                muted_words: None,
                #[cfg(feature = "12-60-0")]
                no_crawle: None,
            })
            .await;
        // Issue a follow against the locked account, creating a pending request…
        client
            .user
            .test(crate::endpoint::following::create::Request {
                user_id: new_user.id,
            })
            .await;
        // …then list the pending requests from the locked account's side.
        new_client.test(Request::default()).await;
    }
}
28.442623
70
0.500288
4b58c33de08715d1e59a524d51cacc95e679a035
2,088
use unicode_segmentation::UnicodeSegmentation; use super::{Context, Module, RootModuleConfig}; use crate::configs::git_branch::GitBranchConfig; /// Creates a module with the Git branch in the current directory /// /// Will display the branch name if the current directory is a git repo pub fn module<'a>(context: &'a Context) -> Option<Module<'a>> { let mut module = context.new_module("git_branch"); let config = GitBranchConfig::try_load(module.config); module.set_style(config.style); module.get_prefix().set_value("on "); let truncation_symbol = get_graphemes(config.truncation_symbol, 1); module.create_segment("symbol", &config.symbol); // TODO: Once error handling is implemented, warn the user if their config // truncation length is nonsensical let len = if config.truncation_length <= 0 { log::warn!( "\"truncation_length\" should be a positive value, found {}", config.truncation_length ); std::usize::MAX } else { config.truncation_length as usize }; let repo = context.get_repo().ok()?; // bare repos don't have a branch name, so `repo.branch.as_ref` would return None, // but git treats "master" as the default branch name let default_branch = String::from("master"); let branch_name = repo.branch.as_ref().unwrap_or(&default_branch); let truncated_graphemes = get_graphemes(&branch_name, len); // The truncation symbol should only be added if we truncated let truncated_and_symbol = if len < graphemes_len(&branch_name) { truncated_graphemes + &truncation_symbol } else { truncated_graphemes }; module.create_segment( "name", &config.branch_name.with_value(&truncated_and_symbol), ); Some(module) } fn get_graphemes(text: &str, length: usize) -> String { UnicodeSegmentation::graphemes(text, true) .take(length) .collect::<Vec<&str>>() .concat() } fn graphemes_len(text: &str) -> usize { UnicodeSegmentation::graphemes(&text[..], true).count() }
33.142857
86
0.673851
d525e42220d1c2dbf2208f59d14e903cfa780878
3,229
/// Callback function that serves as the parameter of
/// [`Trace::trace`](trait.Trace.html#method.trace).
pub type Tracer<'a> = dyn FnMut(*const ()) + 'a;

/// Defines how the cycle collector should collect a type.
///
/// ## Customized `Drop` implementation
///
/// The [`Drop`] implementation should avoid dereferencing other [`Cc<T>`]
/// objects. Failing to do so might cause panic or undefined behavior. For
/// example, `T1` has a field `Cc<T2>`. The collector might have already
/// dropped `T2` by the time `T1::drop` runs.
///
/// The `Drop` implementation should also be careful about cloning
/// (resurrecting) [`Cc<T>`] objects. If it must do so, the `trace`
/// implementation should match by avoiding visiting those cloned objects.
///
/// ## The `'static` bound
///
/// Types tracked by the collector can potentially be kept alive forever.
/// Therefore types with non-static references are not allowed.
pub trait Trace: 'static {
    /// Define how to visit values referred by this value.
    ///
    /// For example, if `self.x` is a value referred by `self`,
    /// call `self.x.trace(tracer)` to visit it.
    ///
    /// The values that are visited must match the `Drop::drop`
    /// implementation. Otherwise memory leak or panic might
    /// happen. After the panic, dereferencing already collected
    /// `Cc<T>` can trigger:
    /// - Undefined behavior on release build.
    /// - Another panic on debug build.
    ///
    /// Ideally this can be generated by the compiler, since the
    /// compiler already knows how to generate `Drop::drop`.
    fn trace(&self, tracer: &mut Tracer) {
        // Default: visit nothing. Types that own `Cc<T>` values must
        // override this and forward `tracer` to each of them.
        let _ = tracer;
    }

    /// Whether this type should be tracked by the collector.
    ///
    /// Types that might include `Cc<T>` where `T` can form a cycle should
    /// be tracked. This allows the collector to visit the `Cc` values from
    /// its parents and count references correctly.
    ///
    /// If a type `T` is tracked, `Cc<T>` will be 3 `usize` larger and the
    /// collector will check them.
    ///
    /// For example,
    ///
    /// - `Vec<u8>` is not tracked. It does not include any kind of `Cc<T>`.
    /// - `Box<Cc<u8>>` is not tracked. It includes `Cc<u8>` but `u8` cannot
    ///   create cycles.
    /// - `Box<dyn Trace>` is tracked. The trait object can be anything,
    ///   including any kinds of types that contains a `Cc<T>`.
    ///
    /// Usually, concrete Rust types can opt-out the cycle collector.
    /// There are a few exceptions:
    ///
    /// - Trait objects, and types containing trait objects. Trait objects
    ///   can be anything so they should be tracked.
    /// - Recursive types. Such as, `struct S(RefCell<Option<Rc<Box<S>>>>)`.
    ///   Those types need an explicit name like `S`, and a manual
    ///   implementation of the [`Trace`](trait.Trace.html) trait.
    ///   That implementation should make `is_type_tracked` return `true`
    ///   directly.
    ///
    /// This is an optimization for performance. When in-doubt, return `true`
    /// for correctness.
    fn is_type_tracked() -> bool
    where
        Self: Sized,
    {
        // Conservative fallback: opt in to the collector for correctness.
        true
    }
}
40.873418
77
0.638588
1111c1db3ddacbdb4d7fd5f692e4f8754799e9d7
2,738
// NOTE(review): svd2rust-style generated register accessors — presumably
// regenerated from the device's SVD description; prefer fixing the SVD over
// hand-editing this file. TODO confirm generation pipeline.
#[doc = "Reader of register WA"]
pub type R = crate::R<u32, super::WA>;
#[doc = "Writer for register WA"]
pub type W = crate::W<u32, super::WA>;
#[doc = "Register WA `reset()`'s with value 0"]
impl crate::ResetValue for super::WA {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Write access to peripheral region n detected\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum WA_A {
    #[doc = "0: Event not generated"]
    NOTGENERATED = 0,
    #[doc = "1: Event generated"]
    GENERATED = 1,
}
impl From<WA_A> for bool {
    #[inline(always)]
    fn from(variant: WA_A) -> Self {
        // Only discriminants 0 and 1 exist, so "non-zero" means GENERATED.
        variant as u8 != 0
    }
}
#[doc = "Reader of field `WA`"]
pub type WA_R = crate::R<bool, WA_A>;
impl WA_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> WA_A {
        match self.bits {
            false => WA_A::NOTGENERATED,
            true => WA_A::GENERATED,
        }
    }
    #[doc = "Checks if the value of the field is `NOTGENERATED`"]
    #[inline(always)]
    pub fn is_not_generated(&self) -> bool {
        *self == WA_A::NOTGENERATED
    }
    #[doc = "Checks if the value of the field is `GENERATED`"]
    #[inline(always)]
    pub fn is_generated(&self) -> bool {
        *self == WA_A::GENERATED
    }
}
#[doc = "Write proxy for field `WA`"]
pub struct WA_W<'a> {
    w: &'a mut W,
}
impl<'a> WA_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: WA_A) -> &'a mut W {
        {
            self.bit(variant.into())
        }
    }
    #[doc = "Event not generated"]
    #[inline(always)]
    pub fn not_generated(self) -> &'a mut W {
        self.variant(WA_A::NOTGENERATED)
    }
    #[doc = "Event generated"]
    #[inline(always)]
    pub fn generated(self) -> &'a mut W {
        self.variant(WA_A::GENERATED)
    }
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // The WA field occupies only bit 0: clear it, then OR in the new value.
        self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
        self.w
    }
}
impl R {
    #[doc = "Bit 0 - Write access to peripheral region n detected"]
    #[inline(always)]
    pub fn wa(&self) -> WA_R {
        WA_R::new((self.bits & 0x01) != 0)
    }
}
impl W {
    #[doc = "Bit 0 - Write access to peripheral region n detected"]
    #[inline(always)]
    pub fn wa(&mut self) -> WA_W {
        WA_W { w: self }
    }
}
26.843137
76
0.549671
2268a62bd1ebbe661cafb78a17afb2dd1a6b9f1c
3,093
/*! A submodule for testing literal blocks. Copyright © 2020 Santtu Söderholm */ use super::*; #[cfg(test)] #[test] fn literal_block_01() { let src = " :: > This is a literal block of text, > indicated by the \"::\" at the end of last paragraph. " .lines() .map(|s| s.to_string()) .collect::<Vec<String>>(); let mut doctree = DocTree::new(PathBuf::from("test")); let mut parser = Parser::new(&src, doctree, 0, 0, State::Body, 0); doctree = parser.parse().unwrap_tree(); doctree = doctree.walk_to_root(); doctree.print_tree(); match doctree .shared_child(0).unwrap().shared_data() { TreeNodeType::LiteralBlock { text } => { assert_eq!(text.as_str(), "This is a literal block of text,\nindicated by the \"::\" at the end of last paragraph.") } _ => panic!(), } } #[test] fn literal_block_02() { let src = r#" :: An indented literal block with multiple lines Even more indent here. And even more... Return to original level of indentation This line ends the literal block, as its indentation is on the same level as that of the literal block indicator "::". "# .lines() .map(|s| s.to_string()) .collect::<Vec<String>>(); let mut doctree = DocTree::new(PathBuf::from("test")); let mut parser = Parser::new(&src, doctree, 0, 0, State::Body, 0); doctree = parser.parse().unwrap_tree(); doctree = doctree.walk_to_root(); doctree.print_tree(); match doctree .shared_child(0).unwrap().shared_data() { TreeNodeType::LiteralBlock { text } => { assert_eq!(text.as_str(), "An indented literal block with\nmultiple lines\n\n Even more indent here.\n\n And even more...\nReturn to original level of indentation\n") } _ => panic!(), } } #[test] fn code_01() { let src = r#" .. code:: python :number lines: 3 :name: reference-name :class: some-class and-another-one def shout(text): print(text + "!") def main(): text = "abcde" shout(text) This paragraph ends the literal block. 
"# .lines() .map(|s| s.to_string()) .collect::<Vec<String>>(); let mut doctree = DocTree::new(PathBuf::from("test")); let mut parser = Parser::new(&src, doctree, 0, 0, State::Body, 0); doctree = parser.parse().unwrap_tree(); doctree = doctree.walk_to_root(); doctree.print_tree(); match doctree .shared_child(0).unwrap().shared_data() { TreeNodeType::Code { text, language, number_lines, name, class, } => { assert_eq!(text.as_str(), "def shout(text):\n print(text + \"!\")\n\ndef main():\n text = \"abcde\"\n shout(text)\n"); assert_eq!(language.as_ref().unwrap().as_str(), "python"); assert_eq!(name.as_ref().unwrap().as_str(), "reference-name"); assert_eq!( class.as_ref().unwrap().as_str(), "some-class and-another-one" ); } _ => panic!(), } }
23.976744
182
0.57711
79d80d6363310f9e1f5d1c39a2ff4c3783eba01e
736
pub struct Solution {}

// ------------------------------- solution begin -------------------------------
impl Solution {
    /// Reverses the decimal digits of `x`, returning 0 if the reversed value
    /// overflows `i32`.
    ///
    /// Negative inputs work unchanged: Rust's `%` keeps the sign of the
    /// dividend, so each extracted digit carries the sign of `x`.
    pub fn reverse(x: i32) -> i32 {
        let mut reversed: i32 = 0;
        let mut remaining = x;
        while remaining != 0 {
            let digit = remaining % 10;
            // checked_mul/checked_add yield None on overflow, which the
            // problem statement maps to 0. (Replaces the manual
            // `overflowing_*` tuple plumbing and `== true` comparisons.)
            reversed = match reversed.checked_mul(10).and_then(|r| r.checked_add(digit)) {
                Some(r) => r,
                None => return 0,
            };
            remaining /= 10;
        }
        reversed
    }
}
// ------------------------------- solution end ---------------------------------

fn main() {
    let input: i32 = 123;
    let output = Solution::reverse(input);
    println!("Input: {}", input);
    println!("Output: {}", output);
}
22.30303
81
0.429348
16ac2043406ee5f13da0238916d2899427ddba39
16,389
// Copyright 2019-2021 Parity Technologies (UK) Ltd. // // Permission is hereby granted, free of charge, to any // person obtaining a copy of this software and associated // documentation files (the "Software"), to deal in the // Software without restriction, including without // limitation the rights to use, copy, modify, merge, // publish, distribute, sublicense, and/or sell copies of // the Software, and to permit persons to whom the Software // is furnished to do so, subject to the following // conditions: // // The above copyright notice and this permission notice // shall be included in all copies or substantial portions // of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF // ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED // TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A // PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT // SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY // CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR // IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. //! Handles and monitors JSONRPC v2 method calls and subscriptions //! //! Definitions: //! //! - RequestId: request ID in the JSONRPC-v2 specification //! > **Note**: The spec allow number, string or null but this crate only supports numbers. //! - SubscriptionId: unique ID generated by server use std::collections::{hash_map::Entry, HashMap}; use crate::Error; use futures_channel::{mpsc, oneshot}; use jsonrpsee_types::SubscriptionId; use rustc_hash::FxHashMap; use serde_json::value::Value as JsonValue; #[derive(Debug)] enum Kind { PendingMethodCall(PendingCallOneshot), PendingSubscription((RequestId, PendingSubscriptionOneshot, UnsubscribeMethod)), Subscription((RequestId, SubscriptionSink, UnsubscribeMethod)), } #[derive(Debug)] /// Indicates the status of a given request/response. 
pub(crate) enum RequestStatus { /// The method call is waiting for a response, PendingMethodCall, /// The subscription is waiting for a response to become an active subscription. PendingSubscription, /// An active subscription. Subscription, /// Invalid request ID. Invalid, } type PendingCallOneshot = Option<oneshot::Sender<Result<JsonValue, Error>>>; type PendingBatchOneshot = oneshot::Sender<Result<Vec<JsonValue>, Error>>; type PendingSubscriptionOneshot = oneshot::Sender<Result<(mpsc::Receiver<JsonValue>, SubscriptionId<'static>), Error>>; type SubscriptionSink = mpsc::Sender<JsonValue>; type UnsubscribeMethod = String; type RequestId = u64; #[derive(Debug)] /// Batch state. pub(crate) struct BatchState { /// Order that the request was performed in. pub(crate) order: FxHashMap<RequestId, usize>, /// Oneshot send back. pub(crate) send_back: PendingBatchOneshot, } #[derive(Debug, Default)] /// Manages and monitors JSONRPC v2 method calls and subscriptions. pub(crate) struct RequestManager { /// List of requests that are waiting for a response from the server. // NOTE: FnvHashMap is used here because RequestId is not under the caller's control and is known to be a short // key. requests: FxHashMap<RequestId, Kind>, /// Reverse lookup, to find a request ID in constant time by `subscription ID` instead of looking through all /// requests. subscriptions: HashMap<SubscriptionId<'static>, RequestId>, /// Pending batch requests batches: FxHashMap<Vec<RequestId>, BatchState>, /// Registered Methods for incoming notifications notification_handlers: HashMap<String, SubscriptionSink>, } impl RequestManager { /// Create a new `RequestManager`. pub(crate) fn new() -> Self { Self::default() } /// Tries to insert a new pending call. /// /// Returns `Ok` if the pending request was successfully inserted otherwise `Err`. 
pub(crate) fn insert_pending_call( &mut self, id: RequestId, send_back: PendingCallOneshot, ) -> Result<(), PendingCallOneshot> { if let Entry::Vacant(v) = self.requests.entry(id) { v.insert(Kind::PendingMethodCall(send_back)); Ok(()) } else { Err(send_back) } } /// Tries to insert a new batch request /// /// Returns `Ok` if the pending request was successfully inserted otherwise `Err`. pub(crate) fn insert_pending_batch( &mut self, mut batch: Vec<RequestId>, send_back: PendingBatchOneshot, ) -> Result<(), PendingBatchOneshot> { let mut order = FxHashMap::with_capacity_and_hasher(batch.len(), Default::default()); for (idx, batch_id) in batch.iter().enumerate() { order.insert(*batch_id, idx); } batch.sort_unstable(); if let Entry::Vacant(v) = self.batches.entry(batch) { v.insert(BatchState { order, send_back }); Ok(()) } else { Err(send_back) } } /// Tries to insert a new pending subscription and reserves a slot for a "potential" unsubscription request. /// /// Returns `Ok` if the pending request was successfully inserted otherwise `Err`. pub(crate) fn insert_pending_subscription( &mut self, sub_req_id: RequestId, unsub_req_id: RequestId, send_back: PendingSubscriptionOneshot, unsubscribe_method: UnsubscribeMethod, ) -> Result<(), PendingSubscriptionOneshot> { // The request IDs are not in the manager and the `sub_id` and `unsub_id` are not equal. if !self.requests.contains_key(&sub_req_id) && !self.requests.contains_key(&unsub_req_id) && sub_req_id != unsub_req_id { self.requests.insert(sub_req_id, Kind::PendingSubscription((unsub_req_id, send_back, unsubscribe_method))); self.requests.insert(unsub_req_id, Kind::PendingMethodCall(None)); Ok(()) } else { Err(send_back) } } /// Tries to insert a new subscription. /// /// Returns `Ok` if the pending request was successfully inserted otherwise `Err`. 
pub(crate) fn insert_subscription( &mut self, sub_req_id: RequestId, unsub_req_id: RequestId, subscription_id: SubscriptionId<'static>, send_back: SubscriptionSink, unsubscribe_method: UnsubscribeMethod, ) -> Result<(), SubscriptionSink> { if let (Entry::Vacant(request), Entry::Vacant(subscription)) = (self.requests.entry(sub_req_id), self.subscriptions.entry(subscription_id)) { request.insert(Kind::Subscription((unsub_req_id, send_back, unsubscribe_method))); subscription.insert(sub_req_id); Ok(()) } else { Err(send_back) } } /// Inserts a handler for incoming notifications pub(crate) fn insert_notification_handler( &mut self, method: &str, send_back: SubscriptionSink, ) -> Result<(), Error> { if let Entry::Vacant(handle) = self.notification_handlers.entry(method.to_owned()) { handle.insert(send_back); Ok(()) } else { Err(Error::MethodAlreadyRegistered(method.to_owned())) } } /// Removes a notification handler pub(crate) fn remove_notification_handler(&mut self, method: String) -> Result<(), Error> { if self.notification_handlers.remove(&method).is_some() { Ok(()) } else { Err(Error::UnregisteredNotification(method)) } } /// Tries to complete a pending subscription. /// /// Returns `Some` if the subscription was completed otherwise `None`. pub(crate) fn complete_pending_subscription( &mut self, request_id: RequestId, ) -> Option<(RequestId, PendingSubscriptionOneshot, UnsubscribeMethod)> { match self.requests.entry(request_id) { Entry::Occupied(request) if matches!(request.get(), Kind::PendingSubscription(_)) => { let (_req_id, kind) = request.remove_entry(); if let Kind::PendingSubscription(send_back) = kind { Some(send_back) } else { unreachable!("Pending subscription is Pending subscription checked above; qed"); } } _ => None, } } /// Tries to complete a pending batch request /// /// Returns `Some` if the subscription was completed otherwise `None`. 
pub(crate) fn complete_pending_batch(&mut self, batch: Vec<RequestId>) -> Option<BatchState> { match self.batches.entry(batch) { Entry::Occupied(request) => { let (_digest, state) = request.remove_entry(); Some(state) } _ => None, } } /// Tries to complete a pending call.. /// /// Returns `Some` if the call was completed otherwise `None`. pub(crate) fn complete_pending_call(&mut self, request_id: RequestId) -> Option<PendingCallOneshot> { match self.requests.entry(request_id) { Entry::Occupied(request) if matches!(request.get(), Kind::PendingMethodCall(_)) => { let (_req_id, kind) = request.remove_entry(); if let Kind::PendingMethodCall(send_back) = kind { Some(send_back) } else { unreachable!("Pending call is Pending call checked above; qed"); } } _ => None, } } /// Tries to remove a subscription. /// /// Returns `Some` if the subscription was removed otherwise `None`. pub(crate) fn remove_subscription( &mut self, request_id: RequestId, subscription_id: SubscriptionId<'static>, ) -> Option<(RequestId, SubscriptionSink, UnsubscribeMethod, SubscriptionId)> { match (self.requests.entry(request_id), self.subscriptions.entry(subscription_id)) { (Entry::Occupied(request), Entry::Occupied(subscription)) if matches!(request.get(), Kind::Subscription(_)) => { let (_req_id, kind) = request.remove_entry(); let (sub_id, _req_id) = subscription.remove_entry(); if let Kind::Subscription((unsub_req_id, send_back, unsub)) = kind { Some((unsub_req_id, send_back, unsub, sub_id)) } else { unreachable!("Subscription is Subscription checked above; qed"); } } _ => None, } } /// Returns the status of a request ID pub(crate) fn request_status(&mut self, id: &RequestId) -> RequestStatus { self.requests.get(id).map_or(RequestStatus::Invalid, |kind| match kind { Kind::PendingMethodCall(_) => RequestStatus::PendingMethodCall, Kind::PendingSubscription(_) => RequestStatus::PendingSubscription, Kind::Subscription(_) => RequestStatus::Subscription, }) } /// Get a mutable reference to underlying 
`Sink` in order to send messages to the subscription. /// /// Returns `Some` if the `request_id` was registered as a subscription otherwise `None`. pub(crate) fn as_subscription_mut(&mut self, request_id: &RequestId) -> Option<&mut SubscriptionSink> { if let Some(Kind::Subscription((_, sink, _))) = self.requests.get_mut(request_id) { Some(sink) } else { None } } /// Get a mutable reference to underlying `Sink` in order to send incoming notifications to the subscription. /// /// Returns `Some` if the `method` was registered as a NotificationHandler otherwise `None`. pub(crate) fn as_notification_handler_mut(&mut self, method: String) -> Option<&mut SubscriptionSink> { self.notification_handlers.get_mut(&method) } /// Reverse lookup to get the request ID for a subscription ID. /// /// Returns `Some` if the subscription ID was registered as a subscription otherwise `None`. pub(crate) fn get_request_id_by_subscription_id(&self, sub_id: &SubscriptionId) -> Option<RequestId> { self.subscriptions.get(sub_id).copied() } } #[cfg(test)] mod tests { use super::{Error, RequestManager}; use futures_channel::{mpsc, oneshot}; use jsonrpsee_types::SubscriptionId; use serde_json::Value as JsonValue; #[test] fn insert_remove_pending_request_works() { let (request_tx, _) = oneshot::channel::<Result<JsonValue, Error>>(); let mut manager = RequestManager::new(); assert!(manager.insert_pending_call(0, Some(request_tx)).is_ok()); assert!(manager.complete_pending_call(0).is_some()); } #[test] fn insert_remove_subscription_works() { let (pending_sub_tx, _) = oneshot::channel::<Result<(mpsc::Receiver<JsonValue>, SubscriptionId), Error>>(); let (sub_tx, _) = mpsc::channel::<JsonValue>(1); let mut manager = RequestManager::new(); assert!(manager.insert_pending_subscription(1, 2, pending_sub_tx, "unsubscribe_method".into()).is_ok()); let (unsub_req_id, _send_back_oneshot, unsubscribe_method) = manager.complete_pending_subscription(1).unwrap(); assert_eq!(unsub_req_id, 2); assert!(manager 
.insert_subscription(1, 2, SubscriptionId::Str("uniq_id_from_server".into()), sub_tx, unsubscribe_method) .is_ok()); assert!(manager.as_subscription_mut(&1).is_some()); assert!(manager.remove_subscription(1, SubscriptionId::Str("uniq_id_from_server".into())).is_some()); } #[test] fn insert_subscription_with_same_sub_and_unsub_id_should_err() { let (tx1, _) = oneshot::channel::<Result<(mpsc::Receiver<JsonValue>, SubscriptionId), Error>>(); let (tx2, _) = oneshot::channel::<Result<(mpsc::Receiver<JsonValue>, SubscriptionId), Error>>(); let (tx3, _) = oneshot::channel::<Result<(mpsc::Receiver<JsonValue>, SubscriptionId), Error>>(); let (tx4, _) = oneshot::channel::<Result<(mpsc::Receiver<JsonValue>, SubscriptionId), Error>>(); let mut manager = RequestManager::new(); assert!(manager.insert_pending_subscription(1, 1, tx1, "unsubscribe_method".into()).is_err()); assert!(manager.insert_pending_subscription(0, 1, tx2, "unsubscribe_method".into()).is_ok()); assert!( manager.insert_pending_subscription(99, 0, tx3, "unsubscribe_method".into()).is_err(), "unsub request ID already occupied" ); assert!( manager.insert_pending_subscription(99, 1, tx4, "unsubscribe_method".into()).is_err(), "sub request ID already occupied" ); } #[test] fn pending_method_call_faulty() { let (request_tx1, _) = oneshot::channel::<Result<JsonValue, Error>>(); let (request_tx2, _) = oneshot::channel::<Result<JsonValue, Error>>(); let (pending_sub_tx, _) = oneshot::channel::<Result<(mpsc::Receiver<JsonValue>, SubscriptionId), Error>>(); let (sub_tx, _) = mpsc::channel::<JsonValue>(1); let mut manager = RequestManager::new(); assert!(manager.insert_pending_call(0, Some(request_tx1)).is_ok()); assert!(manager.insert_pending_call(0, Some(request_tx2)).is_err()); assert!(manager.insert_pending_subscription(0, 1, pending_sub_tx, "beef".to_string()).is_err()); assert!(manager.insert_subscription(0, 99, SubscriptionId::Num(137), sub_tx, "bibimbap".to_string()).is_err()); 
assert!(manager.remove_subscription(0, SubscriptionId::Num(137)).is_none()); assert!(manager.complete_pending_subscription(0).is_none()); assert!(manager.complete_pending_call(0).is_some()); } #[test] fn pending_subscription_faulty() { let (request_tx, _) = oneshot::channel::<Result<JsonValue, Error>>(); let (pending_sub_tx1, _) = oneshot::channel::<Result<(mpsc::Receiver<JsonValue>, SubscriptionId), Error>>(); let (pending_sub_tx2, _) = oneshot::channel::<Result<(mpsc::Receiver<JsonValue>, SubscriptionId), Error>>(); let (sub_tx, _) = mpsc::channel::<JsonValue>(1); let mut manager = RequestManager::new(); assert!(manager.insert_pending_subscription(99, 100, pending_sub_tx1, "beef".to_string()).is_ok()); assert!(manager.insert_pending_call(99, Some(request_tx)).is_err()); assert!(manager.insert_pending_subscription(99, 1337, pending_sub_tx2, "vegan".to_string()).is_err()); assert!(manager.insert_subscription(99, 100, SubscriptionId::Num(0), sub_tx, "bibimbap".to_string()).is_err()); assert!(manager.remove_subscription(99, SubscriptionId::Num(0)).is_none()); assert!(manager.complete_pending_call(99).is_none()); assert!(manager.complete_pending_subscription(99).is_some()); } #[test] fn active_subscriptions_faulty() { let (request_tx, _) = oneshot::channel::<Result<JsonValue, Error>>(); let (pending_sub_tx, _) = oneshot::channel::<Result<(mpsc::Receiver<JsonValue>, SubscriptionId), Error>>(); let (sub_tx1, _) = mpsc::channel::<JsonValue>(1); let (sub_tx2, _) = mpsc::channel::<JsonValue>(1); let mut manager = RequestManager::new(); assert!(manager.insert_subscription(3, 4, SubscriptionId::Num(0), sub_tx1, "bibimbap".to_string()).is_ok()); assert!(manager.insert_subscription(3, 4, SubscriptionId::Num(1), sub_tx2, "bibimbap".to_string()).is_err()); assert!(manager.insert_pending_subscription(3, 4, pending_sub_tx, "beef".to_string()).is_err()); assert!(manager.insert_pending_call(3, Some(request_tx)).is_err()); assert!(manager.remove_subscription(3, 
SubscriptionId::Num(7)).is_none()); assert!(manager.complete_pending_call(3).is_none()); assert!(manager.complete_pending_subscription(3).is_none()); assert!(manager.remove_subscription(3, SubscriptionId::Num(1)).is_none()); assert!(manager.remove_subscription(3, SubscriptionId::Num(0)).is_some()); } }
38.744681
119
0.721276
edb479ceabe26db56d5913246c48b63b9956331b
6,483
//! Implementation of binary .wtns file parser/serializer. //! According to https://github.com/iden3/snarkjs/blob/master/src/wtns_utils.js use std::io::{Error, ErrorKind, Read, Result, Write}; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; const MAGIC: &[u8; 4] = b"wtns"; #[derive(Debug, PartialEq)] pub struct WtnsFile<const FS: usize> { pub version: u32, pub header: Header<FS>, pub witness: Witness<FS>, } impl<const FS: usize> WtnsFile<FS> { pub fn from_vec(witness: Vec<FieldElement<FS>>, prime: FieldElement<FS>) -> Self { WtnsFile { version: 1, header: Header { field_size: FS as u32, prime, witness_len: witness.len() as u32, }, witness: Witness(witness), } } pub fn read<R: Read>(mut r: R) -> Result<Self> { let mut magic = [0u8; 4]; r.read_exact(&mut magic)?; if magic != *MAGIC { return Err(Error::new(ErrorKind::InvalidData, "Invalid magic number")); } let version = r.read_u32::<LittleEndian>()?; if version > 2 { return Err(Error::new(ErrorKind::InvalidData, "Unsupported version")); } let num_sections = r.read_u32::<LittleEndian>()?; if num_sections > 2 { return Err(Error::new( ErrorKind::InvalidData, "Number of sections >2 is not supported", )); } let header = Header::read(&mut r)?; let witness = Witness::read(&mut r, &header)?; Ok(WtnsFile { version, header, witness, }) } pub fn write<W: Write>(&self, mut w: W) -> Result<()> { w.write_all(MAGIC)?; w.write_u32::<LittleEndian>(self.version)?; w.write_u32::<LittleEndian>(2)?; self.header.write(&mut w)?; self.witness.write(&mut w)?; Ok(()) } } #[derive(Debug, PartialEq)] pub struct Header<const FS: usize> { pub field_size: u32, pub prime: FieldElement<FS>, pub witness_len: u32, } impl<const FS: usize> Header<FS> { pub fn read<R: Read>(mut r: R) -> Result<Self> { let sec_type = SectionType::read(&mut r)?; if sec_type != SectionType::Header { return Err(Error::new( ErrorKind::InvalidData, "Invalid section type: expected header", )); } let sec_size = r.read_u64::<LittleEndian>()?; if sec_size != 4 + FS as 
u64 + 4 { return Err(Error::new( ErrorKind::InvalidData, "Invalid header section size", )); } let field_size = r.read_u32::<LittleEndian>()?; let prime = FieldElement::read(&mut r)?; if field_size != FS as u32 { return Err(Error::new(ErrorKind::InvalidData, "Wrong field size")); } let witness_len = r.read_u32::<LittleEndian>()?; Ok(Header { field_size, prime, witness_len, }) } pub fn write<W: Write>(&self, mut w: W) -> Result<()> { SectionType::Header.write(&mut w)?; let sec_size = 4 + FS as u64 + 4; w.write_u64::<LittleEndian>(sec_size)?; w.write_u32::<LittleEndian>(FS as u32)?; self.prime.write(&mut w)?; w.write_u32::<LittleEndian>(self.witness_len)?; Ok(()) } } #[derive(Debug, PartialEq)] pub struct Witness<const FS: usize>(pub Vec<FieldElement<FS>>); impl<const FS: usize> Witness<FS> { pub fn read<R: Read>(mut r: R, header: &Header<FS>) -> Result<Self> { let sec_type = SectionType::read(&mut r)?; if sec_type != SectionType::Witness { return Err(Error::new(ErrorKind::InvalidData, "Invalid section type: expected witness")); } let sec_size = r.read_u64::<LittleEndian>()?; if sec_size != header.witness_len as u64 * FS as u64 { return Err(Error::new( ErrorKind::InvalidData, "Invalid witness section size", )); } let mut witness = Vec::with_capacity(header.witness_len as usize); for _ in 0..header.witness_len { witness.push(FieldElement::read(&mut r)?); } Ok(Witness(witness)) } fn write<W: Write>(&self, mut w: W) -> Result<()> { SectionType::Witness.write(&mut w)?; let sec_size = (self.0.len() * FS) as u64; w.write_u64::<LittleEndian>(sec_size)?; for e in &self.0 { e.write(&mut w)?; } Ok(()) } } #[derive(Debug, Eq, PartialEq, Clone, Copy)] #[repr(u32)] pub enum SectionType { Header = 1, Witness = 2, Unknown = u32::MAX, } impl SectionType { fn read<R: Read>(mut r: R) -> Result<Self> { let num = r.read_u32::<LittleEndian>()?; let ty = match num { 1 => SectionType::Header, 2 => SectionType::Witness, _ => SectionType::Unknown, }; Ok(ty) } fn write<W: Write>(&self, mut w: 
W) -> Result<()> { w.write_u32::<LittleEndian>(*self as u32)?; Ok(()) } } #[derive(Debug, PartialEq, Eq)] pub struct FieldElement<const FS: usize>([u8; FS]); impl<const FS: usize> FieldElement<FS> { pub fn as_bytes(&self) -> &[u8] { &self.0[..] } fn read<R: Read>(mut r: R) -> Result<Self> { let mut buf = [0; FS]; r.read_exact(&mut buf)?; Ok(FieldElement(buf)) } fn write<W: Write>(&self, mut w: W) -> Result<()> { w.write_all(&self.0[..]) } } impl<const FS: usize> From<[u8; FS]> for FieldElement<FS> { fn from(array: [u8; FS]) -> Self { FieldElement(array) } } impl<const FS: usize> std::ops::Deref for FieldElement<FS> { type Target = [u8; FS]; fn deref(&self) -> &Self::Target { &self.0 } } #[cfg(test)] mod tests { use super::*; use std::io::Cursor; const FS: usize = 32; fn fe() -> FieldElement<FS> { FieldElement::from([1,0,1,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1]) } #[test] fn test() { let file = WtnsFile::<FS>::from_vec(vec![fe(), fe(), fe()], fe()); let mut data = Vec::new(); file.write(&mut data).unwrap(); let new_file = WtnsFile::read(Cursor::new(data)).unwrap(); assert_eq!(file, new_file); } }
25.932
101
0.531544
89d5642815372ebf4d934a3b0cdd1228b142d9e7
9,734
use clingo::*; use std::cell::RefCell; use std::env; use std::rc::Rc; use std::vec::Vec; fn print_model(model: &Model) { // retrieve the symbols in the model let atoms = model .symbols(&ShowType::SHOWN) .expect("Failed to retrieve symbols in the model."); print!("Model:"); for atom in atoms { // retrieve and print the symbol's string print!(" {}", atom.to_string().unwrap()); } println!(); } fn solve(ctl: &mut Control) { // get a solve handle let mut handle = ctl .solve(&SolveMode::YIELD, &[]) .expect("Failed to retrieve solve handle."); // loop over all models loop { handle.resume().expect("Failed resume on solve handle."); match handle.model() { // print the model Ok(Some(model)) => print_model(model), // stop if there are no more models Ok(None) => break, Err(e) => panic!("Error: {}", e.as_fail()), } } // close the solve handle handle.close().expect("Failed to close solve handle."); } // state information for individual solving threads #[derive(Debug)] struct StateT { // assignment of pigeons to holes // (hole number -> pigeon placement literal or zero) holes: Vec<Option<Literal>>, } // state information for the propagator struct PropagatorT { // mapping from solver literals capturing pigeon placements to hole numbers // (solver literal -> hole number or zero) pigeons: Vec<i32>, // array of states states: Vec<Rc<RefCell<StateT>>>, } // returns the offset'th numeric argument of the function symbol sym fn get_arg(sym: &Symbol, offset: usize) -> Result<i32, ClingoError> { // get the arguments of the function symbol let args = sym.arguments().unwrap(); // get the requested numeric argument args[offset as usize].number() } impl Propagator for PropagatorT { fn init(&mut self, init: &mut PropagateInit) -> bool { // stores the (numeric) maximum of the solver literals capturing pigeon placements // note that the code below assumes that this literal is not negative // which holds for the pigeon problem but not in general let mut max = 0; // the total number of holes 
pigeons can be assigned too let mut holes = 0; let threads = init.number_of_threads(); // ensure that solve can be called multiple times // for simplicity, the case that additional holes or pigeons to assign are grounded is not // handled here if !self.states.is_empty() { // in principle the number of threads can increase between solve calls by changing the // configuration this case is not handled (elegantly) here if threads > self.states.len() { set_error(ErrorType::Runtime, "more threads than states").unwrap(); } return true; } let s1_holes: Vec<Option<Literal>> = vec![]; let state1 = Rc::new(RefCell::new(StateT { holes: s1_holes })); self.states = vec![state1]; // create place/2 signature to filter symbolic atoms with let sig = Signature::new("place", 2, true).unwrap(); // loop over the place/2 atoms in two passes // the first pass determines the maximum placement literal // the second pass allocates memory for data structures based on the first pass for pass in 0..2 { let mut watches = vec![]; { // the propagator monitors place/2 atoms and dectects conflicting assignments // first get the symbolic atoms handle let atoms = init.symbolic_atoms().unwrap(); // get an iterator for place/2 atoms // (atom order corresponds to grounding order (and is unpredictable)) let mut atoms_iterator = atoms.iter_with_signature(&sig); if pass == 1 { // allocate memory for the assignment literal -> hole mapping self.pigeons = vec![0; max + 1];; } while let Some(item) = atoms_iterator.next() { // get the solver literal for the placement atom let lit = init.solver_literal(item.literal().unwrap()).unwrap(); let lit_id = lit.get_integer() as usize; if pass == 0 { // determine the maximum literal if lit_id > max { max = lit_id; } } else { // extract the hole number from the atom let sym = item.symbol().unwrap(); let h = get_arg(&sym, 1).unwrap(); // initialize the assignemnt literal -> hole mapping self.pigeons[lit_id] = h; // watch the assignment literal watches.push(lit); // update 
the total number of holes if h + 1 > holes { holes = h + 1; } } } } // watch the assignment literals for lit in watches { init.add_watch(lit).expect("Failed to add watch."); } } // initialize the per solver thread state information for i in 0..threads { // initially no pigeons are assigned to any holes // so the hole -> literal mapping is initialized with zero // which is not a valid literal self.states[i].borrow_mut().holes = vec![None; holes as usize]; } true } fn propagate(&mut self, control: &mut PropagateControl, changes: &[Literal]) -> bool { // get the thread specific state let mut state = self.states[control.thread_id() as usize].borrow_mut(); // apply and check the pigeon assignments done by the solver for &lit in changes.iter() { // a pointer to the previously assigned literal let idx = self.pigeons[lit.get_integer() as usize] as usize; let mut prev = state.holes[idx]; // update the placement if no literal was assigned previously match prev { None => { prev = Some(lit); state.holes[idx] = prev; } // create a conflicting clause and propagate it Some(x) => { // current and previous literal must not hold together let clause: &[Literal] = &[lit.negate(), x.negate()]; // stores the result when adding a clause or propagationg // if result is false propagation must stop for the solver to backtrack // add the clause if !control.add_clause(clause, ClauseType::Learnt).unwrap() { return true; } // propagate it if !control.propagate().unwrap() { return true; } // must not happen because the clause above is conflicting by construction assert!(false); } }; } true } fn undo(&mut self, control: &mut PropagateControl, changes: &[Literal]) -> bool { // get the thread specific state let mut state = self.states[control.thread_id() as usize].borrow_mut(); // undo the assignments made in propagate for &lit in changes.iter() { let hole = self.pigeons[lit.get_integer() as usize] as usize; if let Some(x) = state.holes[hole] { if x == lit { // undo the assignment state.holes[hole] 
= None; } } } true } } fn main() { // collect clingo options from the command line let options = env::args().skip(1).collect(); // create a propagator with the functions above // using the default implementation for the model check let mut prop = PropagatorT { pigeons: vec![], states: vec![], }; // create a control object and pass command line arguments let option = Control::new(options); match option { Ok(mut ctl) => { // register the propagator ctl.register_propagator(&mut prop, false) .expect("Failed to register propagator."); // add a logic program to the pigeon part // parameters for the pigeon part ctl.add( "pigeon", &vec!["h", "p"], "1 { place(P,H) : H = 1..h } 1 :- P = 1..p.", ) .expect("Failed to add a logic program."); // ground the pigeon part // set the number of holes let arg0 = Symbol::create_number(7); // set the number of pigeons let arg1 = Symbol::create_number(8); let args = vec![arg0, arg1]; // the pigeon program part having the number of holes and pigeons as parameters let part = Part::new("pigeon", &args).unwrap(); let parts = vec![part]; ctl.ground(&parts) .expect("Failed to ground a logic program."); // solve using a model callback solve(&mut ctl); } Err(e) => { panic!("Error: {}", e.as_fail()); } } }
35.396364
98
0.532155
5bdd14374b57374860bff08f0fb19cc605b9fdf5
5,747
// Copyright (c) 2021 Quark Container Authors / 2018 The gVisor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use super::super::qlib::common::*; use super::super::qlib::linux_def::*; use super::super::qlib::control_msg::*; use super::super::qlib::loader; use super::super::IO_MGR; use super::super::URING_MGR; use super::ucall::*; use super::usocket::*; use super::super::runc::container::container::*; use super::super::vmspace::*; pub fn ReadControlMsg(fd: i32) -> Result<ControlMsg> { let usock = USocket { socket: fd, }; let (mut req, fds) = match usock.GetReq() { Ok((req, fds)) => ((req, fds)), Err(e) => { let err = UCallResp::UCallRespErr(format!("{:?}", e)); usock.SendResp(&err)?; usock.Drop(); return Err(e) } }; let msg = ProcessReqHandler(&mut req, &fds); return msg } pub fn RootContainerStartHandler(start: &RootContainerStart) -> Result<ControlMsg> { let msg = ControlMsg::New(Payload::RootContainerStart(RootProcessStart{ cid: start.cid.to_string(), })); return Ok(msg); } pub fn ExecProcessHandler(execArgs: &mut ExecArgs, fds: &[i32]) -> Result<ControlMsg> { execArgs.SetFds(fds); let mut process = loader::Process::default(); process.ID = execArgs.ContainerID.to_string(); process.Cwd = execArgs.WorkDir.to_string(); process.Args.append(&mut execArgs.Argv); process.Envs.append(&mut execArgs.Envv); process.UID = execArgs.KUID.0; process.GID = execArgs.KGID.0; process.AdditionalGids.append(&mut execArgs.ExtraKGIDs.iter().map(| gid | gid.0).collect()); 
process.Terminal = execArgs.Terminal; process.ExecId = Some(execArgs.ExecId.clone()); for i in 0..execArgs.Fds.len() { let osfd = execArgs.Fds[i]; //VMSpace::UnblockFd(osfd); let hostfd = IO_MGR.AddFile(osfd); URING_MGR.lock().Addfd(osfd).unwrap(); process.Stdiofds[i] = hostfd; } let msg = ControlMsg::New(Payload::ExecProcess(process)); return Ok(msg); } pub fn PauseHandler() -> Result<ControlMsg> { let msg = ControlMsg::New(Payload::Pause); return Ok(msg) } pub fn UnpauseHandler() -> Result<ControlMsg> { let msg = ControlMsg::New(Payload::Unpause); return Ok(msg) } pub fn PsHandler(cid: &str) -> Result<ControlMsg> { let msg = ControlMsg::New(Payload::Ps(cid.to_string())); return Ok(msg) } pub fn WaitHandler(cid: &str) -> Result<ControlMsg> { let msg = ControlMsg::New(Payload::WaitContainer(cid.to_string())); return Ok(msg) } pub fn WaitAll() -> Result<ControlMsg> { let msg = ControlMsg::New(Payload::WaitAll); return Ok(msg) } pub fn WaitPidHandler(waitpid: &WaitPid) -> Result<ControlMsg> { let msg = ControlMsg::New(Payload::WaitPid(waitpid.clone())); return Ok(msg) } pub fn SignalHandler(signalArgs: &SignalArgs) -> Result<ControlMsg> { if signalArgs.Signo == Signal::SIGKILL && signalArgs.Mode == SignalDeliveryMode::DeliverToAllProcesses { unsafe { // ucallServer::HandleSignal SIGKILL all processes libc::kill(0, 9); panic!("SignalHandler kill whole process") } } let msg = ControlMsg::New(Payload::Signal(signalArgs.clone())); return Ok(msg) } pub fn ContainerDestroyHandler(cid: &String) -> Result<ControlMsg> { let msg = ControlMsg::New(Payload::ContainerDestroy(cid.clone())); return Ok(msg) } pub fn CreateSubContainerHandler(args: &mut CreateArgs, fds: &[i32]) -> Result<ControlMsg> { //set fds back to args, if fds.len() == 1 { args.fds[0] = fds[0] } let msg = ControlMsg::New(Payload::CreateSubContainer(args.clone())); return Ok(msg) } pub fn StartSubContainerHandler(args: &mut StartArgs, fds: &[i32]) -> Result<ControlMsg> { if fds.len() == 3 { 
args.process.Stdiofds[0] = fds[0]; args.process.Stdiofds[1] = fds[1]; args.process.Stdiofds[2] = fds[2]; } for i in 0..args.process.Stdiofds.len() { let osfd = args.process.Stdiofds[i]; VMSpace::UnblockFd(osfd); let hostfd = IO_MGR.AddFile(osfd); args.process.Stdiofds[i] = hostfd; } let msg = ControlMsg::New(Payload::StartSubContainer(args.clone())); return Ok(msg); } pub fn ProcessReqHandler(req: &mut UCallReq, fds: &[i32]) -> Result<ControlMsg> { let msg = match req { UCallReq::RootContainerStart(start) => RootContainerStartHandler(start)?, UCallReq::ExecProcess(ref mut execArgs) => ExecProcessHandler(execArgs, fds)?, UCallReq::Pause => PauseHandler()?, UCallReq::Unpause => UnpauseHandler()?, UCallReq::Ps(cid) => PsHandler(cid)?, UCallReq::WaitContainer(cid) => WaitHandler(cid)?, UCallReq::WaitPid(waitpid) => WaitPidHandler(waitpid)?, UCallReq::Signal(signalArgs) => SignalHandler(signalArgs)?, UCallReq::ContainerDestroy(cid) => ContainerDestroyHandler(cid)?, UCallReq::CreateSubContainer(args) => CreateSubContainerHandler(args, fds)?, UCallReq::StartSubContainer(args) => StartSubContainerHandler(args, fds)?, UCallReq::WaitAll => WaitAll()?, }; return Ok(msg) }
32.468927
108
0.64973
89158f895c416cbd62672ea84fdfffb0e532a4aa
3,023
//! Wrappers for JSON serialization. mod alert; mod block_template; mod blockchain; mod bytes; mod cell; mod chain_info; mod debug; mod experiment; mod fee_rate; mod fixed_bytes; mod net; mod pool; mod primitive; mod proposal_short_id; mod subscription; mod uints; pub use self::alert::{Alert, AlertId, AlertMessage, AlertPriority}; pub use self::block_template::{ BlockTemplate, CellbaseTemplate, TransactionTemplate, UncleTemplate, }; pub use self::blockchain::{ Block, BlockEconomicState, BlockIssuance, BlockView, CellDep, CellInput, CellOutput, Consensus, DepType, EpochView, Header, HeaderView, MerkleProof, MinerReward, OutPoint, ProposalWindow, Script, ScriptHashType, Status, Transaction, TransactionProof, TransactionView, TransactionWithStatus, TxStatus, UncleBlock, UncleBlockView, }; pub use self::bytes::JsonBytes; pub use self::cell::{CellData, CellInfo, CellWithStatus}; pub use self::chain_info::ChainInfo; pub use self::debug::{ExtraLoggerConfig, MainLoggerConfig}; pub use self::experiment::DryRunResult; pub use self::fee_rate::FeeRateDef; pub use self::fixed_bytes::Byte32; pub use self::net::{ BannedAddr, LocalNode, LocalNodeProtocol, NodeAddress, PeerSyncState, RemoteNode, RemoteNodeProtocol, SyncState, }; pub use self::pool::{ OutputsValidator, PoolTransactionEntry, PoolTransactionReject, RawTxPool, TxPoolIds, TxPoolInfo, TxPoolVerbosity, TxVerbosity, }; pub use self::proposal_short_id::ProposalShortId; pub use self::subscription::Topic; pub use self::uints::{Uint128, Uint32, Uint64}; pub use primitive::{ AsEpochNumberWithFraction, BlockNumber, Capacity, Cycle, EpochNumber, EpochNumberWithFraction, Timestamp, Version, }; pub use serde::{Deserialize, Serialize}; /// This is a wrapper for JSON serialization to select the format between Json and Hex. /// /// ## Examples /// /// `ResponseFormat<BlockView, Block>` returns the block in its Json format or molecule serialized /// Hex format. 
pub enum ResponseFormat<V, P> { /// Serializes `V` as Json Json(V), /// Serializes `P` as Hex. /// /// `P` is first serialized by molecule into binary. /// /// The binary is then encoded as a 0x-prefixed hex string. Hex(P), } impl<V, P> Serialize for ResponseFormat<V, P> where V: Serialize, P: ckb_types::prelude::Entity, { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer, { match self { ResponseFormat::Json(view) => view.serialize(serializer), ResponseFormat::Hex(packed) => { let slice = packed.as_slice(); let mut dst = vec![0u8; slice.len() * 2 + 2]; dst[0] = b'0'; dst[1] = b'x'; faster_hex::hex_encode(slice, &mut dst[2..]) .map_err(|e| serde::ser::Error::custom(&format!("{}", e)))?; serializer.serialize_str(unsafe { ::std::str::from_utf8_unchecked(&dst) }) } } } }
32.505376
99
0.681773
2fce24c0670cf882e51cefe05e0ebd02d4945354
727
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. struct c1<T> { x: T, } impl<T: Copy> c1<T> { fn f1(x: int) { } } fn c1<T: Copy>(x: T) -> c1<T> { c1 { x: x } } impl<T: Copy> c1<T> { fn f2(x: int) { } } pub fn main() { c1::<int>(3).f1(4); c1::<int>(3).f2(4); }
20.194444
68
0.610729
4ab36a324976a92f73deaafb17427bbad30e2bf1
3,421
use xi_term::widgets::view::Chunk; use tui::style::Color; use tui::buffer::Buffer; use tui::backend::TestBackend; use tui::terminal::Terminal; #[test] fn simple_plain() { let backend = TestBackend::new(9, 1); let mut terminal = Terminal::new(backend).unwrap(); terminal.draw(|mut f| { let chunk = Chunk::new("Some line"); f.render_widget(chunk, f.size()); }).unwrap(); let expected = Buffer::with_lines(vec![ "Some line" ]); terminal.backend().assert_buffer(&expected); } #[test] fn simple_plain_has_foreground() { let backend = TestBackend::new(9, 1); let mut terminal = Terminal::new(backend).unwrap(); terminal.draw(|mut f| { let chunk = Chunk::new("Some line").foreground(Some(Color::Cyan)); f.render_widget(chunk, f.size()); }).unwrap(); let mut expected = Buffer::with_lines(vec![ "Some line" ]); for line in 0..9 { expected.get_mut(line, 0).set_fg(Color::Cyan); } terminal.backend().assert_buffer(&expected); } #[test] fn simple_plain_has_background() { let backend = TestBackend::new(9, 1); let mut terminal = Terminal::new(backend).unwrap(); terminal.draw(|mut f| { let chunk = Chunk::new("Some line").background(Some(Color::Cyan)); f.render_widget(chunk, f.size()); }).unwrap(); let mut expected = Buffer::with_lines(vec![ "Some line" ]); for line in 0..9 { expected.get_mut(line, 0).set_bg(Color::Cyan); } terminal.backend().assert_buffer(&expected); } #[test] fn simple_contains_tab() { let backend = TestBackend::new(12, 1); let mut terminal = Terminal::new(backend).unwrap(); terminal.draw(|mut f| { let chunk = Chunk::new("Some\tline"); f.render_widget(chunk, f.size()); }).unwrap(); let expected = Buffer::with_lines(vec![ "Some line" ]); terminal.backend().assert_buffer(&expected); } #[test] fn simple_contains_tab_has_foreground() { let backend = TestBackend::new(12, 1); let mut terminal = Terminal::new(backend).unwrap(); terminal.draw(|mut f| { let chunk = Chunk::new("Some\tline").foreground(Some(Color::Cyan)); f.render_widget(chunk, f.size()); }).unwrap(); let mut expected 
= Buffer::with_lines(vec![ "Some line" ]); for line in 0..12 { expected.get_mut(line, 0).set_fg(Color::Cyan); } terminal.backend().assert_buffer(&expected); } #[test] fn simple_contains_tab_has_background() { let backend = TestBackend::new(12, 1); let mut terminal = Terminal::new(backend).unwrap(); terminal.draw(|mut f| { let chunk = Chunk::new("Some\tline").background(Some(Color::Cyan)); f.render_widget(chunk, f.size()); }).unwrap(); let mut expected = Buffer::with_lines(vec![ "Some line" ]); for line in 0..12 { expected.get_mut(line, 0).set_bg(Color::Cyan); } terminal.backend().assert_buffer(&expected); } #[test] fn simple_plain_contains_newline() { let backend = TestBackend::new(10, 1); let mut terminal = Terminal::new(backend).unwrap(); terminal.draw(|mut f| { let chunk = Chunk::new("Some\nline"); f.render_widget(chunk, f.size()); }).unwrap(); let expected = Buffer::with_lines(vec![ "Some^Jline" ]); terminal.backend().assert_buffer(&expected); }
26.315385
75
0.609471
fe32a51e81c9ac323cab3852cefd967634658e79
6,512
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Runtime services //! //! The `rt` module provides a narrow set of runtime services, //! including the global heap (exported in `heap`) and unwinding and //! backtrace support. The APIs in this module are highly unstable, //! and should be considered as private implementation details for the //! time being. #![unstable(feature = "std_misc")] // FIXME: this should not be here. #![allow(missing_docs)] #![allow(dead_code)] use marker::Send; use ops::FnOnce; use sys; use thunk::Thunk; use usize; // Reexport some of our utilities which are expected by other crates. pub use self::util::{default_sched_threads, min_stack, running_on_valgrind}; pub use self::unwind::{begin_unwind, begin_unwind_fmt}; // Reexport some functionality from liballoc. pub use alloc::heap; // Simple backtrace functionality (to print on panic) pub mod backtrace; // Internals #[macro_use] mod macros; // These should be refactored/moved/made private over time pub mod util; pub mod unwind; pub mod args; mod at_exit_imp; mod libunwind; /// The default error code of the rust runtime if the main thread panics instead /// of exiting cleanly. 
pub const DEFAULT_ERROR_CODE: int = 101; #[cfg(any(windows, android))] const OS_DEFAULT_STACK_ESTIMATE: uint = 1 << 20; #[cfg(all(unix, not(android)))] const OS_DEFAULT_STACK_ESTIMATE: uint = 2 * (1 << 20); #[cfg(not(test))] #[lang = "start"] fn lang_start(main: *const u8, argc: int, argv: *const *const u8) -> int { use prelude::v1::*; use mem; use env; use rt; use sys_common::thread_info::{self, NewThread}; use sys_common; use thread::Thread; let something_around_the_top_of_the_stack = 1; let addr = &something_around_the_top_of_the_stack as *const int; let my_stack_top = addr as uint; // FIXME #11359 we just assume that this thread has a stack of a // certain size, and estimate that there's at most 20KB of stack // frames above our current position. const TWENTY_KB: uint = 20000; // saturating-add to sidestep overflow let top_plus_spill = if usize::MAX - TWENTY_KB < my_stack_top { usize::MAX } else { my_stack_top + TWENTY_KB }; // saturating-sub to sidestep underflow let my_stack_bottom = if top_plus_spill < OS_DEFAULT_STACK_ESTIMATE { 0 } else { top_plus_spill - OS_DEFAULT_STACK_ESTIMATE }; let failed = unsafe { // First, make sure we don't trigger any __morestack overflow checks, // and next set up our stack to have a guard page and run through our // own fault handlers if we hit it. sys_common::stack::record_os_managed_stack_bounds(my_stack_bottom, my_stack_top); sys::thread::guard::init(); sys::stack_overflow::init(); // Next, set up the current Thread with the guard information we just // created. Note that this isn't necessary in general for new threads, // but we just do this to name the main thread and to give it correct // info about the stack bounds. let thread: Thread = NewThread::new(Some("<main>".to_string())); thread_info::set((my_stack_bottom, my_stack_top), sys::thread::guard::main(), thread); // By default, some platforms will send a *signal* when a EPIPE error // would otherwise be delivered. 
This runtime doesn't install a SIGPIPE // handler, causing it to kill the program, which isn't exactly what we // want! // // Hence, we set SIGPIPE to ignore when the program starts up in order // to prevent this problem. #[cfg(windows)] fn ignore_sigpipe() {} #[cfg(unix)] fn ignore_sigpipe() { use libc; use libc::funcs::posix01::signal::signal; unsafe { assert!(signal(libc::SIGPIPE, libc::SIG_IGN) != -1); } } ignore_sigpipe(); // Store our args if necessary in a squirreled away location args::init(argc, argv); // And finally, let's run some code! let res = unwind::try(|| { let main: fn() = mem::transmute(main); main(); }); cleanup(); res.is_err() }; // If the exit code wasn't set, then the try block must have panicked. if failed { rt::DEFAULT_ERROR_CODE } else { env::get_exit_status() as isize } } /// Enqueues a procedure to run when the runtime is cleaned up /// /// The procedure passed to this function will be executed as part of the /// runtime cleanup phase. For normal rust programs, this means that it will run /// after all other threads have exited. /// /// The procedure is *not* executed with a local `Thread` available to it, so /// primitives like logging, I/O, channels, spawning, etc, are *not* available. /// This is meant for "bare bones" usage to clean up runtime details, this is /// not meant as a general-purpose "let's clean everything up" function. /// /// It is forbidden for procedures to register more `at_exit` handlers when they /// are running, and doing so will lead to a process abort. pub fn at_exit<F:FnOnce()+Send+'static>(f: F) { at_exit_imp::push(Thunk::new(f)); } /// One-time runtime cleanup. /// /// This function is unsafe because it performs no checks to ensure that the /// runtime has completely ceased running. It is the responsibility of the /// caller to ensure that the runtime is entirely shut down and nothing will be /// poking around at the internal components. 
/// /// Invoking cleanup while portions of the runtime are still in use may cause /// undefined behavior. pub unsafe fn cleanup() { args::cleanup(); sys::stack_overflow::cleanup(); // FIXME: (#20012): the resources being cleaned up by at_exit // currently are not prepared for cleanup to happen asynchronously // with detached threads using the resources; for now, we leak. // at_exit_imp::cleanup(); }
35.010753
80
0.663544
21421b1413ac93a2129fcebd3918ca216daf2955
8,248
#![forbid(unsafe_code)] use std::collections::{HashMap, HashSet, VecDeque}; use std::time::{Instant}; //////////////////////////////////////////////////////////////////////////////// /// Represents a tile on a board. A tile can either be empty or a number from 1 to 8. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct Tile(u8); impl Tile { /// Creates a new tile. /// /// # Arguments /// /// * `maybe_value` - Some(1..=8) or None. /// /// # Panics /// /// Panics if value is 0 or > 8. pub fn new(maybe_value: Option<u8>) -> Self { let value = maybe_value.unwrap(); assert_eq!(value > 0 && value < 9, true); return Self(value); } /// Creates an empty tile. pub fn empty() -> Self { return Self(0); } /// Returns `Some(value)` if tile contains a value, otherwise returns `None`. pub fn number(&self) -> Option<u8> { if self.0 == 0 { return None; } return Some(self.0); } /// Returns true if tile does not contain a value. pub fn is_empty(&self) -> bool { // TODO: your code here. if self.0 == 0 { return true; } return false; } } //////////////////////////////////////////////////////////////////////////////// /// Represents a 3x3 board of tiles. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct Board { tiles: [[Tile; 3]; 3], } impl Board { /// Creates a new `Board` from a 3x3 matrix if `Tile`s. /// /// # Panics /// /// Panics if `tiles` contains more than one instance if some tile. pub fn new(tiles: [[Tile; 3]; 3]) -> Self { // TODO: your code here. let set = tiles.iter().flatten().cloned().collect::<HashSet<_>>(); assert_eq!(set.len(), 9); return Board { tiles }; } /// Returns a tile on a given `row` and `col`. /// /// # Panics /// /// Panics if `row` or `col` > 2. pub fn get(&self, row: usize, col: usize) -> Tile { // TODO: your code here. assert_eq!(row < 3 && col < 3, true); return self.tiles[row][col]; } /// Swaps two given tiles. /// /// # Panics /// /// Panics if some of `r1`, `r2`, `c1` or `c2` > 2. 
pub fn swap(&mut self, r1: usize, c1: usize, r2: usize, c2: usize) { assert_eq!(r1 < 3 && c1 < 3 || r2 < 3 && c2 < 3, true); let temp_tile: Tile = self.tiles[r1][c1]; self.tiles[r1][c1] = self.tiles[r2][c2]; self.tiles[r2][c2] = temp_tile; } /// Parses `Board` from string. /// /// # Arguments /// /// * `s` must be a string in the following format: /// /// ''' /// .12 /// 345 /// 678 /// ''' /// /// # Panics /// /// Panics of `s` is the wrong format or does not represent a valid `Board`. pub fn from_string(s: &str) -> Self { let mut tiles = [[Tile::empty(); 3]; 3]; for (i, line) in s.split('\n').take(3).enumerate() { for (j, chr) in line.chars().take(3).enumerate() { if (chr as u8) == 46 || (chr as u8) > 48 && (chr as u8) < 57 { if chr == '.' { tiles[i][j] = Tile::empty(); } else { tiles[i][j] = Tile::new(Some(chr as u8 - 48)) } } else { panic!("Strange char") } } } let board = Board::new(tiles); return board; } /// Returns a string representation of this board in the following format: /// /// ''' /// .12 /// 345 /// 678 /// ''' pub fn to_string(&self) -> String { let tiles: Vec<Tile> = self.tiles.iter().flatten().cloned().collect(); let mut string: String = String::from(""); for i in 0..tiles.len() { match tiles[i].0 { 1 => string.push('1'), 2 => string.push('2'), 3 => string.push('3'), 4 => string.push('4'), 5 => string.push('5'), 6 => string.push('6'), 7 => string.push('7'), 8 => string.push('8'), 0 => string.push('.'), _ => {} } if i % 3 == 2 && i != 8 { string.push('\n'); } } return string; } /// Returning true if board is solved otherwise false pub fn is_solved(&self) -> bool { let mut is_solved = true; let tiles: Vec<Tile> = self.tiles.iter().flatten().cloned().collect(); for i in 0..tiles.len() { if i+1 != tiles[i].0.into() && i != 8 { is_solved = false; } else if i == 8 && tiles[i].0 != 0 { is_solved = false; } } return is_solved; } } //////////////////////////////////////////////////////////////////////////////// /// Returns the shortest sequence of moves 
that solves this board. /// That is, a sequence of boards such that each consecutive board can be obtained from /// the previous one via a single swap of an empty tile with some adjacent tile, /// and the final board in the sequence is /// /// ''' /// 123 /// 456 /// 78. /// ''' /// /// If the board is unsolvable, returns `None`. If the board is already solved, /// returns an empty vector. pub fn solve(start: Board) -> Option<Vec<Board>> { let moves: Vec<Vec<usize>> = vec![ vec![1, 3], vec![0, 2, 4], vec![1, 5], vec![0, 4, 6], vec![1, 3, 5, 7], vec![2, 4, 8], vec![3, 7], vec![4, 6, 8], vec![5, 7], ]; let mut queue: VecDeque<Board> = VecDeque::new(); // let mut visited: Vec<Board> = Vec::new(); let mut hash_map: HashMap<Board, Board> = HashMap::new(); if start.is_solved() { println!("INPUT IS SOLVED"); return Some(Vec::new()); } else { println!("INPUT IS NOT SOLVED"); } queue.push_back(start); hash_map.insert(start, start); let start = Instant::now(); while let Some(board) = queue.pop_front() { let tiles = board.tiles.iter().flatten().cloned().collect::<Vec<_>>(); for (i, tile) in tiles.iter().enumerate() { if tile.0 == 0 { for mv in &moves[i] { let r1 = i / 3; let c1 = i % 3; let r2 = *mv / 3; let c2 = *mv % 3; let mut copy_board = board; copy_board.swap(r1, c1, r2, c2); // making new state if !hash_map.contains_key(&copy_board) { // if the state wasn't visited hash_map.insert(copy_board, board); // inserting to hash_map queue.push_back(copy_board); // pushing to queue new state if copy_board.is_solved() { // if we reached to result let mut ans: Vec<Board> = Vec::new(); // vec of path to result let mut current_state: Board = copy_board; ans.push(current_state); loop { match hash_map.get(&current_state) { Some(prev_state) => { if *prev_state != current_state { current_state = *prev_state; ans.insert(0, current_state); } else { break; } } None => break, } } ans.pop(); let duration = start.elapsed(); println!("DURATION{:?}", duration); return Some(ans); } } } } } } 
return None; }
31.723077
91
0.429074
f724aa0ff94a488ba87df18ec81978a0e63c2c7f
2,628
use std::fmt::{Display, Debug}; pub mod kucherov; pub mod valimaki; pub type Mode = Box<dyn IsMode>; /* "interface" for new filtering and partition schemes. 1. Create any struct that implements these functions 2. Add your new struct to the code in setup.rs so that the solver will use it when the arg is used */ pub trait IsMode: Sync + Display + Debug { /* filtering scheme. Return the number of permitted errors for a query search node with given properties "completed_blocks" : number of fully-matched blocks so far in THIS query search "patt_blocks" : number of blocks the pattern this search is for is divided into "blind_blocks" : number of blocks to the LEFT of this search i.e. not involved in the search */ fn filter_func(&self, completed_blocks : i32, patt_blocks : i32, blind_blocks : i32) -> i32; // partition scheme. For a pattern of given length and alg parameters, return a vector of block lengths. order will be respected fn get_block_lengths(&self, patt_len : i32, err_rate : f32, thresh : i32) -> Vec<i32>; // return true IFF a node with the properties represented by the args should generate candidates fn candidate_condition(&self,generous_overlap_len : i32, completed_blocks : i32, thresh : i32, errors : i32 ) -> bool; // The pattern will only create query searches for pattern-block-sequence suffixes of this length or more fn get_fewest_suff_blocks(&self) -> i32; // Used by testing.rs for the cargo testing fn get_guaranteed_extra_blocks(&self) -> i32; } /* Add your custom modes in this switch statement so that they will be used when the solver is run with the appropriate -m flag arg. 
*/ pub fn get_mode(arg : &str) -> Mode { let tokens : Vec<&str> = arg.split('_').collect(); if tokens.len() == 0 { panic!("") } let mode_args = &tokens[1..]; match tokens[0] { "valimaki" => Box::new(valimaki::ValimakiMode::new()), "kucherov" => Box::new(kucherov::KucherovMode::new(mode_args)), /* NEW MODE OPTIONS GO IN THIS BLOCK CATCH the name you want it to be associated with, whatever you like. return a box contining your IsMode-implementing struct like this: your_mod_rs_file::YourStruct::new(mode_args) ("IsMode" trait is defined above) You can also leave out the mode_args if your new() is defined as requiring no parameter. */ // YOUR MODES GO HERE ^^^^ _ => panic!("No mode with the given name found!"), } } pub fn default_mode() -> Mode { Box::new(kucherov::KucherovMode::new(&vec!["2"])) }
37.542857
132
0.681126
f780343f4fa63758e64cda7c2f1dedba50ef4249
10,547
use crate::builtins::{ asyncgenerator, builtinfunc, bytearray, bytes, classmethod, code, complex, coroutine, dict, enumerate, filter, float, frame, function, generator, genericalias, getset, int, iter, list, map, mappingproxy, memory, module, namespace, object, property, pybool, pystr, pysuper, pytype::{self, PyTypeRef}, range, set, singletons, slice, staticmethod, traceback, tuple, weakproxy, weakref, zip, }; use crate::{PyContext, StaticType}; /// Holder of references to builtin types. #[derive(Debug, Clone)] pub struct TypeZoo { pub async_generator: PyTypeRef, pub async_generator_asend: PyTypeRef, pub async_generator_athrow: PyTypeRef, pub async_generator_wrapped_value: PyTypeRef, pub bytes_type: PyTypeRef, pub bytes_iterator_type: PyTypeRef, pub bytearray_type: PyTypeRef, pub bytearray_iterator_type: PyTypeRef, pub bool_type: PyTypeRef, pub callable_iterator: PyTypeRef, pub cell_type: PyTypeRef, pub classmethod_type: PyTypeRef, pub code_type: PyTypeRef, pub coroutine_type: PyTypeRef, pub coroutine_wrapper_type: PyTypeRef, pub dict_type: PyTypeRef, pub enumerate_type: PyTypeRef, pub filter_type: PyTypeRef, pub float_type: PyTypeRef, pub frame_type: PyTypeRef, pub frozenset_type: PyTypeRef, pub generator_type: PyTypeRef, pub int_type: PyTypeRef, pub iter_type: PyTypeRef, pub reverse_iter_type: PyTypeRef, pub complex_type: PyTypeRef, pub list_type: PyTypeRef, pub list_iterator_type: PyTypeRef, pub list_reverseiterator_type: PyTypeRef, pub str_iterator_type: PyTypeRef, pub dict_keyiterator_type: PyTypeRef, pub dict_reversekeyiterator_type: PyTypeRef, pub dict_valueiterator_type: PyTypeRef, pub dict_reversevalueiterator_type: PyTypeRef, pub dict_itemiterator_type: PyTypeRef, pub dict_reverseitemiterator_type: PyTypeRef, pub dict_keys_type: PyTypeRef, pub dict_values_type: PyTypeRef, pub dict_items_type: PyTypeRef, pub map_type: PyTypeRef, pub memoryview_type: PyTypeRef, pub tuple_type: PyTypeRef, pub tuple_iterator_type: PyTypeRef, pub set_type: PyTypeRef, pub 
set_iterator_type: PyTypeRef, pub staticmethod_type: PyTypeRef, pub super_type: PyTypeRef, pub str_type: PyTypeRef, pub range_type: PyTypeRef, pub range_iterator_type: PyTypeRef, pub longrange_iterator_type: PyTypeRef, pub slice_type: PyTypeRef, pub type_type: PyTypeRef, pub zip_type: PyTypeRef, pub function_type: PyTypeRef, pub builtin_function_or_method_type: PyTypeRef, pub method_descriptor_type: PyTypeRef, pub property_type: PyTypeRef, pub getset_type: PyTypeRef, pub module_type: PyTypeRef, pub namespace_type: PyTypeRef, pub bound_method_type: PyTypeRef, pub weakref_type: PyTypeRef, pub weakproxy_type: PyTypeRef, pub mappingproxy_type: PyTypeRef, pub traceback_type: PyTypeRef, pub object_type: PyTypeRef, pub ellipsis_type: PyTypeRef, pub none_type: PyTypeRef, pub not_implemented_type: PyTypeRef, pub generic_alias_type: PyTypeRef, } impl TypeZoo { pub(crate) fn init() -> Self { let (type_type, object_type) = crate::pyobjectrc::init_type_hierarchy(); Self { // the order matters for type, object and int type_type: pytype::PyType::init_manually(type_type).clone(), object_type: object::PyBaseObject::init_manually(object_type).clone(), int_type: int::PyInt::init_bare_type().clone(), // types exposed as builtins bool_type: pybool::PyBool::init_bare_type().clone(), bytearray_type: bytearray::PyByteArray::init_bare_type().clone(), bytes_type: bytes::PyBytes::init_bare_type().clone(), classmethod_type: classmethod::PyClassMethod::init_bare_type().clone(), complex_type: complex::PyComplex::init_bare_type().clone(), dict_type: dict::PyDict::init_bare_type().clone(), enumerate_type: enumerate::PyEnumerate::init_bare_type().clone(), float_type: float::PyFloat::init_bare_type().clone(), frozenset_type: set::PyFrozenSet::init_bare_type().clone(), filter_type: filter::PyFilter::init_bare_type().clone(), list_type: list::PyList::init_bare_type().clone(), map_type: map::PyMap::init_bare_type().clone(), memoryview_type: memory::PyMemoryView::init_bare_type().clone(), 
property_type: property::PyProperty::init_bare_type().clone(), range_type: range::PyRange::init_bare_type().clone(), set_type: set::PySet::init_bare_type().clone(), slice_type: slice::PySlice::init_bare_type().clone(), staticmethod_type: staticmethod::PyStaticMethod::init_bare_type().clone(), str_type: pystr::PyStr::init_bare_type().clone(), super_type: pysuper::PySuper::init_bare_type().clone(), tuple_type: tuple::PyTuple::init_bare_type().clone(), zip_type: zip::PyZip::init_bare_type().clone(), // hidden internal types. is this really need to be cached here? async_generator: asyncgenerator::PyAsyncGen::init_bare_type().clone(), async_generator_asend: asyncgenerator::PyAsyncGenASend::init_bare_type().clone(), async_generator_athrow: asyncgenerator::PyAsyncGenAThrow::init_bare_type().clone(), async_generator_wrapped_value: asyncgenerator::PyAsyncGenWrappedValue::init_bare_type() .clone(), bound_method_type: function::PyBoundMethod::init_bare_type().clone(), builtin_function_or_method_type: builtinfunc::PyBuiltinFunction::init_bare_type() .clone(), bytearray_iterator_type: bytearray::PyByteArrayIterator::init_bare_type().clone(), bytes_iterator_type: bytes::PyBytesIterator::init_bare_type().clone(), callable_iterator: iter::PyCallableIterator::init_bare_type().clone(), cell_type: function::PyCell::init_bare_type().clone(), code_type: code::PyCode::init_bare_type().clone(), coroutine_type: coroutine::PyCoroutine::init_bare_type().clone(), coroutine_wrapper_type: coroutine::PyCoroutineWrapper::init_bare_type().clone(), dict_keys_type: dict::PyDictKeys::init_bare_type().clone(), dict_values_type: dict::PyDictValues::init_bare_type().clone(), dict_items_type: dict::PyDictItems::init_bare_type().clone(), dict_keyiterator_type: dict::PyDictKeyIterator::init_bare_type().clone(), dict_reversekeyiterator_type: dict::PyDictReverseKeyIterator::init_bare_type().clone(), dict_valueiterator_type: dict::PyDictValueIterator::init_bare_type().clone(), dict_reversevalueiterator_type: 
dict::PyDictReverseValueIterator::init_bare_type() .clone(), dict_itemiterator_type: dict::PyDictItemIterator::init_bare_type().clone(), dict_reverseitemiterator_type: dict::PyDictReverseItemIterator::init_bare_type() .clone(), ellipsis_type: slice::PyEllipsis::init_bare_type().clone(), frame_type: crate::frame::Frame::init_bare_type().clone(), function_type: function::PyFunction::init_bare_type().clone(), generator_type: generator::PyGenerator::init_bare_type().clone(), getset_type: getset::PyGetSet::init_bare_type().clone(), iter_type: iter::PySequenceIterator::init_bare_type().clone(), reverse_iter_type: enumerate::PyReverseSequenceIterator::init_bare_type().clone(), list_iterator_type: list::PyListIterator::init_bare_type().clone(), list_reverseiterator_type: list::PyListReverseIterator::init_bare_type().clone(), mappingproxy_type: mappingproxy::PyMappingProxy::init_bare_type().clone(), module_type: module::PyModule::init_bare_type().clone(), namespace_type: namespace::PyNamespace::init_bare_type().clone(), range_iterator_type: range::PyRangeIterator::init_bare_type().clone(), longrange_iterator_type: range::PyLongRangeIterator::init_bare_type().clone(), set_iterator_type: set::PySetIterator::init_bare_type().clone(), str_iterator_type: pystr::PyStrIterator::init_bare_type().clone(), traceback_type: traceback::PyTraceback::init_bare_type().clone(), tuple_iterator_type: tuple::PyTupleIterator::init_bare_type().clone(), weakproxy_type: weakproxy::PyWeakProxy::init_bare_type().clone(), weakref_type: weakref::PyWeak::init_bare_type().clone(), method_descriptor_type: builtinfunc::PyBuiltinMethod::init_bare_type().clone(), none_type: singletons::PyNone::init_bare_type().clone(), not_implemented_type: singletons::PyNotImplemented::init_bare_type().clone(), generic_alias_type: genericalias::PyGenericAlias::init_bare_type().clone(), } } /// Fill attributes of builtin types. 
pub(crate) fn extend(context: &PyContext) { pytype::init(context); object::init(context); list::init(context); set::init(context); tuple::init(context); dict::init(context); builtinfunc::init(context); function::init(context); staticmethod::init(context); classmethod::init(context); generator::init(context); coroutine::init(context); asyncgenerator::init(context); int::init(context); float::init(context); complex::init(context); bytes::init(context); bytearray::init(context); property::init(context); getset::init(context); memory::init(context); pystr::init(context); range::init(context); slice::init(context); pysuper::init(context); iter::init(context); enumerate::init(context); filter::init(context); map::init(context); zip::init(context); pybool::init(context); code::init(context); frame::init(context); weakref::init(context); weakproxy::init(context); singletons::init(context); module::init(context); namespace::init(context); mappingproxy::init(context); traceback::init(context); genericalias::init(context); } }
48.380734
99
0.677918
766b329782964e80c827c35cca755eb45bea58e7
13,906
//! AES-256-GCM-SIV tests #[macro_use] mod common; use self::common::TestVector; use aes_gcm_siv::aead::{generic_array::GenericArray, Aead, NewAead, Payload}; use aes_gcm_siv::Aes256GcmSiv; /// Test vectors from RFC8452 Appendix C.2. AEAD_AES_256_GCM_SIV /// <https://tools.ietf.org/html/rfc8452#appendix-C.2> const TEST_VECTORS: &[TestVector<[u8; 32]>] = &[ TestVector { key: b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", nonce: b"\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", aad: b"", plaintext: b"", ciphertext: b"\x07\xf5\xf4\x16\x9b\xbf\x55\xa8\x40\x0c\xd4\x7e\xa6\xfd\x40\x0f" }, TestVector { key: b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", nonce: b"\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", aad: b"", plaintext: b"\x01\x00\x00\x00\x00\x00\x00\x00", ciphertext: b"\xc2\xef\x32\x8e\x5c\x71\xc8\x3b\x84\x31\x22\x13\x0f\x73\x64\xb7\x61\xe0\xb9\x74\x27\xe3\xdf\x28" }, TestVector { key: b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", nonce: b"\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", aad: b"", plaintext: b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", ciphertext: b"\x9a\xab\x2a\xeb\x3f\xaa\x0a\x34\xae\xa8\xe2\xb1\x8c\xa5\x0d\xa9\xae\x65\x59\xe4\x8f\xd1\x0f\x6e\x5c\x9c\xa1\x7e" }, TestVector { key: b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", nonce: b"\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", aad: b"", plaintext: b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", ciphertext: b"\x85\xa0\x1b\x63\x02\x5b\xa1\x9b\x7f\xd3\xdd\xfc\x03\x3b\x3e\x76\xc9\xea\xc6\xfa\x70\x09\x42\x70\x2e\x90\x86\x23\x83\xc6\xc3\x66" }, TestVector { key: 
b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", nonce: b"\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", aad: b"", plaintext: b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", ciphertext: b"\x4a\x6a\x9d\xb4\xc8\xc6\x54\x92\x01\xb9\xed\xb5\x30\x06\xcb\xa8\x21\xec\x9c\xf8\x50\x94\x8a\x7c\x86\xc6\x8a\xc7\x53\x9d\x02\x7f\xe8\x19\xe6\x3a\xbc\xd0\x20\xb0\x06\xa9\x76\x39\x76\x32\xeb\x5d" }, TestVector { key: b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", nonce: b"\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", aad: b"", plaintext: b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", ciphertext: b"\xc0\x0d\x12\x18\x93\xa9\xfa\x60\x3f\x48\xcc\xc1\xca\x3c\x57\xce\x74\x99\x24\x5e\xa0\x04\x6d\xb1\x6c\x53\xc7\xc6\x6f\xe7\x17\xe3\x9c\xf6\xc7\x48\x83\x7b\x61\xf6\xee\x3a\xdc\xee\x17\x53\x4e\xd5\x79\x0b\xc9\x68\x80\xa9\x9b\xa8\x04\xbd\x12\xc0\xe6\xa2\x2c\xc4" }, TestVector { key: b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", nonce: b"\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", aad: b"", plaintext: b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", ciphertext: 
b"\xc2\xd5\x16\x0a\x1f\x86\x83\x83\x49\x10\xac\xda\xfc\x41\xfb\xb1\x63\x2d\x4a\x35\x3e\x8b\x90\x5e\xc9\xa5\x49\x9a\xc3\x4f\x96\xc7\xe1\x04\x9e\xb0\x80\x88\x38\x91\xa4\xdb\x8c\xaa\xa1\xf9\x9d\xd0\x04\xd8\x04\x87\x54\x07\x35\x23\x4e\x37\x44\x51\x2c\x6f\x90\xce\x11\x28\x64\xc2\x69\xfc\x0d\x9d\x88\xc6\x1f\xa4\x7e\x39\xaa\x08" }, TestVector { key: b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", nonce: b"\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", aad: b"\x01", plaintext: b"\x02\x00\x00\x00\x00\x00\x00\x00", ciphertext: b"\x1d\xe2\x29\x67\x23\x7a\x81\x32\x91\x21\x3f\x26\x7e\x3b\x45\x2f\x02\xd0\x1a\xe3\x3e\x4e\xc8\x54" }, TestVector { key: b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", nonce: b"\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", aad: b"\x01", plaintext: b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", ciphertext: b"\x16\x3d\x6f\x9c\xc1\xb3\x46\xcd\x45\x3a\x2e\x4c\xc1\xa4\xa1\x9a\xe8\x00\x94\x1c\xcd\xc5\x7c\xc8\x41\x3c\x27\x7f" }, TestVector { key: b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", nonce: b"\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", aad: b"\x01", plaintext: b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", ciphertext: b"\xc9\x15\x45\x82\x3c\xc2\x4f\x17\xdb\xb0\xe9\xe8\x07\xd5\xec\x17\xb2\x92\xd2\x8f\xf6\x11\x89\xe8\xe4\x9f\x38\x75\xef\x91\xaf\xf7" }, TestVector { key: b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", nonce: b"\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", aad: b"\x01", plaintext: b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", ciphertext: 
b"\x07\xda\xd3\x64\xbf\xc2\xb9\xda\x89\x11\x6d\x7b\xef\x6d\xaa\xaf\x6f\x25\x55\x10\xaa\x65\x4f\x92\x0a\xc8\x1b\x94\xe8\xba\xd3\x65\xae\xa1\xba\xd1\x27\x02\xe1\x96\x56\x04\x37\x4a\xab\x96\xdb\xbc" }, TestVector { key: b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", nonce: b"\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", aad: b"\x01", plaintext: b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", ciphertext: b"\xc6\x7a\x1f\x0f\x56\x7a\x51\x98\xaa\x1f\xcc\x8e\x3f\x21\x31\x43\x36\xf7\xf5\x1c\xa8\xb1\xaf\x61\xfe\xac\x35\xa8\x64\x16\xfa\x47\xfb\xca\x3b\x5f\x74\x9c\xdf\x56\x45\x27\xf2\x31\x4f\x42\xfe\x25\x03\x33\x27\x42\xb2\x28\xc6\x47\x17\x36\x16\xcf\xd4\x4c\x54\xeb" }, TestVector { key: b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", nonce: b"\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", aad: b"\x01", plaintext: b"\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", ciphertext: b"\x67\xfd\x45\xe1\x26\xbf\xb9\xa7\x99\x30\xc4\x3a\xad\x2d\x36\x96\x7d\x3f\x0e\x4d\x21\x7c\x1e\x55\x1f\x59\x72\x78\x70\xbe\xef\xc9\x8c\xb9\x33\xa8\xfc\xe9\xde\x88\x7b\x1e\x40\x79\x99\x88\xdb\x1f\xc3\xf9\x18\x80\xed\x40\x5b\x2d\xd2\x98\x31\x88\x58\x46\x7c\x89\x5b\xde\x02\x85\x03\x7c\x5d\xe8\x1e\x5b\x57\x0a\x04\x9b\x62\xa0" }, TestVector { key: b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", nonce: b"\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", aad: 
b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", plaintext: b"\x02\x00\x00\x00", ciphertext: b"\x22\xb3\xf4\xcd\x18\x35\xe5\x17\x74\x1d\xfd\xdc\xcf\xa0\x7f\xa4\x66\x1b\x74\xcf" }, TestVector { key: b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", nonce: b"\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", aad: b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00", plaintext: b"\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00", ciphertext: b"\x43\xdd\x01\x63\xcd\xb4\x8f\x9f\xe3\x21\x2b\xf6\x1b\x20\x19\x76\x06\x7f\x34\x2b\xb8\x79\xad\x97\x6d\x82\x42\xac\xc1\x88\xab\x59\xca\xbf\xe3\x07" }, TestVector { key: b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", nonce: b"\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", aad: b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00", plaintext: b"\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00", ciphertext: b"\x46\x24\x01\x72\x4b\x5c\xe6\x58\x8d\x5a\x54\xaa\xe5\x37\x55\x13\xa0\x75\xcf\xcd\xf5\x04\x21\x12\xaa\x29\x68\x5c\x91\x2f\xc2\x05\x65\x43" }, TestVector { key: b"\xe6\x60\x21\xd5\xeb\x8e\x4f\x40\x66\xd4\xad\xb9\xc3\x35\x60\xe4\xf4\x6e\x44\xbb\x3d\xa0\x01\x5c\x94\xf7\x08\x87\x36\x86\x42\x00", nonce: b"\xe0\xea\xf5\x28\x4d\x88\x4a\x0e\x77\xd3\x16\x46", aad: b"", plaintext: b"", ciphertext: b"\x16\x9f\xbb\x2f\xbf\x38\x9a\x99\x5f\x63\x90\xaf\x22\x22\x8a\x62" }, TestVector { key: b"\xba\xe8\xe3\x7f\xc8\x34\x41\xb1\x60\x34\x56\x6b\x7a\x80\x6c\x46\xbb\x91\xc3\xc5\xae\xdb\x64\xa6\xc5\x90\xbc\x84\xd1\xa5\xe2\x69", nonce: b"\xe4\xb4\x78\x01\xaf\xc0\x57\x7e\x34\x69\x9b\x9e", aad: b"\x4f\xbd\xc6\x6f\x14", plaintext: b"\x67\x1f\xdd", ciphertext: b"\x0e\xac\xcb\x93\xda\x9b\xb8\x13\x33\xae\xe0\xc7\x85\xb2\x40\xd3\x19\x71\x9d" }, TestVector { key: 
b"\x65\x45\xfc\x88\x0c\x94\xa9\x51\x98\x87\x42\x96\xd5\xcc\x1f\xd1\x61\x32\x0b\x69\x20\xce\x07\x78\x7f\x86\x74\x3b\x27\x5d\x1a\xb3", nonce: b"\x2f\x6d\x1f\x04\x34\xd8\x84\x8c\x11\x77\x44\x1f", aad: b"\x67\x87\xf3\xea\x22\xc1\x27\xaa\xf1\x95", plaintext: b"\x19\x54\x95\x86\x0f\x04", ciphertext: b"\xa2\x54\xda\xd4\xf3\xf9\x6b\x62\xb8\x4d\xc4\x0c\x84\x63\x6a\x5e\xc1\x20\x20\xec\x8c\x2c" }, TestVector { key: b"\xd1\x89\x47\x28\xb3\xfe\xd1\x47\x3c\x52\x8b\x84\x26\xa5\x82\x99\x59\x29\xa1\x49\x9e\x9a\xd8\x78\x0c\x8d\x63\xd0\xab\x41\x49\xc0", nonce: b"\x9f\x57\x2c\x61\x4b\x47\x45\x91\x44\x74\xe7\xc7", aad: b"\x48\x9c\x8f\xde\x2b\xe2\xcf\x97\xe7\x4e\x93\x2d\x4e\xd8\x7d", plaintext: b"\xc9\x88\x2e\x53\x86\xfd\x9f\x92\xec", ciphertext: b"\x0d\xf9\xe3\x08\x67\x82\x44\xc4\x4b\xc0\xfd\x3d\xc6\x62\x8d\xfe\x55\xeb\xb0\xb9\xfb\x22\x95\xc8\xc2" }, TestVector { key: b"\xa4\x41\x02\x95\x2e\xf9\x4b\x02\xb8\x05\x24\x9b\xac\x80\xe6\xf6\x14\x55\xbf\xac\x83\x08\xa2\xd4\x0d\x8c\x84\x51\x17\x80\x82\x35", nonce: b"\x5c\x9e\x94\x0f\xea\x2f\x58\x29\x50\xa7\x0d\x5a", aad: b"\x0d\xa5\x52\x10\xcc\x1c\x1b\x0a\xbd\xe3\xb2\xf2\x04\xd1\xe9\xf8\xb0\x6b\xc4\x7f", plaintext: b"\x1d\xb2\x31\x6f\xd5\x68\x37\x8d\xa1\x07\xb5\x2b", ciphertext: b"\x8d\xbe\xb9\xf7\x25\x5b\xf5\x76\x9d\xd5\x66\x92\x40\x40\x99\xc2\x58\x7f\x64\x97\x9f\x21\x82\x67\x06\xd4\x97\xd5" }, TestVector { key: b"\x97\x45\xb3\xd1\xae\x06\x55\x6f\xb6\xaa\x78\x90\xbe\xbc\x18\xfe\x6b\x3d\xb4\xda\x3d\x57\xaa\x94\x84\x2b\x98\x03\xa9\x6e\x07\xfb", nonce: b"\x6d\xe7\x18\x60\xf7\x62\xeb\xfb\xd0\x82\x84\xe4", aad: b"\xf3\x7d\xe2\x1c\x7f\xf9\x01\xcf\xe8\xa6\x96\x15\xa9\x3f\xdf\x7a\x98\xca\xd4\x81\x79\x62\x45\x70\x9f", plaintext: b"\x21\x70\x2d\xe0\xde\x18\xba\xa9\xc9\x59\x62\x91\xb0\x84\x66", ciphertext: b"\x79\x35\x76\xdf\xa5\xc0\xf8\x87\x29\xa7\xed\x3c\x2f\x1b\xff\xb3\x08\x0d\x28\xf6\xeb\xb5\xd3\x64\x8c\xe9\x7b\xd5\xba\x67\xfd" }, TestVector { key: 
b"\xb1\x88\x53\xf6\x8d\x83\x36\x40\xe4\x2a\x3c\x02\xc2\x5b\x64\x86\x9e\x14\x6d\x7b\x23\x39\x87\xbd\xdf\xc2\x40\x87\x1d\x75\x76\xf7", nonce: b"\x02\x8e\xc6\xeb\x5e\xa7\xe2\x98\x34\x2a\x94\xd4", aad: b"\x9c\x21\x59\x05\x8b\x1f\x0f\xe9\x14\x33\xa5\xbd\xc2\x0e\x21\x4e\xab\x7f\xec\xef\x44\x54\xa1\x0e\xf0\x65\x7d\xf2\x1a\xc7", plaintext: b"\xb2\x02\xb3\x70\xef\x97\x68\xec\x65\x61\xc4\xfe\x6b\x7e\x72\x96\xfa\x85", ciphertext: b"\x85\x7e\x16\xa6\x49\x15\xa7\x87\x63\x76\x87\xdb\x4a\x95\x19\x63\x5c\xdd\x45\x4f\xc2\xa1\x54\xfe\xa9\x1f\x83\x63\xa3\x9f\xec\x7d\x0a\x49" }, TestVector { key: b"\x3c\x53\x5d\xe1\x92\xea\xed\x38\x22\xa2\xfb\xbe\x2c\xa9\xdf\xc8\x82\x55\xe1\x4a\x66\x1b\x8a\xa8\x2c\xc5\x42\x36\x09\x3b\xbc\x23", nonce: b"\x68\x80\x89\xe5\x55\x40\xdb\x18\x72\x50\x4e\x1c", aad: b"\x73\x43\x20\xcc\xc9\xd9\xbb\xbb\x19\xcb\x81\xb2\xaf\x4e\xcb\xc3\xe7\x28\x34\x32\x1f\x7a\xa0\xf7\x0b\x72\x82\xb4\xf3\x3d\xf2\x3f\x16\x75\x41", plaintext: b"\xce\xd5\x32\xce\x41\x59\xb0\x35\x27\x7d\x4d\xfb\xb7\xdb\x62\x96\x8b\x13\xcd\x4e\xec", ciphertext: b"\x62\x66\x60\xc2\x6e\xa6\x61\x2f\xb1\x7a\xd9\x1e\x8e\x76\x76\x39\xed\xd6\xc9\xfa\xee\x9d\x6c\x70\x29\x67\x5b\x89\xea\xf4\xba\x1d\xed\x1a\x28\x65\x94" }, ]; tests!(Aes256GcmSiv, TEST_VECTORS);
75.576087
343
0.661513
1e9500878a7d08f5cf16b1cd529bbcc3ceda4251
3,012
use git_url::Scheme; use crate::parse::{assert_url_and, assert_url_roundtrip, url}; #[test] fn file_path_with_protocol() -> crate::Result { assert_url_roundtrip( "file:///path/to/git", url(Scheme::File, None, None, None, b"/path/to/git"), ) } #[test] fn file_path_without_protocol() -> crate::Result { let url = assert_url_and("/path/to/git", url(Scheme::File, None, None, None, b"/path/to/git"))?.to_string(); assert_eq!(url, "file:///path/to/git"); Ok(()) } #[test] fn no_username_expansion_for_file_paths_without_protocol() -> crate::Result { let url = assert_url_and("~/path/to/git", url(Scheme::File, None, None, None, b"~/path/to/git"))?.to_string(); assert_eq!(url, "file://~/path/to/git"); Ok(()) } #[test] fn no_username_expansion_for_file_paths_with_protocol() -> crate::Result { assert_url_roundtrip( "file://~username/path/to/git", url(Scheme::File, None, None, None, b"~username/path/to/git"), ) } #[test] fn non_utf8_file_path_without_protocol() -> crate::Result { let parsed = git_url::parse(b"/path/to\xff/git")?; assert_eq!(parsed, url(Scheme::File, None, None, None, b"/path/to\xff/git",)); assert_eq!( parsed.to_string(), "file:///path/to�/git", "non-unicode is made unicode safe" ); Ok(()) } #[test] fn relative_file_path_without_protocol() -> crate::Result { let parsed = assert_url_and( "../../path/to/git", url(Scheme::File, None, None, None, b"../../path/to/git"), )? .to_string(); assert_eq!(parsed, "file://../../path/to/git"); let url = assert_url_and("path/to/git", url(Scheme::File, None, None, None, b"path/to/git"))?.to_string(); assert_eq!(url, "file://path/to/git"); Ok(()) } #[test] fn interior_relative_file_path_without_protocol() -> crate::Result { let url = assert_url_and( "/abs/path/../../path/to/git", url(Scheme::File, None, None, None, b"/abs/path/../../path/to/git"), )? 
.to_string(); assert_eq!(url, "file:///abs/path/../../path/to/git"); Ok(()) } mod windows { use git_url::Scheme; use crate::parse::{assert_url_and, assert_url_roundtrip, url}; #[test] fn file_path_without_protocol() -> crate::Result { let url = assert_url_and("x:/path/to/git", url(Scheme::File, None, None, None, b"x:/path/to/git"))?.to_string(); assert_eq!(url, "file://x:/path/to/git"); Ok(()) } #[test] fn file_path_with_backslashes_without_protocol() -> crate::Result { let url = assert_url_and( "x:\\path\\to\\git", url(Scheme::File, None, None, None, b"x:\\path\\to\\git"), )? .to_string(); assert_eq!(url, "file://x:\\path\\to\\git"); Ok(()) } #[test] fn file_path_with_protocol() -> crate::Result { assert_url_roundtrip( "file://x:/path/to/git", url(Scheme::File, None, None, None, b"x:/path/to/git"), ) } }
29.821782
120
0.589641
2f5261c4dc4a2584a0f85267e6b78a2c4cb09b7d
10,586
use std::process::Command; use std::env; use std::path::{PathBuf, Path}; use build_helper::output; fn detect_llvm_link() -> (&'static str, &'static str) { // Force the link mode we want, preferring static by default, but // possibly overridden by `configure --enable-llvm-link-shared`. if env::var_os("LLVM_LINK_SHARED").is_some() { ("dylib", "--link-shared") } else { ("static", "--link-static") } } fn main() { if env::var_os("RUST_CHECK").is_some() { // If we're just running `check`, there's no need for LLVM to be built. println!("cargo:rerun-if-env-changed=RUST_CHECK"); return; } build_helper::restore_library_path(); let target = env::var("TARGET").expect("TARGET was not set"); let llvm_config = env::var_os("LLVM_CONFIG") .map(PathBuf::from) .unwrap_or_else(|| { if let Some(dir) = env::var_os("CARGO_TARGET_DIR").map(PathBuf::from) { let to_test = dir.parent() .unwrap() .parent() .unwrap() .join(&target) .join("llvm/bin/llvm-config"); if Command::new(&to_test).output().is_ok() { return to_test; } } PathBuf::from("llvm-config") }); println!("cargo:rerun-if-changed={}", llvm_config.display()); println!("cargo:rerun-if-env-changed=LLVM_CONFIG"); // Test whether we're cross-compiling LLVM. This is a pretty rare case // currently where we're producing an LLVM for a different platform than // what this build script is currently running on. // // In that case, there's no guarantee that we can actually run the target, // so the build system works around this by giving us the LLVM_CONFIG for // the host platform. This only really works if the host LLVM and target // LLVM are compiled the same way, but for us that's typically the case. // // We *want* detect this cross compiling situation by asking llvm-config // what it's host-target is. If that's not the TARGET, then we're cross // compiling. 
Unfortunately `llvm-config` seems either be buggy, or we're // misconfiguring it, because the `i686-pc-windows-gnu` build of LLVM will // report itself with a `--host-target` of `x86_64-pc-windows-gnu`. This // tricks us into thinking we're doing a cross build when we aren't, so // havoc ensues. // // In any case, if we're cross compiling, this generally just means that we // can't trust all the output of llvm-config becaues it might be targeted // for the host rather than the target. As a result a bunch of blocks below // are gated on `if !is_crossed` let target = env::var("TARGET").expect("TARGET was not set"); let host = env::var("HOST").expect("HOST was not set"); let is_crossed = target != host; let mut optional_components = vec!["x86", "arm", "aarch64", "amdgpu", "mips", "powerpc", "systemz", "jsbackend", "webassembly", "msp430", "sparc", "nvptx"]; let mut version_cmd = Command::new(&llvm_config); version_cmd.arg("--version"); let version_output = output(&mut version_cmd); let mut parts = version_output.split('.').take(2) .filter_map(|s| s.parse::<u32>().ok()); let (major, _minor) = if let (Some(major), Some(minor)) = (parts.next(), parts.next()) { (major, minor) } else { (3, 9) }; if major > 3 { optional_components.push("hexagon"); } if major > 6 { optional_components.push("riscv"); } // FIXME: surely we don't need all these components, right? Stuff like mcjit // or interpreter the compiler itself never uses. 
let required_components = &["ipo", "bitreader", "bitwriter", "linker", "asmparser", "mcjit", "lto", "interpreter", "instrumentation"]; let components = output(Command::new(&llvm_config).arg("--components")); let mut components = components.split_whitespace().collect::<Vec<_>>(); components.retain(|c| optional_components.contains(c) || required_components.contains(c)); for component in required_components { if !components.contains(component) { panic!("require llvm component {} but wasn't found", component); } } for component in components.iter() { println!("cargo:rustc-cfg=llvm_component=\"{}\"", component); } // Link in our own LLVM shims, compiled with the same flags as LLVM let mut cmd = Command::new(&llvm_config); cmd.arg("--cxxflags"); let mut cxxflags = output(&mut cmd); cxxflags.push_str("-I /usr/include/c++/v1"); let mut cfg = cc::Build::new(); cfg.warnings(false); for flag in cxxflags.split_whitespace() { // Ignore flags like `-m64` when we're doing a cross build if is_crossed && flag.starts_with("-m") { continue; } if flag.starts_with("-flto") { continue; } // -Wdate-time is not supported by the netbsd cross compiler if is_crossed && target.contains("netbsd") && flag.contains("date-time") { continue; } cfg.flag(flag); } for component in &components { let mut flag = String::from("LLVM_COMPONENT_"); flag.push_str(&component.to_uppercase()); cfg.define(&flag, None); } println!("cargo:rerun-if-changed-env=LLVM_RUSTLLVM"); if env::var_os("LLVM_RUSTLLVM").is_some() { cfg.define("LLVM_RUSTLLVM", None); } build_helper::rerun_if_changed_anything_in_dir(Path::new("../rustllvm")); cfg.file("../rustllvm/PassWrapper.cpp") .file("../rustllvm/RustWrapper.cpp") .file("../rustllvm/ArchiveWrapper.cpp") .file("../rustllvm/Linker.cpp") .cpp(true) .cpp_link_stdlib(None) // we handle this below .compile("rustllvm"); let (llvm_kind, llvm_link_arg) = detect_llvm_link(); // Link in all LLVM libraries, if we're uwring the "wrong" llvm-config then // we don't pick up system libs 
because unfortunately they're for the host // of llvm-config, not the target that we're attempting to link. let mut cmd = Command::new(&llvm_config); cmd.arg(llvm_link_arg).arg("--libs"); if !is_crossed { cmd.arg("--system-libs"); } cmd.args(&components); for lib in output(&mut cmd).split_whitespace() { let name = if lib.starts_with("-l") { &lib[2..] } else if lib.starts_with("-") { &lib[1..] } else if Path::new(lib).exists() { // On MSVC llvm-config will print the full name to libraries, but // we're only interested in the name part let name = Path::new(lib).file_name().unwrap().to_str().unwrap(); name.trim_end_matches(".lib") } else if lib.ends_with(".lib") { // Some MSVC libraries just come up with `.lib` tacked on, so chop // that off lib.trim_end_matches(".lib") } else { continue; }; // Don't need or want this library, but LLVM's CMake build system // doesn't provide a way to disable it, so filter it here even though we // may or may not have built it. We don't reference anything from this // library and it otherwise may just pull in extra dependencies on // libedit which we don't want if name == "LLVMLineEditor" { continue; } let kind = if name.starts_with("LLVM") { llvm_kind } else { "dylib" }; println!("cargo:rustc-link-lib={}={}", kind, name); } // LLVM ldflags // // If we're a cross-compile of LLVM then unfortunately we can't trust these // ldflags (largely where all the LLVM libs are located). Currently just // hack around this by replacing the host triple with the target and pray // that those -L directories are the same! 
let mut cmd = Command::new(&llvm_config); cmd.arg(llvm_link_arg).arg("--ldflags"); for lib in output(&mut cmd).split_whitespace() { if lib.starts_with("-LIBPATH:") { println!("cargo:rustc-link-search=native={}", &lib[9..]); } else if is_crossed { if lib.starts_with("-L") { println!("cargo:rustc-link-search=native={}", lib[2..].replace(&host, &target)); } } else if lib.starts_with("-l") { println!("cargo:rustc-link-lib={}", &lib[2..]); } else if lib.starts_with("-L") { println!("cargo:rustc-link-search=native={}", &lib[2..]); } } let llvm_static_stdcpp = env::var_os("LLVM_STATIC_STDCPP"); let llvm_use_libcxx = env::var_os("LLVM_USE_LIBCXX"); let stdcppname = if target.contains("openbsd") { // llvm-config on OpenBSD doesn't mention stdlib=libc++ "c++" } else if target.contains("freebsd") { "c++" } else if target.contains("darwin") { "c++" } else if target.contains("netbsd") && llvm_static_stdcpp.is_some() { // NetBSD uses a separate library when relocation is required "stdc++_pic" } else if llvm_use_libcxx.is_some() { "c++" } else { "stdc++" }; // C++ runtime library if !target.contains("msvc") { if let Some(s) = llvm_static_stdcpp { assert!(!cxxflags.contains("stdlib=libc++")); let path = PathBuf::from(s); println!("cargo:rustc-link-search=native={}", path.parent().unwrap().display()); println!("cargo:rustc-link-lib=static={}", stdcppname); } else if cxxflags.contains("stdlib=libc++") { println!("cargo:rustc-link-lib=c++"); } else { println!("cargo:rustc-link-lib={}", stdcppname); } } // LLVM requires symbols from this library, but apparently they're not printed // during llvm-config? if target.contains("windows-gnu") { println!("cargo:rustc-link-lib=static-nobundle=gcc_s"); println!("cargo:rustc-link-lib=static-nobundle=pthread"); println!("cargo:rustc-link-lib=dylib=uuid"); } }
37.807143
94
0.5768
e273cc4cad85da9bf31753f189751d0b8defffae
1,783
/// Yew component that renders the Material Design "punch clock" icon as an
/// inline SVG. Presentation attributes (size, colour, stroke settings, …)
/// come from the shared [`crate::Props`], each falling back to a default.
pub struct IconPunchClock {
    props: crate::Props,
}

impl yew::Component for IconPunchClock {
    type Properties = crate::Props;
    type Message = ();

    fn create(props: Self::Properties, _: yew::prelude::ComponentLink<Self>) -> Self {
        Self { props }
    }

    // The icon is stateless; any (unit) message simply triggers a re-render.
    fn update(&mut self, _: Self::Message) -> yew::prelude::ShouldRender {
        true
    }

    // Props are treated as fixed after creation: prop changes never re-render.
    fn change(&mut self, _: Self::Properties) -> yew::prelude::ShouldRender {
        false
    }

    fn view(&self) -> yew::prelude::Html {
        // Resolve the size once per dimension; both default to 24px.
        let width = self.props.size.unwrap_or(24).to_string();
        let height = self.props.size.unwrap_or(24).to_string();
        yew::prelude::html! {
            <svg
                class=self.props.class.unwrap_or("")
                width=width
                height=height
                viewBox="0 0 24 24"
                fill=self.props.fill.unwrap_or("none")
                stroke=self.props.color.unwrap_or("currentColor")
                stroke-width=self.props.stroke_width.unwrap_or(2).to_string()
                stroke-linecap=self.props.stroke_linecap.unwrap_or("round")
                stroke-linejoin=self.props.stroke_linejoin.unwrap_or("round")
            >
                // Inner SVG holds the actual Material icon path data.
                <svg xmlns="http://www.w3.org/2000/svg" enable-background="new 0 0 24 24" height="24" viewBox="0 0 24 24" width="24"><g><rect fill="none" height="24" width="24"/></g><g><g><path d="M19,6h-1V1H6v5H5C3.9,6,3,6.9,3,8v12c0,1.1,0.9,2,2,2h14c1.1,0,2-0.9,2-2V8C21,6.9,20.1,6,19,6z M8,3h8v3H8V3z M19,20H5V8 h14V20z"/><path d="M12,9c-2.76,0-5,2.24-5,5s2.24,5,5,5c2.76,0,5-2.24,5-5S14.76,9,12,9z M12,17.5c-1.93,0-3.5-1.57-3.5-3.5 s1.57-3.5,3.5-3.5s3.5,1.57,3.5,3.5S13.93,17.5,12,17.5z"/><polygon points="12.5,11.5 11.5,11.5 11.5,14.21 13.14,15.85 13.85,15.14 12.5,13.79"/></g></g></svg>
            </svg>
        }
    }
}
38.76087
588
0.582165
ac8de799f6f3848e303dcfb8bf1d72374e8dd34f
8,012
/// ClientState defines a solo machine client that tracks the current consensus /// state and if the client is frozen. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ClientState { /// latest sequence of the client state #[prost(uint64, tag="1")] pub sequence: u64, /// frozen sequence of the solo machine #[prost(bool, tag="2")] pub is_frozen: bool, #[prost(message, optional, tag="3")] pub consensus_state: ::core::option::Option<ConsensusState>, /// when set to true, will allow governance to update a solo machine client. /// The client will be unfrozen if it is frozen. #[prost(bool, tag="4")] pub allow_update_after_proposal: bool, } /// ConsensusState defines a solo machine consensus state. The sequence of a /// consensus state is contained in the "height" key used in storing the /// consensus state. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ConsensusState { /// public key of the solo machine #[prost(message, optional, tag="1")] pub public_key: ::core::option::Option<::prost_types::Any>, /// diversifier allows the same public key to be re-used across different solo /// machine clients (potentially on different chains) without being considered /// misbehaviour. #[prost(string, tag="2")] pub diversifier: ::prost::alloc::string::String, #[prost(uint64, tag="3")] pub timestamp: u64, } /// Header defines a solo machine consensus header #[derive(Clone, PartialEq, ::prost::Message)] pub struct Header { /// sequence to update solo machine public key at #[prost(uint64, tag="1")] pub sequence: u64, #[prost(uint64, tag="2")] pub timestamp: u64, #[prost(bytes="vec", tag="3")] pub signature: ::prost::alloc::vec::Vec<u8>, #[prost(message, optional, tag="4")] pub new_public_key: ::core::option::Option<::prost_types::Any>, #[prost(string, tag="5")] pub new_diversifier: ::prost::alloc::string::String, } /// Misbehaviour defines misbehaviour for a solo machine which consists /// of a sequence and two signatures over different messages at that sequence. 
#[derive(Clone, PartialEq, ::prost::Message)] pub struct Misbehaviour { #[prost(string, tag="1")] pub client_id: ::prost::alloc::string::String, #[prost(uint64, tag="2")] pub sequence: u64, #[prost(message, optional, tag="3")] pub signature_one: ::core::option::Option<SignatureAndData>, #[prost(message, optional, tag="4")] pub signature_two: ::core::option::Option<SignatureAndData>, } /// SignatureAndData contains a signature and the data signed over to create that /// signature. #[derive(Clone, PartialEq, ::prost::Message)] pub struct SignatureAndData { #[prost(bytes="vec", tag="1")] pub signature: ::prost::alloc::vec::Vec<u8>, #[prost(enumeration="DataType", tag="2")] pub data_type: i32, #[prost(bytes="vec", tag="3")] pub data: ::prost::alloc::vec::Vec<u8>, #[prost(uint64, tag="4")] pub timestamp: u64, } /// TimestampedSignatureData contains the signature data and the timestamp of the /// signature. #[derive(Clone, PartialEq, ::prost::Message)] pub struct TimestampedSignatureData { #[prost(bytes="vec", tag="1")] pub signature_data: ::prost::alloc::vec::Vec<u8>, #[prost(uint64, tag="2")] pub timestamp: u64, } /// SignBytes defines the signed bytes used for signature verification. #[derive(Clone, PartialEq, ::prost::Message)] pub struct SignBytes { #[prost(uint64, tag="1")] pub sequence: u64, #[prost(uint64, tag="2")] pub timestamp: u64, #[prost(string, tag="3")] pub diversifier: ::prost::alloc::string::String, /// type of the data used #[prost(enumeration="DataType", tag="4")] pub data_type: i32, /// marshaled data #[prost(bytes="vec", tag="5")] pub data: ::prost::alloc::vec::Vec<u8>, } /// HeaderData returns the SignBytes data for update verification. 
#[derive(Clone, PartialEq, ::prost::Message)] pub struct HeaderData { /// header public key #[prost(message, optional, tag="1")] pub new_pub_key: ::core::option::Option<::prost_types::Any>, /// header diversifier #[prost(string, tag="2")] pub new_diversifier: ::prost::alloc::string::String, } /// ClientStateData returns the SignBytes data for client state verification. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ClientStateData { #[prost(bytes="vec", tag="1")] pub path: ::prost::alloc::vec::Vec<u8>, #[prost(message, optional, tag="2")] pub client_state: ::core::option::Option<::prost_types::Any>, } /// ConsensusStateData returns the SignBytes data for consensus state /// verification. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ConsensusStateData { #[prost(bytes="vec", tag="1")] pub path: ::prost::alloc::vec::Vec<u8>, #[prost(message, optional, tag="2")] pub consensus_state: ::core::option::Option<::prost_types::Any>, } /// ConnectionStateData returns the SignBytes data for connection state /// verification. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ConnectionStateData { #[prost(bytes="vec", tag="1")] pub path: ::prost::alloc::vec::Vec<u8>, #[prost(message, optional, tag="2")] pub connection: ::core::option::Option<super::super::super::core::connection::v1::ConnectionEnd>, } /// ChannelStateData returns the SignBytes data for channel state /// verification. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ChannelStateData { #[prost(bytes="vec", tag="1")] pub path: ::prost::alloc::vec::Vec<u8>, #[prost(message, optional, tag="2")] pub channel: ::core::option::Option<super::super::super::core::channel::v1::Channel>, } /// PacketCommitmentData returns the SignBytes data for packet commitment /// verification. 
#[derive(Clone, PartialEq, ::prost::Message)] pub struct PacketCommitmentData { #[prost(bytes="vec", tag="1")] pub path: ::prost::alloc::vec::Vec<u8>, #[prost(bytes="vec", tag="2")] pub commitment: ::prost::alloc::vec::Vec<u8>, } /// PacketAcknowledgementData returns the SignBytes data for acknowledgement /// verification. #[derive(Clone, PartialEq, ::prost::Message)] pub struct PacketAcknowledgementData { #[prost(bytes="vec", tag="1")] pub path: ::prost::alloc::vec::Vec<u8>, #[prost(bytes="vec", tag="2")] pub acknowledgement: ::prost::alloc::vec::Vec<u8>, } /// PacketReceiptAbsenceData returns the SignBytes data for /// packet receipt absence verification. #[derive(Clone, PartialEq, ::prost::Message)] pub struct PacketReceiptAbsenceData { #[prost(bytes="vec", tag="1")] pub path: ::prost::alloc::vec::Vec<u8>, } /// NextSequenceRecvData returns the SignBytes data for verification of the next /// sequence to be received. #[derive(Clone, PartialEq, ::prost::Message)] pub struct NextSequenceRecvData { #[prost(bytes="vec", tag="1")] pub path: ::prost::alloc::vec::Vec<u8>, #[prost(uint64, tag="2")] pub next_seq_recv: u64, } /// DataType defines the type of solo machine proof being created. This is done /// to preserve uniqueness of different data sign byte encodings. 
// NOTE(review): prost-generated enum — the discriminant values mirror the
// on-wire protobuf enum numbers and must not be renumbered by hand.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum DataType {
    /// Default State
    UninitializedUnspecified = 0,
    /// Data type for client state verification
    ClientState = 1,
    /// Data type for consensus state verification
    ConsensusState = 2,
    /// Data type for connection state verification
    ConnectionState = 3,
    /// Data type for channel state verification
    ChannelState = 4,
    /// Data type for packet commitment verification
    PacketCommitment = 5,
    /// Data type for packet acknowledgement verification
    PacketAcknowledgement = 6,
    /// Data type for packet receipt absence verification
    PacketReceiptAbsence = 7,
    /// Data type for next sequence recv verification
    NextSequenceRecv = 8,
    /// Data type for header verification
    Header = 9,
}
39.082927
101
0.67661
b95e4cb80ecbafee26df5a06246241b411640418
4,885
use epaint::ahash::AHashMap;

use crate::{math::Rect, paint::Shape, Id, *};

/// Different layer categories
#[derive(Clone, Copy, Debug, Hash, Eq, PartialEq, Ord, PartialOrd)]
#[cfg_attr(feature = "persistence", derive(serde::Deserialize, serde::Serialize))]
pub enum Order {
    /// Painted behind all floating windows
    Background,
    /// Normal moveable windows that you reorder by click
    Middle,
    /// Popups, menus etc that should always be painted on top of windows
    /// Foreground objects can also have tooltips
    Foreground,
    /// Things floating on top of everything else, like tooltips.
    /// You cannot interact with these.
    Tooltip,
    /// Debug layer, always painted last / on top
    Debug,
}

impl Order {
    // Must match the number of variants above; `ALL` is the paint order,
    // back (Background) to front (Debug).
    const COUNT: usize = 5;
    const ALL: [Order; Self::COUNT] = [
        Self::Background,
        Self::Middle,
        Self::Foreground,
        Self::Tooltip,
        Self::Debug,
    ];

    /// Whether layers of this order respond to pointer interaction.
    /// Only `Tooltip` is purely decorative.
    pub fn allow_interaction(&self) -> bool {
        match self {
            Self::Background | Self::Middle | Self::Foreground | Self::Debug => true,
            Self::Tooltip => false,
        }
    }
}

/// An identifier for a paint layer.
/// Also acts as an identifier for [`Area`]:s.
#[derive(Clone, Copy, Debug, Hash, Eq, PartialEq)]
#[cfg_attr(feature = "persistence", derive(serde::Deserialize, serde::Serialize))]
pub struct LayerId {
    pub order: Order,
    pub id: Id,
}

impl LayerId {
    pub fn new(order: Order, id: Id) -> Self {
        Self { order, id }
    }

    /// The debug layer (always painted on top).
    pub fn debug() -> Self {
        Self {
            order: Order::Debug,
            id: Id::new("debug"),
        }
    }

    /// The background layer (painted behind everything else).
    pub fn background() -> Self {
        Self {
            order: Order::Background,
            id: Id::background(),
        }
    }

    pub fn allow_interaction(&self) -> bool {
        self.order.allow_interaction()
    }
}

/// A unique identifier of a specific [`Shape`] in a [`PaintList`].
#[derive(Clone, Copy, PartialEq)]
pub struct ShapeIdx(usize);

/// A list of [`Shape`]s paired with a clip rectangle.
#[derive(Clone, Default)]
pub struct PaintList(Vec<(Rect, Shape)>);

impl PaintList {
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    /// Returns the index of the new [`Shape`] that can be used with `PaintList::set`.
    pub fn add(&mut self, clip_rect: Rect, shape: Shape) -> ShapeIdx {
        let idx = ShapeIdx(self.0.len());
        self.0.push((clip_rect, shape));
        idx
    }

    /// Append many shapes, all sharing the same clip rectangle.
    pub fn extend(&mut self, clip_rect: Rect, mut shapes: Vec<Shape>) {
        self.0
            .extend(shapes.drain(..).map(|shape| (clip_rect, shape)))
    }

    /// Modify an existing [`Shape`].
    ///
    /// Sometimes you want to paint a frame behind some contents, but don't know how large the frame needs to be
    /// until the contents have been added, and therefor also painted to the `PaintList`.
    ///
    /// The solution is to allocate a `Shape` using `let idx = paint_list.add(cr, Shape::Noop);`
    /// and then later setting it using `paint_list.set(idx, cr, frame);`.
    ///
    /// Panics if `idx` was not produced by a prior `add` on this list.
    pub fn set(&mut self, idx: ShapeIdx, clip_rect: Rect, shape: Shape) {
        assert!(idx.0 < self.0.len());
        self.0[idx.0] = (clip_rect, shape);
    }

    /// Translate each [`Shape`] and clip rectangle by this much, in-place
    pub fn translate(&mut self, delta: Vec2) {
        for (clip_rect, shape) in &mut self.0 {
            *clip_rect = clip_rect.translate(delta);
            shape.translate(delta);
        }
    }
}

/// All paint lists of one frame, bucketed per [`Order`] and keyed by layer [`Id`].
#[derive(Clone, Default)]
pub(crate) struct GraphicLayers([AHashMap<Id, PaintList>; Order::COUNT]);

impl GraphicLayers {
    /// The paint list for `layer_id`, created empty on first access.
    pub fn list(&mut self, layer_id: LayerId) -> &mut PaintList {
        self.0[layer_id.order as usize]
            .entry(layer_id.id)
            .or_default()
    }

    /// Empty every paint list, yielding all shapes in paint order:
    /// by [`Order`] (back to front), and within an order, layers listed in
    /// `area_order` first, then any remaining layers.
    pub fn drain(
        &mut self,
        area_order: &[LayerId],
    ) -> impl ExactSizeIterator<Item = (Rect, Shape)> {
        let mut all_shapes: Vec<_> = Default::default();

        for &order in &Order::ALL {
            let order_map = &mut self.0[order as usize];

            // If a layer is still empty at this point in the frame,
            // nobody has added to it, so it is old and defunct.
            // Free it to save memory:
            order_map.retain(|_, list| !list.is_empty());

            // First do the layers part of area_order:
            for layer_id in area_order {
                if layer_id.order == order {
                    if let Some(shapes) = order_map.get_mut(&layer_id.id) {
                        all_shapes.extend(shapes.0.drain(..));
                    }
                }
            }

            // Also draw areas that are missing in `area_order`:
            for shapes in order_map.values_mut() {
                all_shapes.extend(shapes.0.drain(..));
            }
        }

        all_shapes.into_iter()
    }
}
30.341615
112
0.578506
ebf0ae3687d3ae3201703e5cac2f414a7494bedc
1,829
// Copyright 2020-2021 The Datafuse Authors. // // SPDX-License-Identifier: Apache-2.0. use std::fmt; use common_datablocks::DataBlock; use common_datavalues::DataColumnarValue; use common_datavalues::DataSchema; use common_datavalues::DataType; use common_datavalues::DataValue; use common_exception::Result; use crate::IFunction; #[derive(Clone)] pub struct AliasFunction { depth: usize, alias: String, func: Box<dyn IFunction> } impl AliasFunction { pub fn try_create(alias: String, func: Box<dyn IFunction>) -> Result<Box<dyn IFunction>> { Ok(Box::new(AliasFunction { depth: 0, alias, func })) } } impl IFunction for AliasFunction { fn name(&self) -> &str { "AliasFunction" } fn return_type(&self, input_schema: &DataSchema) -> Result<DataType> { self.func.return_type(input_schema) } fn nullable(&self, input_schema: &DataSchema) -> Result<bool> { self.func.nullable(input_schema) } fn eval(&self, block: &DataBlock) -> Result<DataColumnarValue> { self.func.eval(block) } fn set_depth(&mut self, depth: usize) { self.depth = depth; } fn accumulate(&mut self, block: &DataBlock) -> Result<()> { self.func.accumulate(block) } fn accumulate_result(&self) -> Result<Vec<DataValue>> { self.func.accumulate_result() } fn merge(&mut self, states: &[DataValue]) -> Result<()> { self.func.merge(states) } fn merge_result(&self) -> Result<DataValue> { self.func.merge_result() } fn is_aggregator(&self) -> bool { self.func.is_aggregator() } } impl fmt::Display for AliasFunction { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:#}", self.alias) } }
22.8625
94
0.621651
67a2e906eaed62c7d8b41d2a8b7c15e56ba0fdd9
760
use crate::{account::AccountHash, system_contract_errors::pos::Error, TransferredTo, URef, U512};

/// Provides an access to mint.
///
/// Abstracts the mint's transfer/balance operations behind a trait so the
/// proof-of-stake logic can run against different providers.
pub trait MintProvider {
    /// Transfer `amount` from `source` purse to a `target` account.
    ///
    /// On success returns a [`TransferredTo`] value — presumably indicating
    /// whether the target account existed or was created; confirm against the
    /// `TransferredTo` definition.
    fn transfer_purse_to_account(
        &mut self,
        source: URef,
        target: AccountHash,
        amount: U512,
    ) -> Result<TransferredTo, Error>;

    /// Transfer `amount` from `source` purse to a `target` purse.
    fn transfer_purse_to_purse(
        &mut self,
        source: URef,
        target: URef,
        amount: U512,
    ) -> Result<(), Error>;

    /// Checks balance of a `purse`. Returns `None` if given purse does not exist.
    fn balance(&mut self, purse: URef) -> Result<Option<U512>, Error>;
}
31.666667
97
0.627632
9cf7a73fe0c02c60fd702ceef57343dfeb202b2a
251
use crate::types::Value;

use super::{BinaryOperator, OperatorResult};

/// The binary `<=` comparison operator.
///
/// Compares two [`Value`]s via their `PartialOrd` ordering and yields a
/// boolean [`Value`].
pub struct LessEqualsOperator;

impl BinaryOperator for LessEqualsOperator {
    fn eval(lhs: Value, rhs: Value) -> OperatorResult {
        let is_less_or_equal = lhs <= rhs;
        Ok(Value::Boolean(is_less_or_equal))
    }
}
22.818182
55
0.701195
166c450993f02a51702a85d8c408923561af32ba
1,535
use super::*;
use errors::LocationError;
use files::CodeLocation;
use statics::{valid_name_char, NameBuilder};
use strings::parse_static_str;

/// A single parsed import statement: a local name bound to a quoted path.
#[derive(Debug, Clone)]
pub struct Import {
    pub name: String,
    pub path: String_,
    pub location: CodeLocation,
}

impl GetName for Import {
    fn name(&self) -> Option<String> {
        Some(self.name.clone())
    }
}

impl GetLocation for Import {
    fn location(&self) -> CodeLocation {
        self.location.clone()
    }
}

/// Parses zero or more imports of the form `<name> "<path>"` from the
/// tokenizer's current position.
///
/// Stops (without error) at the first token run that does not look like an
/// import, rewinding the tokenizer to where that attempt began so the caller
/// can parse whatever follows. Errors only bubble up from unexpected
/// end-of-input or a malformed string literal.
pub fn parse_import(t: &mut Tokenizer) -> Result<Vec<Import>, LocationError> {
    let mut res: Vec<Import> = vec![];

    // Parse imports one after another until something non-import appears.
    'main: loop {
        // Snapshot the tokenizer position so a failed attempt can backtrack.
        let index = t.index;
        let y = t.y;
        let restore_location = |t: &mut Tokenizer| {
            t.index = index;
            t.y = y;
        };

        // Parse a single import.
        let location = t.last_index_location();
        // Skip leading whitespace; `None` means clean end-of-input.
        let c_option = t.next_while(" \t\n");
        let mut c = if let Some(c) = c_option {
            c
        } else {
            break;
        };
        // Accumulate the import name one character at a time.
        let mut import_name = NameBuilder::new();
        loop {
            match c {
                // Whitespace terminates the name.
                ' ' | '\t' | '\n' => break,
                // An invalid name character means this isn't an import:
                // rewind and stop parsing imports altogether.
                cr if !valid_name_char(cr) => {
                    restore_location(t);
                    break 'main;
                }
                cr => {
                    import_name.push(cr);
                    c = t.must_next_char()?;
                }
            };
        }
        // The name must be followed by a double-quoted path.
        c = t.must_next_while(" \t\n")?;
        if c != '"' {
            restore_location(t);
            break 'main;
        }
        let path = parse_static_str(t)?;
        res.push(Import {
            name: import_name.to_string(t)?,
            path,
            location,
        })
    }
    Ok(res)
}
19.43038
78
0.554397
ede92acf23ebf3b07205f8ee2746e33cca36ab53
29,342
#![doc = "generated by AutoRust 0.1.0"] #![allow(unused_mut)] #![allow(unused_variables)] #![allow(unused_imports)] use crate::models::*; pub mod container_groups { use crate::models::*; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<ContainerGroupListResult, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.ContainerInstance/containerGroups", operation_config.base_path(), subscription_id ); let mut url = url::Url::parse(url_str).map_err(|source| list::Error::ParseUrlError { source })?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(|source| list::Error::GetTokenError { source })?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(|source| list::Error::BuildRequestError { source })?; let rsp = http_client .execute_request(req) .await .map_err(|source| list::Error::ExecuteRequestError { source })?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ContainerGroupListResult = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError { source, body: rsp_body.clone(), })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected 
HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {}", source)] ParseUrlError { source: url::ParseError }, #[error("Failed to build request: {}", source)] BuildRequestError { source: http::Error }, #[error("Failed to execute request: {}", source)] ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> }, #[error("Failed to serialize request body: {}", source)] SerializeError { source: Box<dyn std::error::Error + Sync + Send> }, #[error("Failed to deserialize response body: {}", source)] DeserializeError { source: serde_json::Error, body: bytes::Bytes }, #[error("Failed to get access token: {}", source)] GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, ) -> std::result::Result<ContainerGroupListResult, list_by_resource_group::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerInstance/containerGroups", operation_config.base_path(), subscription_id, resource_group_name ); let mut url = url::Url::parse(url_str).map_err(|source| list_by_resource_group::Error::ParseUrlError { source })?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(|source| list_by_resource_group::Error::GetTokenError { source })?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = 
req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(|source| list_by_resource_group::Error::BuildRequestError { source })?; let rsp = http_client .execute_request(req) .await .map_err(|source| list_by_resource_group::Error::ExecuteRequestError { source })?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ContainerGroupListResult = serde_json::from_slice(rsp_body).map_err(|source| list_by_resource_group::Error::DeserializeError { source, body: rsp_body.clone(), })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list_by_resource_group::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list_by_resource_group { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {}", source)] ParseUrlError { source: url::ParseError }, #[error("Failed to build request: {}", source)] BuildRequestError { source: http::Error }, #[error("Failed to execute request: {}", source)] ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> }, #[error("Failed to serialize request body: {}", source)] SerializeError { source: Box<dyn std::error::Error + Sync + Send> }, #[error("Failed to deserialize response body: {}", source)] DeserializeError { source: serde_json::Error, body: bytes::Bytes }, #[error("Failed to get access token: {}", source)] GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, container_group_name: &str, ) -> std::result::Result<ContainerGroup, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerInstance/containerGroups/{}", 
operation_config.base_path(), subscription_id, resource_group_name, container_group_name ); let mut url = url::Url::parse(url_str).map_err(|source| get::Error::ParseUrlError { source })?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(|source| get::Error::GetTokenError { source })?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(|source| get::Error::BuildRequestError { source })?; let rsp = http_client .execute_request(req) .await .map_err(|source| get::Error::ExecuteRequestError { source })?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ContainerGroup = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError { source, body: rsp_body.clone(), })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(get::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod get { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {}", source)] ParseUrlError { source: url::ParseError }, #[error("Failed to build request: {}", source)] BuildRequestError { source: http::Error }, #[error("Failed to execute request: {}", source)] ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> }, #[error("Failed to serialize request body: 
{}", source)] SerializeError { source: Box<dyn std::error::Error + Sync + Send> }, #[error("Failed to deserialize response body: {}", source)] DeserializeError { source: serde_json::Error, body: bytes::Bytes }, #[error("Failed to get access token: {}", source)] GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, container_group_name: &str, container_group: &ContainerGroup, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerInstance/containerGroups/{}", operation_config.base_path(), subscription_id, resource_group_name, container_group_name ); let mut url = url::Url::parse(url_str).map_err(|source| create_or_update::Error::ParseUrlError { source })?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(|source| create_or_update::Error::GetTokenError { source })?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = azure_core::to_json(container_group).map_err(|source| create_or_update::Error::SerializeError { source })?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(|source| create_or_update::Error::BuildRequestError { source })?; let rsp = http_client .execute_request(req) .await .map_err(|source| create_or_update::Error::ExecuteRequestError { source })?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let 
rsp_value: ContainerGroup = serde_json::from_slice(rsp_body).map_err(|source| create_or_update::Error::DeserializeError { source, body: rsp_body.clone(), })?; Ok(create_or_update::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: ContainerGroup = serde_json::from_slice(rsp_body).map_err(|source| create_or_update::Error::DeserializeError { source, body: rsp_body.clone(), })?; Ok(create_or_update::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); Err(create_or_update::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod create_or_update { use crate::{models, models::*}; #[derive(Debug)] pub enum Response { Ok200(ContainerGroup), Created201(ContainerGroup), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {}", source)] ParseUrlError { source: url::ParseError }, #[error("Failed to build request: {}", source)] BuildRequestError { source: http::Error }, #[error("Failed to execute request: {}", source)] ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> }, #[error("Failed to serialize request body: {}", source)] SerializeError { source: Box<dyn std::error::Error + Sync + Send> }, #[error("Failed to deserialize response body: {}", source)] DeserializeError { source: serde_json::Error, body: bytes::Bytes }, #[error("Failed to get access token: {}", source)] GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, container_group_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( 
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerInstance/containerGroups/{}", operation_config.base_path(), subscription_id, resource_group_name, container_group_name ); let mut url = url::Url::parse(url_str).map_err(|source| delete::Error::ParseUrlError { source })?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(|source| delete::Error::GetTokenError { source })?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(|source| delete::Error::BuildRequestError { source })?; let rsp = http_client .execute_request(req) .await .map_err(|source| delete::Error::ExecuteRequestError { source })?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ContainerGroup = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError { source, body: rsp_body.clone(), })?; Ok(delete::Response::Ok200(rsp_value)) } http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); Err(delete::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod delete { use crate::{models, models::*}; #[derive(Debug)] pub enum Response { Ok200(ContainerGroup), NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {}", source)] 
ParseUrlError { source: url::ParseError }, #[error("Failed to build request: {}", source)] BuildRequestError { source: http::Error }, #[error("Failed to execute request: {}", source)] ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> }, #[error("Failed to serialize request body: {}", source)] SerializeError { source: Box<dyn std::error::Error + Sync + Send> }, #[error("Failed to deserialize response body: {}", source)] DeserializeError { source: serde_json::Error, body: bytes::Bytes }, #[error("Failed to get access token: {}", source)] GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod operations { use crate::models::*; pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<OperationListResult, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!("{}/providers/Microsoft.ContainerInstance/operations", operation_config.base_path(),); let mut url = url::Url::parse(url_str).map_err(|source| list::Error::ParseUrlError { source })?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(|source| list::Error::GetTokenError { source })?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(|source| list::Error::BuildRequestError { source })?; let rsp = http_client .execute_request(req) .await .map_err(|source| list::Error::ExecuteRequestError { source })?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let 
rsp_value: OperationListResult = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError { source, body: rsp_body.clone(), })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {}", source)] ParseUrlError { source: url::ParseError }, #[error("Failed to build request: {}", source)] BuildRequestError { source: http::Error }, #[error("Failed to execute request: {}", source)] ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> }, #[error("Failed to serialize request body: {}", source)] SerializeError { source: Box<dyn std::error::Error + Sync + Send> }, #[error("Failed to deserialize response body: {}", source)] DeserializeError { source: serde_json::Error, body: bytes::Bytes }, #[error("Failed to get access token: {}", source)] GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod container_group_usage { use crate::models::*; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, location: &str, ) -> std::result::Result<UsageListResult, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.ContainerInstance/locations/{}/usages", operation_config.base_path(), subscription_id, location ); let mut url = url::Url::parse(url_str).map_err(|source| list::Error::ParseUrlError { source })?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) 
.await .map_err(|source| list::Error::GetTokenError { source })?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(|source| list::Error::BuildRequestError { source })?; let rsp = http_client .execute_request(req) .await .map_err(|source| list::Error::ExecuteRequestError { source })?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: UsageListResult = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError { source, body: rsp_body.clone(), })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {}", source)] ParseUrlError { source: url::ParseError }, #[error("Failed to build request: {}", source)] BuildRequestError { source: http::Error }, #[error("Failed to execute request: {}", source)] ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> }, #[error("Failed to serialize request body: {}", source)] SerializeError { source: Box<dyn std::error::Error + Sync + Send> }, #[error("Failed to deserialize response body: {}", source)] DeserializeError { source: serde_json::Error, body: bytes::Bytes }, #[error("Failed to get access token: {}", source)] GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod container_logs { use crate::models::*; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: 
&str, resource_group_name: &str, container_group_name: &str, container_name: &str, tail: Option<i64>, ) -> std::result::Result<Logs, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerInstance/containerGroups/{}/containers/{}/logs", operation_config.base_path(), subscription_id, resource_group_name, container_group_name, container_name ); let mut url = url::Url::parse(url_str).map_err(|source| list::Error::ParseUrlError { source })?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(|source| list::Error::GetTokenError { source })?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", operation_config.api_version()); if let Some(tail) = tail { url.query_pairs_mut().append_pair("tail", tail.to_string().as_str()); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(|source| list::Error::BuildRequestError { source })?; let rsp = http_client .execute_request(req) .await .map_err(|source| list::Error::ExecuteRequestError { source })?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: Logs = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError { source, body: rsp_body.clone(), })?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); Err(list::Error::UnexpectedResponse { status_code, body: rsp_body.clone(), }) } } } pub mod list { use crate::{models, models::*}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Unexpected HTTP status 
code {}", status_code)] UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes }, #[error("Failed to parse request URL: {}", source)] ParseUrlError { source: url::ParseError }, #[error("Failed to build request: {}", source)] BuildRequestError { source: http::Error }, #[error("Failed to execute request: {}", source)] ExecuteRequestError { source: Box<dyn std::error::Error + Sync + Send> }, #[error("Failed to serialize request body: {}", source)] SerializeError { source: Box<dyn std::error::Error + Sync + Send> }, #[error("Failed to deserialize response body: {}", source)] DeserializeError { source: serde_json::Error, body: bytes::Bytes }, #[error("Failed to get access token: {}", source)] GetTokenError { source: azure_core::errors::AzureError }, } } }
48.499174
134
0.571876
8fb06a9e46a82369fffd4e802e8c729f7fd30ba3
273
use actix_web::{web::Json, ResponseError}; use serde::Serialize; /// Helper function to reduce boilerplate of an OK/Json response #[allow(dead_code)] pub fn respond_json<T>(data: T) -> Result<Json<T>, Box<dyn ResponseError>> where T: Serialize, { Ok(Json(data)) }
22.75
74
0.703297
91aea3c37c1de5e5570501280cd0116ea2c3ad8a
4,607
use crate::ffi; use crate::textures::texture::Texture; #[repr(transparent)] pub struct Image { image: ffi::Image, } impl Image { pub fn load_image(filename: impl AsRef<str>) -> Self { let filename = ffi::str_to_cstring(filename); // SAFETY: ffi // SAFETY: Since ffi::LoadImage makes a temporary ffi::Image and // ffi::Image has no destructor, making Image with from_raw satisfies // all conditions of safety. unsafe { Image::from_raw(ffi::LoadImage(filename.as_ptr())) } } pub fn load_image_raw( filename: impl AsRef<str>, width: i32, height: i32, format: i32, header_size: i32, ) -> Self { let filename = ffi::str_to_cstring(filename); // SAFETY: ffi // SAFETY: Since ffi::LoadImageRaw makes a temporary ffi::Image and // ffi::Image has no destructor, making Image with from_raw satisfies // all conditions of safety. unsafe { Image::from_raw(ffi::LoadImageRaw( filename.as_ptr(), width, height, format, header_size, )) } } pub fn load_image_from_memory(filename: impl AsRef<str>, file_data: &[u8]) -> Self { let filename = ffi::str_to_cstring(filename); // SAFETY: ffi // SAFETY: Since ffi::LoadImageFromMemory makes a temporary ffi::Image and // ffi::Image has no destructor, making Image with from_raw satisfies // all conditions of safety. unsafe { Image::from_raw(ffi::LoadImageFromMemory( filename.as_ptr(), file_data.as_ptr(), file_data.len() as i32, )) } } pub fn load_image_from_texture(texture: Texture) -> Self { // SAFETY: ffi // SAFETY: Since ffi::LoadImageFromTexture makes a temporary ffi::Image and // ffi::Image has no destructor, making Image with from_raw satisfies // all conditions of safety. 
// SAFETY: since we do not convert back to the Texture, ignoring the second // value of the into_raw is fine unsafe { Image::from_raw(ffi::LoadImageFromTexture(texture.into_raw().0)) } } pub fn load_image_from_screen() -> Self { // SAFETY: ffi // SAFETY: Since ffi::LoadImageFromScreen makes a temporary ffi::Image and // ffi::Image has no destructor, making Image with from_raw satisfies // all conditions of safety. unsafe { Image::from_raw(ffi::LoadImageFromScreen()) } } /// This function is unsafe because it takes a raw pointer as its parameter pub unsafe fn load_image_anim(filename: impl AsRef<str>, frames: *mut i32) -> Self { let filename = ffi::str_to_cstring(filename); // SAFETY: ffi // SAFETY: Since ffi::LoadImageAnim makes a temporary ffi::Image and // ffi::Image has no destructor, making Image with from_raw satisfies // all conditions of safety. Image::from_raw(ffi::LoadImageAnim(filename.as_ptr(), frames)) } /// Convert ffi::Image structure into Rust's one. /// This function is unsafe because Image has a destructor which does not exist in C. /// If there is one ffi::Image that makes two distinct Image, it can be happen that /// the destructor of Image is called more than twice, cause a double free. /// /// ## Safety /// - There is no manually called 'UnloadImage' with ffi::Image. /// - `from_raw` must call once for one ffi::Image. #[inline] pub(crate) unsafe fn from_raw(image: ffi::Image) -> Self { Self { image } } /// Takes the inner value /// This is unsafe because after making ffi::Image, user can never drop the /// ffi::Image. /// /// ## Safety /// - After calling this, one should manually drop the output or convert back to /// the Rust's Image. #[inline] pub(crate) unsafe fn take_raw(&self) -> ffi::Image { self.image } } impl Clone for Image { fn clone(&self) -> Self { // SAFETY: ffi // SAFETY: ffi::ImageCopy makes a deep copy of ffi::Image. // In addition, the obtained ffi::Image is temporary value // for which no destructor of ffi::Image is called. 
unsafe { Image::from_raw(ffi::ImageCopy(self.image)) } } } impl Drop for Image { fn drop(&mut self) { // SAFETY: ffi // SAFETY: Since this is a destructor, into_raw is fine unsafe { ffi::UnloadImage(self.image) } } }
34.901515
89
0.6056
75d14e2d88ea0278b4aef7b2c12b5cccc3e9c86e
2,493
use near_sdk::collections::Vector; use near_sdk::{ext_contract, Gas, PromiseResult}; //use serde_derive::{Serialize, Deserialize}; use crate::*; const GAS_FOR_RESOLVE_TRANSFER: Gas = Gas(10_000_000_000_000); const GAS_FOR_NFT_TRANSFER_CALL: Gas = Gas(25_000_000_000_000 + GAS_FOR_RESOLVE_TRANSFER.0); const MIN_GAS_FOR_NFT_TRANSFER_CALL: Gas = Gas(100_000_000_000_000); const NO_DEPOSIT: Balance = 0; const DEPOSIT: Balance = 1; #[near_bindgen] impl Contract { #[payable] pub fn nft_unbundle( &mut self, token_id: TokenId, ) { assert_one_yocto(); //get the sender to transfer the token from the sender to the receiver let caller_id = env::predecessor_account_id(); //get the token object if there is some token object let mut token = if let Some(token) = self.tokens_by_id.get(&token_id) { if token.owner_id != caller_id { //we refund the owner for releasing the storage used up by the approved account IDs //refund_approved_account_ids(owner_id, &approved_account_ids); // The token is not owner by the receiver anymore. Can't return it. 
} let mut range_iterator = token.bundles.iter(); while let Some(bundle) = range_iterator.next() { ext_nft::nft_transfer( caller_id.clone(), bundle.token_id.clone(), bundle.approval_id.clone(), None, bundle.contract.clone(), // contract account id DEPOSIT, // yocto NEAR to attach GAS_FOR_NFT_TRANSFER_CALL // gas to attach ); } //we remove the token from the receiver self.internal_remove_token_from_owner(&caller_id.clone(), &token_id); //we refund the receiver any approved account IDs that they may have set on the token // refund_approved_account_ids(caller_id.clone(), &token.approved_account_ids); //reset the approved account IDs to what they were before the transfer // token.approved_account_ids = approved_account_ids; //if there isn't a token object, it was burned and so we return true } else { //we refund the owner for releasing the storage used up by the approved account IDs // refund_approved_account_ids(self.owner_id, &approved_account_ids); }; } }
43.736842
99
0.627357
1a2035c33a0fd24e149035be47bf0e89215c891b
14,205
#[doc = "Reader of register INTENSET"] pub type R = crate::R<u32, super::INTENSET>; #[doc = "Writer for register INTENSET"] pub type W = crate::W<u32, super::INTENSET>; #[doc = "Register INTENSET `reset()`'s with value 0"] impl crate::ResetValue for super::INTENSET { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Write '1' to enable interrupt for SAMPLERDY event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SAMPLERDY_A { #[doc = "0: Read: Disabled"] DISABLED, #[doc = "1: Read: Enabled"] ENABLED, } impl From<SAMPLERDY_A> for bool { #[inline(always)] fn from(variant: SAMPLERDY_A) -> Self { match variant { SAMPLERDY_A::DISABLED => false, SAMPLERDY_A::ENABLED => true, } } } #[doc = "Reader of field `SAMPLERDY`"] pub type SAMPLERDY_R = crate::R<bool, SAMPLERDY_A>; impl SAMPLERDY_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> SAMPLERDY_A { match self.bits { false => SAMPLERDY_A::DISABLED, true => SAMPLERDY_A::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == SAMPLERDY_A::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == SAMPLERDY_A::ENABLED } } #[doc = "Write '1' to enable interrupt for SAMPLERDY event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum SAMPLERDY_AW { #[doc = "1: Enable"] SET, } impl From<SAMPLERDY_AW> for bool { #[inline(always)] fn from(variant: SAMPLERDY_AW) -> Self { match variant { SAMPLERDY_AW::SET => true, } } } #[doc = "Write proxy for field `SAMPLERDY`"] pub struct SAMPLERDY_W<'a> { w: &'a mut W, } impl<'a> SAMPLERDY_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: SAMPLERDY_AW) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Enable"] #[inline(always)] pub fn set(self) -> &'a mut W { 
self.variant(SAMPLERDY_AW::SET) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01); self.w } } #[doc = "Write '1' to enable interrupt for REPORTRDY event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum REPORTRDY_A { #[doc = "0: Read: Disabled"] DISABLED, #[doc = "1: Read: Enabled"] ENABLED, } impl From<REPORTRDY_A> for bool { #[inline(always)] fn from(variant: REPORTRDY_A) -> Self { match variant { REPORTRDY_A::DISABLED => false, REPORTRDY_A::ENABLED => true, } } } #[doc = "Reader of field `REPORTRDY`"] pub type REPORTRDY_R = crate::R<bool, REPORTRDY_A>; impl REPORTRDY_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> REPORTRDY_A { match self.bits { false => REPORTRDY_A::DISABLED, true => REPORTRDY_A::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == REPORTRDY_A::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == REPORTRDY_A::ENABLED } } #[doc = "Write '1' to enable interrupt for REPORTRDY event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum REPORTRDY_AW { #[doc = "1: Enable"] SET, } impl From<REPORTRDY_AW> for bool { #[inline(always)] fn from(variant: REPORTRDY_AW) -> Self { match variant { REPORTRDY_AW::SET => true, } } } #[doc = "Write proxy for field `REPORTRDY`"] pub struct REPORTRDY_W<'a> { w: &'a mut W, } impl<'a> REPORTRDY_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: REPORTRDY_AW) -> &'a mut W { { self.bit(variant.into()) } } #[doc = 
"Enable"] #[inline(always)] pub fn set(self) -> &'a mut W { self.variant(REPORTRDY_AW::SET) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1); self.w } } #[doc = "Write '1' to enable interrupt for ACCOF event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum ACCOF_A { #[doc = "0: Read: Disabled"] DISABLED, #[doc = "1: Read: Enabled"] ENABLED, } impl From<ACCOF_A> for bool { #[inline(always)] fn from(variant: ACCOF_A) -> Self { match variant { ACCOF_A::DISABLED => false, ACCOF_A::ENABLED => true, } } } #[doc = "Reader of field `ACCOF`"] pub type ACCOF_R = crate::R<bool, ACCOF_A>; impl ACCOF_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> ACCOF_A { match self.bits { false => ACCOF_A::DISABLED, true => ACCOF_A::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == ACCOF_A::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == ACCOF_A::ENABLED } } #[doc = "Write '1' to enable interrupt for ACCOF event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum ACCOF_AW { #[doc = "1: Enable"] SET, } impl From<ACCOF_AW> for bool { #[inline(always)] fn from(variant: ACCOF_AW) -> Self { match variant { ACCOF_AW::SET => true, } } } #[doc = "Write proxy for field `ACCOF`"] pub struct ACCOF_W<'a> { w: &'a mut W, } impl<'a> ACCOF_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: ACCOF_AW) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Enable"] 
#[inline(always)] pub fn set(self) -> &'a mut W { self.variant(ACCOF_AW::SET) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2); self.w } } #[doc = "Write '1' to enable interrupt for DBLRDY event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DBLRDY_A { #[doc = "0: Read: Disabled"] DISABLED, #[doc = "1: Read: Enabled"] ENABLED, } impl From<DBLRDY_A> for bool { #[inline(always)] fn from(variant: DBLRDY_A) -> Self { match variant { DBLRDY_A::DISABLED => false, DBLRDY_A::ENABLED => true, } } } #[doc = "Reader of field `DBLRDY`"] pub type DBLRDY_R = crate::R<bool, DBLRDY_A>; impl DBLRDY_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> DBLRDY_A { match self.bits { false => DBLRDY_A::DISABLED, true => DBLRDY_A::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == DBLRDY_A::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == DBLRDY_A::ENABLED } } #[doc = "Write '1' to enable interrupt for DBLRDY event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DBLRDY_AW { #[doc = "1: Enable"] SET, } impl From<DBLRDY_AW> for bool { #[inline(always)] fn from(variant: DBLRDY_AW) -> Self { match variant { DBLRDY_AW::SET => true, } } } #[doc = "Write proxy for field `DBLRDY`"] pub struct DBLRDY_W<'a> { w: &'a mut W, } impl<'a> DBLRDY_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: DBLRDY_AW) -> &'a mut W { { self.bit(variant.into()) } } #[doc = "Enable"] 
#[inline(always)] pub fn set(self) -> &'a mut W { self.variant(DBLRDY_AW::SET) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3); self.w } } #[doc = "Write '1' to enable interrupt for STOPPED event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum STOPPED_A { #[doc = "0: Read: Disabled"] DISABLED, #[doc = "1: Read: Enabled"] ENABLED, } impl From<STOPPED_A> for bool { #[inline(always)] fn from(variant: STOPPED_A) -> Self { match variant { STOPPED_A::DISABLED => false, STOPPED_A::ENABLED => true, } } } #[doc = "Reader of field `STOPPED`"] pub type STOPPED_R = crate::R<bool, STOPPED_A>; impl STOPPED_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> STOPPED_A { match self.bits { false => STOPPED_A::DISABLED, true => STOPPED_A::ENABLED, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { *self == STOPPED_A::DISABLED } #[doc = "Checks if the value of the field is `ENABLED`"] #[inline(always)] pub fn is_enabled(&self) -> bool { *self == STOPPED_A::ENABLED } } #[doc = "Write '1' to enable interrupt for STOPPED event\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum STOPPED_AW { #[doc = "1: Enable"] SET, } impl From<STOPPED_AW> for bool { #[inline(always)] fn from(variant: STOPPED_AW) -> Self { match variant { STOPPED_AW::SET => true, } } } #[doc = "Write proxy for field `STOPPED`"] pub struct STOPPED_W<'a> { w: &'a mut W, } impl<'a> STOPPED_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: STOPPED_AW) -> &'a mut W { { self.bit(variant.into()) } 
} #[doc = "Enable"] #[inline(always)] pub fn set(self) -> &'a mut W { self.variant(STOPPED_AW::SET) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 4)) | (((value as u32) & 0x01) << 4); self.w } } impl R { #[doc = "Bit 0 - Write '1' to enable interrupt for SAMPLERDY event"] #[inline(always)] pub fn samplerdy(&self) -> SAMPLERDY_R { SAMPLERDY_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - Write '1' to enable interrupt for REPORTRDY event"] #[inline(always)] pub fn reportrdy(&self) -> REPORTRDY_R { REPORTRDY_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 2 - Write '1' to enable interrupt for ACCOF event"] #[inline(always)] pub fn accof(&self) -> ACCOF_R { ACCOF_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 3 - Write '1' to enable interrupt for DBLRDY event"] #[inline(always)] pub fn dblrdy(&self) -> DBLRDY_R { DBLRDY_R::new(((self.bits >> 3) & 0x01) != 0) } #[doc = "Bit 4 - Write '1' to enable interrupt for STOPPED event"] #[inline(always)] pub fn stopped(&self) -> STOPPED_R { STOPPED_R::new(((self.bits >> 4) & 0x01) != 0) } } impl W { #[doc = "Bit 0 - Write '1' to enable interrupt for SAMPLERDY event"] #[inline(always)] pub fn samplerdy(&mut self) -> SAMPLERDY_W { SAMPLERDY_W { w: self } } #[doc = "Bit 1 - Write '1' to enable interrupt for REPORTRDY event"] #[inline(always)] pub fn reportrdy(&mut self) -> REPORTRDY_W { REPORTRDY_W { w: self } } #[doc = "Bit 2 - Write '1' to enable interrupt for ACCOF event"] #[inline(always)] pub fn accof(&mut self) -> ACCOF_W { ACCOF_W { w: self } } #[doc = "Bit 3 - Write '1' to enable interrupt for DBLRDY event"] #[inline(always)] pub fn dblrdy(&mut self) -> DBLRDY_W { DBLRDY_W { w: self } } #[doc = "Bit 4 - 
Write '1' to enable interrupt for STOPPED event"] #[inline(always)] pub fn stopped(&mut self) -> STOPPED_W { STOPPED_W { w: self } } }
28.296813
84
0.555438