hexsha
stringlengths
40
40
size
int64
4
1.05M
content
stringlengths
4
1.05M
avg_line_length
float64
1.33
100
max_line_length
int64
1
1k
alphanum_fraction
float64
0.25
1
f53bf4551ea8619e2a1eaa5556ca12a547924740
737
use clap::ArgMatches; use failure::{Error, Fail}; use std::fs::{create_dir, File}; use std::path::Path; #[derive(Debug, Fail)] #[fail(display = "Project exists!")] struct ProjectExists; pub fn cmd_new(matches: &ArgMatches) -> Result<(), Error> { let name = matches.value_of("name").unwrap(); let path = Path::new(name); if path.exists() { return Err(Error::from(ProjectExists)); } create_project_dir(path) } fn create_project_dir(path: &Path) -> Result<(), Error> { create_dir(path)?; create_dir(path.join("contents"))?; create_dir(path.join("layouts"))?; create_dir(path.join("public"))?; create_dir(path.join("assets"))?; File::create(path.join("config.toml"))?; Ok(()) }
23.03125
59
0.63365
8afd558135f8af289e8f9737c7630a42625a436d
5,948
// tedge `cert create` command (collapsed single-line form kept verbatim).
// `CreateCertCmd` holds the device id plus cert/key output paths; `Command::execute`
// delegates to `create_test_certificate`, which (running as BROKER_USER) validates
// both parent directories, generates a self-signed `KeyCertPair`, writes the PEM
// certificate and locks it to 0o444, then writes the private key under 0o600 and
// tightens it to 0o400 after the write.
use crate::command::{Command, ExecutionContext}; use crate::utils::paths; use certificate::{KeyCertPair, NewCertificateConfig}; use std::{ fs::{File, OpenOptions}, io::prelude::*, path::Path, }; use tedge_config::*; use tedge_users; use super::error::CertError; /// Create a self-signed device certificate pub struct CreateCertCmd { /// The device identifier pub id: String, /// The path where the device certificate will be stored pub cert_path: FilePath, /// The path where the device private key will be stored pub key_path: FilePath, } impl Command for CreateCertCmd { fn description(&self) -> String { format!("create a test certificate for the device {}.", self.id) } fn execute(&self, context: &ExecutionContext) -> Result<(), anyhow::Error> { let config = NewCertificateConfig::default(); let () = self.create_test_certificate(&config, &context.user_manager)?; Ok(()) } } impl CreateCertCmd { fn create_test_certificate( &self, config: &NewCertificateConfig, user_manager: &tedge_users::UserManager, ) -> Result<(), CertError> { let _user_guard = user_manager.become_user(tedge_users::BROKER_USER)?; paths::validate_parent_dir_exists(&self.cert_path).map_err(CertError::CertPathError)?; paths::validate_parent_dir_exists(&self.key_path).map_err(CertError::KeyPathError)?; let cert = KeyCertPair::new_selfsigned_certificate(&config, &self.id)?; // Creating files with permission 644 let mut cert_file = create_new_file(&self.cert_path) .map_err(|err| err.cert_context(self.cert_path.clone()))?; let mut key_file = create_new_file(&self.key_path) .map_err(|err| err.key_context(self.key_path.clone()))?; let cert_pem = cert.certificate_pem_string()?; cert_file.write_all(cert_pem.as_bytes())?; cert_file.sync_all()?; // Prevent the certificate to be overwritten paths::set_permission(&cert_file, 0o444)?; { // Make sure the key is secret, before write paths::set_permission(&key_file, 0o600)?; // Zero the private key on drop let cert_key = cert.private_key_pem_string()?; 
// Continuation: the private key write/sync/permission clamp, the `create_new_file`
// helper (O_EXCL semantics via `create_new(true)`, so existing files are never
// clobbered), and the first unit tests: happy path, and no-overwrite of
// pre-existing cert/key files.
key_file.write_all(cert_key.as_bytes())?; key_file.sync_all()?; // Prevent the key to be overwritten paths::set_permission(&key_file, 0o400)?; } Ok(()) } } fn create_new_file(path: impl AsRef<Path>) -> Result<File, CertError> { Ok(OpenOptions::new().write(true).create_new(true).open(path)?) } #[cfg(test)] mod tests { use super::*; use assert_matches::assert_matches; use std::fs; use tedge_users::UserManager; use tempfile::*; #[test] fn basic_usage() { let dir = tempdir().unwrap(); let cert_path = temp_file_path(&dir, "my-device-cert.pem"); let key_path = temp_file_path(&dir, "my-device-key.pem"); let id = "my-device-id"; let cmd = CreateCertCmd { id: String::from(id), cert_path: cert_path.clone(), key_path: key_path.clone(), }; assert_matches!( cmd.create_test_certificate(&NewCertificateConfig::default(), &UserManager::new()), Ok(()) ); assert_eq!(parse_pem_file(&cert_path).unwrap().tag, "CERTIFICATE"); assert_eq!(parse_pem_file(&key_path).unwrap().tag, "PRIVATE KEY"); } #[test] fn check_certificate_is_not_overwritten() { let dir = tempdir().unwrap(); let cert_path = temp_file_path(&dir, "my-device-cert.pem"); let key_path = temp_file_path(&dir, "my-device-key.pem"); let cert_content = "some cert content"; let key_content = "some key content"; fs::write(&cert_path, cert_content).unwrap(); fs::write(&key_path, key_content).unwrap(); let cmd = CreateCertCmd { id: "my-device-id".into(), cert_path: cert_path.clone(), key_path: key_path.clone(), }; assert!(cmd .create_test_certificate(&NewCertificateConfig::default(), &UserManager::new()) .ok() .is_none()); assert_eq!(fs::read(&cert_path).unwrap(), cert_content.as_bytes()); assert_eq!(fs::read(&key_path).unwrap(), key_content.as_bytes()); } #[test] fn create_certificate_in_non_existent_directory() { let dir = tempdir().unwrap(); let key_path = temp_file_path(&dir, "my-device-key.pem"); let cert_path = FilePath::from("/non/existent/cert/path"); let cmd = CreateCertCmd { id: "my-device-id".into(), cert_path, key_path, }; 
// Remaining tests assert CertPathError / KeyPathError when a parent directory is
// missing, plus the `temp_file_path` and `parse_pem_file` test helpers.
let cert_error = cmd .create_test_certificate(&NewCertificateConfig::default(), &UserManager::new()) .unwrap_err(); assert_matches!(cert_error, CertError::CertPathError { .. }); } #[test] fn create_key_in_non_existent_directory() { let dir = tempdir().unwrap(); let cert_path = temp_file_path(&dir, "my-device-cert.pem"); let key_path = FilePath::from("/non/existent/key/path"); let cmd = CreateCertCmd { id: "my-device-id".into(), cert_path, key_path, }; let cert_error = cmd .create_test_certificate(&NewCertificateConfig::default(), &UserManager::new()) .unwrap_err(); assert_matches!(cert_error, CertError::KeyPathError { .. }); } fn temp_file_path(dir: &TempDir, filename: &str) -> FilePath { dir.path().join(filename).into() } fn parse_pem_file(path: impl AsRef<Path>) -> Result<pem::Pem, String> { let content = fs::read(path).map_err(|err| err.to_string())?; pem::parse(content).map_err(|err| err.to_string()) } }
31.807487
95
0.608104
759a710f940419f8763a5b9b753ff2298701255d
4,864
// Test-support macros for driving the `nu` binary (collapsed one-line form kept
// verbatim). `nu!` optionally formats a command template with display-path args,
// cds into the given directory, pipes the commands into a spawned nu process
// (plugins and history disabled, fixture config file), prepends the dummy test
// binaries directory to PATH, and returns an `Outcome` of captured stdout/stderr.
#[macro_export] macro_rules! nu { (cwd: $cwd:expr, $path:expr, $($part:expr),*) => {{ use $crate::fs::DisplayPath; let path = format!($path, $( $part.display_path() ),*); nu!($cwd, &path) }}; (cwd: $cwd:expr, $path:expr) => {{ nu!($cwd, $path) }}; ($cwd:expr, $path:expr) => {{ pub use std::error::Error; pub use std::io::prelude::*; pub use std::process::{Command, Stdio}; let commands = &*format!( " cd \"{}\" {} exit", $crate::fs::in_directory($cwd), $crate::fs::DisplayPath::display_path(&$path) ); let test_bins = $crate::fs::binaries(); let test_bins = dunce::canonicalize(&test_bins).unwrap_or_else(|e| { panic!( "Couldn't canonicalize dummy binaries path {}: {:?}", test_bins.display(), e ) }); let mut paths = $crate::shell_os_paths(); paths.insert(0, test_bins); let paths_joined = match std::env::join_paths(paths.iter()) { Ok(all) => all, Err(_) => panic!("Couldn't join paths for PATH var."), }; let mut process = match Command::new($crate::fs::executable_path()) .env("PATH", paths_joined) .arg("--skip-plugins") .arg("--no-history") .arg("--config-file") .arg($crate::fs::DisplayPath::display_path(&$crate::fs::fixtures().join("playground/config/default.toml"))) .stdout(Stdio::piped()) .stdin(Stdio::piped()) .stderr(Stdio::piped()) .spawn() { Ok(child) => child, Err(why) => panic!("Can't run test {}", why.to_string()), }; let stdin = process.stdin.as_mut().expect("couldn't open stdin"); stdin .write_all(commands.as_bytes()) .expect("couldn't write to stdin"); let output = process .wait_with_output() .expect("couldn't read from stdout/stderr"); let out = $crate::macros::read_std(&output.stdout); let err = String::from_utf8_lossy(&output.stderr); println!("=== stderr\n{}", err); $crate::Outcome::new(out,err.into_owned()) }}; } #[macro_export] macro_rules! 
// `nu_with_plugins!` is the same driver minus the --skip-plugins/--no-history/
// --config-file flags (so plugins are active). `read_std` collapses captured
// stdout into a single line by joining lines and stripping all CR/LF sequences.
nu_with_plugins { (cwd: $cwd:expr, $path:expr, $($part:expr),*) => {{ use $crate::fs::DisplayPath; let path = format!($path, $( $part.display_path() ),*); nu_with_plugins!($cwd, &path) }}; (cwd: $cwd:expr, $path:expr) => {{ nu_with_plugins!($cwd, $path) }}; ($cwd:expr, $path:expr) => {{ pub use std::error::Error; pub use std::io::prelude::*; pub use std::process::{Command, Stdio}; let commands = &*format!( " cd \"{}\" {} exit", $crate::fs::in_directory($cwd), $crate::fs::DisplayPath::display_path(&$path) ); let test_bins = $crate::fs::binaries(); let test_bins = dunce::canonicalize(&test_bins).unwrap_or_else(|e| { panic!( "Couldn't canonicalize dummy binaries path {}: {:?}", test_bins.display(), e ) }); let mut paths = $crate::shell_os_paths(); paths.insert(0, test_bins); let paths_joined = match std::env::join_paths(paths.iter()) { Ok(all) => all, Err(_) => panic!("Couldn't join paths for PATH var."), }; let mut process = match Command::new($crate::fs::executable_path()) .env("PATH", paths_joined) .stdout(Stdio::piped()) .stdin(Stdio::piped()) .stderr(Stdio::piped()) .spawn() { Ok(child) => child, Err(why) => panic!("Can't run test {}", why.to_string()), }; let stdin = process.stdin.as_mut().expect("couldn't open stdin"); stdin .write_all(commands.as_bytes()) .expect("couldn't write to stdin"); let output = process .wait_with_output() .expect("couldn't read from stdout/stderr"); let out = $crate::macros::read_std(&output.stdout); let err = String::from_utf8_lossy(&output.stderr); println!("=== stderr\n{}", err); $crate::Outcome::new(out,err.into_owned()) }}; } pub fn read_std(std: &[u8]) -> String { let out = String::from_utf8_lossy(std); let out = out.lines().collect::<Vec<_>>().join("\n"); let out = out.replace("\r\n", ""); out.replace("\n", "") }
29.840491
119
0.481497
bfe476f4bc2607fba022807ae7fc17cf3f4f93a4
10,167
// A/B Street building info panel (collapsed lines kept verbatim). `info`/`info_body`
// render a table for one building: address, optional localized name, OSM id in dev
// mode, and parking capacity (unlimited / public garage / private / none).
use std::collections::BTreeMap; use geom::{Angle, Circle, Distance, Speed, Time}; use map_gui::render::DrawPedestrian; use map_model::{BuildingID, LaneID, OffstreetParking, Traversable, SIDEWALK_THICKNESS}; use sim::{DrawPedestrianInput, PedestrianID, PersonID, TripMode, TripResult, VehicleType}; use widgetry::{Color, EventCtx, Line, Text, TextExt, Widget}; use crate::app::App; use crate::info::{header_btns, make_table, make_tabs, Details, Tab}; pub fn info(ctx: &mut EventCtx, app: &App, details: &mut Details, id: BuildingID) -> Widget { Widget::custom_col(vec![ header(ctx, app, details, id, Tab::BldgInfo(id)), info_body(ctx, app, details, id).tab_body(ctx), ]) } fn info_body(ctx: &mut EventCtx, app: &App, details: &mut Details, id: BuildingID) -> Widget { let mut rows = vec![]; let b = app.primary.map.get_b(id); let mut kv = Vec::new(); kv.push(("Address", b.address.clone())); if let Some(ref names) = b.name { kv.push(("Name", names.get(app.opts.language.as_ref()).to_string())); } if app.opts.dev { kv.push(("OSM ID", format!("{}", b.orig_id.inner()))); } let num_spots = b.num_parking_spots(); if app.primary.sim.infinite_parking() { kv.push(( "Parking", format!( "Unlimited, currently {} cars inside", app.primary.sim.bldg_to_parked_cars(b.id).len() ), )); } else if num_spots > 0 { let free = app.primary.sim.get_free_offstreet_spots(b.id).len(); if let OffstreetParking::PublicGarage(ref n, _) = b.parking { kv.push(( "Parking", format!("{} / {} public spots available via {}", free, num_spots, n), )); } else { kv.push(( "Parking", format!("{} / {} private spots available", free, num_spots), )); } } else { kv.push(("Parking", "None".to_string())); } rows.extend(make_table(ctx, kv)); let mut txt = Text::new(); if !b.amenities.is_empty() { txt.add_line(""); if b.amenities.len() == 1 { txt.add_line("1 amenity:"); } else { txt.add_line(format!("{} amenities:", b.amenities.len())); } for a in &b.amenities { txt.add_line(format!( " {} ({})", 
// Continuation of `info_body`: amenity lines, then (unless parking is infinite) a
// walking-path trace to the nearest parking spot drawn on the unzoomed/zoomed
// overlays, and dev-mode extras (Open OSM button, raw OSM tag table).
a.names.get(app.opts.language.as_ref()), a.amenity_type )); } } if !app.primary.sim.infinite_parking() { txt.add_line(""); if let Some(pl) = app .primary .sim .walking_path_to_nearest_parking_spot(&app.primary.map, id) .and_then(|path| path.trace(&app.primary.map)) { let color = app.cs.parking_trip; // TODO But this color doesn't show up well against the info panel... txt.add_line(Line("Nearest parking").fg(color)); txt.append(Line(format!( " is ~{} away by foot", pl.length() / Speed::miles_per_hour(3.0) ))); details .unzoomed .push(color, pl.make_polygons(Distance::meters(10.0))); details.zoomed.extend( color, pl.dashed_lines( Distance::meters(0.75), Distance::meters(1.0), Distance::meters(0.4), ), ); } else { txt.add_line("No nearby parking available") } } if !txt.is_empty() { rows.push(txt.into_widget(ctx)) } if app.opts.dev { rows.push( ctx.style() .btn_outline .text("Open OSM") .build_widget(ctx, format!("open {}", b.orig_id)), ); if !b.osm_tags.is_empty() { rows.push("Raw OpenStreetMap data".text_widget(ctx)); rows.extend(make_table( ctx, b.osm_tags .inner() .iter() .map(|(k, v)| (k, v.to_string())) .collect(), )); } } Widget::col(rows) } pub fn people(ctx: &mut EventCtx, app: &App, details: &mut Details, id: BuildingID) -> Widget { Widget::custom_col(vec![ header(ctx, app, details, id, Tab::BldgPeople(id)), people_body(ctx, app, details, id).tab_body(ctx), ]) } fn people_body(ctx: &mut EventCtx, app: &App, details: &mut Details, id: BuildingID) -> Widget { let mut rows = vec![]; // Two caveats about these counts: // 1) A person might use multiple modes through the day, but this just picks a single category. // 2) Only people currently in the building currently are counted, whether or not that's their // home. 
// `people_body`: classifies current occupants as drivers/cyclists/others based on
// the vehicles they own, finds each person's next not-yet-started trip (departure
// time + mode), links each row to that person's trips tab, and sorts rows by
// departure time (people with no upcoming trip sort to end-of-day).
let mut drivers = 0; let mut cyclists = 0; let mut others = 0; let mut ppl: Vec<(Time, Widget)> = Vec::new(); for p in app.primary.sim.bldg_to_people(id) { let person = app.primary.sim.get_person(p); let mut has_car = false; let mut has_bike = false; for vehicle in &person.vehicles { if vehicle.vehicle_type == VehicleType::Car { has_car = true; } else if vehicle.vehicle_type == VehicleType::Bike { has_bike = true; } } if has_car { drivers += 1; } else if has_bike { cyclists += 1; } else { others += 1; } let mut next_trip: Option<(Time, TripMode)> = None; for t in &person.trips { match app.primary.sim.trip_to_agent(*t) { TripResult::TripNotStarted => { let trip = app.primary.sim.trip_info(*t); next_trip = Some((trip.departure, trip.mode)); break; } TripResult::Ok(_) | TripResult::ModeChange => { // TODO What to do here? This is meant for building callers right now break; } TripResult::TripDone | TripResult::TripCancelled => {} TripResult::TripDoesntExist => unreachable!(), } } details .hyperlinks .insert(p.to_string(), Tab::PersonTrips(p, BTreeMap::new())); let widget = Widget::row(vec![ ctx.style().btn_outline.text(p.to_string()).build_def(ctx), if let Some((t, mode)) = next_trip { format!( "Leaving in {} to {}", t - app.primary.sim.time(), mode.verb() ) .text_widget(ctx) } else { "Staying inside".text_widget(ctx) }, ]); ppl.push(( next_trip .map(|(t, _)| t) .unwrap_or(app.primary.sim.get_end_of_day()), widget, )); } // Sort by time to next trip ppl.sort_by_key(|(t, _)| *t); if ppl.is_empty() { rows.push("Nobody's inside right now".text_widget(ctx)); } else { rows.push( format!( "{} drivers, {} cyclists, {} others", drivers, cyclists, others ) .text_widget(ctx), ); for (_, w) in ppl { rows.push(w); } } Widget::col(rows) } fn header(ctx: &EventCtx, app: &App, details: &mut Details, id: BuildingID, tab: Tab) -> Widget { let mut rows = vec![]; rows.push(Widget::row(vec![ Line(id.to_string()).small_heading().into_widget(ctx), header_btns(ctx), ])); rows.push(make_tabs( 
// `header` finishes the Info/People tab bar; `draw_occupants` lays the building's
// people out on a sqrt-sized grid around the building's label center (a yellow
// circle highlights the focused person) and draws them with placeholder
// DrawPedestrianInput values.
ctx, &mut details.hyperlinks, tab, vec![("Info", Tab::BldgInfo(id)), ("People", Tab::BldgPeople(id))], )); draw_occupants(details, app, id, None); // TODO Draw cars parked inside? Widget::custom_col(rows) } pub fn draw_occupants(details: &mut Details, app: &App, id: BuildingID, focus: Option<PersonID>) { // TODO Lots of fun ideas here. Have a deterministic simulation based on building ID and time // to have people "realistically" move around. Draw little floor plans. let mut ppl = app.primary.sim.bldg_to_people(id); let num_rows_cols = (ppl.len() as f64).sqrt().ceil() as usize; let ped_len = SIDEWALK_THICKNESS.inner_meters() / 2.0; let separation = ped_len * 1.5; let total_width_height = (num_rows_cols as f64) * (ped_len + separation); let top_left = app .primary .map .get_b(id) .label_center .offset(-total_width_height / 2.0, -total_width_height / 2.0); // TODO Current thing is inefficient and can easily wind up outside the building. 'OUTER: for x in 0..num_rows_cols { for y in 0..num_rows_cols { let person = if let Some(p) = ppl.pop() { p } else { break 'OUTER; }; let pos = top_left.offset( (x as f64) * (ped_len + separation), (y as f64) * (ped_len + separation), ); if Some(person) == focus { details.zoomed.push( Color::YELLOW.alpha(0.8), Circle::new(pos, SIDEWALK_THICKNESS).to_polygon(), ); } DrawPedestrian::geometry( &mut details.zoomed, &app.primary.sim, &app.cs, &DrawPedestrianInput { // Lies id: PedestrianID(person.0), person, pos, facing: Angle::degrees(90.0), waiting_for_turn: None, preparing_bike: false, // Both hands and feet! waiting_for_bus: true, on: Traversable::Lane(LaneID(0)), }, 0, ); } } }
31.574534
99
0.501525
dee846672b31cb9aabd845f494deef249002bf59
2,457
use crate::api::OdooConnection; use crate::error::{Error, E_INV_CRED, E_INV_RESP}; use xmlrpc::{Request, Value}; #[derive(Debug)] pub struct InvoiceData { odoo_connection: OdooConnection, pub state: String, } pub trait Invoice { fn selection(&mut self) -> Result<(), Error>; } impl Invoice for InvoiceData { /// Query invoice fn selection(&mut self) -> Result<(), Error> { let request_object: String = format!("{}/xmlrpc/2/object", self.odoo_connection.connection.url); // Read key let mut vec_select: Vec<Value> = Vec::new(); vec_select.push(Value::String("name".to_string())); vec_select.push(Value::String("date".to_string())); vec_select.push(Value::String("invoice_partner_display_name".to_string())); vec_select.push(Value::String("state".to_string())); let mut vec_read1: Vec<Value> = Vec::new(); let mut vec_read2: Vec<Value> = Vec::new(); let mut vec_read3: Vec<Value> = Vec::new(); if self.state == "sent".to_string() { vec_read3.push(Value::String("state".to_string())); vec_read3.push(Value::String("=".to_string())); vec_read3.push(Value::String("sent".to_string())); // sent posted and ??? } else { vec_read3.push(Value::String("state".to_string())); vec_read3.push(Value::String("!=".to_string())); vec_read3.push(Value::String("sent".to_string())); } vec_read2.push(Value::Array(vec_read3)); vec_read1.push(Value::Array(vec_read2)); let read = Request::new("execute_kw") .arg(self.odoo_connection.connection.db.as_str()) .arg(self.odoo_connection.uid.ok_or(E_INV_CRED)?) .arg(self.odoo_connection.connection.password.as_str()) .arg("account.move") .arg("search_read") .arg(Value::Array(vec_read1)) .arg(Value::Struct( vec![("fields".to_string(), Value::Array(vec_select))] .into_iter() .collect(), )) .call_url(request_object.as_str())?; let arr = read.as_array().ok_or(E_INV_RESP)?; println!("{:?}", arr); Ok(()) } } impl InvoiceData { pub fn new( odoo_connection: OdooConnection, state: String, ) -> Self { Self { odoo_connection, state, } } }
35.608696
85
0.569394
69627da5a6c4f7eb6aa71ad4dd9a05e0ea5606fc
26,829
//! 9P protocol data types and constants. //! //! # Protocol //! 9P2000.L extern crate nix; use std::fs; use std::mem::{size_of, size_of_val}; use std::os::unix::fs::MetadataExt; /// 9P2000 version string pub const P92000: &'static str = "9P2000"; /// 9P2000.L version string pub const P92000L: &'static str = "9P2000.L"; // 9P magic numbers /// Special tag which `Tversion`/`Rversion` must use as `tag` pub const NOTAG: u16 = !0; /// Special value which `Tattach` with no auth must use as `afid` /// /// If the client does not wish to authenticate the connection, or knows that authentication is /// not required, the afid field in the attach message should be set to `NOFID` pub const NOFID: u32 = !0; /// Special uid which `Tauth`/`Tattach` use as `n_uname` to indicate no uid is specified pub const NONUNAME: u32 = !0; /// Ample room for `Twrite`/`Rread` header /// /// size[4] Tread/Twrite[2] tag[2] fid[4] offset[8] count[4] pub const IOHDRSZ: u32 = 24; /// Room for readdir header pub const READDIRHDRSZ: u32 = 24; /// v9fs default port pub const V9FS_PORT: u16 = 564; /// Old 9P2000 protocol types /// /// Types in this module are not used 9P2000.L pub mod p92000 { /// The type of I/O /// /// Open mode to be checked against the permissions for the file. 
pub mod om { /// Open for read pub const READ: u8 = 0; /// Write pub const WRITE: u8 = 1; /// Read and write pub const RDWR: u8 = 2; /// Execute, == read but check execute permission pub const EXEC: u8 = 3; /// Or'ed in (except for exec), truncate file first pub const TRUNC: u8 = 16; /// Or'ed in, close on exec pub const CEXEC: u8 = 32; /// Or'ed in, remove on close pub const RCLOSE: u8 = 64; } /// Bits in Stat.mode pub mod dm { /// Mode bit for directories pub const DIR: u32 = 0x80000000; /// Mode bit for append only files pub const APPEND: u32 = 0x40000000; /// Mode bit for exclusive use files pub const EXCL: u32 = 0x20000000; /// Mode bit for mounted channel pub const MOUNT: u32 = 0x10000000; /// Mode bit for authentication file pub const AUTH: u32 = 0x08000000; /// Mode bit for non-backed-up files pub const TMP: u32 = 0x04000000; /// Mode bit for read permission pub const READ: u32 = 0x4; /// Mode bit for write permission pub const WRITE: u32 = 0x2; /// Mode bit for execute permission pub const EXEC: u32 = 0x1; } /// Plan 9 Namespace metadata (somewhat like a unix fstat) /// /// NOTE: Defined as `Dir` in libc.h of Plan 9 #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Stat { /// Server type pub typ: u16, /// Server subtype pub dev: u32, /// Unique id from server pub qid: super::Qid, /// Permissions pub mode: u32, /// Last read time pub atime: u32, /// Last write time pub mtime: u32, /// File length pub length: u64, /// Last element of path pub name: String, /// Owner name pub uid: String, /// Group name pub gid: String, /// Last modifier name pub muid: String, } impl Stat { /// Get the current size of the stat pub fn size(&self) -> u16 { use std::mem::{size_of, size_of_val}; (size_of_val(&self.typ) + size_of_val(&self.dev) + size_of_val(&self.qid) + size_of_val(&self.mode) + size_of_val(&self.atime) + size_of_val(&self.mtime) + size_of_val(&self.length) + (size_of::<u16>() * 4) + self.name.len() + self.uid.len() + self.gid.len() + 
self.muid.len()) as u16 } } } // pub mod p92000 /// File lock type, Flock.typ pub mod ltype { bitflags! { pub struct LockType: u8 { const RDLOCK = 0; const WRLOCK = 1; const UNLOCK = 2; } } } pub use self::ltype::LockType; /// File lock flags, Flock.flags pub mod lflag { bitflags! { pub struct LockFlag: u32 { #[doc = "Blocking request"] const BLOCK = 1; #[doc = "Reserved for future use"] const RECLAIM = 2; } } } pub use self::lflag::LockFlag; /// File lock status pub mod lstatus { bitflags! { pub struct LockStatus: u8 { const SUCCESS = 0; const BLOCKED = 1; const ERROR = 2; const GRACE = 3; } } } pub use self::lstatus::LockStatus; /// Bits in Qid.typ /// /// QidType can be constructed from std::fs::FileType via From trait /// /// # Protocol /// 9P2000/9P2000.L pub mod qt { bitflags! { pub struct QidType: u8 { #[doc = "Type bit for directories"] const DIR = 0x80; #[doc = "Type bit for append only files"] const APPEND = 0x40; #[doc = "Type bit for exclusive use files"] const EXCL = 0x20; #[doc = "Type bit for mounted channel"] const MOUNT = 0x10; #[doc = "Type bit for authentication file"] const AUTH = 0x08; #[doc = "Type bit for not-backed-up file"] const TMP = 0x04; #[doc = "Type bits for symbolic links (9P2000.u)"] const SYMLINK = 0x02; #[doc = "Type bits for hard-link (9P2000.u)"] const LINK = 0x01; #[doc = "Plain file"] const FILE = 0x00; } } impl From<::std::fs::FileType> for QidType { fn from(typ: ::std::fs::FileType) -> Self { From::from(&typ) } } impl<'a> From<&'a ::std::fs::FileType> for QidType { fn from(typ: &'a ::std::fs::FileType) -> Self { let mut qid_type = FILE; if typ.is_dir() { qid_type.insert(DIR) } if typ.is_symlink() { qid_type.insert(SYMLINK) } qid_type } } } pub use self::qt::QidType; /// Bits in `mask` and `valid` of `Tgetattr` and `Rgetattr`. /// /// # Protocol /// 9P2000.L pub mod getattr { bitflags! 
{ pub struct GetattrMask: u64 { const MODE = 0x00000001; const NLINK = 0x00000002; const UID = 0x00000004; const GID = 0x00000008; const RDEV = 0x00000010; const ATIME = 0x00000020; const MTIME = 0x00000040; const CTIME = 0x00000080; const INO = 0x00000100; const SIZE = 0x00000200; const BLOCKS = 0x00000400; const BTIME = 0x00000800; const GEN = 0x00001000; const DATA_VERSION = 0x00002000; #[doc = "Mask for fields up to BLOCKS"] const BASIC =0x000007ff; #[doc = "Mask for All fields above"] const ALL = 0x00003fff; } } } pub use self::getattr::GetattrMask; /// Bits in `mask` of `Tsetattr`. /// /// If a time bit is set without the corresponding SET bit, the current /// system time on the server is used instead of the value sent in the request. /// /// # Protocol /// 9P2000.L pub mod setattr { bitflags! { pub struct SetattrMask: u32 { const MODE = 0x00000001; const UID = 0x00000002; const GID = 0x00000004; const SIZE = 0x00000008; const ATIME = 0x00000010; const MTIME = 0x00000020; const CTIME = 0x00000040; const ATIME_SET = 0x00000080; const MTIME_SET = 0x00000100; } } } pub use self::setattr::SetattrMask; /// Server side data type for path tracking /// /// The server's unique identification for the file being accessed /// /// # Protocol /// 9P2000/9P2000.L #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Qid { /// Specify whether the file is a directory, append-only file, etc. pub typ: QidType, /// Version number for a file; typically, it is incremented every time the file is modified pub version: u32, /// An integer which is unique among all files in the hierarchy pub path: u64, } /// Filesystem information corresponding to `struct statfs` of Linux. 
/// /// # Protocol /// 9P2000.L #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Statfs { /// Type of file system pub typ: u32, /// Optimal transfer block size pub bsize: u32, /// Total data blocks in file system pub blocks: u64, /// Free blocks in fs pub bfree: u64, /// Free blocks avail to non-superuser pub bavail: u64, /// Total file nodes in file system pub files: u64, /// Free file nodes in fs pub ffree: u64, /// Filesystem ID pub fsid: u64, /// Maximum length of filenames pub namelen: u32, } impl From<nix::sys::statvfs::vfs::Statvfs> for Statfs { fn from(buf: nix::sys::statvfs::vfs::Statvfs) -> Statfs { Statfs { typ: 0, bsize: buf.f_bsize as u32, blocks: buf.f_blocks, bfree: buf.f_bfree, bavail: buf.f_bavail, files: buf.f_files, ffree: buf.f_ffree, fsid: buf.f_fsid as u64, namelen: buf.f_namemax as u32, } } } /// Time struct /// /// # Protocol /// 9P2000.L #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Time { pub sec: u64, pub nsec: u64, } /// File attributes corresponding to `struct stat` of Linux. 
/// /// Stat can be constructed from `std::fs::Metadata` via From trait /// /// # Protocol /// 9P2000.L #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Stat { /// Protection pub mode: u32, /// User ID of owner pub uid: u32, /// Group ID of owner pub gid: u32, /// Number of hard links pub nlink: u64, /// Device ID (if special file) pub rdev: u64, /// Total size, in bytes pub size: u64, /// Blocksize for file system I/O pub blksize: u64, /// Number of 512B blocks allocated pub blocks: u64, /// Time of last access pub atime: Time, /// Time of last modification pub mtime: Time, /// Time of last status change pub ctime: Time, } impl From<fs::Metadata> for Stat { fn from(attr: fs::Metadata) -> Self { From::from(&attr) } } // Default conversion from metadata of libstd impl<'a> From<&'a fs::Metadata> for Stat { fn from(attr: &'a fs::Metadata) -> Self { Stat { mode: attr.mode(), uid: attr.uid(), gid: attr.gid(), nlink: attr.nlink(), rdev: attr.rdev(), size: attr.size() as u64, blksize: attr.blksize() as u64, blocks: attr.blocks() as u64, atime: Time { sec: attr.atime() as u64, nsec: attr.atime_nsec() as u64, }, mtime: Time { sec: attr.mtime() as u64, nsec: attr.mtime_nsec() as u64, }, ctime: Time { sec: attr.ctime() as u64, nsec: attr.ctime_nsec() as u64, }, } } } /// Subset of `Stat` used for `Tsetattr` #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct SetAttr { pub mode: u32, pub uid: u32, pub gid: u32, pub size: u64, pub atime: Time, pub mtime: Time, } /// Directory entry used in `Rreaddir` /// /// # Protocol /// 9P2000.L #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct DirEntry { /// Qid for this directory pub qid: Qid, /// The index of this entry pub offset: u64, /// Corresponds to `d_type` of `struct dirent` /// /// Use `0` if you can't set this properly. It might be enough. 
pub typ: u8, /// Directory name pub name: String, } impl DirEntry { pub fn size(&self) -> u32 { (size_of_val(&self.qid) + size_of_val(&self.offset) + size_of_val(&self.typ) + size_of::<u16>() + self.name.len()) as u32 } } /// Directory entry array #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct DirEntryData { pub data: Vec<DirEntry>, } impl DirEntryData { pub fn new() -> DirEntryData { Self::with(Vec::new()) } pub fn with(v: Vec<DirEntry>) -> DirEntryData { DirEntryData { data: v } } pub fn data(&self) -> &[DirEntry] { &self.data } pub fn size(&self) -> u32 { self.data.iter().fold(0, |a, e| a + e.size()) as u32 } pub fn push(&mut self, entry: DirEntry) { self.data.push(entry); } } /// Data type used in `Rread` and `Twrite` /// /// # Protocol /// 9P2000/9P2000.L #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Data(pub Vec<u8>); /// Similar to Linux `struct flock` /// /// # Protocol /// 9P2000.L #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Flock { pub typ: LockType, pub flags: LockFlag, pub start: u64, pub length: u64, pub proc_id: u32, pub client_id: String, } /// Getlock structure /// /// # Protocol /// 9P2000.L #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Getlock { pub typ: LockType, pub start: u64, pub length: u64, pub proc_id: u32, pub client_id: String, } // Commented out the types not used in 9P2000.L enum_from_primitive! 
{ #[doc = "Message type, 9P operations"] #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub enum MsgType { // 9P2000.L Tlerror = 6, // Illegal, never used Rlerror, Tstatfs = 8, Rstatfs, Tlopen = 12, Rlopen, Tlcreate = 14, Rlcreate, Tsymlink = 16, Rsymlink, Tmknod = 18, Rmknod, Trename = 20, Rrename, Treadlink = 22, Rreadlink, Tgetattr = 24, Rgetattr, Tsetattr = 26, Rsetattr, Txattrwalk = 30, Rxattrwalk, Txattrcreate = 32, Rxattrcreate, Treaddir = 40, Rreaddir, Tfsync = 50, Rfsync, Tlock = 52, Rlock, Tgetlock = 54, Rgetlock, Tlink = 70, Rlink, Tmkdir = 72, Rmkdir, Trenameat = 74, Rrenameat, Tunlinkat = 76, Runlinkat, // 9P2000 Tversion = 100, Rversion, Tauth = 102, Rauth, Tattach = 104, Rattach, //Terror = 106, // Illegal, never used //Rerror, Tflush = 108, Rflush, Twalk = 110, Rwalk, //Topen = 112, //Ropen, //Tcreate = 114, //Rcreate, Tread = 116, Rread, Twrite = 118, Rwrite, Tclunk = 120, Rclunk, Tremove = 122, Rremove, //Tstat = 124, //Rstat, //Twstat = 126, //Rwstat, } } impl MsgType { /// If the message type is T-message pub fn is_t(&self) -> bool { !self.is_r() } /// If the message type is R-message pub fn is_r(&self) -> bool { use MsgType::*; match *self { Rlerror | Rstatfs | Rlopen | Rlcreate | Rsymlink | Rmknod | Rrename | Rreadlink | Rgetattr | Rsetattr | Rxattrwalk | Rxattrcreate | Rreaddir | Rfsync | Rlock | Rgetlock | Rlink | Rmkdir | Rrenameat | Runlinkat | Rversion | Rauth | Rattach | Rflush | Rwalk | Rread | Rwrite | Rclunk | Rremove => true, _ => false, } } } impl<'a> From<&'a Fcall> for MsgType { fn from(fcall: &'a Fcall) -> MsgType { match *fcall { Fcall::Rlerror { .. } => MsgType::Rlerror, Fcall::Tstatfs { .. } => MsgType::Tstatfs, Fcall::Rstatfs { .. } => MsgType::Rstatfs, Fcall::Tlopen { .. } => MsgType::Tlopen, Fcall::Rlopen { .. } => MsgType::Rlopen, Fcall::Tlcreate { .. } => MsgType::Tlcreate, Fcall::Rlcreate { .. } => MsgType::Rlcreate, Fcall::Tsymlink { .. } => MsgType::Tsymlink, Fcall::Rsymlink { .. 
} => MsgType::Rsymlink, Fcall::Tmknod { .. } => MsgType::Tmknod, Fcall::Rmknod { .. } => MsgType::Rmknod, Fcall::Trename { .. } => MsgType::Trename, Fcall::Rrename => MsgType::Rrename, Fcall::Treadlink { .. } => MsgType::Treadlink, Fcall::Rreadlink { .. } => MsgType::Rreadlink, Fcall::Tgetattr { .. } => MsgType::Tgetattr, Fcall::Rgetattr { .. } => MsgType::Rgetattr, Fcall::Tsetattr { .. } => MsgType::Tsetattr, Fcall::Rsetattr => MsgType::Rsetattr, Fcall::Txattrwalk { .. } => MsgType::Txattrwalk, Fcall::Rxattrwalk { .. } => MsgType::Rxattrwalk, Fcall::Txattrcreate { .. } => MsgType::Txattrcreate, Fcall::Rxattrcreate => MsgType::Rxattrcreate, Fcall::Treaddir { .. } => MsgType::Treaddir, Fcall::Rreaddir { .. } => MsgType::Rreaddir, Fcall::Tfsync { .. } => MsgType::Tfsync, Fcall::Rfsync => MsgType::Rfsync, Fcall::Tlock { .. } => MsgType::Tlock, Fcall::Rlock { .. } => MsgType::Rlock, Fcall::Tgetlock { .. } => MsgType::Tgetlock, Fcall::Rgetlock { .. } => MsgType::Rgetlock, Fcall::Tlink { .. } => MsgType::Tlink, Fcall::Rlink => MsgType::Rlink, Fcall::Tmkdir { .. } => MsgType::Tmkdir, Fcall::Rmkdir { .. } => MsgType::Rmkdir, Fcall::Trenameat { .. } => MsgType::Trenameat, Fcall::Rrenameat => MsgType::Rrenameat, Fcall::Tunlinkat { .. } => MsgType::Tunlinkat, Fcall::Runlinkat => MsgType::Runlinkat, Fcall::Tauth { .. } => MsgType::Tauth, Fcall::Rauth { .. } => MsgType::Rauth, Fcall::Tattach { .. } => MsgType::Tattach, Fcall::Rattach { .. } => MsgType::Rattach, Fcall::Tversion { .. } => MsgType::Tversion, Fcall::Rversion { .. } => MsgType::Rversion, Fcall::Tflush { .. } => MsgType::Tflush, Fcall::Rflush => MsgType::Rflush, Fcall::Twalk { .. } => MsgType::Twalk, Fcall::Rwalk { .. } => MsgType::Rwalk, Fcall::Tread { .. } => MsgType::Tread, Fcall::Rread { .. } => MsgType::Rread, Fcall::Twrite { .. } => MsgType::Twrite, Fcall::Rwrite { .. } => MsgType::Rwrite, Fcall::Tclunk { .. } => MsgType::Tclunk, Fcall::Rclunk => MsgType::Rclunk, Fcall::Tremove { .. 
} => MsgType::Tremove, Fcall::Rremove => MsgType::Rremove, } } } /// A data type encapsulating the various 9P messages #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub enum Fcall { // 9P2000.L Rlerror { ecode: u32, }, Tstatfs { fid: u32, }, Rstatfs { statfs: Statfs, }, Tlopen { fid: u32, flags: u32, }, Rlopen { qid: Qid, iounit: u32, }, Tlcreate { fid: u32, name: String, flags: u32, mode: u32, gid: u32, }, Rlcreate { qid: Qid, iounit: u32, }, Tsymlink { fid: u32, name: String, symtgt: String, gid: u32, }, Rsymlink { qid: Qid, }, Tmknod { dfid: u32, name: String, mode: u32, major: u32, minor: u32, gid: u32, }, Rmknod { qid: Qid, }, Trename { fid: u32, dfid: u32, name: String, }, Rrename, Treadlink { fid: u32, }, Rreadlink { target: String, }, Tgetattr { fid: u32, req_mask: GetattrMask, }, /// Reserved members specified in the protocol are handled in Encodable/Decodable traits. Rgetattr { valid: GetattrMask, qid: Qid, stat: Stat, }, Tsetattr { fid: u32, valid: SetattrMask, stat: SetAttr, }, Rsetattr, Txattrwalk { fid: u32, newfid: u32, name: String, }, Rxattrwalk { size: u64, }, Txattrcreate { fid: u32, name: String, attr_size: u64, flags: u32, }, Rxattrcreate, Treaddir { fid: u32, offset: u64, count: u32, }, Rreaddir { data: DirEntryData, }, Tfsync { fid: u32, }, Rfsync, Tlock { fid: u32, flock: Flock, }, Rlock { status: LockStatus, }, Tgetlock { fid: u32, flock: Getlock, }, Rgetlock { flock: Getlock, }, Tlink { dfid: u32, fid: u32, name: String, }, Rlink, Tmkdir { dfid: u32, name: String, mode: u32, gid: u32, }, Rmkdir { qid: Qid, }, Trenameat { olddirfid: u32, oldname: String, newdirfid: u32, newname: String, }, Rrenameat, Tunlinkat { dirfd: u32, name: String, flags: u32, }, Runlinkat, // 9P2000.u Tauth { afid: u32, uname: String, aname: String, n_uname: u32, }, Rauth { aqid: Qid, }, Tattach { fid: u32, afid: u32, uname: String, aname: String, n_uname: u32, }, Rattach { qid: Qid, }, // 9P2000 Tversion { msize: u32, version: String, }, Rversion { 
msize: u32, version: String, }, Tflush { oldtag: u16, }, Rflush, Twalk { fid: u32, newfid: u32, wnames: Vec<String>, }, Rwalk { wqids: Vec<Qid>, }, Tread { fid: u32, offset: u64, count: u32, }, Rread { data: Data, }, Twrite { fid: u32, offset: u64, data: Data, }, Rwrite { count: u32, }, Tclunk { fid: u32, }, Rclunk, Tremove { fid: u32, }, Rremove, // 9P2000 operations not used for 9P2000.L //Tauth { afid: u32, uname: String, aname: String }, //Rauth { aqid: Qid }, //Rerror { ename: String }, //Tattach { fid: u32, afid: u32, uname: String, aname: String }, //Rattach { qid: Qid }, //Topen { fid: u32, mode: u8 }, //Ropen { qid: Qid, iounit: u32 }, //Tcreate { fid: u32, name: String, perm: u32, mode: u8 }, //Rcreate { qid: Qid, iounit: u32 }, //Tstat { fid: u32 }, //Rstat { stat: Stat }, //Twstat { fid: u32, stat: Stat }, //Rwstat, } impl Fcall { /// Get the fids which self contains pub fn fids(&self) -> Vec<u32> { match *self { Fcall::Tstatfs { fid } => vec![fid], Fcall::Tlopen { fid, .. } => vec![fid], Fcall::Tlcreate { fid, .. } => vec![fid], Fcall::Tsymlink { fid, .. } => vec![fid], Fcall::Tmknod { dfid, .. } => vec![dfid], Fcall::Trename { fid, dfid, .. } => vec![fid, dfid], Fcall::Treadlink { fid } => vec![fid], Fcall::Tgetattr { fid, .. } => vec![fid], Fcall::Tsetattr { fid, .. } => vec![fid], Fcall::Txattrwalk { fid, .. } => vec![fid], Fcall::Txattrcreate { fid, .. } => vec![fid], Fcall::Treaddir { fid, .. } => vec![fid], Fcall::Tfsync { fid, .. } => vec![fid], Fcall::Tlock { fid, .. } => vec![fid], Fcall::Tgetlock { fid, .. } => vec![fid], Fcall::Tlink { dfid, fid, .. } => vec![dfid, fid], Fcall::Tmkdir { dfid, .. } => vec![dfid], Fcall::Trenameat { olddirfid, newdirfid, .. } => vec![olddirfid, newdirfid], Fcall::Tunlinkat { dirfd, .. } => vec![dirfd], Fcall::Tattach { afid, .. } if afid != NOFID => vec![afid], Fcall::Twalk { fid, .. } => vec![fid], Fcall::Tread { fid, .. } => vec![fid], Fcall::Twrite { fid, .. } => vec![fid], Fcall::Tclunk { fid, .. 
} => vec![fid], Fcall::Tremove { fid } => vec![fid], _ => Vec::new(), } } /// Get the newfids which self contains pub fn newfids(&self) -> Vec<u32> { match *self { Fcall::Txattrwalk { newfid, .. } => vec![newfid], Fcall::Tauth { afid, .. } => vec![afid], Fcall::Tattach { fid, .. } => vec![fid], Fcall::Twalk { newfid, .. } => vec![newfid], _ => Vec::new(), } } /// Get the qids which self contains pub fn qids(&self) -> Vec<Qid> { match *self { Fcall::Rlopen { qid, .. } => vec![qid], Fcall::Rlcreate { qid, .. } => vec![qid], Fcall::Rsymlink { qid } => vec![qid], Fcall::Rmknod { qid } => vec![qid], Fcall::Rgetattr { qid, .. } => vec![qid], Fcall::Rmkdir { qid } => vec![qid], Fcall::Rauth { aqid } => vec![aqid], Fcall::Rattach { qid } => vec![qid], Fcall::Rwalk { ref wqids } => wqids.clone(), _ => Vec::new(), } } } /// Envelope for 9P messages #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Msg { /// Chosen and used by the client to identify the message. /// The reply to the message will have the same tag pub tag: u16, /// Message body encapsulating the various 9P messages pub body: Fcall, }
27.432515
95
0.498789
e5b112d62f0029620ac8fa7819a27e7e723a3c85
2,459
//! The `system_transaction` module provides functionality for creating system transactions. use crate::{ hash::Hash, pubkey::Pubkey, signature::{Keypair, KeypairUtil}, system_instruction, transaction::Transaction, }; /// Create and sign new SystemInstruction::CreateAccount transaction pub fn create_account( from_keypair: &Keypair, to_keypair: &Keypair, recent_blockhash: Hash, lamports: u64, space: u64, program_id: &Pubkey, ) -> Transaction { let from_pubkey = from_keypair.pubkey(); let to_pubkey = to_keypair.pubkey(); let create_instruction = system_instruction::create_account(&from_pubkey, &to_pubkey, lamports, space, program_id); let instructions = vec![create_instruction]; Transaction::new_signed_instructions( &[from_keypair, to_keypair], instructions, recent_blockhash, ) } /// Create and sign new system_instruction::Assign transaction pub fn assign(from_keypair: &Keypair, recent_blockhash: Hash, program_id: &Pubkey) -> Transaction { let from_pubkey = from_keypair.pubkey(); let assign_instruction = system_instruction::assign(&from_pubkey, program_id); let instructions = vec![assign_instruction]; Transaction::new_signed_instructions(&[from_keypair], instructions, recent_blockhash) } /// Create and sign new system_instruction::Transfer transaction pub fn transfer( from_keypair: &Keypair, to: &Pubkey, lamports: u64, recent_blockhash: Hash, ) -> Transaction { let from_pubkey = from_keypair.pubkey(); let transfer_instruction = system_instruction::transfer(&from_pubkey, to, lamports); let instructions = vec![transfer_instruction]; Transaction::new_signed_instructions(&[from_keypair], instructions, recent_blockhash) } /// Create and sign new nonced system_instruction::Transfer transaction pub fn nonced_transfer( from_keypair: &Keypair, to: &Pubkey, lamports: u64, nonce_account: &Pubkey, nonce_authority: &Keypair, nonce_hash: Hash, ) -> Transaction { let from_pubkey = from_keypair.pubkey(); let transfer_instruction = system_instruction::transfer(&from_pubkey, to, 
lamports); let instructions = vec![transfer_instruction]; Transaction::new_signed_with_nonce( instructions, Some(&from_pubkey), &[from_keypair, nonce_authority], nonce_account, &nonce_authority.pubkey(), nonce_hash, ) }
33.22973
99
0.717365
6ab1fbb66ce1a394cc23009a564d59bd498f44b2
4,753
//! Runtime crate for the USB Armory Mk II (i.MX6UL core) #![no_std] #![warn(missing_docs, rust_2018_idioms, unused_qualifications)] use core::sync::atomic::{self, Ordering}; use pac::GICC; mod i2c; mod leds; mod rtc; mod serial; // Software Generated Interrupts extern "C" { fn SGI0(); fn SGI1(); fn SGI2(); fn SGI3(); fn SGI4(); fn SGI5(); fn SGI6(); fn SGI7(); fn SGI8(); fn SGI9(); fn SGI10(); fn SGI11(); fn SGI12(); fn SGI13(); fn SGI14(); fn SGI15(); } static SGIS: [unsafe extern "C" fn(); 16] = [ SGI0, SGI1, SGI2, SGI3, SGI4, SGI5, SGI6, SGI7, SGI8, SGI9, SGI10, SGI11, SGI12, SGI13, SGI14, SGI15, ]; include!(concat!(env!("OUT_DIR"), "/interrupts.rs")); #[cfg(not(any(feature = "dram", feature = "ocram")))] compile_error!("one of the following Cargo features must be enabled: `dram` or `ocram`"); #[cfg(all(feature = "dram", feature = "ocram"))] compile_error!("Cargo features `dram` and `ocram` are both enabled but only one must be enabled"); // NOTE due to ABI requirements the real entry point, `_start`, is written in // assembly and lives in the `asm.s` file. That subroutine calls this one. // NOTE(C ABI) Rust ABI is unspecified / unstable; all calls between Rust code // and external assembly must use the stable C ABI #[no_mangle] unsafe extern "C" fn start() -> ! { // NOTE the ROM bootloader can't write the initial values of `.data` to OCRAM because it uses // the OCRAM itself. 
Thus we copy those here, after the ROM bootloader has terminated and // there's no risk to corrupt memory extern "C" { static mut _sbss: u32; static mut _ebss: u32; static mut _sdata: u32; static mut _edata: u32; static _sidata: u32; } r0::zero_bss(&mut _sbss, &mut _ebss); r0::init_data(&mut _sdata, &mut _edata, &_sidata); // ensure all the previous writes are committed before any of the following code (which may // access `.data`) is executed atomic::fence(Ordering::SeqCst); /* Initialize some peripherals that will always be configured in this way */ // enable the RTC with no calibration rtc::init(); // LEDS leds::init(); // turn the white LED on and the blue LED off to indicate we are alive leds::set(false, true); // the debug accessory (which routes the serial output of the device to the host) is connected // to the Armory through a USB-C receptacle. This receptacle is disabled by default so we enable // it here. The IC that manages the receptacle (FUSB303) only talks I2C. i2c::init(); if i2c::init_fusb303().is_err() { fatal() } serial::init(); // on cold boots it seems the receptacle takes quite a while to become active so we have added // a delay (~200ms) here to give it time to become ready let start = rtc::now(); while rtc::now() < start + 6554 { continue; } extern "Rust" { // NOTE(Rust ABI) this subroutine is provided by a Rust crate fn main() -> !; } main() } #[no_mangle] extern "C" fn IRQ() { // NOTE(borrow_unchecked) IRQs are masked, plus this is a single-instruction // read of a read-only register (that has side effects, though) let iar = GICC::borrow_unchecked(|gicc| gicc.IAR.read()); let iid = (iar & ((1 << 10) - 1)) as u16; let f = if iid == 1023 { // spurious interrupt return; } else if iid < 16 { // Software Generated Interrupt SGIS[iid as usize] } else if iid < (32 + 128) { // Shared Peripheral Interrupt // NOTE(get_unchecked) avoid panicking branch unsafe { *SPIS.get_unchecked((iid - 32) as usize) } } else { extern "C" { fn DefaultHandler() -> !; 
} unsafe { DefaultHandler() } }; unsafe { cortex_a::enable_irq(); f(); } cortex_a::disable_irq(); // NOTE(borrow_unchecked) single-instruction write to a write-only register GICC::borrow_unchecked(|gicc| { // end of interrupt gicc.EOIR.write(iid as u32); }); } /// Fatal error during initialization: turn on both LEDs and halt the processor fn fatal() -> ! { leds::set(true, true); loop { continue; } } // NOTE this is written in assembly because it should never touch the stack // pointer regardless of the optimization level used to compile the application // #[no_mangle] // fn DataAbort() -> ! { // static MSG: &str = "\ndata abort exception (it could indicate a stack overflow)\n"; // Serial::borrow_unchecked(|serial| { // serial.write_all(MSG.as_bytes()); // }); // Serial::flush(); // usbarmory::reset(); // }
28.981707
100
0.62045
8f20cf309c229a1560c9e51eed0e93bd7ba06584
479
fn main() { use std::io::Read; let mut data = [0; 11]; let mut stdin = ::std::io::stdin(); stdin.read(&mut data[..]).unwrap(); for idx in 0..data.len() { if data[idx] > 127 { return; // avoid invalid utf8 by exiting early } } let buf = unsafe { ::std::str::from_utf8_unchecked(&data[..]) }; if buf.starts_with("hello") { if buf.as_bytes()[5..].starts_with(b" world") { panic!() } } }
22.809524
68
0.492693
fcd18d46d9d151e832159418574b2befa72358d1
1,282
use crate::ens::ENS; use std::str::FromStr; use web3::api::Web3; use web3::futures::Future; use web3::types::{Address, Bytes, H520}; pub fn to_address<T: web3::Transport>(address: &str, web3: &Web3<T>) -> Result<Address, String> { if address.ends_with(".eth") || address.ends_with(".xyz") { let ens = ENS::new(&web3); match ens.address(address) { Ok(s) => return Ok(s), Err(_e) => return Err(format!("unable to resolve ENS address {}", address)), } } else { match Address::from_str(&address.replace("0x", "")) { Ok(s) => return Ok(s), Err(_e) => return Err(format!("invalid address {}", address)), } } } pub fn to_ens<T: web3::Transport>(address: Address, web3: &Web3<T>) -> Result<String, String> { let ens = ENS::new(&web3); match ens.name(address) { Ok(s) => Ok(s), Err(_e) => Err(format!("unable to ENS reverse address {}", address)), } } pub fn sign<T: web3::Transport>( account: Address, data: Bytes, web3: &Web3<T>, ) -> Result<H520, String> { let result = web3.eth().sign(account, data); match result.wait() { Ok(s) => Ok(s), Err(_e) => Err(format!("unable to sign with account {:?}", account)), } }
29.136364
97
0.556942
d9e3e3e382a7ba449f6b3e05f1a494d8a56cf775
717
use crate::evaluate::evaluate_baseline_expr; use crate::prelude::*; use log::{log_enabled, trace}; use futures::stream::once; use nu_errors::ShellError; use nu_protocol::hir::SpannedExpression; use nu_protocol::Scope; pub(crate) async fn run_expression_block( expr: SpannedExpression, context: &mut EvaluationContext, scope: Arc<Scope>, ) -> Result<InputStream, ShellError> { if log_enabled!(log::Level::Trace) { trace!(target: "nu::run::expr", "->"); trace!(target: "nu::run::expr", "{:?}", expr); } let registry = context.registry().clone(); let output = evaluate_baseline_expr(&expr, &registry, scope).await?; Ok(once(async { Ok(output) }).to_input_stream()) }
27.576923
72
0.670851
645fed37b71f0c5ad0b01c0a1e280f10bec306e6
3,691
//! # Timer //! //! A timer that mimics iOS's timer. //! //! ## Usage //! //! - Use `Timer::new(<duration>)` to initialise a new timer instance. `<duration>` is a //! `chrono::Duration`. The timer is paused at the duration you specified and will **not** //! run until you call `.resume()` or `.pause_or_resume()`. //! - While running, call `.pause_or_resume()`, `.pause()` or `.resume()` to pause or resume. //! - When you want to stop (reset), call `.stop()`, which resets the timer and returns //! [`TimerData`](struct.TimerData.html) use chrono::{DateTime, Duration, Local}; #[derive(Debug, Clone)] pub struct TimerData { pub total: Duration, pub remaining: Duration, pub start_moments: Vec<DateTime<Local>>, // moments at which the timer resumes; the first is the start monent pub pause_moments: Vec<DateTime<Local>>, // moments at which the timer is paused; the last is the stop moment } impl TimerData { fn new(duration: Duration) -> Self { Self { total: duration, remaining: duration, start_moments: Vec::new(), pause_moments: Vec::new(), } } pub fn start(&self) -> DateTime<Local> { self.start_moments[0] } pub fn stop(&self) -> DateTime<Local> { self.pause_moments[self.pause_moments.len() - 1] } pub fn duration_expected(&self) -> Duration { self.total } pub fn duration_actual(&self) -> Duration { self.stop() - self.start() } } /// A countdown timer #[derive(Clone, Debug)] pub struct Timer { pub paused: bool, pub data: TimerData, } impl Timer { /// Returns stopwatch reset to zero pub fn new(duration: Duration) -> Self { Self { paused: true, // finished by default; start by explicitly calling `.resume()` data: TimerData::new(duration), } } /// Read the timer. Returns the duration passed. pub fn read(&self) -> Duration { if self.paused { self.data.remaining } else { self.data.remaining - (Local::now() - self.last_start()) } } /// Pause or resume the timer. (If paused, resume, and vice versa.) 
pub fn pause_or_resume(&mut self) { self.pause_or_resume_at(Local::now()); } pub fn pause_or_resume_at(&mut self, moment: DateTime<Local>) { if self.paused { self.resume_at(moment); } else { self.pause_at(moment); } } /// Pause the timer (suggest using `pause_or_resume` instead.) pub fn pause(&mut self) { self.pause_at(Local::now()); } pub fn pause_at(&mut self, moment: DateTime<Local>) { self.data.pause_moments.push(moment); self.data.remaining = self.data.remaining - (moment - self.last_start()); self.paused = true; } /// Resume the timer (suggest using `pause_or_resume` instead.) pub fn resume(&mut self) { self.resume_at(Local::now()); } pub fn resume_at(&mut self, moment: DateTime<Local>) { self.data.start_moments.push(moment); self.paused = false; } /// Stop the timer, return the data, and reset the timer with the previously set duration. pub fn stop(&mut self) -> TimerData { self.stop_at(Local::now()) } pub fn stop_at(&mut self, moment: DateTime<Local>) -> TimerData { self.data.pause_moments.push(moment); let duration = self.data.total; let data = std::mem::replace(&mut self.data, TimerData::new(duration)); data } fn last_start(&self) -> DateTime<Local> { self.data.start_moments[self.data.start_moments.len() - 1] } }
31.016807
113
0.601734
e60bed798551da9e80dc7abacdf918afeeaddb9f
9,696
use { crate::cli::CliError, solana_client::{ client_error::{ClientError, Result as ClientResult}, rpc_client::RpcClient, }, solana_sdk::{ commitment_config::CommitmentConfig, message::Message, native_token::lamports_to_sol, pubkey::Pubkey, }, }; pub fn check_account_for_fee( rpc_client: &RpcClient, account_pubkey: &Pubkey, message: &Message, ) -> Result<(), CliError> { check_account_for_multiple_fees(rpc_client, account_pubkey, &[message]) } pub fn check_account_for_fee_with_commitment( rpc_client: &RpcClient, account_pubkey: &Pubkey, message: &Message, commitment: CommitmentConfig, ) -> Result<(), CliError> { check_account_for_multiple_fees_with_commitment( rpc_client, account_pubkey, &[message], commitment, ) } pub fn check_account_for_multiple_fees( rpc_client: &RpcClient, account_pubkey: &Pubkey, messages: &[&Message], ) -> Result<(), CliError> { check_account_for_multiple_fees_with_commitment( rpc_client, account_pubkey, messages, CommitmentConfig::default(), ) } pub fn check_account_for_multiple_fees_with_commitment( rpc_client: &RpcClient, account_pubkey: &Pubkey, messages: &[&Message], commitment: CommitmentConfig, ) -> Result<(), CliError> { check_account_for_spend_multiple_fees_with_commitment( rpc_client, account_pubkey, 0, messages, commitment, ) } pub fn check_account_for_spend_multiple_fees_with_commitment( rpc_client: &RpcClient, account_pubkey: &Pubkey, balance: u64, messages: &[&Message], commitment: CommitmentConfig, ) -> Result<(), CliError> { let fee = get_fee_for_messages(rpc_client, messages)?; if !check_account_for_balance_with_commitment( rpc_client, account_pubkey, balance + fee, commitment, ) .map_err(Into::<ClientError>::into)? 
{ if balance > 0 { return Err(CliError::InsufficientFundsForSpendAndFee( lamports_to_sol(balance), lamports_to_sol(fee), *account_pubkey, )); } else { return Err(CliError::InsufficientFundsForFee( lamports_to_sol(fee), *account_pubkey, )); } } Ok(()) } pub fn get_fee_for_messages( rpc_client: &RpcClient, messages: &[&Message], ) -> Result<u64, CliError> { Ok(messages .iter() .map(|message| { println!("msg {:?}", message.recent_blockhash); rpc_client.get_fee_for_message(message) }) .collect::<Result<Vec<_>, _>>()? .iter() .sum()) } pub fn check_account_for_balance( rpc_client: &RpcClient, account_pubkey: &Pubkey, balance: u64, ) -> ClientResult<bool> { check_account_for_balance_with_commitment( rpc_client, account_pubkey, balance, CommitmentConfig::default(), ) } pub fn check_account_for_balance_with_commitment( rpc_client: &RpcClient, account_pubkey: &Pubkey, balance: u64, commitment: CommitmentConfig, ) -> ClientResult<bool> { let lamports = rpc_client .get_balance_with_commitment(account_pubkey, commitment)? 
.value; if lamports != 0 && lamports >= balance { return Ok(true); } Ok(false) } pub fn check_unique_pubkeys( pubkey0: (&Pubkey, String), pubkey1: (&Pubkey, String), ) -> Result<(), CliError> { if pubkey0.0 == pubkey1.0 { Err(CliError::BadParameter(format!( "Identical pubkeys found: `{}` and `{}` must be unique", pubkey0.1, pubkey1.1 ))) } else { Ok(()) } } #[cfg(test)] mod tests { use { super::*, serde_json::json, solana_client::{ rpc_request::RpcRequest, rpc_response::{Response, RpcResponseContext}, }, solana_sdk::system_instruction, std::collections::HashMap, }; #[test] fn test_check_account_for_fees() { let account_balance = 1; let account_balance_response = json!(Response { context: RpcResponseContext { slot: 1 }, value: json!(account_balance), }); let pubkey = solana_sdk::pubkey::new_rand(); let pubkey0 = Pubkey::new(&[0; 32]); let pubkey1 = Pubkey::new(&[1; 32]); let ix0 = system_instruction::transfer(&pubkey0, &pubkey1, 1); let message0 = Message::new(&[ix0], Some(&pubkey0)); let ix0 = system_instruction::transfer(&pubkey0, &pubkey1, 1); let ix1 = system_instruction::transfer(&pubkey1, &pubkey0, 1); let message1 = Message::new(&[ix0, ix1], Some(&pubkey0)); let mut mocks = HashMap::new(); mocks.insert(RpcRequest::GetBalance, account_balance_response.clone()); let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks); check_account_for_fee(&rpc_client, &pubkey, &message0).expect("unexpected result"); let check_fee_response = json!(Response { context: RpcResponseContext { slot: 1 }, value: json!(2), }); let mut mocks = HashMap::new(); mocks.insert(RpcRequest::GetFeeForMessage, check_fee_response); mocks.insert(RpcRequest::GetBalance, account_balance_response.clone()); let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks); assert!(check_account_for_fee(&rpc_client, &pubkey, &message1).is_err()); let check_fee_response = json!(Response { context: RpcResponseContext { slot: 1 }, value: json!(2), }); let mut mocks = HashMap::new(); 
mocks.insert(RpcRequest::GetFeeForMessage, check_fee_response); mocks.insert(RpcRequest::GetBalance, account_balance_response); let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks); assert!( check_account_for_multiple_fees(&rpc_client, &pubkey, &[&message0, &message0]).is_err() ); let account_balance = 2; let account_balance_response = json!(Response { context: RpcResponseContext { slot: 1 }, value: json!(account_balance), }); let check_fee_response = json!(Response { context: RpcResponseContext { slot: 1 }, value: json!(1), }); let mut mocks = HashMap::new(); mocks.insert(RpcRequest::GetFeeForMessage, check_fee_response); mocks.insert(RpcRequest::GetBalance, account_balance_response); let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks); check_account_for_multiple_fees(&rpc_client, &pubkey, &[&message0, &message0]) .expect("unexpected result"); } #[test] fn test_check_account_for_balance() { let account_balance = 50; let account_balance_response = json!(Response { context: RpcResponseContext { slot: 1 }, value: json!(account_balance), }); let pubkey = solana_sdk::pubkey::new_rand(); let mut mocks = HashMap::new(); mocks.insert(RpcRequest::GetBalance, account_balance_response); let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks); assert!(check_account_for_balance(&rpc_client, &pubkey, 1).unwrap()); assert!(check_account_for_balance(&rpc_client, &pubkey, account_balance).unwrap()); assert!(!check_account_for_balance(&rpc_client, &pubkey, account_balance + 1).unwrap()); } #[test] fn test_get_fee_for_messages() { let check_fee_response = json!(Response { context: RpcResponseContext { slot: 1 }, value: json!(1), }); let mut mocks = HashMap::new(); mocks.insert(RpcRequest::GetFeeForMessage, check_fee_response); let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks); // No messages, no fee. assert_eq!(get_fee_for_messages(&rpc_client, &[]).unwrap(), 0); // One message w/ one signature, a fee. 
let pubkey0 = Pubkey::new(&[0; 32]); let pubkey1 = Pubkey::new(&[1; 32]); let ix0 = system_instruction::transfer(&pubkey0, &pubkey1, 1); let message0 = Message::new(&[ix0], Some(&pubkey0)); assert_eq!(get_fee_for_messages(&rpc_client, &[&message0]).unwrap(), 1); // No signatures, no fee. let check_fee_response = json!(Response { context: RpcResponseContext { slot: 1 }, value: json!(0), }); let mut mocks = HashMap::new(); mocks.insert(RpcRequest::GetFeeForMessage, check_fee_response); let rpc_client = RpcClient::new_mock_with_mocks("".to_string(), mocks); let message = Message::default(); assert_eq!( get_fee_for_messages(&rpc_client, &[&message, &message]).unwrap(), 0 ); } #[test] fn test_check_unique_pubkeys() { let pubkey0 = solana_sdk::pubkey::new_rand(); let pubkey_clone = pubkey0; let pubkey1 = solana_sdk::pubkey::new_rand(); check_unique_pubkeys((&pubkey0, "foo".to_string()), (&pubkey1, "bar".to_string())) .expect("unexpected result"); check_unique_pubkeys((&pubkey0, "foo".to_string()), (&pubkey1, "foo".to_string())) .expect("unexpected result"); assert!(check_unique_pubkeys( (&pubkey0, "foo".to_string()), (&pubkey_clone, "bar".to_string()) ) .is_err()); } }
32.32
99
0.61118
2186d491f05390acbef8f570f62fa7fbd85ecabf
95
Martes.Interfaces.Cir Martes.Interfaces.Rec Martes.Interfaces.Test Martes.Interfaces.Drawable2
19
27
0.873684
2f181478f543ab30b45c37d43c9aa4a69b60adaa
13,907
//! Lowering rules for S390x. use crate::ir::condcodes::IntCC; use crate::ir::Inst as IRInst; use crate::ir::{MemFlags, Opcode}; use crate::isa::s390x::abi::*; use crate::isa::s390x::inst::*; use crate::isa::s390x::settings as s390x_settings; use crate::isa::s390x::S390xBackend; use crate::machinst::lower::*; use crate::machinst::*; use crate::settings::Flags; use crate::CodegenResult; use regalloc::Reg; use smallvec::SmallVec; pub mod isle; //============================================================================ // Lowering: force instruction input into a register /// Sign-extend the low `from_bits` bits of `value` to a full u64. fn sign_extend_to_u64(value: u64, from_bits: u8) -> u64 { assert!(from_bits <= 64); if from_bits >= 64 { value } else { (((value << (64 - from_bits)) as i64) >> (64 - from_bits)) as u64 } } /// Lower an instruction input to a reg. fn put_input_in_reg<C: LowerCtx<I = Inst>>(ctx: &mut C, input: InsnInput) -> Reg { ctx.put_input_in_regs(input.insn, input.input) .only_reg() .unwrap() } //============================================================================= // Lowering: comparisons /// Determines whether this condcode interprets inputs as signed or /// unsigned. See the documentation for the `icmp` instruction in /// cranelift-codegen/meta/src/shared/instructions.rs for further insights /// into this. 
pub fn condcode_is_signed(cc: IntCC) -> bool { match cc { IntCC::Equal => false, IntCC::NotEqual => false, IntCC::SignedGreaterThanOrEqual => true, IntCC::SignedGreaterThan => true, IntCC::SignedLessThanOrEqual => true, IntCC::SignedLessThan => true, IntCC::UnsignedGreaterThanOrEqual => false, IntCC::UnsignedGreaterThan => false, IntCC::UnsignedLessThanOrEqual => false, IntCC::UnsignedLessThan => false, IntCC::Overflow => true, IntCC::NotOverflow => true, } } //============================================================================ // Lowering: main entry point for lowering a instruction fn lower_insn_to_regs<C: LowerCtx<I = Inst>>( ctx: &mut C, insn: IRInst, flags: &Flags, isa_flags: &s390x_settings::Flags, ) -> CodegenResult<()> { let op = ctx.data(insn).opcode(); let inputs: SmallVec<[InsnInput; 4]> = (0..ctx.num_inputs(insn)) .map(|i| InsnInput { insn, input: i }) .collect(); let outputs: SmallVec<[InsnOutput; 2]> = (0..ctx.num_outputs(insn)) .map(|i| InsnOutput { insn, output: i }) .collect(); let ty = if outputs.len() > 0 { Some(ctx.output_ty(insn, 0)) } else { None }; if let Ok(()) = super::lower::isle::lower(ctx, flags, isa_flags, &outputs, insn) { return Ok(()); } let implemented_in_isle = || { unreachable!( "implemented in ISLE: inst = `{}`, type = `{:?}`", ctx.dfg().display_inst(insn), ty ); }; match op { Opcode::Nop | Opcode::Copy | Opcode::Iconst | Opcode::Bconst | Opcode::F32const | Opcode::F64const | Opcode::Null | Opcode::Iadd | Opcode::IaddIfcout | Opcode::Isub | Opcode::Iabs | Opcode::Ineg | Opcode::Imul | Opcode::Umulhi | Opcode::Smulhi | Opcode::Udiv | Opcode::Urem | Opcode::Sdiv | Opcode::Srem | Opcode::Ishl | Opcode::Ushr | Opcode::Sshr | Opcode::Rotr | Opcode::Rotl | Opcode::Ireduce | Opcode::Uextend | Opcode::Sextend | Opcode::Bnot | Opcode::Band | Opcode::Bor | Opcode::Bxor | Opcode::BandNot | Opcode::BorNot | Opcode::BxorNot | Opcode::Bitselect | Opcode::Breduce | Opcode::Bextend | Opcode::Bmask | Opcode::Bint | Opcode::Clz | 
Opcode::Cls | Opcode::Ctz | Opcode::Popcnt | Opcode::Fadd | Opcode::Fsub | Opcode::Fmul | Opcode::Fdiv | Opcode::Fmin | Opcode::Fmax | Opcode::Sqrt | Opcode::Fneg | Opcode::Fabs | Opcode::Fpromote | Opcode::Fdemote | Opcode::Ceil | Opcode::Floor | Opcode::Trunc | Opcode::Nearest | Opcode::Fma | Opcode::Fcopysign | Opcode::FcvtFromUint | Opcode::FcvtFromSint | Opcode::FcvtToUint | Opcode::FcvtToSint | Opcode::FcvtToUintSat | Opcode::FcvtToSintSat | Opcode::Bitcast | Opcode::Load | Opcode::Uload8 | Opcode::Sload8 | Opcode::Uload16 | Opcode::Sload16 | Opcode::Uload32 | Opcode::Sload32 | Opcode::Store | Opcode::Istore8 | Opcode::Istore16 | Opcode::Istore32 | Opcode::AtomicRmw | Opcode::AtomicCas | Opcode::AtomicLoad | Opcode::AtomicStore | Opcode::Fence | Opcode::Icmp | Opcode::Fcmp | Opcode::IsNull | Opcode::IsInvalid | Opcode::Select | Opcode::SelectifSpectreGuard | Opcode::Trap | Opcode::ResumableTrap | Opcode::Trapz | Opcode::Trapnz | Opcode::ResumableTrapnz | Opcode::Trapif | Opcode::Debugtrap | Opcode::StackAddr | Opcode::FuncAddr | Opcode::SymbolValue => implemented_in_isle(), Opcode::UaddSat | Opcode::SaddSat => unimplemented!(), Opcode::UsubSat | Opcode::SsubSat => unimplemented!(), Opcode::Bitrev => unimplemented!(), Opcode::FcvtLowFromSint => unimplemented!("FcvtLowFromSint"), Opcode::StackLoad | Opcode::StackStore => { panic!("Direct stack memory access not supported; should not be used by Wasm"); } Opcode::ConstAddr => unimplemented!(), Opcode::HeapAddr => { panic!("heap_addr should have been removed by legalization!"); } Opcode::TableAddr => { panic!("table_addr should have been removed by legalization!"); } Opcode::GlobalValue => { panic!("global_value should have been removed by legalization!"); } Opcode::TlsValue => { unimplemented!("Thread-local storage support not implemented!"); } Opcode::GetPinnedReg | Opcode::SetPinnedReg => { unimplemented!("Pinned register support not implemented!"); } Opcode::Call | Opcode::CallIndirect => { let caller_conv = 
ctx.abi().call_conv(); let (mut abi, inputs) = match op { Opcode::Call => { let (extname, dist) = ctx.call_target(insn).unwrap(); let extname = extname.clone(); let sig = ctx.call_sig(insn).unwrap(); assert!(inputs.len() == sig.params.len()); assert!(outputs.len() == sig.returns.len()); ( S390xABICaller::from_func(sig, &extname, dist, caller_conv, flags)?, &inputs[..], ) } Opcode::CallIndirect => { let ptr = put_input_in_reg(ctx, inputs[0]); let sig = ctx.call_sig(insn).unwrap(); assert!(inputs.len() - 1 == sig.params.len()); assert!(outputs.len() == sig.returns.len()); ( S390xABICaller::from_ptr(sig, ptr, op, caller_conv, flags)?, &inputs[1..], ) } _ => unreachable!(), }; assert!(inputs.len() == abi.num_args()); for (i, input) in inputs.iter().enumerate() { let arg_reg = put_input_in_reg(ctx, *input); abi.emit_copy_regs_to_arg(ctx, i, ValueRegs::one(arg_reg)); } abi.emit_call(ctx); for (i, output) in outputs.iter().enumerate() { let retval_reg = get_output_reg(ctx, *output).only_reg().unwrap(); abi.emit_copy_retval_to_regs(ctx, i, ValueRegs::one(retval_reg)); } abi.accumulate_outgoing_args_size(ctx); } Opcode::FallthroughReturn | Opcode::Return => { for (i, input) in inputs.iter().enumerate() { let reg = put_input_in_reg(ctx, *input); let retval_reg = ctx.retval(i).only_reg().unwrap(); let ty = ctx.input_ty(insn, i); ctx.emit(Inst::gen_move(retval_reg, reg, ty)); } // N.B.: the Ret itself is generated by the ABI. 
} Opcode::RawBitcast | Opcode::Splat | Opcode::Swizzle | Opcode::Insertlane | Opcode::Extractlane | Opcode::Imin | Opcode::Umin | Opcode::Imax | Opcode::Umax | Opcode::AvgRound | Opcode::FminPseudo | Opcode::FmaxPseudo | Opcode::Uload8x8 | Opcode::Uload8x8Complex | Opcode::Sload8x8 | Opcode::Sload8x8Complex | Opcode::Uload16x4 | Opcode::Uload16x4Complex | Opcode::Sload16x4 | Opcode::Sload16x4Complex | Opcode::Uload32x2 | Opcode::Uload32x2Complex | Opcode::Sload32x2 | Opcode::Sload32x2Complex | Opcode::Vconst | Opcode::Shuffle | Opcode::Vsplit | Opcode::Vconcat | Opcode::Vselect | Opcode::VanyTrue | Opcode::VallTrue | Opcode::VhighBits | Opcode::ScalarToVector | Opcode::Snarrow | Opcode::Unarrow | Opcode::Uunarrow | Opcode::SwidenLow | Opcode::SwidenHigh | Opcode::UwidenLow | Opcode::UwidenHigh | Opcode::WideningPairwiseDotProductS | Opcode::SqmulRoundSat | Opcode::FvpromoteLow | Opcode::Fvdemote | Opcode::IaddPairwise => { // TODO unimplemented!("Vector ops not implemented."); } Opcode::Isplit | Opcode::Iconcat => unimplemented!("Wide integer ops not implemented."), Opcode::IfcmpSp => { panic!("Unused opcode should not be encountered."); } Opcode::LoadComplex | Opcode::Uload8Complex | Opcode::Sload8Complex | Opcode::Uload16Complex | Opcode::Sload16Complex | Opcode::Uload32Complex | Opcode::Sload32Complex | Opcode::StoreComplex | Opcode::Istore8Complex | Opcode::Istore16Complex | Opcode::Istore32Complex => { panic!("Load/store complex opcode should not be encountered."); } Opcode::Ifcmp | Opcode::Ffcmp | Opcode::Trapff | Opcode::Trueif | Opcode::Trueff | Opcode::Selectif => { panic!("Flags opcode should not be encountered."); } Opcode::Jump | Opcode::Brz | Opcode::Brnz | Opcode::BrIcmp | Opcode::Brif | Opcode::Brff | Opcode::BrTable => { panic!("Branch opcode reached non-branch lowering logic!"); } Opcode::IaddImm | Opcode::ImulImm | Opcode::UdivImm | Opcode::SdivImm | Opcode::UremImm | Opcode::SremImm | Opcode::IrsubImm | Opcode::IaddCin | Opcode::IaddIfcin | 
Opcode::IaddCout | Opcode::IaddCarry | Opcode::IaddIfcarry | Opcode::IsubBin | Opcode::IsubIfbin | Opcode::IsubBout | Opcode::IsubIfbout | Opcode::IsubBorrow | Opcode::IsubIfborrow | Opcode::BandImm | Opcode::BorImm | Opcode::BxorImm | Opcode::RotlImm | Opcode::RotrImm | Opcode::IshlImm | Opcode::UshrImm | Opcode::SshrImm | Opcode::IcmpImm | Opcode::IfcmpImm => { panic!("ALU+imm and ALU+carry ops should not appear here!"); } } Ok(()) } //============================================================================= // Lowering-backend trait implementation. impl LowerBackend for S390xBackend { type MInst = Inst; fn lower<C: LowerCtx<I = Inst>>(&self, ctx: &mut C, ir_inst: IRInst) -> CodegenResult<()> { lower_insn_to_regs(ctx, ir_inst, &self.flags, &self.isa_flags) } fn lower_branch_group<C: LowerCtx<I = Inst>>( &self, ctx: &mut C, branches: &[IRInst], targets: &[MachLabel], ) -> CodegenResult<()> { // A block should end with at most two branches. The first may be a // conditional branch; a conditional branch can be followed only by an // unconditional branch or fallthrough. Otherwise, if only one branch, // it may be an unconditional branch, a fallthrough, a return, or a // trap. These conditions are verified by `is_ebb_basic()` during the // verifier pass. assert!(branches.len() <= 2); if branches.len() == 2 { let op1 = ctx.data(branches[1]).opcode(); assert!(op1 == Opcode::Jump); } // Lower the first branch in ISLE. This will automatically handle // the second branch (if any) by emitting a two-way conditional branch. if let Ok(()) = super::lower::isle::lower_branch( ctx, &self.flags, &self.isa_flags, branches[0], targets, ) { return Ok(()); } unreachable!( "implemented in ISLE: branch = `{}`", ctx.dfg().display_inst(branches[0]), ); } }
30.973274
96
0.524484
ff1bd5e7ff289565c06076f8f7437743e1427ec4
15,212
use std::collections::hash_map::Entry; use std::collections::BTreeMap; use rustc_data_structures::fx::FxHashMap; use rustc_middle::ty::TyCtxt; use rustc_span::symbol::Symbol; use serde::ser::{Serialize, SerializeStruct, Serializer}; use crate::clean; use crate::clean::types::{FnDecl, FnRetTy, GenericBound, Generics, Type, WherePredicate}; use crate::formats::cache::Cache; use crate::formats::item_type::ItemType; use crate::html::markdown::short_markdown_summary; use crate::html::render::{IndexItem, IndexItemFunctionType, RenderType, TypeWithKind}; /// Indicates where an external crate can be found. crate enum ExternalLocation { /// Remote URL root of the external crate Remote(String), /// This external crate can be found in the local doc/ folder Local, /// The external crate could not be found. Unknown, } /// Builds the search index from the collected metadata crate fn build_index<'tcx>(krate: &clean::Crate, cache: &mut Cache, tcx: TyCtxt<'tcx>) -> String { let mut defid_to_pathid = FxHashMap::default(); let mut crate_items = Vec::with_capacity(cache.search_index.len()); let mut crate_paths = vec![]; // Attach all orphan items to the type's definition if the type // has since been learned. for &(did, ref item) in &cache.orphan_impl_items { if let Some(&(ref fqp, _)) = cache.paths.get(&did) { let desc = item .doc_value() .map_or_else(String::new, |s| short_markdown_summary(&s, &item.link_names(cache))); cache.search_index.push(IndexItem { ty: item.type_(), name: item.name.unwrap().to_string(), path: fqp[..fqp.len() - 1].join("::"), desc, parent: Some(did), parent_idx: None, search_type: get_index_search_type(item, tcx), aliases: item.attrs.get_doc_aliases(), }); } } let crate_doc = krate .module .doc_value() .map_or_else(String::new, |s| short_markdown_summary(&s, &krate.module.link_names(cache))); let Cache { ref mut search_index, ref paths, .. } = *cache; // Aliases added through `#[doc(alias = "...")]`. 
Since a few items can have the same alias, // we need the alias element to have an array of items. let mut aliases: BTreeMap<String, Vec<usize>> = BTreeMap::new(); // Sort search index items. This improves the compressibility of the search index. search_index.sort_unstable_by(|k1, k2| { // `sort_unstable_by_key` produces lifetime errors let k1 = (&k1.path, &k1.name, &k1.ty, &k1.parent); let k2 = (&k2.path, &k2.name, &k2.ty, &k2.parent); std::cmp::Ord::cmp(&k1, &k2) }); // Set up alias indexes. for (i, item) in search_index.iter().enumerate() { for alias in &item.aliases[..] { aliases.entry(alias.to_lowercase()).or_insert_with(Vec::new).push(i); } } // Reduce `DefId` in paths into smaller sequential numbers, // and prune the paths that do not appear in the index. let mut lastpath = String::new(); let mut lastpathid = 0usize; for item in search_index { item.parent_idx = item.parent.and_then(|defid| match defid_to_pathid.entry(defid) { Entry::Occupied(entry) => Some(*entry.get()), Entry::Vacant(entry) => { let pathid = lastpathid; entry.insert(pathid); lastpathid += 1; if let Some(&(ref fqp, short)) = paths.get(&defid) { crate_paths.push((short, fqp.last().unwrap().clone())); Some(pathid) } else { None } } }); // Omit the parent path if it is same to that of the prior item. if lastpath == item.path { item.path.clear(); } else { lastpath = item.path.clone(); } crate_items.push(&*item); } struct CrateData<'a> { doc: String, items: Vec<&'a IndexItem>, paths: Vec<(ItemType, String)>, // The String is alias name and the vec is the list of the elements with this alias. // // To be noted: the `usize` elements are indexes to `items`. 
aliases: &'a BTreeMap<String, Vec<usize>>, } impl<'a> Serialize for CrateData<'a> { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let has_aliases = !self.aliases.is_empty(); let mut crate_data = serializer.serialize_struct("CrateData", if has_aliases { 9 } else { 8 })?; crate_data.serialize_field("doc", &self.doc)?; crate_data.serialize_field( "t", &self.items.iter().map(|item| &item.ty).collect::<Vec<_>>(), )?; crate_data.serialize_field( "n", &self.items.iter().map(|item| &item.name).collect::<Vec<_>>(), )?; crate_data.serialize_field( "q", &self.items.iter().map(|item| &item.path).collect::<Vec<_>>(), )?; crate_data.serialize_field( "d", &self.items.iter().map(|item| &item.desc).collect::<Vec<_>>(), )?; crate_data.serialize_field( "i", &self .items .iter() .map(|item| { assert_eq!( item.parent.is_some(), item.parent_idx.is_some(), "`{}` is missing idx", item.name ); item.parent_idx.map(|x| x + 1).unwrap_or(0) }) .collect::<Vec<_>>(), )?; crate_data.serialize_field( "f", &self.items.iter().map(|item| &item.search_type).collect::<Vec<_>>(), )?; crate_data.serialize_field("p", &self.paths)?; if has_aliases { crate_data.serialize_field("a", &self.aliases)?; } crate_data.end() } } // Collect the index into a string format!( r#""{}":{}"#, krate.name(tcx), serde_json::to_string(&CrateData { doc: crate_doc, items: crate_items, paths: crate_paths, aliases: &aliases, }) .expect("failed serde conversion") // All these `replace` calls are because we have to go through JS string for JSON content. .replace(r"\", r"\\") .replace("'", r"\'") // We need to escape double quotes for the JSON. 
.replace("\\\"", "\\\\\"") ) } crate fn get_index_search_type<'tcx>( item: &clean::Item, tcx: TyCtxt<'tcx>, ) -> Option<IndexItemFunctionType> { let (mut inputs, mut output) = match *item.kind { clean::FunctionItem(ref f) => get_all_types(&f.generics, &f.decl, tcx), clean::MethodItem(ref m, _) => get_all_types(&m.generics, &m.decl, tcx), clean::TyMethodItem(ref m) => get_all_types(&m.generics, &m.decl, tcx), _ => return None, }; inputs.retain(|a| a.ty.name.is_some()); output.retain(|a| a.ty.name.is_some()); let output = if output.is_empty() { None } else { Some(output) }; Some(IndexItemFunctionType { inputs, output }) } fn get_index_type(clean_type: &clean::Type, generics: Vec<TypeWithKind>) -> RenderType { RenderType { name: get_index_type_name(clean_type, true).map(|s| s.as_str().to_ascii_lowercase()), generics: if generics.is_empty() { None } else { Some(generics) }, } } fn get_index_type_name(clean_type: &clean::Type, accept_generic: bool) -> Option<Symbol> { match *clean_type { clean::ResolvedPath { ref path, .. } => { let path_segment = path.segments.last().unwrap(); Some(path_segment.name) } clean::DynTrait(ref bounds, _) => { let path = &bounds[0].trait_; Some(path.segments.last().unwrap().name) } clean::Generic(s) if accept_generic => Some(s), clean::Primitive(ref p) => Some(p.as_sym()), clean::BorrowedRef { ref type_, .. } => get_index_type_name(type_, accept_generic), clean::Generic(_) | clean::BareFunction(_) | clean::Tuple(_) | clean::Slice(_) | clean::Array(_, _) | clean::RawPointer(_, _) | clean::QPath { .. } | clean::Infer | clean::ImplTrait(_) => None, } } /// The point of this function is to replace bounds with types. /// /// i.e. `[T, U]` when you have the following bounds: `T: Display, U: Option<T>` will return /// `[Display, Option]` (we just returns the list of the types, we don't care about the /// wrapped types in here). 
crate fn get_real_types<'tcx>( generics: &Generics, arg: &Type, tcx: TyCtxt<'tcx>, recurse: usize, res: &mut Vec<TypeWithKind>, ) { fn insert_ty( res: &mut Vec<TypeWithKind>, tcx: TyCtxt<'_>, ty: Type, mut generics: Vec<TypeWithKind>, ) { let is_full_generic = ty.is_full_generic(); if is_full_generic && generics.len() == 1 { // In this case, no need to go through an intermediate state if the generics // contains only one element. // // For example: // // fn foo<T: Display>(r: Option<T>) {} // // In this case, it would contain: // // ``` // [{ // name: "option", // generics: [{ // name: "", // generics: [ // name: "Display", // generics: [] // }] // }] // }] // ``` // // After removing the intermediate (unnecessary) full generic, it'll become: // // ``` // [{ // name: "option", // generics: [{ // name: "Display", // generics: [] // }] // }] // ``` // // To be noted that it can work if there is ONLY ONE generic, otherwise we still // need to keep it as is! res.push(generics.pop().unwrap()); return; } let mut index_ty = get_index_type(&ty, generics); if index_ty.name.as_ref().map(|s| s.is_empty()).unwrap_or(true) { return; } if is_full_generic { // We remove the name of the full generic because we have no use for it. index_ty.name = Some(String::new()); res.push(TypeWithKind::from((index_ty, ItemType::Generic))); } else if let Some(kind) = ty.def_id_no_primitives().map(|did| tcx.def_kind(did).into()) { res.push(TypeWithKind::from((index_ty, kind))); } else if ty.is_primitive() { // This is a primitive, let's store it as such. res.push(TypeWithKind::from((index_ty, ItemType::Primitive))); } } if recurse >= 10 { // FIXME: remove this whole recurse thing when the recursion bug is fixed return; } if let Type::Generic(arg_s) = *arg { if let Some(where_pred) = generics.where_predicates.iter().find(|g| match g { WherePredicate::BoundPredicate { ty, .. 
} => { ty.def_id_no_primitives() == arg.def_id_no_primitives() } _ => false, }) { let mut ty_generics = Vec::new(); let bounds = where_pred.get_bounds().unwrap_or_else(|| &[]); for bound in bounds.iter() { if let GenericBound::TraitBound(poly_trait, _) = bound { for x in poly_trait.generic_params.iter() { if !x.is_type() { continue; } if let Some(ty) = x.get_type() { get_real_types(generics, &ty, tcx, recurse + 1, &mut ty_generics); } } } } insert_ty(res, tcx, arg.clone(), ty_generics); } if let Some(bound) = generics.params.iter().find(|g| g.is_type() && g.name == arg_s) { let mut ty_generics = Vec::new(); for bound in bound.get_bounds().unwrap_or(&[]) { if let Some(path) = bound.get_trait_path() { let ty = Type::ResolvedPath { did: path.def_id(), path }; get_real_types(generics, &ty, tcx, recurse + 1, &mut ty_generics); } } insert_ty(res, tcx, arg.clone(), ty_generics); } } else { let mut ty_generics = Vec::new(); if let Some(arg_generics) = arg.generics() { for gen in arg_generics.iter() { get_real_types(generics, gen, tcx, recurse + 1, &mut ty_generics); } } insert_ty(res, tcx, arg.clone(), ty_generics); } } /// Return the full list of types when bounds have been resolved. /// /// i.e. `fn foo<A: Display, B: Option<A>>(x: u32, y: B)` will return /// `[u32, Display, Option]`. crate fn get_all_types<'tcx>( generics: &Generics, decl: &FnDecl, tcx: TyCtxt<'tcx>, ) -> (Vec<TypeWithKind>, Vec<TypeWithKind>) { let mut all_types = Vec::new(); for arg in decl.inputs.values.iter() { if arg.type_.is_self_type() { continue; } // FIXME: performance wise, it'd be much better to move `args` declaration outside of the // loop and replace this line with `args.clear()`. let mut args = Vec::new(); get_real_types(generics, &arg.type_, tcx, 0, &mut args); if !args.is_empty() { // FIXME: once back to performance improvements, replace this line with: // `all_types.extend(args.drain(..));`. 
all_types.extend(args); } else { if let Some(kind) = arg.type_.def_id_no_primitives().map(|did| tcx.def_kind(did).into()) { all_types.push(TypeWithKind::from((get_index_type(&arg.type_, vec![]), kind))); } } } let mut ret_types = Vec::new(); match decl.output { FnRetTy::Return(ref return_type) => { get_real_types(generics, return_type, tcx, 0, &mut ret_types); if ret_types.is_empty() { if let Some(kind) = return_type.def_id_no_primitives().map(|did| tcx.def_kind(did).into()) { ret_types.push(TypeWithKind::from((get_index_type(return_type, vec![]), kind))); } } } _ => {} }; (all_types, ret_types) }
36.92233
100
0.521825
3ae2e8f8a9911125a6de144c9ebf51e697e4adc1
10,818
//! Service and ServiceFactory implementation. Specialized wrapper over substrate service. use liqum_node_runtime::{self, opaque::Block, RuntimeApi}; use sc_client_api::ExecutorProvider; use sc_consensus::LongestChain; use sc_executor::native_executor_instance; pub use sc_executor::NativeExecutor; use sc_finality_grandpa::{ FinalityProofProvider as GrandpaFinalityProofProvider, SharedVoterState, StorageAndProofProvider, }; use sc_service::{error::Error as ServiceError, AbstractService, Configuration, ServiceBuilder}; use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; use sp_inherents::InherentDataProviders; use std::sync::Arc; use std::time::Duration; // Our native executor instance. native_executor_instance!( pub Executor, liqum_node_runtime::api::dispatch, liqum_node_runtime::native_version, ); /// Starts a `ServiceBuilder` for a full service. /// /// Use this macro if you don't actually need the full service, but just the builder in order to /// be able to perform chain operations. macro_rules! new_full_start { ($config:expr) => {{ use jsonrpc_core::IoHandler; use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; use std::sync::Arc; let mut import_setup = None; let inherent_data_providers = sp_inherents::InherentDataProviders::new(); let builder = sc_service::ServiceBuilder::new_full::< liqum_node_runtime::opaque::Block, liqum_node_runtime::RuntimeApi, crate::service::Executor, >($config)? .with_select_chain(|_config, backend| Ok(sc_consensus::LongestChain::new(backend.clone())))? .with_transaction_pool(|builder| { let pool_api = sc_transaction_pool::FullChainApi::new(builder.client().clone()); Ok(sc_transaction_pool::BasicPool::new( builder.config().transaction_pool.clone(), std::sync::Arc::new(pool_api), builder.prometheus_registry(), )) })? 
.with_import_queue( |_config, client, mut select_chain, _transaction_pool, spawn_task_handle, registry| { let select_chain = select_chain .take() .ok_or_else(|| sc_service::Error::SelectChainRequired)?; let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import( client.clone(), &(client.clone() as Arc<_>), select_chain, )?; let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new( grandpa_block_import.clone(), client.clone(), ); let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _>( sc_consensus_aura::slot_duration(&*client)?, aura_block_import, Some(Box::new(grandpa_block_import.clone())), None, client, inherent_data_providers.clone(), spawn_task_handle, registry, )?; import_setup = Some((grandpa_block_import, grandpa_link)); Ok(import_queue) }, )? .with_rpc_extensions(|builder| -> Result<IoHandler<sc_rpc::Metadata>, _> { let handler = pallet_contracts_rpc::Contracts::new(builder.client().clone()); let delegate = pallet_contracts_rpc::ContractsApi::to_delegate(handler); let mut io = IoHandler::default(); io.extend_with(delegate); Ok(io) })?; (builder, import_setup, inherent_data_providers) }}; } /// Builds a new service for a full client. pub fn new_full(config: Configuration) -> Result<impl AbstractService, ServiceError> { let role = config.role.clone(); let force_authoring = config.force_authoring; let name = config.network.node_name.clone(); let disable_grandpa = config.disable_grandpa; let (builder, mut import_setup, inherent_data_providers) = new_full_start!(config); let (block_import, grandpa_link) = import_setup.take().expect( "Link Half and Block Import are present for Full Services or setup failed before. 
qed", ); let service = builder .with_finality_proof_provider(|client, backend| { // GenesisAuthoritySetProvider is implemented for StorageAndProofProvider let provider = client as Arc<dyn StorageAndProofProvider<_, _>>; Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, provider)) as _) })? .build()?; if role.is_authority() { let proposer = sc_basic_authorship::ProposerFactory::new( service.client(), service.transaction_pool(), service.prometheus_registry().as_ref(), ); let client = service.client(); let select_chain = service .select_chain() .ok_or(ServiceError::SelectChainRequired)?; let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); let aura = sc_consensus_aura::start_aura::<_, _, _, _, _, AuraPair, _, _, _>( sc_consensus_aura::slot_duration(&*client)?, client, select_chain, block_import, proposer, service.network(), inherent_data_providers.clone(), force_authoring, service.keystore(), can_author_with, )?; // the AURA authoring task is considered essential, i.e. if it // fails we take down the service with it. service.spawn_essential_task("aura", aura); } // if the node isn't actively participating in consensus then it doesn't // need a keystore, regardless of which protocol we use below. let keystore = if role.is_authority() { Some(service.keystore()) } else { None }; let grandpa_config = sc_finality_grandpa::Config { // FIXME #1578 make this available through chainspec gossip_duration: Duration::from_millis(333), justification_period: 512, name: Some(name), observer_enabled: false, keystore, is_authority: role.is_network_authority(), }; let enable_grandpa = !disable_grandpa; if enable_grandpa { // start the full GRANDPA voter // NOTE: non-authorities could run the GRANDPA observer protocol, but at // this point the full voter should provide better guarantees of block // and vote data availability than the observer. 
The observer has not // been tested extensively yet and having most nodes in a network run it // could lead to finality stalls. let grandpa_config = sc_finality_grandpa::GrandpaParams { config: grandpa_config, link: grandpa_link, network: service.network(), inherent_data_providers: inherent_data_providers.clone(), telemetry_on_connect: Some(service.telemetry_on_connect_stream()), voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(), prometheus_registry: service.prometheus_registry(), shared_voter_state: SharedVoterState::empty(), }; // the GRANDPA voter task is considered infallible, i.e. // if it fails we take down the service with it. service.spawn_essential_task( "grandpa-voter", sc_finality_grandpa::run_grandpa_voter(grandpa_config)?, ); } else { sc_finality_grandpa::setup_disabled_grandpa( service.client(), &inherent_data_providers, service.network(), )?; } Ok(service) } /// Builds a new service for a light client. pub fn new_light(config: Configuration) -> Result<impl AbstractService, ServiceError> { let inherent_data_providers = InherentDataProviders::new(); ServiceBuilder::new_light::<Block, RuntimeApi, Executor>(config)? .with_select_chain(|_config, backend| Ok(LongestChain::new(backend.clone())))? .with_transaction_pool(|builder| { let fetcher = builder .fetcher() .ok_or_else(|| "Trying to start light transaction pool without active fetcher")?; let pool_api = sc_transaction_pool::LightChainApi::new(builder.client().clone(), fetcher.clone()); let pool = sc_transaction_pool::BasicPool::with_revalidation_type( builder.config().transaction_pool.clone(), Arc::new(pool_api), builder.prometheus_registry(), sc_transaction_pool::RevalidationType::Light, ); Ok(pool) })? 
.with_import_queue_and_fprb( |_config, client, backend, fetcher, _select_chain, _tx_pool, spawn_task_handle, prometheus_registry| { let fetch_checker = fetcher .map(|fetcher| fetcher.checker().clone()) .ok_or_else(|| { "Trying to start light import queue without active fetch checker" })?; let grandpa_block_import = sc_finality_grandpa::light_block_import( client.clone(), backend, &(client.clone() as Arc<_>), Arc::new(fetch_checker), )?; let finality_proof_import = grandpa_block_import.clone(); let finality_proof_request_builder = finality_proof_import.create_finality_proof_request_builder(); let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _>( sc_consensus_aura::slot_duration(&*client)?, grandpa_block_import, None, Some(Box::new(finality_proof_import)), client, inherent_data_providers.clone(), spawn_task_handle, prometheus_registry, )?; Ok((import_queue, finality_proof_request_builder)) }, )? .with_finality_proof_provider(|client, backend| { // GenesisAuthoritySetProvider is implemented for StorageAndProofProvider let provider = client as Arc<dyn StorageAndProofProvider<_, _>>; Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, provider)) as _) })? .build() }
39.626374
100
0.604918
dd7fc1dac1a4c6910f3229e4349106bd85b61814
4,227
/* * Copyright (c) 2017 Boucher, Antoni <[email protected]> * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ use gtk::{ ContainerExt, EditableSignals, Entry, EntryExt, Inhibit, Label, LabelExt, WidgetExt, Window, WindowType, }; use gtk::Orientation::Vertical; use relm_derive::Msg; use relm::{connect, Relm, Update, Widget, WidgetTest}; use self::Msg::*; struct Model { content: String, } #[derive(Msg)] enum Msg { Change, Quit, } struct Win { model: Model, widgets: Widgets, } #[derive(Clone)] struct Widgets { input: Entry, label: Label, window: Window, } impl Update for Win { type Model = Model; type ModelParam = (); type Msg = Msg; fn model(_: &Relm<Self>, _: ()) -> Model { Model { content: String::new(), } } fn update(&mut self, event: Msg) { match event { Change => { self.model.content = self.widgets.input.get_text() .chars() .rev() .collect(); self.widgets.label.set_text(&self.model.content); }, Quit => gtk::main_quit(), } } } impl Widget for Win { type Root = Window; fn root(&self) -> Self::Root { self.widgets.window.clone() } fn view(relm: &Relm<Self>, model: Self::Model) -> Self { let vbox = gtk::Box::new(Vertical, 0); let input = Entry::new(); vbox.add(&input); let label = Label::new(None); vbox.add(&label); let window = Window::new(WindowType::Toplevel); window.add(&vbox); window.show_all(); connect!(relm, input, connect_changed(_), Change); connect!(relm, window, connect_delete_event(_, _), return (Some(Quit), Inhibit(false))); Win { model, widgets: Widgets { input, label, window, }, } } } impl WidgetTest for Win { type Widgets = Widgets; fn get_widgets(&self) -> Self::Widgets { self.widgets.clone() } } fn main() { Win::run(()).expect("Win::run failed"); } #[cfg(test)] mod tests { use gdk::keys::constants as key; use gtk::LabelExt; use gtk_test::assert_text; use relm_test::{enter_key, enter_keys}; use crate::Win; #[test] fn label_change() { let (_component, widgets) = relm::init_test::<Win>(()).expect("init_test failed"); let entry = &widgets.input; let label = &widgets.label; assert_text!(label, ""); enter_keys(entry, "test"); assert_text!(label, "tset"); 
enter_key(entry, key::BackSpace); assert_text!(label, "set"); enter_key(entry, key::Home); //enter_key(entry, key::Delete); // TODO: when supported by enigo. enter_keys(entry, "a"); assert_text!(label, "seta"); enter_key(entry, key::End); enter_keys(entry, "a"); //assert_text!(label, "aseta"); // FIXME } }
25.011834
96
0.577478
d727afac86e3d7b6ff953fc8d1d86aca2b454098
2,168
use util::input::get_input_string; #[allow(dead_code)] fn most_common_bit(list: &Vec<String>, idx: usize) -> i64 { let mut count = 0; for s in list { let s_vec: Vec<char> = s.chars().collect(); if s_vec[idx] == '0' { count -= 1 } else { count += 1 } } (count >= 0) as i64 } #[allow(dead_code)] pub fn solve_part1() -> i64 { let strings: Vec<String> = get_input_string("03"); let length = strings[0].len(); let mut common_digits_list = vec![String::new(); strings[0].len()]; for i in 0..length { common_digits_list[i] = most_common_bit(&strings, i).to_string(); } let epsilon_string = common_digits_list .iter() .map(|x| if x == "1" { "0" } else { "1" }) .collect::<Vec<_>>() .join(""); let gamma_string = common_digits_list.join(""); i64::from_str_radix(&gamma_string, 2).unwrap() * i64::from_str_radix(&epsilon_string, 2).unwrap() } #[allow(dead_code)] pub fn solve_part2() -> i64 { let (mut oxygen_list, mut co2_list): (Vec<String>, Vec<String>) = (get_input_string("03"), get_input_string("03")); let (mut ogr, mut co2): (String, String) = (String::new(), String::new()); let length = oxygen_list[0].len(); for i in 0..length { let oxy_bit = most_common_bit(&oxygen_list, i).to_string(); let co2_bit = (1 - most_common_bit(&co2_list, i)).to_string(); oxygen_list.retain(|e| e[i..i + 1] == oxy_bit); co2_list.retain(|e| e[i..i + 1] == co2_bit); if oxygen_list.len() == 1 && ogr.is_empty() { ogr = oxygen_list[0].clone(); } if co2_list.len() == 1 && co2.is_empty() { co2 = co2_list[0].clone(); } } i64::from_str_radix(&ogr, 2).unwrap() * i64::from_str_radix(&co2, 2).unwrap() } #[cfg(test)] mod tests { use super::*; #[test] fn test_solve_part1() { assert_eq!(solve_part1(), 3633500); } #[test] fn test_solve_part2() { assert_eq!(solve_part2(), 4550283); } } fn main() { println!("{}", solve_part1()); println!("{}", solve_part2()); }
28.526316
81
0.553506
ed5aa028540f8b0c7bc17908b0d74337b78bc3c8
98
//! Structs related to ASN.1 Parser pub mod module; pub mod oid; pub mod defs; pub mod types;
9.8
35
0.693878
9b2a558c1d23e25809422760421ed6e923b923f6
532
use pairing::{CurveAffine, Engine}; use std::fmt; pub trait PolyEngine { type Commitment: Commitment<Point = <Self::Pairing as Engine>::G1Affine>; type Opening: Opening; type Pairing: Engine; // TODO: Make default generics of this trait } pub trait Commitment: Clone + Copy + Sized + Send + Sync + 'static { type Point: CurveAffine; fn from_point(point: &Self::Point) -> Self; fn into_point(&self) -> Self::Point; fn into_bytes(&self) -> Vec<u8>; } pub trait Opening {} pub trait Challenge {}
21.28
77
0.663534
8f471ebbd18a4893516296e5929493a590525cfb
725
// Copyright 2018-2021 the Deno authors. All rights reserved. MIT license. use deno_core::url::Url; use deno_file::op_file_create_object_url; use deno_file::op_file_revoke_object_url; use deno_file::BlobUrlStore; use deno_file::Location; pub fn init( rt: &mut deno_core::JsRuntime, blob_url_store: BlobUrlStore, maybe_location: Option<Url>, ) { { let op_state = rt.op_state(); let mut op_state = op_state.borrow_mut(); op_state.put(blob_url_store); if let Some(location) = maybe_location { op_state.put(Location(location)); } } super::reg_sync(rt, "op_file_create_object_url", op_file_create_object_url); super::reg_sync(rt, "op_file_revoke_object_url", op_file_revoke_object_url); }
30.208333
78
0.746207
f8deb3533bce873eb522e43bf1a00bd4417e9c9d
8,106
//! Storage crate provides the interfaces to interact with the database. //! The backend database used in this crate is `Postgres`, and interaction //! with it is based on the `diesel` crate. //! //! The essential structure of this crate is the `StorageProcessor`, which //! holds down the connection to the database and provides abstract interfaces //! to modify it (called `Schema`s). //! //! # Crate Architecture Overview //! //! This crate can be divided into three logical parts: //! - Connection utilities. Tools to establish connections to the database, //! stored in the `connection` module. //! - `Schema`s. Schema is a logically extracted access to the part of //! the database, e.g. `ethereum` (which contains methods to store the //! information about interaction with the Ethereum blockchain). //! - `StorageProcessor`. A structure that connects the two points above //! into one user-friendly interface. //! //! Most of schema modules contain at least two files: //! - `mod.rs`, which contains the schema itself. //! - `records.rs`, which contains the representation of the associated database //! tables as structures. //! //! The latter ones usually don't contain any logic other than the structures //! declarations, and all the logic is contained in either schema (for most //! modules), or in an additional helper module (e.g. in the `chain/block` module). //! //! # Schema Hierarchy //! //! There are the following sets of schemas: //! //! - config, for the server config. //! - data_restore, for the data_restore crate. //! - ethereum, for the data associated with the Ethereum blockchain. //! - prover, for the data on prover jobs, proofs, etc. //! - tokens, for storing and loading known tokens. //! - chain - the biggest one, which includes several schemas for the ZKSync sidechain itself. //! //! The chain module includes the following schemas: //! //! - account, for storing and loading account data. //! 
- block, the main one, which implements the logic of the block creation. //! - operations, the transactions storage. //! - operations_ext, a set of getters for the operations, more specific and convenient to use than operations has. //! - state, basically the sidechain state manager (which includes the applying of the state changes). //! - stats, other auxiliary schema which provides additional getters for the database stats. //! //! If you have to add a method, and can't decide which schema it belongs to, use the following logic: //! //! 1. Will your method be used by different modules? If no (e.g. it'll be only used by `eth_sender` or `data_restore`), //! then mind adding method to high-level schema (you may even create a new one, if it makes sense). //! If yes, probably it affects the sidechain state, and you should choose one of the `chain` schemas. //! 2. Will your method be used by other schemas? If yes, choose one of the "low-level" schemas, like `operations, //! or `account`. //! 3. Is your method is some form of convenient getter? If so, `operations_ext` may be suitable. //! 4. Otherwise, it probably should be in `block` (for high-level interaction), `state` (for ZKSync tables update that //! are not low-level enough for other modules), or a new schema (if none of existing ones fit your needs). //! //! # Testing Approach //! //! Tests for the storage use the actual empty Postgres database. //! Because of that, these tests are disabled by default, to run them you must use //! `zksync db-test` (or `zksync db-test-no-reset`, if this is not a first run) //! command, which will setup the database and enable the tests by passing a feature flag. //! //! Tests are implemented in a form of "test transactions", which are database transactions //! that will never be committed. Thus it is not required to clear the database after running //! tests. Also the database used for tests is different than the database used for `server`, //! 
thus one should not fear to overwrite any important data by running the tests. // `sqlx` macros result in these warning being triggered. #![allow(clippy::toplevel_ref_arg, clippy::suspicious_else_formatting)] // Built-in deps // use std::env; // External imports use sqlx::{pool::PoolConnection, postgres::Postgres, Connection, PgConnection, Transaction}; // Workspace imports // Local imports use crate::connection::holder::ConnectionHolder; // mod schema; #[cfg(test)] mod tests; pub mod chain; pub mod config; pub mod connection; pub mod data_restore; pub mod diff; pub mod ethereum; pub mod prover; pub mod tokens; pub mod utils; pub use crate::connection::ConnectionPool; pub type QueryResult<T> = Result<T, failure::Error>; /// Storage processor is the main storage interaction point. /// It holds down the connection (either direct or pooled) to the database /// and provide methods to obtain different storage schemas. #[derive(Debug)] pub struct StorageProcessor<'a> { conn: ConnectionHolder<'a>, in_transaction: bool, } impl<'a> StorageProcessor<'a> { /// Creates a `StorageProcessor` using an unique sole connection to the database. pub async fn establish_connection<'b>() -> QueryResult<StorageProcessor<'b>> { let database_url = std::env::var("DATABASE_URL").expect("DATABASE_URL must be set"); let connection = PgConnection::connect(&database_url).await?; Ok(StorageProcessor { conn: ConnectionHolder::Direct(connection), in_transaction: false, }) } pub async fn start_transaction<'c: 'b, 'b>( &'c mut self, ) -> Result<StorageProcessor<'b>, failure::Error> { let transaction = self.conn().begin().await?; let mut processor = StorageProcessor::from_transaction(transaction); processor.in_transaction = true; Ok(processor) } /// Checks if the `StorageProcessor` is currently within database transaction. 
pub fn in_transaction(&self) -> bool { self.in_transaction } pub fn from_transaction(conn: Transaction<'_, Postgres>) -> StorageProcessor<'_> { StorageProcessor { conn: ConnectionHolder::Transaction(conn), in_transaction: true, } } pub async fn commit(self) -> QueryResult<()> { if let ConnectionHolder::Transaction(transaction) = self.conn { transaction.commit().await?; Ok(()) } else { panic!("StorageProcessor::commit can only be invoked after calling StorageProcessor::begin_transaction"); } } /// Creates a `StorageProcessor` using a pool of connections. /// This method borrows one of the connections from the pool, and releases it /// after `drop`. pub fn from_pool(conn: PoolConnection<Postgres>) -> Self { Self { conn: ConnectionHolder::Pooled(conn), in_transaction: false, } } /// Gains access to the `Chain` schemas. pub fn chain(&mut self) -> chain::ChainIntermediator<'_, 'a> { chain::ChainIntermediator(self) } /// Gains access to the `Config` schema. pub fn config_schema(&mut self) -> config::ConfigSchema<'_, 'a> { config::ConfigSchema(self) } /// Gains access to the `DataRestore` schema. pub fn data_restore_schema(&mut self) -> data_restore::DataRestoreSchema<'_, 'a> { data_restore::DataRestoreSchema(self) } /// Gains access to the `Ethereum` schema. pub fn ethereum_schema(&mut self) -> ethereum::EthereumSchema<'_, 'a> { ethereum::EthereumSchema(self) } /// Gains access to the `Prover` schema. pub fn prover_schema(&mut self) -> prover::ProverSchema<'_, 'a> { prover::ProverSchema(self) } /// Gains access to the `Tokens` schema. pub fn tokens_schema(&mut self) -> tokens::TokensSchema<'_, 'a> { tokens::TokensSchema(self) } fn conn(&mut self) -> &mut PgConnection { match &mut self.conn { ConnectionHolder::Pooled(conn) => conn, ConnectionHolder::Direct(conn) => conn, ConnectionHolder::Transaction(conn) => conn, } } }
40.53
120
0.685788
0329db78b8f3936aaa4851c7d0962720042e440b
599
/* * Humor API * * Awesome Humor API. * * The version of the OpenAPI document: 1.0 * Contact: [email protected] * Generated by: https://openapi-generator.tech */ #[allow(unused_imports)] use serde_json::Value; #[derive(Debug, Serialize, Deserialize)] pub struct InlineResponse2009 { #[serde(rename = "joke")] pub joke: String, #[serde(rename = "tags")] pub tags: Vec<String>, } impl InlineResponse2009 { pub fn new(joke: String, tags: Vec<String>) -> InlineResponse2009 { InlineResponse2009 { joke: joke, tags: tags, } } }
19.322581
71
0.621035
649d20b5b360a429d465614ddfbca4832b5c7b05
3,354
#[doc = "Register `PRO_SPI_INTR_1_MAP` reader"] pub struct R(crate::R<PRO_SPI_INTR_1_MAP_SPEC>); impl core::ops::Deref for R { type Target = crate::R<PRO_SPI_INTR_1_MAP_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<PRO_SPI_INTR_1_MAP_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<PRO_SPI_INTR_1_MAP_SPEC>) -> Self { R(reader) } } #[doc = "Register `PRO_SPI_INTR_1_MAP` writer"] pub struct W(crate::W<PRO_SPI_INTR_1_MAP_SPEC>); impl core::ops::Deref for W { type Target = crate::W<PRO_SPI_INTR_1_MAP_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<PRO_SPI_INTR_1_MAP_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<PRO_SPI_INTR_1_MAP_SPEC>) -> Self { W(writer) } } #[doc = "Field `PRO_SPI_INTR_1_MAP` reader - "] pub struct PRO_SPI_INTR_1_MAP_R(crate::FieldReader<u8, u8>); impl PRO_SPI_INTR_1_MAP_R { #[inline(always)] pub(crate) fn new(bits: u8) -> Self { PRO_SPI_INTR_1_MAP_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PRO_SPI_INTR_1_MAP_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PRO_SPI_INTR_1_MAP` writer - "] pub struct PRO_SPI_INTR_1_MAP_W<'a> { w: &'a mut W, } impl<'a> PRO_SPI_INTR_1_MAP_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0x1f) | (value as u32 & 0x1f); self.w } } impl R { #[doc = "Bits 0:4"] #[inline(always)] pub fn pro_spi_intr_1_map(&self) -> PRO_SPI_INTR_1_MAP_R { PRO_SPI_INTR_1_MAP_R::new((self.bits & 0x1f) as u8) } } impl W { #[doc = "Bits 0:4"] #[inline(always)] pub fn pro_spi_intr_1_map(&mut self) -> PRO_SPI_INTR_1_MAP_W { PRO_SPI_INTR_1_MAP_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn 
bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [pro_spi_intr_1_map](index.html) module"] pub struct PRO_SPI_INTR_1_MAP_SPEC; impl crate::RegisterSpec for PRO_SPI_INTR_1_MAP_SPEC { type Ux = u32; } #[doc = "`read()` method returns [pro_spi_intr_1_map::R](R) reader structure"] impl crate::Readable for PRO_SPI_INTR_1_MAP_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [pro_spi_intr_1_map::W](W) writer structure"] impl crate::Writable for PRO_SPI_INTR_1_MAP_SPEC { type Writer = W; } #[doc = "`reset()` method sets PRO_SPI_INTR_1_MAP to value 0x10"] impl crate::Resettable for PRO_SPI_INTR_1_MAP_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0x10 } }
32.25
399
0.639237
e2e56a23f38d25d46cd844261fe62554de9e9452
457
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR MIT //! This module provides a mechanism which Kani can use to override standard codegen. //! For example, we the Kani provides pseudo-functions, such as kani::assume(). //! These functions should not be codegenned as MIR. //! Instead, we use a "hook" to generate the correct CBMC intrinsic. mod hooks; pub use hooks::{fn_hooks, GotocHooks};
38.083333
85
0.743982
6aa54a2c25c43476ae0e85e04155cae00d4d9670
2,737
use std::collections::HashMap; use chrono::prelude::*; use chrono::Duration; use chrono_english::{parse_date_string, Dialect}; use cli_table::{format::Justify, print_stdout, Cell, Style, Table}; use eyre::{eyre, Result}; use structopt::StructOpt; use atuin_client::database::Database; use atuin_client::history::History; use atuin_client::settings::Settings; #[derive(StructOpt)] pub enum Cmd { #[structopt( about="compute statistics for all of time", aliases=&["d", "da"], )] All, #[structopt( about="compute statistics for a single day", aliases=&["d", "da"], )] Day { words: Vec<String> }, } fn compute_stats(history: &[History]) -> Result<()> { let mut commands = HashMap::<String, i64>::new(); for i in history { *commands.entry(i.command.clone()).or_default() += 1; } let most_common_command = commands.iter().max_by(|a, b| a.1.cmp(b.1)); if most_common_command.is_none() { return Err(eyre!("No commands found")); } let table = vec![ vec![ "Most used command".cell(), most_common_command .unwrap() .0 .cell() .justify(Justify::Right), ], vec![ "Commands ran".cell(), history.len().to_string().cell().justify(Justify::Right), ], vec![ "Unique commands ran".cell(), commands.len().to_string().cell().justify(Justify::Right), ], ] .table() .title(vec![ "Statistic".cell().bold(true), "Value".cell().bold(true), ]) .bold(true); print_stdout(table)?; Ok(()) } impl Cmd { pub async fn run( &self, db: &mut (impl Database + Send + Sync), settings: &Settings, ) -> Result<()> { match self { Self::Day { words } => { let words = if words.is_empty() { String::from("yesterday") } else { words.join(" ") }; let start = match settings.dialect.to_lowercase().as_str() { "uk" => parse_date_string(&words, Local::now(), Dialect::Uk)?, _ => parse_date_string(&words, Local::now(), Dialect::Us)?, }; let end = start + Duration::days(1); let history = db.range(start.into(), end.into()).await?; compute_stats(&history)?; Ok(()) } Self::All => { let history = db.list(None, false).await?; 
compute_stats(&history)?; Ok(()) } } } }
24.881818
82
0.495798
9b5183a852d452b9cf52a01e0dfb0f5366f824ad
109
pub use coco_branch::CocoBranch; pub use coco_commit::CocoCommit; pub mod coco_branch; pub mod coco_commit;
18.166667
32
0.807339
1401aa6fcae6c4e1ab7d73ac6928bdbf378c9748
8,618
use crate::commands::PerItemCommand; use crate::data::command_dict; use crate::prelude::*; use nu_errors::ShellError; use nu_protocol::{ CallInfo, NamedType, PositionalType, Primitive, ReturnSuccess, Signature, SyntaxShape, TaggedDictBuilder, UntaggedValue, Value, }; use nu_source::SpannedItem; use nu_value_ext::get_data_by_key; pub struct Help; impl PerItemCommand for Help { fn name(&self) -> &str { "help" } fn signature(&self) -> Signature { Signature::build("help").rest(SyntaxShape::Any, "the name of command(s) to get help on") } fn usage(&self) -> &str { "Display help information about commands." } fn run( &self, call_info: &CallInfo, registry: &CommandRegistry, _raw_args: &RawCommandArgs, _input: Value, ) -> Result<OutputStream, ShellError> { let tag = &call_info.name_tag; match call_info.args.nth(0) { Some(Value { value: UntaggedValue::Primitive(Primitive::String(document)), tag, }) => { let mut help = VecDeque::new(); if document == "commands" { let mut sorted_names = registry.names(); sorted_names.sort(); for cmd in sorted_names { let mut short_desc = TaggedDictBuilder::new(tag.clone()); let value = command_dict( registry.get_command(&cmd).ok_or_else(|| { ShellError::labeled_error( format!("Could not load {}", cmd), "could not load command", tag, ) })?, tag.clone(), ); short_desc.insert_untagged("name", cmd); short_desc.insert_untagged( "description", get_data_by_key(&value, "usage".spanned_unknown()) .ok_or_else(|| { ShellError::labeled_error( "Expected a usage key", "expected a 'usage' key", &value.tag, ) })? 
.as_string()?, ); help.push_back(ReturnSuccess::value(short_desc.into_value())); } } else if let Some(command) = registry.get_command(document) { return Ok( get_help(&command.name(), &command.usage(), command.signature()).into(), ); } else { return Err(ShellError::labeled_error( "Can't find command (use 'help commands' for full list)", "can't find command", tag, )); } let help = futures::stream::iter(help); Ok(help.to_output_stream()) } _ => { let msg = r#"Welcome to Nushell. Here are some tips to help you get started. * help commands - list all available commands * help <command name> - display help about a particular command Nushell works on the idea of a "pipeline". Pipelines are commands connected with the '|' character. Each stage in the pipeline works together to load, parse, and display information to you. [Examples] List the files in the current directory, sorted by size: ls | sort-by size Get information about the current system: sys | get host Get the processes on your system actively using CPU: ps | where cpu > 0 You can also learn more at https://www.nushell.sh/book/"#; let output_stream = futures::stream::iter(vec![ReturnSuccess::value( UntaggedValue::string(msg).into_value(tag), )]); Ok(output_stream.to_output_stream()) } } } } pub(crate) fn get_help( cmd_name: &str, cmd_usage: &str, cmd_sig: Signature, ) -> impl Into<OutputStream> { let mut help = VecDeque::new(); let mut long_desc = String::new(); long_desc.push_str(&cmd_usage); long_desc.push_str("\n"); let signature = cmd_sig; let mut one_liner = String::new(); one_liner.push_str(&signature.name); one_liner.push_str(" "); for positional in &signature.positional { match &positional.0 { PositionalType::Mandatory(name, _m) => { one_liner.push_str(&format!("<{}> ", name)); } PositionalType::Optional(name, _o) => { one_liner.push_str(&format!("({}) ", name)); } } } if signature.rest_positional.is_some() { one_liner.push_str(" ...args"); } if !signature.named.is_empty() { one_liner.push_str("{flags} "); 
} long_desc.push_str(&format!("\nUsage:\n > {}\n", one_liner)); if !signature.positional.is_empty() || signature.rest_positional.is_some() { long_desc.push_str("\nparameters:\n"); for positional in signature.positional { match positional.0 { PositionalType::Mandatory(name, _m) => { long_desc.push_str(&format!(" <{}> {}\n", name, positional.1)); } PositionalType::Optional(name, _o) => { long_desc.push_str(&format!(" ({}) {}\n", name, positional.1)); } } } if let Some(rest_positional) = signature.rest_positional { long_desc.push_str(&format!(" ...args: {}\n", rest_positional.1)); } } if !signature.named.is_empty() { long_desc.push_str("\nflags:\n"); for (flag, ty) in signature.named { let msg = match ty.0 { NamedType::Switch(s) => { if let Some(c) = s { format!( " -{}, --{}{} {}\n", c, flag, if !ty.1.is_empty() { ":" } else { "" }, ty.1 ) } else { format!( " --{}{} {}\n", flag, if !ty.1.is_empty() { ":" } else { "" }, ty.1 ) } } NamedType::Mandatory(s, m) => { if let Some(c) = s { format!( " -{}, --{} <{}> (required parameter){} {}\n", c, flag, m.display(), if !ty.1.is_empty() { ":" } else { "" }, ty.1 ) } else { format!( " --{} <{}> (required parameter){} {}\n", flag, m.display(), if !ty.1.is_empty() { ":" } else { "" }, ty.1 ) } } NamedType::Optional(s, o) => { if let Some(c) = s { format!( " -{}, --{} <{}>{} {}\n", c, flag, o.display(), if !ty.1.is_empty() { ":" } else { "" }, ty.1 ) } else { format!( " --{} <{}>{} {}\n", flag, o.display(), if !ty.1.is_empty() { ":" } else { "" }, ty.1 ) } } }; long_desc.push_str(&msg); } } help.push_back(ReturnSuccess::value( UntaggedValue::string(long_desc).into_value(Tag::from((0, cmd_name.len(), None))), )); help }
34.610442
99
0.411348
f463cdbde999f59b0804ba737b0ee4f297bfea1b
1,425
/* * Copyright (C) 2015-2019 Benjamin Fry <[email protected]> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //! All record data structures and related serialization methods // TODO: these should each be it's own struct, it would make parsing and decoding a little cleaner // and also a little more ergonomic when accessing. // each of these module's has the parser for that rdata embedded, to keep the file sizes down... pub mod a; pub mod aaaa; pub mod caa; pub mod mx; pub mod name; pub mod naptr; pub mod null; pub mod openpgpkey; pub mod opt; pub mod soa; pub mod srv; pub mod sshfp; pub mod tlsa; pub mod txt; pub use self::caa::CAA; pub use self::mx::MX; pub use self::naptr::NAPTR; pub use self::null::NULL; pub use self::openpgpkey::OPENPGPKEY; pub use self::opt::OPT; pub use self::soa::SOA; pub use self::srv::SRV; pub use self::sshfp::SSHFP; pub use self::tlsa::TLSA; pub use self::txt::TXT;
29.6875
98
0.729825
8ad16384c3eb2614cc0e0354c844c0de9f96f86a
1,631
// Definition for a binary tree node. // #[derive(Debug, PartialEq, Eq)] // pub struct TreeNode { // pub val: i32, // pub left: Option<Rc<RefCell<TreeNode>>>, // pub right: Option<Rc<RefCell<TreeNode>>>, // } // // impl TreeNode { // #[inline] // pub fn new(val: i32) -> Self { // TreeNode { // val, // left: None, // right: None // } // } // } use std::cell::RefCell; use std::rc::Rc; // Rc<RefCell>> is such a pain. impl Solution { pub fn increasing_bst( mut root: Option<Rc<RefCell<TreeNode>>>, ) -> Option<Rc<RefCell<TreeNode>>> { let mut stack: Vec<Option<_>> = Vec::new(); let mut inorder: Vec<Option<_>> = Vec::new(); // inorder traversal. while stack.len() != 0 || root.is_some() { match root.is_some() { true => { stack.push(root.clone()); // root => root.right root = root.expect("Some").borrow().left.clone() } false => { root = stack.pop().expect("Some."); // Make left node = None inorder.push(root.clone()); root.clone().expect("Some").borrow_mut().left.take(); // Make root = root.right root = root.expect("Some").borrow().right.clone() } } } // reattach nodes. for i in (1..inorder.len()).rev() { inorder[i - 1].as_ref().expect("Some.").borrow_mut().right = inorder[i].clone(); } inorder[0].clone() } }
29.654545
92
0.467811
2233c5f553c923c498b7fb3cb307c3943e96fc51
3,326
//! Metric protocol, aggregation and processing for Sentry. //! //! Metrics are high-volume values sent from Sentry clients, integrations, or extracted from errors //! and transactions, that can be aggregated and queried over large time windows. As opposed to rich //! errors and transactions, metrics carry relatively little context information in tags with low //! cardinality. //! //! # Protocol //! //! Clients submit metrics in a [text-based protocol](Metric) based on StatsD. A sample submission //! looks like this: //! //! ```text //! endpoint.response_time@ms:57|d|#route:user_index //! endpoint.hits:1|c|#route:user_index //! ``` //! //! The metric type is part of its signature just like the unit. Therefore, it is allowed to reuse a //! metric name for multiple metric types, which will result in multiple metrics being recorded. //! //! # Metric Envelopes //! //! To send one or more metrics to Relay, the raw protocol is enclosed in an envelope item of type //! `metrics`: //! //! ```text //! {} //! {"type": "metrics", "timestamp": 1615889440, ...} //! endpoint.response_time@ms:57|d|#route:user_index //! ... //! ``` //! //! The timestamp in the item header is used to send backdated metrics. If it is omitted, //! the `received` time of the envelope is assumed. //! //! # Aggregation //! //! Relay accumulates all metrics in [time buckets](Bucket) before sending them onwards. Aggregation //! is handled by the [`Aggregator`], which should be created once for the entire system. It flushes //! aggregates in regular intervals, either shortly after their original time window has passed or //! with a debounce delay for backdated submissions. //! //! **Warning**: With chained Relays submission delays accumulate. //! //! Aggregate buckets are encoded in JSON with the following schema: //! //! ```json //! [ //! { //! "name": "endpoint.response_time", //! "unit": "ms", //! "value": [36, 49, 57, 68], //! "type": "d", //! "timestamp": 1615889440, //! "tags": { //! 
"route": "user_index" //! } //! }, //! { //! "name": "endpoint.hits", //! "value": 4, //! "type": "c", //! "timestamp": 1615889440, //! "tags": { //! "route": "user_index" //! } //! } //! ] //! ``` //! //! # Ingestion //! //! Processing Relays write aggregate buckets into the ingestion Kafka stream. The schema is similar //! to the aggregation payload, with the addition of scoping information: //! //! ```json //! [ //! { //! "org_id": 1, //! "project_id": 42, //! "name": "endpoint.response_time", //! "unit": "ms", //! "value": [36, 49, 57, 68], //! "type": "d", //! "timestamp": 1615889440, //! "tags": { //! "route": "user_index" //! } //! }, //! { //! "org_id": 1, //! "project_id": 42, //! "name": "endpoint.hits", //! "value": 4, //! "type": "c", //! "timestamp": 1615889440, //! "tags": { //! "route": "user_index" //! } //! } //! ] //! ``` #![warn(missing_docs)] #![doc( html_logo_url = "https://raw.githubusercontent.com/getsentry/relay/master/artwork/relay-icon.png", html_favicon_url = "https://raw.githubusercontent.com/getsentry/relay/master/artwork/relay-icon.png" )] mod aggregation; mod protocol; mod statsd; pub use aggregation::*; pub use protocol::*;
28.921739
104
0.611846
fe28ffce337fa6542f7a4fea97227754b0981aad
5,759
//! Matrix room identifiers. use std::{convert::TryInto, fmt, num::NonZeroU8}; use crate::{EventId, MatrixToRef, ServerName}; /// A Matrix room ID. /// /// A `RoomId` is generated randomly or converted from a string slice, and can be converted back /// into a string as needed. /// /// ``` /// # use std::convert::TryFrom; /// # use ruma_identifiers::RoomId; /// assert_eq!( /// RoomId::try_from("!n8f893n9:example.com").unwrap().as_ref(), /// "!n8f893n9:example.com" /// ); /// ``` #[derive(Clone)] pub struct RoomId { pub(crate) full_id: Box<str>, pub(crate) colon_idx: NonZeroU8, } impl fmt::Debug for RoomId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(&self.full_id) } } impl RoomId { /// Attempts to generate a `RoomId` for the given origin server with a localpart consisting of /// 18 random ASCII characters. /// /// Fails if the given homeserver cannot be parsed as a valid host. #[cfg(feature = "rand")] pub fn new(server_name: &ServerName) -> Self { use crate::generate_localpart; let full_id = format!("!{}:{}", generate_localpart(18), server_name).into(); Self { full_id, colon_idx: NonZeroU8::new(19).unwrap() } } /// Returns the rooms's unique ID. pub fn localpart(&self) -> &str { &self.full_id[1..self.colon_idx.get() as usize] } /// Returns the server name of the room ID. pub fn server_name(&self) -> &ServerName { self.full_id[self.colon_idx.get() as usize + 1..].try_into().unwrap() } /// Create a `matrix.to` reference for this room ID. 
/// /// # Example /// /// ``` /// use ruma_identifiers::{room_id, server_name}; /// /// assert_eq!( /// room_id!("!somewhere:example.org") /// .matrix_to_url([&*server_name!("example.org"), &*server_name!("alt.example.org")]) /// .to_string(), /// "https://matrix.to/#/%21somewhere%3Aexample.org?via=example.org&via=alt.example.org" /// ); /// ``` pub fn matrix_to_url<'a>( &'a self, via: impl IntoIterator<Item = &'a ServerName>, ) -> MatrixToRef<'a> { MatrixToRef::new(&self.full_id, via.into_iter().collect()) } /// Create a `matrix.to` reference for an event scoped under this room ID. pub fn matrix_to_event_url<'a>(&'a self, ev_id: &'a EventId) -> MatrixToRef<'a> { MatrixToRef::event(&self.full_id, ev_id, Vec::new()) } } /// Attempts to create a new Matrix room ID from a string representation. /// /// The string must include the leading ! sigil, the localpart, a literal colon, and a server name. fn try_from<S>(room_id: S) -> Result<RoomId, crate::Error> where S: AsRef<str> + Into<Box<str>>, { let colon_idx = ruma_identifiers_validation::room_id::validate(room_id.as_ref())?; Ok(RoomId { full_id: room_id.into(), colon_idx }) } common_impls!(RoomId, try_from, "a Matrix room ID"); #[cfg(test)] mod tests { use std::convert::TryFrom; use super::RoomId; use crate::Error; #[test] fn valid_room_id() { assert_eq!( RoomId::try_from("!29fhd83h92h0:example.com") .expect("Failed to create RoomId.") .as_ref(), "!29fhd83h92h0:example.com" ); } #[test] fn empty_localpart() { assert_eq!( RoomId::try_from("!:example.com").expect("Failed to create RoomId.").as_ref(), "!:example.com" ); } #[cfg(feature = "rand")] #[test] fn generate_random_valid_room_id() { use crate::server_name; let room_id = RoomId::new(&server_name!("example.com")); let id_str = room_id.as_str(); assert!(id_str.starts_with('!')); assert_eq!(id_str.len(), 31); } #[cfg(feature = "serde")] #[test] fn serialize_valid_room_id() { assert_eq!( serde_json::to_string( 
&RoomId::try_from("!29fhd83h92h0:example.com").expect("Failed to create RoomId.") ) .expect("Failed to convert RoomId to JSON."), r#""!29fhd83h92h0:example.com""# ); } #[cfg(feature = "serde")] #[test] fn deserialize_valid_room_id() { assert_eq!( serde_json::from_str::<RoomId>(r#""!29fhd83h92h0:example.com""#) .expect("Failed to convert JSON to RoomId"), RoomId::try_from("!29fhd83h92h0:example.com").expect("Failed to create RoomId.") ); } #[test] fn valid_room_id_with_explicit_standard_port() { assert_eq!( RoomId::try_from("!29fhd83h92h0:example.com:443") .expect("Failed to create RoomId.") .as_ref(), "!29fhd83h92h0:example.com:443" ); } #[test] fn valid_room_id_with_non_standard_port() { assert_eq!( RoomId::try_from("!29fhd83h92h0:example.com:5000") .expect("Failed to create RoomId.") .as_ref(), "!29fhd83h92h0:example.com:5000" ); } #[test] fn missing_room_id_sigil() { assert_eq!(RoomId::try_from("carl:example.com").unwrap_err(), Error::MissingLeadingSigil); } #[test] fn missing_room_id_delimiter() { assert_eq!(RoomId::try_from("!29fhd83h92h0").unwrap_err(), Error::MissingDelimiter); } #[test] fn invalid_room_id_host() { assert_eq!(RoomId::try_from("!29fhd83h92h0:/").unwrap_err(), Error::InvalidServerName); } #[test] fn invalid_room_id_port() { assert_eq!( RoomId::try_from("!29fhd83h92h0:example.com:notaport").unwrap_err(), Error::InvalidServerName ); } }
29.085859
99
0.583261
72098e8a89a2b565b46d3f907a7c8593385cd5ea
14,618
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ pub type Chunk=i32; pub type DChunk=i64; pub const CHUNK:usize=32; pub const NOT_SPECIAL:usize =0; pub const PSEUDO_MERSENNE:usize=1; pub const MONTGOMERY_FRIENDLY:usize=2; pub const GENERALISED_MERSENNE:usize=3; pub const WEIERSTRASS:usize=0; pub const EDWARDS:usize=1; pub const MONTGOMERY:usize=2; pub const BN_CURVE: usize=0; pub const BLS_CURVE: usize=1; // Curve 25519 //pub const MODBITS: usize = 255; //pub const MOD8: usize = 5; //pub const BASEBITS: usize = 29; //pub const AES_S: usize=0; // GOLDILOCKS //pub const MODBITS usize=448; //pub const MOD8 usize=7; //pub const BASEBITS usize=29; //pub const AES_S: usize= 0; // BN254 Curve //pub const MODBITS:usize = 254; /* Number of bits in Modulus */ //pub const MOD8:usize = 3; /* Modulus mod 8 */ //pub const BASEBITS:usize = 29; //pub const AES_S:usize=0; // BLS383 Curve pub const MODBITS:usize = 383; /* Number of bits in Modulus */ pub const MOD8: usize = 3; /* Modulus mod 8 */ pub const BASEBITS:usize = 28; pub const AES_S: usize= 0; // BLS455 Curve //pub const MODBITS:usize = 455; /* Number of bits in Modulus */ //pub const MOD8: usize = 3; /* Modulus mod 8 */ //pub const BASEBITS:usize = 29; //pub const AES_S: usize= 128; //--------------- /* RSA/DH modulus length as 
multiple of BIGBITS */ pub const FFLEN:usize=4; pub const NLEN: usize = (1+((MODBITS-1)/BASEBITS)); pub const BIG_HEX_STRING_LEN:usize = NLEN * 16 + NLEN - 1; pub const DNLEN: usize = 2*NLEN; pub const BMASK: Chunk= ((1<<BASEBITS)-1); pub const MODBYTES: usize = 1+(MODBITS-1)/8; pub const NEXCESS:isize = (1<<((CHUNK)-BASEBITS-1)); pub const FEXCESS:Chunk = ((1 as Chunk)<<(BASEBITS*(NLEN)-MODBITS)); pub const OMASK:Chunk = (-1)<<(MODBITS%BASEBITS); pub const TBITS:usize=MODBITS%BASEBITS; // Number of active bits in top word pub const TMASK:Chunk=(1<<TBITS)-1; pub const BIGBITS:usize = (MODBYTES*8); pub const HBITS: usize=(BASEBITS/2); pub const HMASK: Chunk= ((1<<HBITS)-1); /* Finite field support - for RSA, DH etc. */ pub const FF_BITS:usize=(BIGBITS*FFLEN); /* Finite Field Size in bits - must be 256.2^n */ pub const HFLEN:usize=(FFLEN/2); /* Useful for half-size RSA private key operations */ pub const P_MBITS:usize=(MODBYTES as usize)*8; pub const P_MB: usize=(P_MBITS%BASEBITS); pub const P_OMASK:Chunk=((-1)<<(P_MBITS%BASEBITS)); pub const P_FEXCESS: Chunk=(1<<(BASEBITS*NLEN-P_MBITS)); pub const P_TBITS: usize=(P_MBITS%BASEBITS); // Curve25519 Modulus //pub const MODTYPE:usize=PSEUDO_MERSENNE; //pub const MODULUS:[Chunk;NLEN]=[0x1FFFFFED,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x7FFFFF]; //pub const MCONST:Chunk=19; //GOLDILOCKS //pub const MODTYPE: usize=GENERALISED_MERSENNE; //pub const MODULUS:[Chunk;NLEN]=[0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FDFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFF]; //pub const MCONST: Chunk=0x1; // BN254 Curve Modulus //pub const MODTYPE:usize = NOT_SPECIAL; //pub const MODULUS:[Chunk;NLEN] = [0x13,0x18000000,0x4E9,0x2000000,0x8612,0x6C00000,0x6E8D1,0x10480000,0x252364]; //pub const MCONST:Chunk=0x179435E5; // BLS383 Curve pub const MODTYPE:usize = NOT_SPECIAL; pub const MODULUS:[Chunk;NLEN] = 
[0xAAD556B,0xACAAB52,0x5F75D7A,0x1BB0147,0xD5D7520,0xCF73083,0xF99EB16,0x531820,0xA68EA32,0x2C01355,0x552A785,0x5C6105C,0x80A9F7,0x7AC52]; pub const MCONST:Chunk=0x123D0BD; // BLS455 Curve //pub const MODTYPE:usize = NOT_SPECIAL; //pub const MODULUS:[Chunk;NLEN] = [0x2AB,0x1500000C,0xAAA55AA,0xB12AAD6,0x6D1BA6C,0xCCA5674,0x12E2CF6E,0xA9F9662,0x34BD939,0x12D8EAB1,0xFD9978E,0x9240600,0xE0F95B,0xAAB9550,0x55555E5,0xAAAAB]; //pub const MCONST:Chunk=0x1F4017FD; // Ed25519 Curve //pub const CURVETYPE:usize=EDWARDS; //pub const CURVE_A:isize = -1; //pub const CURVE_B:[Chunk;NLEN]=[0x135978A3,0xF5A6E50,0x10762ADD,0x149A82,0x1E898007,0x3CBBBC,0x19CE331D,0x1DC56DFF,0x52036C]; //pub const CURVE_ORDER:[Chunk;NLEN]=[0x1CF5D3ED,0x9318D2,0x1DE73596,0x1DF3BD45,0x14D,0x0,0x0,0x0,0x100000]; //pub const CURVE_GX:[Chunk;NLEN]=[0xF25D51A,0xAB16B04,0x969ECB2,0x198EC12A,0xDC5C692,0x1118FEEB,0xFFB0293,0x1A79ADCA,0x216936]; //pub const CURVE_GY:[Chunk;NLEN]=[0x6666658,0x13333333,0x19999999,0xCCCCCCC,0x6666666,0x13333333,0x19999999,0xCCCCCCC,0x666666]; // GOLDILOCKS //pub const CURVETYPE: usize=EDWARDS; //pub const CURVE_A: isize=1; //pub const CURVE_ORDER:[Chunk;NLEN]=[0xB5844F3,0x1BC61495,0x1163D548,0x1984E51B,0x3690216,0xDA4D76B,0xFA7113B,0x1FEF9944,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x7FF]; //pub const CURVE_B:[Chunk;NLEN]=[0x1FFF6756,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FDFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFFFFFF,0x1FFF]; //pub const CURVE_GX:[Chunk;NLEN]=[0x15555555,0xAAAAAAA,0x15555555,0xAAAAAAA,0x15555555,0xAAAAAAA,0x15555555,0x152AAAAA,0xAAAAAAA,0x15555555,0xAAAAAAA,0x15555555,0xAAAAAAA,0x15555555,0xAAAAAAA,0x1555]; //pub const CURVE_GY::[Chunk;NLEN]=[0xA9386ED,0x1757DE6F,0x13681AF6,0x19657DA3,0x3098BBB,0x12C19D15,0x12E03595,0xE515B18,0x17B7E36D,0x1AC426E,0xDBB5E8,0x10D8560,0x159D6205,0xB8246D9,0x17A58D2B,0x15C0]; // BN254 Curve /* pub const CURVETYPE:usize = 
WEIERSTRASS; pub const CURVE_PAIRING_TYPE:usize = BN_CURVE; pub const CURVE_A:isize = 0; pub const CURVE_B:[Chunk;NLEN]=[0x2,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]; pub const CURVE_ORDER:[Chunk;NLEN]=[0xD,0x8000000,0x428,0x1F000000,0x7FF9,0x6C00000,0x6E8D1,0x10480000,0x252364]; pub const CURVE_GX:[Chunk;NLEN]=[0x12,0x18000000,0x4E9,0x2000000,0x8612,0x6C00000,0x6E8D1,0x10480000,0x252364]; pub const CURVE_GY:[Chunk;NLEN]=[0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]; pub const CURVE_FRA:[Chunk;NLEN]=[0xF2A6DE9,0xBEF3603,0xFDDF0B8,0x12E9249A,0x953F850,0xDA85423,0x1232D926,0x32425CF,0x1B3776]; pub const CURVE_FRB:[Chunk;NLEN]=[0x10D5922A,0xC10C9FC,0x10221431,0xF16DB65,0x16AC8DC1,0x1917ABDC,0xDD40FAA,0xD23DA30,0x9EBEE]; pub const CURVE_PXA:[Chunk;NLEN]=[0x803FB2B,0xF721126,0x62FC364,0x9177691,0x1EDB6A46,0x63F4630,0x18BFAE36,0x176A33D6,0x61A10]; pub const CURVE_PXB:[Chunk;NLEN]=[0x7D54CF3,0xC61A60F,0xDE12DC3,0x1AE8D75C,0xAA5B1F4,0x13C62CC1,0xCCC42A,0x1F374E6F,0x516AA]; pub const CURVE_PYA:[Chunk;NLEN]=[0x11CD2B9A,0xF8703C4,0xF826F46,0x1A15CD7B,0x822329B,0x104B34C6,0xD0E6A43,0x140D75F2,0x21897]; pub const CURVE_PYB:[Chunk;NLEN]=[0xB3ACE9B,0x1168D763,0xE7255E2,0xDFFAE,0x18D37B09,0x22B7AF9,0x149A3DB5,0x1CF9162,0xEBB2B]; pub const CURVE_BNX:[Chunk;NLEN]=[0x1,0x4000000,0x10,0x0,0x0,0x0,0x0,0x0,0x0]; pub const CURVE_COF:[Chunk;NLEN]=[0x1,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]; pub const CURVE_CRU:[Chunk;NLEN]=[0x7,0xC000000,0x1B3,0x12000000,0x2490,0x11200000,0x126CD,0x0,0x0]; pub const CURVE_W:[[Chunk;NLEN];2]=[[0x3,0x0,0x81,0x3000000,0x618,0x0,0x0,0x0,0x0],[0x1,0x8000000,0x20,0x0,0x0,0x0,0x0,0x0,0x0]]; pub const CURVE_SB:[[[Chunk;NLEN];2];2]=[[[0x4,0x8000000,0xA1,0x3000000,0x618,0x0,0x0,0x0,0x0],[0x1,0x8000000,0x20,0x0,0x0,0x0,0x0,0x0,0x0]],[[0x1,0x8000000,0x20,0x0,0x0,0x0,0x0,0x0,0x0],[0xA,0x8000000,0x3A7,0x1C000000,0x79E1,0x6C00000,0x6E8D1,0x10480000,0x252364]]]; pub const 
CURVE_WB:[[Chunk;NLEN];4]=[[0x0,0x4000000,0x10,0x1000000,0x208,0x0,0x0,0x0,0x0],[0x5,0x14000000,0x152,0xE000000,0x1C70,0xC00000,0xC489,0x0,0x0],[0x3,0xC000000,0xB1,0x7000000,0xE38,0x10600000,0x6244,0x0,0x0],[0x1,0xC000000,0x30,0x1000000,0x208,0x0,0x0,0x0,0x0]]; pub const CURVE_BB:[[[Chunk;NLEN];4];4]=[[[0xD,0x4000000,0x418,0x1F000000,0x7FF9,0x6C00000,0x6E8D1,0x10480000,0x252364],[0xC,0x4000000,0x418,0x1F000000,0x7FF9,0x6C00000,0x6E8D1,0x10480000,0x252364],[0xC,0x4000000,0x418,0x1F000000,0x7FF9,0x6C00000,0x6E8D1,0x10480000,0x252364],[0x2,0x8000000,0x20,0x0,0x0,0x0,0x0,0x0,0x0]],[[0x1,0x8000000,0x20,0x0,0x0,0x0,0x0,0x0,0x0],[0xC,0x4000000,0x418,0x1F000000,0x7FF9,0x6C00000,0x6E8D1,0x10480000,0x252364],[0xD,0x4000000,0x418,0x1F000000,0x7FF9,0x6C00000,0x6E8D1,0x10480000,0x252364],[0xC,0x4000000,0x418,0x1F000000,0x7FF9,0x6C00000,0x6E8D1,0x10480000,0x252364]],[[0x2,0x8000000,0x20,0x0,0x0,0x0,0x0,0x0,0x0],[0x1,0x8000000,0x20,0x0,0x0,0x0,0x0,0x0,0x0],[0x1,0x8000000,0x20,0x0,0x0,0x0,0x0,0x0,0x0],[0x1,0x8000000,0x20,0x0,0x0,0x0,0x0,0x0,0x0]],[[0x2,0x4000000,0x10,0x0,0x0,0x0,0x0,0x0,0x0],[0x2,0x10000000,0x40,0x0,0x0,0x0,0x0,0x0,0x0],[0xA,0x0,0x408,0x1F000000,0x7FF9,0x6C00000,0x6E8D1,0x10480000,0x252364],[0x2,0x4000000,0x10,0x0,0x0,0x0,0x0,0x0,0x0]]]; pub const USE_GLV:bool = true; pub const USE_GS_G2:bool = true; pub const USE_GS_GT:bool = true; pub const GT_STRONG:bool = false; */ // BLS383 Curve pub const CURVETYPE:usize = WEIERSTRASS; pub const CURVE_PAIRING_TYPE:usize = BLS_CURVE; pub const CURVE_A:isize = 0; pub const CURVE_ORDER:[Chunk;NLEN]=[0xFFF001,0xFFF8000,0xFE7800,0xBFDE007,0xC5EDF1C,0x3000049,0x910007A,0xC40007F,0x641004C,0x14,0x0,0x0,0x0,0x0]; pub const CURVE_B:[Chunk;NLEN]=[0x9,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]; pub const CURVE_COF:[Chunk;NLEN]=[0x52B,0x2A00,0xAAB2CA0,0x5560AAA,0x6055,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]; pub const 
CURVE_GX:[Chunk;NLEN]=[0xD10786B,0xD59B348,0x3F54AD0,0x3477C0E,0x4578B9B,0xBF25B73,0x7BB6F65,0x4F6AC00,0xFF57E9C,0xEFD5830,0xFB6EC02,0xADB9F88,0xEE4BC98,0xB08C]; pub const CURVE_GY:[Chunk;NLEN]=[0xD145DDB,0x5DA023,0x5FEF7CC,0x13F518C,0x2B2A66F,0x56EC346,0xC7A925F,0x96F3019,0x981223E,0x9061047,0x8F5BE59,0x4810AD,0x9337671,0x1F390]; pub const CURVE_BNX:[Chunk;NLEN]=[0x40,0x100,0x110,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]; pub const CURVE_CRU:[Chunk;NLEN]=[0xDA155A9,0xA3AAC4E,0x61E5E3D,0xDF2FE87,0xE632625,0xBCDFAAD,0xD3035A6,0x5123128,0xBEAD683,0xDBF3A2B,0x424190,0x5C5FAB2,0x80A9F7,0x7AC52]; pub const CURVE_FRA:[Chunk;NLEN]=[0x2B4508B,0x2BA59A9,0x6EEF343,0x63DB7A0,0x1DFBC74,0x40341CB,0x32D55D3,0x1639E9D,0x5CC36D4,0xB19B3F0,0xD86AB98,0xF323EE4,0xB198672,0x5A5F]; pub const CURVE_FRB:[Chunk;NLEN]=[0x7F904E0,0x81051A9,0xF086A37,0xB7D49A6,0xB7DB8AB,0x8F3EEB8,0xC6C9543,0xEEF7983,0x49CB35D,0x7A65F65,0x7CBFBEC,0x693D177,0x5672384,0x751F2]; pub const CURVE_PXA:[Chunk;NLEN]=[0xBAC9472,0x6059885,0xE2DC36D,0x7C4D31D,0x8C88A7,0xBDC90C3,0x1C688FC,0x29F0197,0xC43F167,0x3693539,0x61EB8BF,0xD81E5A5,0x22B56BF,0x4D507]; pub const CURVE_PXB:[Chunk;NLEN]=[0x272AB23,0x9B4BD7A,0xF44DCE8,0x7AF19D4,0x3206A34,0x3F6F7B9,0x2A819FB,0x571DD3E,0x635D7EE,0x3A2BA3B,0xC1A126,0xAC28C78,0x17C3E5B,0xEE36]; pub const CURVE_PYA:[Chunk;NLEN]=[0x77BD4FD,0x81D2309,0xDFDFC6,0xB66072,0xC89A0C,0x41FC959,0x878287A,0x2E1FBCF,0x14EEE65,0x11C230,0x6BB325E,0x2887881,0x859A05C,0x8F40]; pub const CURVE_PYB:[Chunk;NLEN]=[0x52C4CE6,0xA5E20A2,0xAFF40C8,0x5907A74,0x2448EF3,0x41760A4,0xFDA199,0xFFEF82B,0x8D4EA49,0xA0F29A1,0x6E4997B,0xAC7F7B8,0xBA88C12,0x1DCAB]; pub const CURVE_W:[[Chunk;0];2]=[[],[]]; pub const CURVE_SB:[[[Chunk;0];2];2]=[[[],[]],[[],[]]]; pub const CURVE_WB:[[Chunk;0];4]=[[],[],[],[]]; pub const CURVE_BB:[[[Chunk;0];4];4]=[[[],[],[],[]],[[],[],[],[]],[[],[],[],[]],[[],[],[],[]]]; pub const USE_GLV:bool = true; pub const USE_GS_G2:bool = true; pub const USE_GS_GT:bool = true; pub const GT_STRONG:bool 
= false; // BLS455 Curve /* pub const CURVETYPE:usize = WEIERSTRASS; pub const CURVE_PAIRING_TYPE:usize = BLS_CURVE; pub const CURVE_A:isize = 0; pub const CURVE_ORDER:[Chunk;NLEN]=[0x1FC00001,0x3FFF,0x10000070,0x1400000,0x1D100,0xF0003A8,0x13C0009,0x1E200,0x180002E0,0x400001,0x4000,0x0,0x0,0x0,0x0,0x0]; pub const CURVE_B:[Chunk;NLEN]=[0xA,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]; pub const CURVE_COF:[Chunk;NLEN]=[0xABFFAAB,0x14AABFFD,0xD52AADA,0x1562AAAB,0x15556AAA,0x2A,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]; pub const CURVE_GX:[Chunk;NLEN]=[0x1DFCEDD1,0x16A62EEF,0xE80D3ED,0xB8DE876,0x179C7253,0x182DAB52,0x46CC85F,0x1E571D9C,0x1E8392B1,0x1A155867,0x19E6527C,0x23DC64E,0xABEDA59,0xF20662B,0x17CECC01,0x310A]; pub const CURVE_GY:[Chunk;NLEN]=[0x6619B9B,0x27EBCD,0x1BE80A19,0x13B014BA,0x191A4936,0x13911916,0x107A5A3B,0x1DCB0863,0x1F5FB1,0xDE44A9C,0x18E23B2A,0x1FA81FD3,0xB0D6DFA,0xC2FE1EF,0xDDFA7E2,0x3351B]; pub const CURVE_BNX:[Chunk;NLEN]=[0x800,0x1000004,0x40000,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]; pub const CURVE_CRU:[Chunk;NLEN]=[0xAA9,0x9017FE0,0x128DD8AA,0x1F4A321E,0x148FEC90,0x195CDEEA,0xE961AA2,0x168D3B66,0xAFD99D7,0xB989AA6,0x1F9A5724,0x91A05FF,0xE0EF5B,0xAAB9548,0x55555E5,0xAAAAB]; pub const CURVE_FRA:[Chunk;NLEN]=[0xA2EBF21,0x4E67DEE,0xCDE6590,0xE5EA94E,0x15454985,0x140CFC43,0x7E14D1C,0x1778395A,0x1944F022,0x166BEA1A,0xC3DEEA0,0x9F031A0,0x16A7459,0x1F1D6E00,0x125CFB72,0x72FD4]; pub const CURVE_FRB:[Chunk;NLEN]=[0x15D1438A,0x1019821D,0x1DCBF01A,0x1CB40187,0x118C70E6,0x18BD5A30,0xB018251,0x13275D08,0xA06E916,0x1C6D0096,0x39BA8ED,0x1F33D460,0x1F768501,0xB8E274F,0x12F85A72,0x37AD6]; pub const CURVE_PXA:[Chunk;NLEN]=[0x10C1F542,0x23AF907,0x159F840,0xBAC0E1F,0x133D7766,0x1C034C5D,0x14C5C875,0x1ED0BDA2,0x16A49C71,0x1E9FF62D,0x14F3ACC0,0x1E0C9FEA,0xC4638DE,0x74D18DA,0xBEA0030,0x5D962]; pub const 
CURVE_PXB:[Chunk;NLEN]=[0x749F03D,0xC843773,0xB17BCBA,0x1134AB4B,0x8EA016E,0x1A0D766C,0x58F819E,0x48A1FCC,0xE296F62,0x83370E4,0xC9BA1D5,0x1E43290E,0xEE59A80,0x1FDD85F5,0x1F3819B6,0x1D9F]; pub const CURVE_PYA:[Chunk;NLEN]=[0x181A77F4,0x191AD22,0x1E9F842A,0x1E1E6CF6,0xD55B9D9,0x1D062533,0x15BB1323,0x7ECBC1,0x1A9EC2EF,0x1EE14CE0,0x1E96B271,0xA794439,0x1C544324,0xE6AD5DC,0x16429B0F,0x448E3]; pub const CURVE_PYB:[Chunk;NLEN]=[0x1E1FDBA2,0x1A09DB6C,0xF680D5B,0xFCC6122,0xC488E2A,0x1E489ECD,0x1005617E,0x1CF9EC36,0x1C89ED72,0x16C00D90,0x1563E595,0x1243DDC0,0x8698F9E,0x1BD81E7E,0xF2A0F4A,0x66A0]; pub const CURVE_W:[[Chunk;0];2]=[[],[]]; pub const CURVE_SB:[[[Chunk;0];2];2]=[[[],[]],[[],[]]]; pub const CURVE_WB:[[Chunk;0];4]=[[],[],[],[]]; pub const CURVE_BB:[[[Chunk;0];4];4]=[[[],[],[],[]],[[],[],[],[]],[[],[],[],[]],[[],[],[],[]]]; pub const USE_GLV:bool = true; pub const USE_GS_G2:bool = true; pub const USE_GS_GT:bool = true; pub const GT_STRONG:bool = false; */
62.470085
996
0.785812
332b4186f5d1be7790d145437bb6a7d860ce3e22
32,572
#![doc = "generated by AutoRust 0.1.0"] #![allow(non_camel_case_types)] #![allow(unused_imports)] use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ServiceResource { #[serde(flatten)] pub tracked_resource: TrackedResource, #[serde(skip_serializing_if = "Option::is_none")] pub properties: Option<ClusterResourceProperties>, #[serde(skip_serializing_if = "Option::is_none")] pub sku: Option<Sku>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TrackedResource { #[serde(flatten)] pub resource: Resource, #[serde(skip_serializing_if = "Option::is_none")] pub location: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Resource { #[serde(skip_serializing)] pub id: Option<String>, #[serde(skip_serializing)] pub name: Option<String>, #[serde(rename = "type", skip_serializing)] pub type_: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ClusterResourceProperties { #[serde(rename = "provisioningState", skip_serializing)] pub provisioning_state: Option<cluster_resource_properties::ProvisioningState>, #[serde(rename = "configServerProperties", skip_serializing_if = "Option::is_none")] pub config_server_properties: Option<ConfigServerProperties>, #[serde(skip_serializing_if = "Option::is_none")] pub trace: Option<TraceProperties>, #[serde(rename = "networkProfile", skip_serializing_if = "Option::is_none")] pub network_profile: Option<NetworkProfile>, #[serde(skip_serializing)] pub version: Option<i32>, #[serde(rename = "serviceId", skip_serializing)] pub service_id: Option<String>, } pub mod cluster_resource_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ProvisioningState { Creating, Updating, Deleting, Deleted, Succeeded, Failed, Moving, Moved, MoveFailed, } } #[derive(Clone, Debug, 
PartialEq, Serialize, Deserialize)] pub struct ManagedIdentityProperties { #[serde(rename = "type", skip_serializing_if = "Option::is_none")] pub type_: Option<managed_identity_properties::Type>, #[serde(rename = "principalId", skip_serializing_if = "Option::is_none")] pub principal_id: Option<String>, #[serde(rename = "tenantId", skip_serializing_if = "Option::is_none")] pub tenant_id: Option<String>, } pub mod managed_identity_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Type { None, SystemAssigned, UserAssigned, #[serde(rename = "SystemAssigned,UserAssigned")] SystemAssignedUserAssigned, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Sku { #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub tier: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub capacity: Option<i32>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ConfigServerProperties { #[serde(skip_serializing)] pub state: Option<config_server_properties::State>, #[serde(skip_serializing_if = "Option::is_none")] pub error: Option<Error>, #[serde(rename = "configServer", skip_serializing_if = "Option::is_none")] pub config_server: Option<ConfigServerSettings>, } pub mod config_server_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum State { NotAvailable, Deleted, Failed, Succeeded, Updating, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TraceProperties { #[serde(skip_serializing)] pub state: Option<trace_properties::State>, #[serde(skip_serializing_if = "Option::is_none")] pub error: Option<Error>, #[serde(skip_serializing_if = "Option::is_none")] pub enabled: Option<bool>, #[serde(rename = "appInsightInstrumentationKey", skip_serializing_if = "Option::is_none")] pub app_insight_instrumentation_key: Option<String>, } pub mod trace_properties 
{ use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum State { NotAvailable, Failed, Succeeded, Updating, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct NetworkProfile { #[serde(rename = "serviceRuntimeSubnetId", skip_serializing_if = "Option::is_none")] pub service_runtime_subnet_id: Option<String>, #[serde(rename = "appSubnetId", skip_serializing_if = "Option::is_none")] pub app_subnet_id: Option<String>, #[serde(rename = "serviceCidr", skip_serializing_if = "Option::is_none")] pub service_cidr: Option<String>, #[serde(rename = "serviceRuntimeNetworkResourceGroup", skip_serializing_if = "Option::is_none")] pub service_runtime_network_resource_group: Option<String>, #[serde(rename = "appNetworkResourceGroup", skip_serializing_if = "Option::is_none")] pub app_network_resource_group: Option<String>, #[serde(rename = "outboundIPs", skip_serializing)] pub outbound_i_ps: Option<network_profile::OutboundIPs>, #[serde(rename = "requiredTraffics", skip_serializing)] pub required_traffics: Vec<RequiredTraffic>, } pub mod network_profile { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OutboundIPs { #[serde(rename = "publicIPs", skip_serializing)] pub public_i_ps: Vec<String>, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RequiredTraffic { #[serde(skip_serializing)] pub protocol: Option<String>, #[serde(skip_serializing)] pub port: Option<i32>, #[serde(skip_serializing)] pub ips: Vec<String>, #[serde(skip_serializing)] pub fqdns: Vec<String>, #[serde(skip_serializing)] pub direction: Option<required_traffic::Direction>, } pub mod required_traffic { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Direction { Inbound, Outbound, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Error { #[serde(skip_serializing_if = "Option::is_none")] pub code: Option<String>, #[serde(skip_serializing_if = 
"Option::is_none")] pub message: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ConfigServerSettings { #[serde(rename = "gitProperty", skip_serializing_if = "Option::is_none")] pub git_property: Option<ConfigServerGitProperty>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ConfigServerGitProperty { #[serde(skip_serializing_if = "Vec::is_empty")] pub repositories: Vec<GitPatternRepository>, pub uri: String, #[serde(skip_serializing_if = "Option::is_none")] pub label: Option<String>, #[serde(rename = "searchPaths", skip_serializing_if = "Vec::is_empty")] pub search_paths: Vec<String>, #[serde(skip_serializing_if = "Option::is_none")] pub username: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub password: Option<String>, #[serde(rename = "hostKey", skip_serializing_if = "Option::is_none")] pub host_key: Option<String>, #[serde(rename = "hostKeyAlgorithm", skip_serializing_if = "Option::is_none")] pub host_key_algorithm: Option<String>, #[serde(rename = "privateKey", skip_serializing_if = "Option::is_none")] pub private_key: Option<String>, #[serde(rename = "strictHostKeyChecking", skip_serializing_if = "Option::is_none")] pub strict_host_key_checking: Option<bool>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GitPatternRepository { pub name: String, #[serde(skip_serializing_if = "Vec::is_empty")] pub pattern: Vec<String>, pub uri: String, #[serde(skip_serializing_if = "Option::is_none")] pub label: Option<String>, #[serde(rename = "searchPaths", skip_serializing_if = "Vec::is_empty")] pub search_paths: Vec<String>, #[serde(skip_serializing_if = "Option::is_none")] pub username: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub password: Option<String>, #[serde(rename = "hostKey", skip_serializing_if = "Option::is_none")] pub host_key: Option<String>, #[serde(rename = "hostKeyAlgorithm", skip_serializing_if = "Option::is_none")] pub 
host_key_algorithm: Option<String>, #[serde(rename = "privateKey", skip_serializing_if = "Option::is_none")] pub private_key: Option<String>, #[serde(rename = "strictHostKeyChecking", skip_serializing_if = "Option::is_none")] pub strict_host_key_checking: Option<bool>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TestKeys { #[serde(rename = "primaryKey", skip_serializing_if = "Option::is_none")] pub primary_key: Option<String>, #[serde(rename = "secondaryKey", skip_serializing_if = "Option::is_none")] pub secondary_key: Option<String>, #[serde(rename = "primaryTestEndpoint", skip_serializing_if = "Option::is_none")] pub primary_test_endpoint: Option<String>, #[serde(rename = "secondaryTestEndpoint", skip_serializing_if = "Option::is_none")] pub secondary_test_endpoint: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub enabled: Option<bool>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RegenerateTestKeyRequestPayload { #[serde(rename = "keyType")] pub key_type: regenerate_test_key_request_payload::KeyType, } pub mod regenerate_test_key_request_payload { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum KeyType { Primary, Secondary, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AppResource { #[serde(flatten)] pub proxy_resource: ProxyResource, #[serde(skip_serializing_if = "Option::is_none")] pub properties: Option<AppResourceProperties>, #[serde(skip_serializing_if = "Option::is_none")] pub identity: Option<ManagedIdentityProperties>, #[serde(skip_serializing_if = "Option::is_none")] pub location: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ProxyResource { #[serde(flatten)] pub resource: Resource, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AppResourceProperties { #[serde(skip_serializing_if = "Option::is_none")] pub public: Option<bool>, 
#[serde(skip_serializing)] pub url: Option<String>, #[serde(rename = "provisioningState", skip_serializing)] pub provisioning_state: Option<app_resource_properties::ProvisioningState>, #[serde(rename = "activeDeploymentName", skip_serializing_if = "Option::is_none")] pub active_deployment_name: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub fqdn: Option<String>, #[serde(rename = "httpsOnly", skip_serializing_if = "Option::is_none")] pub https_only: Option<bool>, #[serde(rename = "createdTime", skip_serializing)] pub created_time: Option<String>, #[serde(rename = "temporaryDisk", skip_serializing_if = "Option::is_none")] pub temporary_disk: Option<TemporaryDisk>, #[serde(rename = "persistentDisk", skip_serializing_if = "Option::is_none")] pub persistent_disk: Option<PersistentDisk>, } pub mod app_resource_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ProvisioningState { Succeeded, Failed, Creating, Updating, Deleting, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TemporaryDisk { #[serde(rename = "sizeInGB", skip_serializing_if = "Option::is_none")] pub size_in_gb: Option<i32>, #[serde(rename = "mountPath", skip_serializing_if = "Option::is_none")] pub mount_path: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PersistentDisk { #[serde(rename = "sizeInGB", skip_serializing_if = "Option::is_none")] pub size_in_gb: Option<i32>, #[serde(rename = "usedInGB", skip_serializing)] pub used_in_gb: Option<i32>, #[serde(rename = "mountPath", skip_serializing_if = "Option::is_none")] pub mount_path: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AppResourceCollection { #[serde(skip_serializing_if = "Vec::is_empty")] pub value: Vec<AppResource>, #[serde(rename = "nextLink", skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, 
Deserialize)] pub struct ResourceUploadDefinition { #[serde(rename = "relativePath", skip_serializing_if = "Option::is_none")] pub relative_path: Option<String>, #[serde(rename = "uploadUrl", skip_serializing_if = "Option::is_none")] pub upload_url: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BindingResource { #[serde(flatten)] pub proxy_resource: ProxyResource, #[serde(skip_serializing_if = "Option::is_none")] pub properties: Option<BindingResourceProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BindingResourceProperties { #[serde(rename = "resourceName", skip_serializing)] pub resource_name: Option<String>, #[serde(rename = "resourceType", skip_serializing)] pub resource_type: Option<String>, #[serde(rename = "resourceId", skip_serializing_if = "Option::is_none")] pub resource_id: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub key: Option<String>, #[serde(rename = "bindingParameters", skip_serializing_if = "Option::is_none")] pub binding_parameters: Option<serde_json::Value>, #[serde(rename = "generatedProperties", skip_serializing)] pub generated_properties: Option<String>, #[serde(rename = "createdAt", skip_serializing)] pub created_at: Option<String>, #[serde(rename = "updatedAt", skip_serializing)] pub updated_at: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BindingResourceCollection { #[serde(skip_serializing_if = "Vec::is_empty")] pub value: Vec<BindingResource>, #[serde(rename = "nextLink", skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CertificateResource { #[serde(flatten)] pub proxy_resource: ProxyResource, #[serde(skip_serializing_if = "Option::is_none")] pub properties: Option<CertificateProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CertificateProperties { 
#[serde(skip_serializing)] pub thumbprint: Option<String>, #[serde(rename = "vaultUri")] pub vault_uri: String, #[serde(rename = "keyVaultCertName")] pub key_vault_cert_name: String, #[serde(rename = "certVersion", skip_serializing_if = "Option::is_none")] pub cert_version: Option<String>, #[serde(skip_serializing)] pub issuer: Option<String>, #[serde(rename = "issuedDate", skip_serializing)] pub issued_date: Option<String>, #[serde(rename = "expirationDate", skip_serializing)] pub expiration_date: Option<String>, #[serde(rename = "activateDate", skip_serializing)] pub activate_date: Option<String>, #[serde(rename = "subjectName", skip_serializing)] pub subject_name: Option<String>, #[serde(rename = "dnsNames", skip_serializing)] pub dns_names: Vec<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CertificateResourceCollection { #[serde(skip_serializing_if = "Vec::is_empty")] pub value: Vec<CertificateResource>, #[serde(rename = "nextLink", skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct NameAvailabilityParameters { #[serde(rename = "type")] pub type_: String, pub name: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct NameAvailability { #[serde(rename = "nameAvailable", skip_serializing_if = "Option::is_none")] pub name_available: Option<bool>, #[serde(skip_serializing_if = "Option::is_none")] pub reason: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub message: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CustomDomainResource { #[serde(flatten)] pub proxy_resource: ProxyResource, #[serde(skip_serializing_if = "Option::is_none")] pub properties: Option<CustomDomainProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CustomDomainProperties { #[serde(skip_serializing_if = "Option::is_none")] pub thumbprint: 
Option<String>, #[serde(rename = "appName", skip_serializing)] pub app_name: Option<String>, #[serde(rename = "certName", skip_serializing_if = "Option::is_none")] pub cert_name: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CustomDomainResourceCollection { #[serde(skip_serializing_if = "Vec::is_empty")] pub value: Vec<CustomDomainResource>, #[serde(rename = "nextLink", skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CustomDomainValidatePayload { pub name: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CustomDomainValidateResult { #[serde(rename = "isValid", skip_serializing_if = "Option::is_none")] pub is_valid: Option<bool>, #[serde(skip_serializing_if = "Option::is_none")] pub message: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DeploymentResource { #[serde(flatten)] pub proxy_resource: ProxyResource, #[serde(skip_serializing_if = "Option::is_none")] pub properties: Option<DeploymentResourceProperties>, #[serde(skip_serializing_if = "Option::is_none")] pub sku: Option<Sku>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DeploymentResourceProperties { #[serde(skip_serializing_if = "Option::is_none")] pub source: Option<UserSourceInfo>, #[serde(rename = "appName", skip_serializing)] pub app_name: Option<String>, #[serde(rename = "deploymentSettings", skip_serializing_if = "Option::is_none")] pub deployment_settings: Option<DeploymentSettings>, #[serde(rename = "provisioningState", skip_serializing)] pub provisioning_state: Option<deployment_resource_properties::ProvisioningState>, #[serde(skip_serializing)] pub status: Option<deployment_resource_properties::Status>, #[serde(skip_serializing)] pub active: Option<bool>, #[serde(rename = "createdTime", skip_serializing)] pub created_time: Option<String>, #[serde(skip_serializing)] 
pub instances: Vec<DeploymentInstance>, } pub mod deployment_resource_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ProvisioningState { Creating, Updating, Succeeded, Failed, Deleting, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Status { Unknown, Stopped, Running, Failed, Allocating, Upgrading, Compiling, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct UserSourceInfo { #[serde(rename = "type", skip_serializing_if = "Option::is_none")] pub type_: Option<user_source_info::Type>, #[serde(rename = "relativePath", skip_serializing_if = "Option::is_none")] pub relative_path: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub version: Option<String>, #[serde(rename = "artifactSelector", skip_serializing_if = "Option::is_none")] pub artifact_selector: Option<String>, } pub mod user_source_info { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Type { Jar, NetCoreZip, Source, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DeploymentSettings { #[serde(skip_serializing_if = "Option::is_none")] pub cpu: Option<i32>, #[serde(rename = "memoryInGB", skip_serializing_if = "Option::is_none")] pub memory_in_gb: Option<i32>, #[serde(rename = "jvmOptions", skip_serializing_if = "Option::is_none")] pub jvm_options: Option<String>, #[serde(rename = "netCoreMainEntryPath", skip_serializing_if = "Option::is_none")] pub net_core_main_entry_path: Option<String>, #[serde(rename = "instanceCount", skip_serializing_if = "Option::is_none")] pub instance_count: Option<i32>, #[serde(rename = "environmentVariables", skip_serializing_if = "Option::is_none")] pub environment_variables: Option<serde_json::Value>, #[serde(rename = "runtimeVersion", skip_serializing_if = "Option::is_none")] pub runtime_version: Option<deployment_settings::RuntimeVersion>, } pub mod deployment_settings { use super::*; #[derive(Clone, Debug, 
PartialEq, Serialize, Deserialize)] pub enum RuntimeVersion { #[serde(rename = "Java_8")] Java8, #[serde(rename = "Java_11")] Java11, #[serde(rename = "NetCore_31")] NetCore31, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DeploymentInstance { #[serde(skip_serializing)] pub name: Option<String>, #[serde(skip_serializing)] pub status: Option<String>, #[serde(skip_serializing)] pub reason: Option<String>, #[serde(rename = "discoveryStatus", skip_serializing)] pub discovery_status: Option<String>, #[serde(rename = "startTime", skip_serializing)] pub start_time: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DeploymentResourceCollection { #[serde(skip_serializing_if = "Vec::is_empty")] pub value: Vec<DeploymentResource>, #[serde(rename = "nextLink", skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LogFileUrlResponse { pub url: String, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ServiceResourceList { #[serde(skip_serializing_if = "Vec::is_empty")] pub value: Vec<ServiceResource>, #[serde(rename = "nextLink", skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AvailableOperations { #[serde(skip_serializing_if = "Vec::is_empty")] pub value: Vec<OperationDetail>, #[serde(rename = "nextLink", skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationDetail { #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "isDataAction", skip_serializing_if = "Option::is_none")] pub is_data_action: Option<bool>, #[serde(skip_serializing_if = "Option::is_none")] pub display: Option<OperationDisplay>, #[serde(skip_serializing_if = "Option::is_none")] pub origin: 
Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub properties: Option<OperationProperties>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationDisplay { #[serde(skip_serializing_if = "Option::is_none")] pub provider: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub resource: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub operation: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub description: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationProperties { #[serde(rename = "serviceSpecification", skip_serializing_if = "Option::is_none")] pub service_specification: Option<ServiceSpecification>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ServiceSpecification { #[serde(rename = "logSpecifications", skip_serializing_if = "Vec::is_empty")] pub log_specifications: Vec<LogSpecification>, #[serde(rename = "metricSpecifications", skip_serializing_if = "Vec::is_empty")] pub metric_specifications: Vec<MetricSpecification>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LogSpecification { #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "displayName", skip_serializing_if = "Option::is_none")] pub display_name: Option<String>, #[serde(rename = "blobDuration", skip_serializing_if = "Option::is_none")] pub blob_duration: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MetricSpecification { #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "displayName", skip_serializing_if = "Option::is_none")] pub display_name: Option<String>, #[serde(rename = "displayDescription", skip_serializing_if = "Option::is_none")] pub display_description: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub unit: Option<String>, 
#[serde(skip_serializing_if = "Option::is_none")] pub category: Option<String>, #[serde(rename = "aggregationType", skip_serializing_if = "Option::is_none")] pub aggregation_type: Option<String>, #[serde(rename = "supportedAggregationTypes", skip_serializing_if = "Vec::is_empty")] pub supported_aggregation_types: Vec<String>, #[serde(rename = "supportedTimeGrainTypes", skip_serializing_if = "Vec::is_empty")] pub supported_time_grain_types: Vec<String>, #[serde(rename = "fillGapWithZero", skip_serializing_if = "Option::is_none")] pub fill_gap_with_zero: Option<bool>, #[serde(skip_serializing_if = "Vec::is_empty")] pub dimensions: Vec<MetricDimension>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MetricDimension { #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "displayName", skip_serializing_if = "Option::is_none")] pub display_name: Option<String>, #[serde(rename = "toBeExportedForShoebox", skip_serializing_if = "Option::is_none")] pub to_be_exported_for_shoebox: Option<bool>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ResourceSkuCollection { #[serde(skip_serializing_if = "Vec::is_empty")] pub value: Vec<ResourceSku>, #[serde(rename = "nextLink", skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ResourceSku { #[serde(rename = "resourceType", skip_serializing_if = "Option::is_none")] pub resource_type: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub tier: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub capacity: Option<SkuCapacity>, #[serde(skip_serializing_if = "Vec::is_empty")] pub locations: Vec<String>, #[serde(rename = "locationInfo", skip_serializing_if = "Vec::is_empty")] pub location_info: Vec<ResourceSkuLocationInfo>, #[serde(skip_serializing_if = 
"Vec::is_empty")] pub restrictions: Vec<ResourceSkuRestrictions>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SkuCapacity { pub minimum: i32, #[serde(skip_serializing_if = "Option::is_none")] pub maximum: Option<i32>, #[serde(skip_serializing_if = "Option::is_none")] pub default: Option<i32>, #[serde(rename = "scaleType", skip_serializing_if = "Option::is_none")] pub scale_type: Option<sku_capacity::ScaleType>, } pub mod sku_capacity { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ScaleType { None, Manual, Automatic, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ResourceSkuLocationInfo { #[serde(skip_serializing_if = "Option::is_none")] pub location: Option<String>, #[serde(skip_serializing_if = "Vec::is_empty")] pub zones: Vec<String>, #[serde(rename = "zoneDetails", skip_serializing_if = "Vec::is_empty")] pub zone_details: Vec<ResourceSkuZoneDetails>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ResourceSkuRestrictions { #[serde(rename = "type", skip_serializing_if = "Option::is_none")] pub type_: Option<resource_sku_restrictions::Type>, #[serde(skip_serializing_if = "Vec::is_empty")] pub values: Vec<String>, #[serde(rename = "restrictionInfo", skip_serializing_if = "Option::is_none")] pub restriction_info: Option<ResourceSkuRestrictionInfo>, #[serde(rename = "reasonCode", skip_serializing_if = "Option::is_none")] pub reason_code: Option<resource_sku_restrictions::ReasonCode>, } pub mod resource_sku_restrictions { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Type { Location, Zone, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ReasonCode { QuotaId, NotAvailableForSubscription, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ResourceSkuZoneDetails { #[serde(skip_serializing_if = "Vec::is_empty")] pub name: Vec<String>, #[serde(skip_serializing_if = 
"Vec::is_empty")] pub capabilities: Vec<ResourceSkuCapabilities>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ResourceSkuRestrictionInfo { #[serde(skip_serializing_if = "Vec::is_empty")] pub locations: Vec<String>, #[serde(skip_serializing_if = "Vec::is_empty")] pub zones: Vec<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ResourceSkuCapabilities { #[serde(skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub value: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CloudError { #[serde(skip_serializing_if = "Option::is_none")] pub error: Option<CloudErrorBody>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CloudErrorBody { #[serde(skip_serializing_if = "Option::is_none")] pub code: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub message: Option<String>, #[serde(skip_serializing_if = "Option::is_none")] pub target: Option<String>, #[serde(skip_serializing_if = "Vec::is_empty")] pub details: Vec<CloudErrorBody>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AvailableRuntimeVersions { #[serde(skip_serializing)] pub value: Vec<SupportedRuntimeVersion>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SupportedRuntimeVersion { #[serde(skip_serializing_if = "Option::is_none")] pub value: Option<supported_runtime_version::Value>, #[serde(skip_serializing_if = "Option::is_none")] pub platform: Option<supported_runtime_version::Platform>, #[serde(skip_serializing_if = "Option::is_none")] pub version: Option<String>, } pub mod supported_runtime_version { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Value { #[serde(rename = "Java_8")] Java8, #[serde(rename = "Java_11")] Java11, #[serde(rename = "NetCore_31")] NetCore31, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] 
pub enum Platform { Java, #[serde(rename = ".NET Core")] NetCore, } }
39.916667
100
0.702106
39a8cd405de33a81f765fcdd1bc295f4d242eed9
12,267
use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::sorted_map::SortedMap; use rustc_hir as hir; use rustc_hir::def_id::LocalDefId; use rustc_hir::definitions; use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor}; use rustc_hir::*; use rustc_index::vec::{Idx, IndexVec}; use rustc_session::Session; use rustc_span::source_map::SourceMap; use rustc_span::{Span, DUMMY_SP}; use tracing::debug; /// A visitor that walks over the HIR and collects `Node`s into a HIR map. pub(super) struct NodeCollector<'a, 'hir> { /// Source map source_map: &'a SourceMap, bodies: &'a SortedMap<ItemLocalId, &'hir Body<'hir>>, /// Outputs nodes: IndexVec<ItemLocalId, Option<ParentedNode<'hir>>>, parenting: FxHashMap<LocalDefId, ItemLocalId>, /// The parent of this node parent_node: hir::ItemLocalId, owner: LocalDefId, definitions: &'a definitions::Definitions, } pub(super) fn index_hir<'hir>( sess: &Session, definitions: &definitions::Definitions, item: hir::OwnerNode<'hir>, bodies: &SortedMap<ItemLocalId, &'hir Body<'hir>>, ) -> (IndexVec<ItemLocalId, Option<ParentedNode<'hir>>>, FxHashMap<LocalDefId, ItemLocalId>) { let mut nodes = IndexVec::new(); // This node's parent should never be accessed: the owner's parent is computed by the // hir_owner_parent query. Make it invalid (= ItemLocalId::MAX) to force an ICE whenever it is // used. 
nodes.push(Some(ParentedNode { parent: ItemLocalId::INVALID, node: item.into() })); let mut collector = NodeCollector { source_map: sess.source_map(), definitions, owner: item.def_id(), parent_node: ItemLocalId::new(0), nodes, bodies, parenting: FxHashMap::default(), }; match item { OwnerNode::Crate(citem) => collector.visit_mod(&citem, citem.inner, hir::CRATE_HIR_ID), OwnerNode::Item(item) => collector.visit_item(item), OwnerNode::TraitItem(item) => collector.visit_trait_item(item), OwnerNode::ImplItem(item) => collector.visit_impl_item(item), OwnerNode::ForeignItem(item) => collector.visit_foreign_item(item), }; (collector.nodes, collector.parenting) } impl<'a, 'hir> NodeCollector<'a, 'hir> { fn insert(&mut self, span: Span, hir_id: HirId, node: Node<'hir>) { debug_assert_eq!(self.owner, hir_id.owner); debug_assert_ne!(hir_id.local_id.as_u32(), 0); // Make sure that the DepNode of some node coincides with the HirId // owner of that node. if cfg!(debug_assertions) { if hir_id.owner != self.owner { panic!( "inconsistent DepNode at `{:?}` for `{:?}`: \ current_dep_node_owner={} ({:?}), hir_id.owner={} ({:?})", self.source_map.span_to_diagnostic_string(span), node, self.definitions.def_path(self.owner).to_string_no_crate_verbose(), self.owner, self.definitions.def_path(hir_id.owner).to_string_no_crate_verbose(), hir_id.owner, ) } } self.nodes.insert(hir_id.local_id, ParentedNode { parent: self.parent_node, node: node }); } fn with_parent<F: FnOnce(&mut Self)>(&mut self, parent_node_id: HirId, f: F) { debug_assert_eq!(parent_node_id.owner, self.owner); let parent_node = self.parent_node; self.parent_node = parent_node_id.local_id; f(self); self.parent_node = parent_node; } fn insert_nested(&mut self, item: LocalDefId) { self.parenting.insert(item, self.parent_node); } } impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> { type Map = !; /// Because we want to track parent items and so forth, enable /// deep walking so that we walk nested items in the context of 
/// their outer items. fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> { panic!("`visit_nested_xxx` must be manually implemented in this visitor"); } fn visit_nested_item(&mut self, item: ItemId) { debug!("visit_nested_item: {:?}", item); self.insert_nested(item.def_id); } fn visit_nested_trait_item(&mut self, item_id: TraitItemId) { self.insert_nested(item_id.def_id); } fn visit_nested_impl_item(&mut self, item_id: ImplItemId) { self.insert_nested(item_id.def_id); } fn visit_nested_foreign_item(&mut self, foreign_id: ForeignItemId) { self.insert_nested(foreign_id.def_id); } fn visit_nested_body(&mut self, id: BodyId) { debug_assert_eq!(id.hir_id.owner, self.owner); let body = self.bodies[&id.hir_id.local_id]; self.visit_body(body); } fn visit_param(&mut self, param: &'hir Param<'hir>) { let node = Node::Param(param); self.insert(param.pat.span, param.hir_id, node); self.with_parent(param.hir_id, |this| { intravisit::walk_param(this, param); }); } fn visit_item(&mut self, i: &'hir Item<'hir>) { debug!("visit_item: {:?}", i); debug_assert_eq!(i.def_id, self.owner); self.with_parent(i.hir_id(), |this| { if let ItemKind::Struct(ref struct_def, _) = i.kind { // If this is a tuple or unit-like struct, register the constructor. 
if let Some(ctor_hir_id) = struct_def.ctor_hir_id() { this.insert(i.span, ctor_hir_id, Node::Ctor(struct_def)); } } intravisit::walk_item(this, i); }); } fn visit_foreign_item(&mut self, fi: &'hir ForeignItem<'hir>) { debug_assert_eq!(fi.def_id, self.owner); self.with_parent(fi.hir_id(), |this| { intravisit::walk_foreign_item(this, fi); }); } fn visit_generic_param(&mut self, param: &'hir GenericParam<'hir>) { self.insert(param.span, param.hir_id, Node::GenericParam(param)); intravisit::walk_generic_param(self, param); } fn visit_const_param_default(&mut self, param: HirId, ct: &'hir AnonConst) { self.with_parent(param, |this| { intravisit::walk_const_param_default(this, ct); }) } fn visit_trait_item(&mut self, ti: &'hir TraitItem<'hir>) { debug_assert_eq!(ti.def_id, self.owner); self.with_parent(ti.hir_id(), |this| { intravisit::walk_trait_item(this, ti); }); } fn visit_impl_item(&mut self, ii: &'hir ImplItem<'hir>) { debug_assert_eq!(ii.def_id, self.owner); self.with_parent(ii.hir_id(), |this| { intravisit::walk_impl_item(this, ii); }); } fn visit_pat(&mut self, pat: &'hir Pat<'hir>) { let node = if let PatKind::Binding(..) 
= pat.kind { Node::Binding(pat) } else { Node::Pat(pat) }; self.insert(pat.span, pat.hir_id, node); self.with_parent(pat.hir_id, |this| { intravisit::walk_pat(this, pat); }); } fn visit_arm(&mut self, arm: &'hir Arm<'hir>) { let node = Node::Arm(arm); self.insert(arm.span, arm.hir_id, node); self.with_parent(arm.hir_id, |this| { intravisit::walk_arm(this, arm); }); } fn visit_anon_const(&mut self, constant: &'hir AnonConst) { self.insert(DUMMY_SP, constant.hir_id, Node::AnonConst(constant)); self.with_parent(constant.hir_id, |this| { intravisit::walk_anon_const(this, constant); }); } fn visit_expr(&mut self, expr: &'hir Expr<'hir>) { self.insert(expr.span, expr.hir_id, Node::Expr(expr)); self.with_parent(expr.hir_id, |this| { intravisit::walk_expr(this, expr); }); } fn visit_stmt(&mut self, stmt: &'hir Stmt<'hir>) { self.insert(stmt.span, stmt.hir_id, Node::Stmt(stmt)); self.with_parent(stmt.hir_id, |this| { intravisit::walk_stmt(this, stmt); }); } fn visit_path_segment(&mut self, path_span: Span, path_segment: &'hir PathSegment<'hir>) { if let Some(hir_id) = path_segment.hir_id { self.insert(path_span, hir_id, Node::PathSegment(path_segment)); } intravisit::walk_path_segment(self, path_span, path_segment); } fn visit_ty(&mut self, ty: &'hir Ty<'hir>) { self.insert(ty.span, ty.hir_id, Node::Ty(ty)); self.with_parent(ty.hir_id, |this| { intravisit::walk_ty(this, ty); }); } fn visit_infer(&mut self, inf: &'hir InferArg) { self.insert(inf.span, inf.hir_id, Node::Infer(inf)); self.with_parent(inf.hir_id, |this| { intravisit::walk_inf(this, inf); }); } fn visit_trait_ref(&mut self, tr: &'hir TraitRef<'hir>) { self.insert(tr.path.span, tr.hir_ref_id, Node::TraitRef(tr)); self.with_parent(tr.hir_ref_id, |this| { intravisit::walk_trait_ref(this, tr); }); } fn visit_fn( &mut self, fk: intravisit::FnKind<'hir>, fd: &'hir FnDecl<'hir>, b: BodyId, s: Span, id: HirId, ) { assert_eq!(self.owner, id.owner); assert_eq!(self.parent_node, id.local_id); intravisit::walk_fn(self, fk, 
fd, b, s, id); } fn visit_block(&mut self, block: &'hir Block<'hir>) { self.insert(block.span, block.hir_id, Node::Block(block)); self.with_parent(block.hir_id, |this| { intravisit::walk_block(this, block); }); } fn visit_local(&mut self, l: &'hir Local<'hir>) { self.insert(l.span, l.hir_id, Node::Local(l)); self.with_parent(l.hir_id, |this| { intravisit::walk_local(this, l); }) } fn visit_lifetime(&mut self, lifetime: &'hir Lifetime) { self.insert(lifetime.span, lifetime.hir_id, Node::Lifetime(lifetime)); } fn visit_vis(&mut self, visibility: &'hir Visibility<'hir>) { match visibility.node { VisibilityKind::Public | VisibilityKind::Crate(_) | VisibilityKind::Inherited => {} VisibilityKind::Restricted { hir_id, .. } => { self.insert(visibility.span, hir_id, Node::Visibility(visibility)); self.with_parent(hir_id, |this| { intravisit::walk_vis(this, visibility); }); } } } fn visit_variant(&mut self, v: &'hir Variant<'hir>, g: &'hir Generics<'hir>, item_id: HirId) { self.insert(v.span, v.id, Node::Variant(v)); self.with_parent(v.id, |this| { // Register the constructor of this variant. if let Some(ctor_hir_id) = v.data.ctor_hir_id() { this.insert(v.span, ctor_hir_id, Node::Ctor(&v.data)); } intravisit::walk_variant(this, v, g, item_id); }); } fn visit_field_def(&mut self, field: &'hir FieldDef<'hir>) { self.insert(field.span, field.hir_id, Node::Field(field)); self.with_parent(field.hir_id, |this| { intravisit::walk_field_def(this, field); }); } fn visit_trait_item_ref(&mut self, ii: &'hir TraitItemRef) { // Do not visit the duplicate information in TraitItemRef. We want to // map the actual nodes, not the duplicate ones in the *Ref. let TraitItemRef { id, ident: _, kind: _, span: _, defaultness: _ } = *ii; self.visit_nested_trait_item(id); } fn visit_impl_item_ref(&mut self, ii: &'hir ImplItemRef) { // Do not visit the duplicate information in ImplItemRef. We want to // map the actual nodes, not the duplicate ones in the *Ref. 
let ImplItemRef { id, ident: _, kind: _, span: _, defaultness: _, trait_item_def_id: _ } = *ii; self.visit_nested_impl_item(id); } fn visit_foreign_item_ref(&mut self, fi: &'hir ForeignItemRef) { // Do not visit the duplicate information in ForeignItemRef. We want to // map the actual nodes, not the duplicate ones in the *Ref. let ForeignItemRef { id, ident: _, span: _ } = *fi; self.visit_nested_foreign_item(id); } }
34.849432
99
0.600962
1c37fb825c190ef7fb40b4bef874160126e9f214
1,238
extern crate termion; use termion::color::{Bg, Rgb}; fn get_color(index: usize) -> Rgb { let palette: Vec<Rgb> = vec![ Rgb(7, 7, 7), Rgb(31, 7, 7), Rgb(47, 15, 7), Rgb(71, 15, 7), Rgb(87, 23, 7), Rgb(103, 31, 7), Rgb(119, 31, 7), Rgb(143, 39, 7), Rgb(159, 47, 7), Rgb(175, 63, 7), Rgb(191, 71, 7), Rgb(199, 71, 7), Rgb(223, 79, 7), Rgb(223, 87, 7), Rgb(223, 87, 7), Rgb(215, 95, 7), Rgb(215, 95, 7), Rgb(215, 103, 15), Rgb(207, 111, 15), Rgb(207, 119, 15), Rgb(207, 127, 15), Rgb(207, 135, 23), Rgb(207, 135, 23), Rgb(199, 135, 23), Rgb(199, 143, 23), Rgb(199, 151, 31), Rgb(191, 159, 31), Rgb(191, 159, 31), Rgb(191, 167, 39), Rgb(191, 167, 39), Rgb(191, 175, 47), Rgb(183, 175, 47), Rgb(183, 183, 47), Rgb(183, 183, 55), Rgb(207, 207, 111), Rgb(223, 223, 159), Rgb(239, 239, 199), Rgb(255, 255, 255), ]; return palette[index]; } pub fn get_bg_color(index: usize) -> Bg<Rgb> { return Bg(get_color(index)); }
22.925926
46
0.430533
4bb935301526877237f59b4019d58cf003574b4c
821
#[macro_use] extern crate variadic_generics; pub trait OptionTuple { type Values; fn tuplewrap(self) -> Option<Self::Values>; } va_expand!{ ($va_len:tt) ($($va_idents:ident),+) ($($va_indices:tt),+) impl<$($va_idents),+> OptionTuple for ($(Option<$va_idents>,)+) { type Values = ($($va_idents,)+); fn tuplewrap(self) -> Option<Self::Values> { Some(( $(match self.$va_indices { Some(r) => r, None => return None },)+ )) } } } fn main() { let options = (Some("foo"), Some(42), Some(vec![2, 3, 4])); let values = options.clone().tuplewrap().unwrap(); println!("options: {:?}", options); println!("values: {:?}", values); } // OUTPUT // // options: (Some("foo"), Some(42), Some([2, 3, 4])) // values: ("foo", 42, [2, 3, 4])
25.65625
81
0.532278
9bca1d09901590654ce46fc13b70d2b817050a92
71,305
use super::diagnostics::{dummy_arg, ConsumeClosingDelim, Error}; use super::ty::{AllowPlus, RecoverQPath}; use super::{FollowedByType, Parser, PathStyle}; use crate::maybe_whole; use rustc_ast::ast::{self, AttrStyle, AttrVec, Attribute, Ident, DUMMY_NODE_ID}; use rustc_ast::ast::{AssocItem, AssocItemKind, ForeignItemKind, Item, ItemKind}; use rustc_ast::ast::{ Async, Const, Defaultness, IsAuto, PathSegment, Unsafe, UseTree, UseTreeKind, }; use rustc_ast::ast::{ BindingMode, Block, FnDecl, FnSig, Mac, MacArgs, MacDelimiter, Param, SelfKind, }; use rustc_ast::ast::{EnumDef, Generics, StructField, TraitRef, Ty, TyKind, Variant, VariantData}; use rustc_ast::ast::{FnHeader, ForeignItem, Mutability, Visibility, VisibilityKind}; use rustc_ast::ptr::P; use rustc_ast::token; use rustc_ast::tokenstream::{DelimSpan, TokenStream, TokenTree}; use rustc_ast_pretty::pprust; use rustc_errors::{struct_span_err, Applicability, PResult, StashKey}; use rustc_span::edition::Edition; use rustc_span::source_map::{self, Span}; use rustc_span::symbol::{kw, sym, Symbol}; use log::debug; use std::mem; pub(super) type ItemInfo = (Ident, ItemKind); impl<'a> Parser<'a> { pub fn parse_item(&mut self) -> PResult<'a, Option<P<Item>>> { self.parse_item_(|_| true).map(|i| i.map(P)) } fn parse_item_(&mut self, req_name: ReqName) -> PResult<'a, Option<Item>> { let attrs = self.parse_outer_attributes()?; self.parse_item_common(attrs, true, false, req_name) } pub(super) fn parse_item_common( &mut self, mut attrs: Vec<Attribute>, mac_allowed: bool, attrs_allowed: bool, req_name: ReqName, ) -> PResult<'a, Option<Item>> { maybe_whole!(self, NtItem, |item| { let mut item = item; mem::swap(&mut item.attrs, &mut attrs); item.attrs.extend(attrs); Some(item.into_inner()) }); let mut unclosed_delims = vec![]; let (mut item, tokens) = self.collect_tokens(|this| { let item = this.parse_item_common_(attrs, mac_allowed, attrs_allowed, req_name); unclosed_delims.append(&mut this.unclosed_delims); item })?; 
self.unclosed_delims.append(&mut unclosed_delims); // Once we've parsed an item and recorded the tokens we got while // parsing we may want to store `tokens` into the item we're about to // return. Note, though, that we specifically didn't capture tokens // related to outer attributes. The `tokens` field here may later be // used with procedural macros to convert this item back into a token // stream, but during expansion we may be removing attributes as we go // along. // // If we've got inner attributes then the `tokens` we've got above holds // these inner attributes. If an inner attribute is expanded we won't // actually remove it from the token stream, so we'll just keep yielding // it (bad!). To work around this case for now we just avoid recording // `tokens` if we detect any inner attributes. This should help keep // expansion correct, but we should fix this bug one day! if let Some(item) = &mut item { if !item.attrs.iter().any(|attr| attr.style == AttrStyle::Inner) { item.tokens = Some(tokens); } } Ok(item) } fn parse_item_common_( &mut self, mut attrs: Vec<Attribute>, mac_allowed: bool, attrs_allowed: bool, req_name: ReqName, ) -> PResult<'a, Option<Item>> { let lo = self.token.span; let vis = self.parse_visibility(FollowedByType::No)?; let mut def = self.parse_defaultness(); let kind = self.parse_item_kind(&mut attrs, mac_allowed, lo, &vis, &mut def, req_name)?; if let Some((ident, kind)) = kind { self.error_on_unconsumed_default(def, &kind); let span = lo.to(self.prev_token.span); let id = DUMMY_NODE_ID; let item = Item { ident, attrs, id, kind, vis, span, tokens: None }; return Ok(Some(item)); } // At this point, we have failed to parse an item. self.error_on_unmatched_vis(&vis); self.error_on_unmatched_defaultness(def); if !attrs_allowed { self.recover_attrs_no_item(&attrs)?; } Ok(None) } /// Error in-case a non-inherited visibility was parsed but no item followed. 
    fn error_on_unmatched_vis(&self, vis: &Visibility) {
        if let VisibilityKind::Inherited = vis.node {
            // Nothing was written, so there is nothing to complain about.
            return;
        }
        let vs = pprust::vis_to_string(&vis);
        let vs = vs.trim_end();
        self.struct_span_err(vis.span, &format!("visibility `{}` is not followed by an item", vs))
            .span_label(vis.span, "the visibility")
            .help(&format!("you likely meant to define an item, e.g., `{} fn foo() {{}}`", vs))
            .emit();
    }

    /// Error in case a `default` was parsed but no item followed.
    fn error_on_unmatched_defaultness(&self, def: Defaultness) {
        if let Defaultness::Default(sp) = def {
            self.struct_span_err(sp, "`default` is not followed by an item")
                .span_label(sp, "the `default` qualifier")
                .note("only `fn`, `const`, `type`, or `impl` items may be prefixed by `default`")
                .emit();
        }
    }

    /// Error in case `default` was parsed in an inappropriate context.
    fn error_on_unconsumed_default(&self, def: Defaultness, kind: &ItemKind) {
        if let Defaultness::Default(span) = def {
            let msg = format!("{} {} cannot be `default`", kind.article(), kind.descr());
            self.struct_span_err(span, &msg)
                .span_label(span, "`default` because of this")
                .note("only associated `fn`, `const`, and `type` items can be `default`")
                .emit();
        }
    }

    /// Parses one of the items allowed by the flags.
    fn parse_item_kind(
        &mut self,
        attrs: &mut Vec<Attribute>,
        macros_allowed: bool,
        lo: Span,
        vis: &Visibility,
        def: &mut Defaultness,
        req_name: ReqName,
    ) -> PResult<'a, Option<ItemInfo>> {
        // Shadow `def` with a closure: calling `def()` *takes* the parsed
        // defaultness (resetting it to `Final`), marking it as consumed so
        // `error_on_unconsumed_default` stays silent for that item kind.
        let mut def = || mem::replace(def, Defaultness::Final);

        let info = if self.eat_keyword(kw::Use) {
            // USE ITEM
            let tree = self.parse_use_tree()?;
            self.expect_semi()?;
            (Ident::invalid(), ItemKind::Use(P(tree)))
        } else if self.check_fn_front_matter() {
            // FUNCTION ITEM
            let (ident, sig, generics, body) = self.parse_fn(attrs, req_name)?;
            (ident, ItemKind::Fn(def(), sig, generics, body))
        } else if self.eat_keyword(kw::Extern) {
            if self.eat_keyword(kw::Crate) {
                // EXTERN CRATE
                self.parse_item_extern_crate()?
            } else {
                // EXTERN BLOCK
                self.parse_item_foreign_mod(attrs)?
            }
        } else if self.is_static_global() {
            // STATIC ITEM
            self.bump(); // `static`
            let m = self.parse_mutability();
            let (ident, ty, expr) = self.parse_item_global(Some(m))?;
            (ident, ItemKind::Static(ty, m, expr))
        } else if let Const::Yes(const_span) = self.parse_constness() {
            // CONST ITEM
            self.recover_const_mut(const_span);
            let (ident, ty, expr) = self.parse_item_global(None)?;
            (ident, ItemKind::Const(def(), ty, expr))
        } else if self.check_keyword(kw::Trait) || self.check_auto_or_unsafe_trait_item() {
            // TRAIT ITEM
            self.parse_item_trait(attrs, lo)?
        } else if self.check_keyword(kw::Impl)
            || self.check_keyword(kw::Unsafe) && self.is_keyword_ahead(1, &[kw::Impl])
        {
            // IMPL ITEM
            self.parse_item_impl(attrs, def())?
        } else if self.eat_keyword(kw::Mod) {
            // MODULE ITEM
            self.parse_item_mod(attrs)?
        } else if self.eat_keyword(kw::Type) {
            // TYPE ITEM
            self.parse_type_alias(def())?
        } else if self.eat_keyword(kw::Enum) {
            // ENUM ITEM
            self.parse_item_enum()?
        } else if self.eat_keyword(kw::Struct) {
            // STRUCT ITEM
            self.parse_item_struct()?
        } else if self.is_kw_followed_by_ident(kw::Union) {
            // UNION ITEM
            self.bump(); // `union`
            self.parse_item_union()?
        } else if self.eat_keyword(kw::Macro) {
            // MACROS 2.0 ITEM
            self.parse_item_decl_macro(lo)?
        } else if self.is_macro_rules_item() {
            // MACRO_RULES ITEM
            self.parse_item_macro_rules(vis)?
        } else if vis.node.is_pub() && self.isnt_macro_invocation() {
            // `pub` followed by something that is not an item keyword and not
            // a macro call: recover assuming a missing `fn`/`struct` keyword.
            self.recover_missing_kw_before_item()?;
            return Ok(None);
        } else if macros_allowed && self.token.is_path_start() {
            // MACRO INVOCATION ITEM
            (Ident::invalid(), ItemKind::Mac(self.parse_item_macro(vis)?))
        } else {
            return Ok(None);
        };
        Ok(Some(info))
    }

    /// When parsing a statement, would the start of a path be an item?
    pub(super) fn is_path_start_item(&mut self) -> bool {
        self.is_crate_vis() // no: `crate::b`, yes: `crate $item`
        || self.is_kw_followed_by_ident(kw::Union) // no: `union::b`, yes: `union U { .. }`
        || self.check_auto_or_unsafe_trait_item() // no: `auto::b`, yes: `auto trait X { .. }`
        || self.is_async_fn() // no(2015): `async::b`, yes: `async fn`
        || self.is_macro_rules_item() // no: `macro_rules::b`, yes: `macro_rules! mac`
    }

    /// Are we sure this could not possibly be a macro invocation?
    fn isnt_macro_invocation(&mut self) -> bool {
        self.check_ident() && self.look_ahead(1, |t| *t != token::Not && *t != token::ModSep)
    }

    /// Recover on encountering a struct or method definition where the user
    /// forgot to add the `struct` or `fn` keyword after writing `pub`: `pub S {}`.
    fn recover_missing_kw_before_item(&mut self) -> PResult<'a, ()> {
        // Space between `pub` keyword and the identifier
        //
        //     pub   S {}
        //        ^^^ `sp` points here
        let sp = self.prev_token.span.between(self.token.span);
        let full_sp = self.prev_token.span.to(self.token.span);
        let ident_sp = self.token.span;
        if self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace)) {
            // possible public struct definition where `struct` was forgotten
            let ident = self.parse_ident().unwrap();
            let msg = format!("add `struct` here to parse `{}` as a public struct", ident);
            let mut err = self.struct_span_err(sp, "missing `struct` for struct definition");
            err.span_suggestion_short(
                sp,
                &msg,
                " struct ".into(),
                Applicability::MaybeIncorrect, // speculative
            );
            return Err(err);
        } else if self.look_ahead(1, |t| *t == token::OpenDelim(token::Paren)) {
            // `pub ident(` — looks like a function (or tuple struct) header.
            let ident = self.parse_ident().unwrap();
            self.bump(); // `(`
            let kw_name = self.recover_first_param();
            self.consume_block(token::Paren, ConsumeClosingDelim::Yes);
            let (kw, kw_name, ambiguous) = if self.check(&token::RArrow) {
                self.eat_to_tokens(&[&token::OpenDelim(token::Brace)]);
                self.bump(); // `{`
                ("fn", kw_name, false)
            } else if self.check(&token::OpenDelim(token::Brace)) {
                self.bump(); // `{`
                ("fn", kw_name, false)
            } else if self.check(&token::Colon) {
                let kw = "struct";
                (kw, kw, false)
            } else {
                ("fn` or `struct", "function or struct", true)
            };

            let msg = format!("missing `{}` for {} definition", kw, kw_name);
            let mut err = self.struct_span_err(sp, &msg);
            if !ambiguous {
                self.consume_block(token::Brace, ConsumeClosingDelim::Yes);
                let suggestion =
                    format!("add `{}` here to parse `{}` as a public {}", kw, ident, kw_name);
                err.span_suggestion_short(
                    sp,
                    &suggestion,
                    format!(" {} ", kw),
                    Applicability::MachineApplicable,
                );
            } else {
                if let Ok(snippet) = self.span_to_snippet(ident_sp) {
                    err.span_suggestion(
                        full_sp,
                        "if you meant to call a macro, try",
                        format!("{}!", snippet),
                        // this is the `ambiguous` conditional branch
                        Applicability::MaybeIncorrect,
                    );
                } else {
                    err.help(
                        "if you meant to call a macro, remove the `pub` \
                         and add a trailing `!` after the identifier",
                    );
                }
            }
            return Err(err);
        } else if self.look_ahead(1, |t| *t == token::Lt) {
            // `pub ident<` — generic parameters; header kind decided below.
            let ident = self.parse_ident().unwrap();
            self.eat_to_tokens(&[&token::Gt]);
            self.bump(); // `>`
            let (kw, kw_name, ambiguous) = if self.eat(&token::OpenDelim(token::Paren)) {
                ("fn", self.recover_first_param(), false)
            } else if self.check(&token::OpenDelim(token::Brace)) {
                ("struct", "struct", false)
            } else {
                ("fn` or `struct", "function or struct", true)
            };
            let msg = format!("missing `{}` for {} definition", kw, kw_name);
            let mut err = self.struct_span_err(sp, &msg);
            if !ambiguous {
                err.span_suggestion_short(
                    sp,
                    &format!("add `{}` here to parse `{}` as a public {}", kw, ident, kw_name),
                    format!(" {} ", kw),
                    Applicability::MachineApplicable,
                );
            }
            return Err(err);
        } else {
            Ok(())
        }
    }

    /// Parses an item macro, e.g., `item!();`.
    fn parse_item_macro(&mut self, vis: &Visibility) -> PResult<'a, Mac> {
        let path = self.parse_path(PathStyle::Mod)?; // `foo::bar`
        self.expect(&token::Not)?; // `!`
        let args = self.parse_mac_args()?; // `( .. )` or `[ .. ]` (followed by `;`), or `{ .. }`.
        self.eat_semi_for_macro_if_needed(&args);
        self.complain_if_pub_macro(vis, false);
        Ok(Mac { path, args, prior_type_ascription: self.last_type_ascription })
    }

    /// Recover if we parsed attributes and expected an item but there was none.
    fn recover_attrs_no_item(&mut self, attrs: &[Attribute]) -> PResult<'a, ()> {
        let (start, end) = match attrs {
            [] => return Ok(()),
            [x0] => (x0, x0),
            [x0, .., xn] => (x0, xn),
        };
        let msg = if end.is_doc_comment() {
            "expected item after doc comment"
        } else {
            "expected item after attributes"
        };
        let mut err = self.struct_span_err(end.span, msg);
        if end.is_doc_comment() {
            err.span_label(end.span, "this doc comment doesn't document anything");
        }
        if let [.., penultimate, _] = attrs {
            err.span_label(start.span.to(penultimate.span), "other attributes here");
        }
        Err(err)
    }

    /// Is the current token `async` directly followed by `fn`?
    fn is_async_fn(&self) -> bool {
        self.token.is_keyword(kw::Async) && self.is_keyword_ahead(1, &[kw::Fn])
    }

    /// Parses an implementation item.
    ///
    /// ```
    /// impl<'a, T> TYPE { /* impl items */ }
    /// impl<'a, T> TRAIT for TYPE { /* impl items */ }
    /// impl<'a, T> !TRAIT for TYPE { /* impl items */ }
    /// impl<'a, T> const TRAIT for TYPE { /* impl items */ }
    /// ```
    ///
    /// We actually parse slightly more relaxed grammar for better error reporting and recovery.
    /// ```
    /// "impl" GENERICS "const"? "!"? TYPE "for"? (TYPE | "..") ("where" PREDICATES)? "{" BODY "}"
    /// "impl" GENERICS "const"? "!"? TYPE ("where" PREDICATES)? "{" BODY "}"
    /// ```
    fn parse_item_impl(
        &mut self,
        attrs: &mut Vec<Attribute>,
        defaultness: Defaultness,
    ) -> PResult<'a, ItemInfo> {
        let unsafety = self.parse_unsafety();
        self.expect_keyword(kw::Impl)?;

        // First, parse generic parameters if necessary.
        let mut generics = if self.choose_generics_over_qpath() {
            self.parse_generics()?
        } else {
            let mut generics = Generics::default();
            // impl A for B {}
            //    /\ this is where `generics.span` should point when there are no type params.
            generics.span = self.prev_token.span.shrink_to_hi();
            generics
        };

        let constness = self.parse_constness();
        if let Const::Yes(span) = constness {
            self.sess.gated_spans.gate(sym::const_trait_impl, span);
        }

        // Disambiguate `impl !Trait for Type { ... }` and `impl ! { ... }` for the never type.
        let polarity = if self.check(&token::Not) && self.look_ahead(1, |t| t.can_begin_type()) {
            self.bump(); // `!`
            ast::ImplPolarity::Negative
        } else {
            ast::ImplPolarity::Positive
        };

        // Parse both types and traits as a type, then reinterpret if necessary.
        let err_path = |span| ast::Path::from_ident(Ident::new(kw::Invalid, span));
        let ty_first = if self.token.is_keyword(kw::For) && self.look_ahead(1, |t| t != &token::Lt)
        {
            // `impl for Type` — the trait was omitted; report and substitute
            // an error path so parsing can continue.
            let span = self.prev_token.span.between(self.token.span);
            self.struct_span_err(span, "missing trait in a trait impl").emit();
            P(Ty { kind: TyKind::Path(None, err_path(span)), span, id: DUMMY_NODE_ID })
        } else {
            self.parse_ty()?
        };

        // If `for` is missing we try to recover.
        let has_for = self.eat_keyword(kw::For);
        let missing_for_span = self.prev_token.span.between(self.token.span);

        let ty_second = if self.token == token::DotDot {
            // We need to report this error after `cfg` expansion for compatibility reasons
            self.bump(); // `..`, do not add it to expected tokens
            Some(self.mk_ty(self.prev_token.span, TyKind::Err))
        } else if has_for || self.token.can_begin_type() {
            Some(self.parse_ty()?)
        } else {
            None
        };

        generics.where_clause = self.parse_where_clause()?;

        let impl_items = self.parse_item_list(attrs, |p| p.parse_impl_item())?;

        let item_kind = match ty_second {
            Some(ty_second) => {
                // impl Trait for Type
                if !has_for {
                    self.struct_span_err(missing_for_span, "missing `for` in a trait impl")
                        .span_suggestion_short(
                            missing_for_span,
                            "add `for` here",
                            " for ".to_string(),
                            Applicability::MachineApplicable,
                        )
                        .emit();
                }

                let ty_first = ty_first.into_inner();
                let path = match ty_first.kind {
                    // This notably includes paths passed through `ty` macro fragments (#46438).
                    TyKind::Path(None, path) => path,
                    _ => {
                        self.struct_span_err(ty_first.span, "expected a trait, found type").emit();
                        err_path(ty_first.span)
                    }
                };
                let trait_ref = TraitRef { path, ref_id: ty_first.id };

                ItemKind::Impl {
                    unsafety,
                    polarity,
                    defaultness,
                    constness,
                    generics,
                    of_trait: Some(trait_ref),
                    self_ty: ty_second,
                    items: impl_items,
                }
            }
            None => {
                // impl Type
                ItemKind::Impl {
                    unsafety,
                    polarity,
                    defaultness,
                    constness,
                    generics,
                    of_trait: None,
                    self_ty: ty_first,
                    items: impl_items,
                }
            }
        };

        Ok((Ident::invalid(), item_kind))
    }

    /// Parses a brace-delimited list of items (e.g. the body of an `impl`,
    /// `trait`, or `extern` block), using `parse_item` for each element.
    /// `Ok(None)` from `parse_item` means "not an item": the rest of the
    /// block is consumed so parsing can make progress.
    fn parse_item_list<T>(
        &mut self,
        attrs: &mut Vec<Attribute>,
        mut parse_item: impl FnMut(&mut Parser<'a>) -> PResult<'a, Option<Option<T>>>,
    ) -> PResult<'a, Vec<T>> {
        let open_brace_span = self.token.span;
        self.expect(&token::OpenDelim(token::Brace))?;
        attrs.append(&mut self.parse_inner_attributes()?);

        let mut items = Vec::new();
        while !self.eat(&token::CloseDelim(token::Brace)) {
            if self.recover_doc_comment_before_brace() {
                continue;
            }
            match parse_item(self) {
                Ok(None) => {
                    // We have to bail or we'll potentially never make progress.
                    let non_item_span = self.token.span;
                    self.consume_block(token::Brace, ConsumeClosingDelim::Yes);
                    self.struct_span_err(non_item_span, "non-item in item list")
                        .span_label(open_brace_span, "item list starts here")
                        .span_label(non_item_span, "non-item starts here")
                        .span_label(self.prev_token.span, "item list ends here")
                        .emit();
                    break;
                }
                Ok(Some(item)) => items.extend(item),
                Err(mut err) => {
                    self.consume_block(token::Brace, ConsumeClosingDelim::Yes);
                    err.span_label(open_brace_span, "while parsing this item list starting here")
                        .span_label(self.prev_token.span, "the item list ends here")
                        .emit();
                    break;
                }
            }
        }
        Ok(items)
    }

    /// Recover on a doc comment before `}`.
    fn recover_doc_comment_before_brace(&mut self) -> bool {
        if let token::DocComment(_) = self.token.kind {
            if self.look_ahead(1, |tok| tok == &token::CloseDelim(token::Brace)) {
                // A doc comment immediately before `}` documents nothing;
                // emit E0584, eat the comment, and let the caller continue.
                struct_span_err!(
                    self.diagnostic(),
                    self.token.span,
                    E0584,
                    "found a documentation comment that doesn't document anything",
                )
                .span_label(self.token.span, "this doc comment doesn't document anything")
                .help(
                    "doc comments must come before what they document, maybe a \
                    comment was intended with `//`?",
                )
                .emit();
                self.bump();
                return true;
            }
        }
        false
    }

    /// Parses defaultness (i.e., `default` or nothing).
    fn parse_defaultness(&mut self) -> Defaultness {
        // We are interested in `default` followed by another identifier.
        // However, we must avoid keywords that occur as binary operators.
        // Currently, the only applicable keyword is `as` (`default as Ty`).
        if self.check_keyword(kw::Default)
            && self.look_ahead(1, |t| t.is_non_raw_ident_where(|i| i.name != kw::As))
        {
            self.bump(); // `default`
            Defaultness::Default(self.normalized_prev_token.span)
        } else {
            Defaultness::Final
        }
    }

    /// Is this an `(unsafe auto? | auto) trait` item?
    fn check_auto_or_unsafe_trait_item(&mut self) -> bool {
        // auto trait
        self.check_keyword(kw::Auto) && self.is_keyword_ahead(1, &[kw::Trait])
            // unsafe auto trait
            || self.check_keyword(kw::Unsafe) && self.is_keyword_ahead(1, &[kw::Trait, kw::Auto])
    }

    /// Parses `unsafe? auto? trait Foo { ... }` or `trait Foo = Bar;`.
    fn parse_item_trait(&mut self, attrs: &mut Vec<Attribute>, lo: Span) -> PResult<'a, ItemInfo> {
        let unsafety = self.parse_unsafety();
        // Parse optional `auto` prefix.
        let is_auto = if self.eat_keyword(kw::Auto) { IsAuto::Yes } else { IsAuto::No };

        self.expect_keyword(kw::Trait)?;
        let ident = self.parse_ident()?;
        let mut tps = self.parse_generics()?;

        // Parse optional colon and supertrait bounds.
        let had_colon = self.eat(&token::Colon);
        let span_at_colon = self.prev_token.span;
        let bounds = if had_colon {
            self.parse_generic_bounds(Some(self.prev_token.span))?
        } else {
            Vec::new()
        };

        let span_before_eq = self.prev_token.span;
        if self.eat(&token::Eq) {
            // It's a trait alias.
            if had_colon {
                // `trait Foo: Bar = Baz;` — bounds before `=` are rejected.
                let span = span_at_colon.to(span_before_eq);
                self.struct_span_err(span, "bounds are not allowed on trait aliases").emit();
            }

            let bounds = self.parse_generic_bounds(None)?;
            tps.where_clause = self.parse_where_clause()?;
            self.expect_semi()?;

            let whole_span = lo.to(self.prev_token.span);
            if is_auto == IsAuto::Yes {
                let msg = "trait aliases cannot be `auto`";
                self.struct_span_err(whole_span, msg).span_label(whole_span, msg).emit();
            }
            if let Unsafe::Yes(_) = unsafety {
                let msg = "trait aliases cannot be `unsafe`";
                self.struct_span_err(whole_span, msg).span_label(whole_span, msg).emit();
            }

            // Trait aliases are feature-gated.
            self.sess.gated_spans.gate(sym::trait_alias, whole_span);

            Ok((ident, ItemKind::TraitAlias(tps, bounds)))
        } else {
            // It's a normal trait.
            tps.where_clause = self.parse_where_clause()?;
            let items = self.parse_item_list(attrs, |p| p.parse_trait_item())?;
            Ok((ident, ItemKind::Trait(is_auto, unsafety, tps, bounds, items)))
        }
    }

    /// Parses an associated item inside an `impl` block;
    /// parameter names are always required.
    pub fn parse_impl_item(&mut self) -> PResult<'a, Option<Option<P<AssocItem>>>> {
        self.parse_assoc_item(|_| true)
    }

    /// Parses an associated item inside a `trait` block;
    /// parameter names are only required since edition 2018.
    pub fn parse_trait_item(&mut self) -> PResult<'a, Option<Option<P<AssocItem>>>> {
        self.parse_assoc_item(|edition| edition >= Edition::Edition2018)
    }

    /// Parses associated items.
    fn parse_assoc_item(&mut self, req_name: ReqName) -> PResult<'a, Option<Option<P<AssocItem>>>> {
        // Parse a general item, then reinterpret its kind as an associated
        // item kind, rejecting kinds that cannot appear in traits/impls.
        Ok(self.parse_item_(req_name)?.map(|Item { attrs, id, span, vis, ident, kind, tokens }| {
            let kind = match kind {
                ItemKind::Mac(a) => AssocItemKind::Macro(a),
                ItemKind::Fn(a, b, c, d) => AssocItemKind::Fn(a, b, c, d),
                ItemKind::TyAlias(a, b, c, d) => AssocItemKind::TyAlias(a, b, c, d),
                ItemKind::Const(a, b, c) => AssocItemKind::Const(a, b, c),
                ItemKind::Static(a, _, b) => {
                    // Recover `static` in an assoc position as a `const`.
                    self.struct_span_err(span, "associated `static` items are not allowed").emit();
                    AssocItemKind::Const(Defaultness::Final, a, b)
                }
                _ => return self.error_bad_item_kind(span, &kind, "`trait`s or `impl`s"),
            };
            Some(P(Item { attrs, id, span, vis, ident, kind, tokens }))
        }))
    }

    /// Parses a `type` alias with the following grammar:
    /// ```
    /// TypeAlias = "type" Ident Generics {":" GenericBounds}? {"=" Ty}? ";" ;
    /// ```
    /// The `"type"` has already been eaten.
    fn parse_type_alias(&mut self, def: Defaultness) -> PResult<'a, ItemInfo> {
        let ident = self.parse_ident()?;
        let mut generics = self.parse_generics()?;

        // Parse optional colon and param bounds.
        let bounds = if self.eat(&token::Colon) { self.parse_generic_bounds(None)? } else { Vec::new() };

        generics.where_clause = self.parse_where_clause()?;

        let default = if self.eat(&token::Eq) { Some(self.parse_ty()?) } else { None };
        self.expect_semi()?;

        Ok((ident, ItemKind::TyAlias(def, generics, bounds, default)))
    }

    /// Parses a `UseTree`.
    ///
    /// ```
    /// USE_TREE = [`::`] `*` |
    ///            [`::`] `{` USE_TREE_LIST `}` |
    ///            PATH `::` `*` |
    ///            PATH `::` `{` USE_TREE_LIST `}` |
    ///            PATH [`as` IDENT]
    /// ```
    fn parse_use_tree(&mut self) -> PResult<'a, UseTree> {
        let lo = self.token.span;

        let mut prefix = ast::Path { segments: Vec::new(), span: lo.shrink_to_lo() };
        let kind = if self.check(&token::OpenDelim(token::Brace))
            || self.check(&token::BinOp(token::Star))
            || self.is_import_coupler()
        {
            // `use *;` or `use ::*;` or `use {...};` or `use ::{...};`
            let mod_sep_ctxt = self.token.span.ctxt();
            if self.eat(&token::ModSep) {
                prefix
                    .segments
                    .push(PathSegment::path_root(lo.shrink_to_lo().with_ctxt(mod_sep_ctxt)));
            }

            self.parse_use_tree_glob_or_nested()?
        } else {
            // `use path::*;` or `use path::{...};` or `use path;` or `use path as bar;`
            prefix = self.parse_path(PathStyle::Mod)?;

            if self.eat(&token::ModSep) {
                self.parse_use_tree_glob_or_nested()?
            } else {
                UseTreeKind::Simple(self.parse_rename()?, DUMMY_NODE_ID, DUMMY_NODE_ID)
            }
        };

        Ok(UseTree { prefix, kind, span: lo.to(self.prev_token.span) })
    }

    /// Parses `*` or `{...}`.
    fn parse_use_tree_glob_or_nested(&mut self) -> PResult<'a, UseTreeKind> {
        Ok(if self.eat(&token::BinOp(token::Star)) {
            UseTreeKind::Glob
        } else {
            UseTreeKind::Nested(self.parse_use_tree_list()?)
        })
    }

    /// Parses a `UseTreeKind::Nested(list)`.
    ///
    /// ```
    /// USE_TREE_LIST = Ø | (USE_TREE `,`)* USE_TREE [`,`]
    /// ```
    fn parse_use_tree_list(&mut self) -> PResult<'a, Vec<(UseTree, ast::NodeId)>> {
        self.parse_delim_comma_seq(token::Brace, |p| Ok((p.parse_use_tree()?, DUMMY_NODE_ID)))
            .map(|(r, _)| r)
    }

    /// Parses an optional `as IDENT` rename (with `_` allowed as the ident).
    fn parse_rename(&mut self) -> PResult<'a, Option<Ident>> {
        if self.eat_keyword(kw::As) { self.parse_ident_or_underscore().map(Some) } else { Ok(None) }
    }

    /// Parses an identifier, also accepting the reserved `_`.
    fn parse_ident_or_underscore(&mut self) -> PResult<'a, ast::Ident> {
        match self.normalized_token.kind {
            token::Ident(name @ kw::Underscore, false) => {
                self.bump();
                Ok(Ident::new(name, self.normalized_prev_token.span))
            }
            _ => self.parse_ident(),
        }
    }

    /// Parses `extern crate` links.
    ///
    /// # Examples
    ///
    /// ```
    /// extern crate foo;
    /// extern crate bar as foo;
    /// ```
    fn parse_item_extern_crate(&mut self) -> PResult<'a, ItemInfo> {
        // Accept `extern crate name-like-this` for better diagnostics
        let orig_name = self.parse_crate_name_with_dashes()?;
        let (item_name, orig_name) = if let Some(rename) = self.parse_rename()? {
            (rename, Some(orig_name.name))
        } else {
            (orig_name, None)
        };
        self.expect_semi()?;
        Ok((item_name, ItemKind::ExternCrate(orig_name)))
    }

    /// Parses a crate name, recovering dash-separated names (`foo-bar`) into
    /// underscore-separated ones (`foo_bar`) with a machine-applicable fix.
    fn parse_crate_name_with_dashes(&mut self) -> PResult<'a, ast::Ident> {
        let error_msg = "crate name using dashes are not valid in `extern crate` statements";
        let suggestion_msg = "if the original crate name uses dashes you need to use underscores \
                              in the code";
        let mut ident = if self.token.is_keyword(kw::SelfLower) {
            self.parse_path_segment_ident()
        } else {
            self.parse_ident()
        }?;
        let mut idents = vec![];
        let mut replacement = vec![];
        let mut fixed_crate_name = false;
        // Accept `extern crate name-like-this` for better diagnostics.
        let dash = token::BinOp(token::BinOpToken::Minus);
        if self.token == dash {
            // Do not include `-` as part of the expected tokens list.
            while self.eat(&dash) {
                fixed_crate_name = true;
                replacement.push((self.prev_token.span, "_".to_string()));
                idents.push(self.parse_ident()?);
            }
        }
        if fixed_crate_name {
            let fixed_name_sp = ident.span.to(idents.last().unwrap().span);
            let mut fixed_name = format!("{}", ident.name);
            for part in idents {
                fixed_name.push_str(&format!("_{}", part.name));
            }
            ident = Ident::from_str_and_span(&fixed_name, fixed_name_sp);

            self.struct_span_err(fixed_name_sp, error_msg)
                .span_label(fixed_name_sp, "dash-separated idents are not valid")
                .multipart_suggestion(suggestion_msg, replacement, Applicability::MachineApplicable)
                .emit();
        }
        Ok(ident)
    }

    /// Parses `extern` for foreign ABIs modules.
    ///
    /// `extern` is expected to have been consumed before calling this method.
    ///
    /// # Examples
    ///
    /// ```ignore (only-for-syntax-highlight)
    /// extern "C" {}
    /// extern {}
    /// ```
    fn parse_item_foreign_mod(&mut self, attrs: &mut Vec<Attribute>) -> PResult<'a, ItemInfo> {
        let abi = self.parse_abi(); // ABI?
        let items = self.parse_item_list(attrs, |p| p.parse_foreign_item())?;
        let module = ast::ForeignMod { abi, items };
        Ok((Ident::invalid(), ItemKind::ForeignMod(module)))
    }

    /// Parses a foreign item (one in an `extern { ... }` block).
pub fn parse_foreign_item(&mut self) -> PResult<'a, Option<Option<P<ForeignItem>>>> { Ok(self.parse_item_(|_| true)?.map(|Item { attrs, id, span, vis, ident, kind, tokens }| { let kind = match kind { ItemKind::Mac(a) => ForeignItemKind::Macro(a), ItemKind::Fn(a, b, c, d) => ForeignItemKind::Fn(a, b, c, d), ItemKind::TyAlias(a, b, c, d) => ForeignItemKind::TyAlias(a, b, c, d), ItemKind::Static(a, b, c) => ForeignItemKind::Static(a, b, c), ItemKind::Const(_, a, b) => { self.error_on_foreign_const(span, ident); ForeignItemKind::Static(a, Mutability::Not, b) } _ => return self.error_bad_item_kind(span, &kind, "`extern` blocks"), }; Some(P(Item { attrs, id, span, vis, ident, kind, tokens })) })) } fn error_bad_item_kind<T>(&self, span: Span, kind: &ItemKind, ctx: &str) -> Option<T> { let span = self.sess.source_map().def_span(span); let msg = format!("{} is not supported in {}", kind.descr(), ctx); self.struct_span_err(span, &msg).emit(); return None; } fn error_on_foreign_const(&self, span: Span, ident: Ident) { self.struct_span_err(ident.span, "extern items cannot be `const`") .span_suggestion( span.with_hi(ident.span.lo()), "try using a static value", "static ".to_string(), Applicability::MachineApplicable, ) .note("for more information, visit https://doc.rust-lang.org/std/keyword.extern.html") .emit(); } fn is_static_global(&mut self) -> bool { if self.check_keyword(kw::Static) { // Check if this could be a closure. !self.look_ahead(1, |token| { if token.is_keyword(kw::Move) { return true; } match token.kind { token::BinOp(token::Or) | token::OrOr => true, _ => false, } }) } else { false } } /// Recover on `const mut` with `const` already eaten. 
    fn recover_const_mut(&mut self, const_span: Span) {
        if self.eat_keyword(kw::Mut) {
            let span = self.prev_token.span;
            self.struct_span_err(span, "const globals cannot be mutable")
                .span_label(span, "cannot be mutable")
                .span_suggestion(
                    const_span,
                    "you might want to declare a static instead",
                    "static".to_owned(),
                    Applicability::MaybeIncorrect,
                )
                .emit();
        }
    }

    /// Parse `["const" | ("static" "mut"?)] $ident ":" $ty (= $expr)?` with
    /// `["const" | ("static" "mut"?)]` already parsed and stored in `m`.
    ///
    /// When `m` is `"const"`, `$ident` may also be `"_"`.
    fn parse_item_global(
        &mut self,
        m: Option<Mutability>,
    ) -> PResult<'a, (Ident, P<Ty>, Option<P<ast::Expr>>)> {
        // `m.is_none()` means we are parsing a `const`, where `_` is allowed.
        let id = if m.is_none() { self.parse_ident_or_underscore() } else { self.parse_ident() }?;

        // Parse the type of a `const` or `static mut?` item.
        // That is, the `":" $ty` fragment.
        let ty = if self.eat(&token::Colon) {
            self.parse_ty()?
        } else {
            self.recover_missing_const_type(id, m)
        };

        let expr = if self.eat(&token::Eq) { Some(self.parse_expr()?) } else { None };
        self.expect_semi()?;
        Ok((id, ty, expr))
    }

    /// We were supposed to parse `:` but the `:` was missing.
    /// This means that the type is missing.
    fn recover_missing_const_type(&mut self, id: Ident, m: Option<Mutability>) -> P<Ty> {
        // Construct the error and stash it away with the hope
        // that typeck will later enrich the error with a type.
        let kind = match m {
            Some(Mutability::Mut) => "static mut",
            Some(Mutability::Not) => "static",
            None => "const",
        };
        let mut err = self.struct_span_err(id.span, &format!("missing type for `{}` item", kind));
        err.span_suggestion(
            id.span,
            "provide a type for the item",
            format!("{}: <type>", id),
            Applicability::HasPlaceholders,
        );
        err.stash(id.span, StashKey::ItemNoType);

        // The user intended that the type be inferred,
        // so treat this as if the user wrote e.g. `const A: _ = expr;`.
        P(Ty { kind: TyKind::Infer, span: id.span, id: ast::DUMMY_NODE_ID })
    }

    /// Parses an enum declaration.
    fn parse_item_enum(&mut self) -> PResult<'a, ItemInfo> {
        let id = self.parse_ident()?;
        let mut generics = self.parse_generics()?;
        generics.where_clause = self.parse_where_clause()?;

        let (variants, _) =
            self.parse_delim_comma_seq(token::Brace, |p| p.parse_enum_variant()).map_err(|e| {
                // On failure, skip to a statement boundary before propagating
                // so later parsing can resume.
                self.recover_stmt();
                e
            })?;

        // `parse_enum_variant` yields `None` for entries that failed recovery;
        // drop those here.
        let enum_definition = EnumDef { variants: variants.into_iter().filter_map(|v| v).collect() };
        Ok((id, ItemKind::Enum(enum_definition, generics)))
    }

    /// Parses a single enum variant: attributes, visibility, ident, then an
    /// optional struct-like, tuple-like, or unit body and an optional
    /// `= <discriminant>` expression.
    fn parse_enum_variant(&mut self) -> PResult<'a, Option<Variant>> {
        let variant_attrs = self.parse_outer_attributes()?;
        let vlo = self.token.span;

        let vis = self.parse_visibility(FollowedByType::No)?;
        if !self.recover_nested_adt_item(kw::Enum)? {
            // Encountered a nested `enum`/`struct`/`union` definition;
            // it was reported and consumed, so there is no variant here.
            return Ok(None);
        }
        let ident = self.parse_ident()?;

        let struct_def = if self.check(&token::OpenDelim(token::Brace)) {
            // Parse a struct variant.
            let (fields, recovered) = self.parse_record_struct_body()?;
            VariantData::Struct(fields, recovered)
        } else if self.check(&token::OpenDelim(token::Paren)) {
            VariantData::Tuple(self.parse_tuple_struct_body()?, DUMMY_NODE_ID)
        } else {
            VariantData::Unit(DUMMY_NODE_ID)
        };

        let disr_expr =
            if self.eat(&token::Eq) { Some(self.parse_anon_const_expr()?) } else { None };

        let vr = ast::Variant {
            ident,
            vis,
            id: DUMMY_NODE_ID,
            attrs: variant_attrs,
            data: struct_def,
            disr_expr,
            span: vlo.to(self.prev_token.span),
            is_placeholder: false,
        };

        Ok(Some(vr))
    }

    /// Parses `struct Foo { ... }`.
    fn parse_item_struct(&mut self) -> PResult<'a, ItemInfo> {
        let class_name = self.parse_ident()?;

        let mut generics = self.parse_generics()?;

        // There is a special case worth noting here, as reported in issue #17904.
        // If we are parsing a tuple struct it is the case that the where clause
        // should follow the field list. Like so:
        //
        // struct Foo<T>(T) where T: Copy;
        //
        // If we are parsing a normal record-style struct it is the case
        // that the where clause comes before the body, and after the generics.
        // So if we look ahead and see a brace or a where-clause we begin
        // parsing a record style struct.
        //
        // Otherwise if we look ahead and see a paren we parse a tuple-style
        // struct.

        let vdata = if self.token.is_keyword(kw::Where) {
            generics.where_clause = self.parse_where_clause()?;
            if self.eat(&token::Semi) {
                // If we see a: `struct Foo<T> where T: Copy;` style decl.
                VariantData::Unit(DUMMY_NODE_ID)
            } else {
                // If we see: `struct Foo<T> where T: Copy { ... }`
                let (fields, recovered) = self.parse_record_struct_body()?;
                VariantData::Struct(fields, recovered)
            }
        // No `where` so: `struct Foo<T>;`
        } else if self.eat(&token::Semi) {
            VariantData::Unit(DUMMY_NODE_ID)
        // Record-style struct definition
        } else if self.token == token::OpenDelim(token::Brace) {
            let (fields, recovered) = self.parse_record_struct_body()?;
            VariantData::Struct(fields, recovered)
        // Tuple-style struct definition with optional where-clause.
        } else if self.token == token::OpenDelim(token::Paren) {
            let body = VariantData::Tuple(self.parse_tuple_struct_body()?, DUMMY_NODE_ID);
            generics.where_clause = self.parse_where_clause()?;
            self.expect_semi()?;
            body
        } else {
            let token_str = super::token_descr(&self.token);
            let msg = &format!(
                "expected `where`, `{{`, `(`, or `;` after struct name, found {}",
                token_str
            );
            let mut err = self.struct_span_err(self.token.span, msg);
            err.span_label(self.token.span, "expected `where`, `{`, `(`, or `;` after struct name");
            return Err(err);
        };

        Ok((class_name, ItemKind::Struct(vdata, generics)))
    }

    /// Parses `union Foo { ... }`.
    fn parse_item_union(&mut self) -> PResult<'a, ItemInfo> {
        let class_name = self.parse_ident()?;

        let mut generics = self.parse_generics()?;

        // Unlike structs, a union body is always record-style, so only
        // `where` or `{` may follow the name/generics.
        let vdata = if self.token.is_keyword(kw::Where) {
            generics.where_clause = self.parse_where_clause()?;
            let (fields, recovered) = self.parse_record_struct_body()?;
            VariantData::Struct(fields, recovered)
        } else if self.token == token::OpenDelim(token::Brace) {
            let (fields, recovered) = self.parse_record_struct_body()?;
            VariantData::Struct(fields, recovered)
        } else {
            let token_str = super::token_descr(&self.token);
            let msg = &format!("expected `where` or `{{` after union name, found {}", token_str);
            let mut err = self.struct_span_err(self.token.span, msg);
            err.span_label(self.token.span, "expected `where` or `{` after union name");
            return Err(err);
        };

        Ok((class_name, ItemKind::Union(vdata, generics)))
    }

    /// Parses the brace-delimited field list of a record-style struct/union/
    /// variant. The returned flag is `true` when a field failed to parse and
    /// the rest of the block was skipped (recovered).
    fn parse_record_struct_body(
        &mut self,
    ) -> PResult<'a, (Vec<StructField>, /* recovered */ bool)> {
        let mut fields = Vec::new();
        let mut recovered = false;
        if self.eat(&token::OpenDelim(token::Brace)) {
            while self.token != token::CloseDelim(token::Brace) {
                let field = self.parse_struct_decl_field().map_err(|e| {
                    self.consume_block(token::Brace, ConsumeClosingDelim::No);
                    recovered = true;
                    e
                });
                match field {
                    Ok(field) => fields.push(field),
                    Err(mut err) => {
                        err.emit();
                        break;
                    }
                }
            }
            self.eat(&token::CloseDelim(token::Brace));
        } else {
            let token_str = super::token_descr(&self.token);
            let msg = &format!("expected `where`, or `{{` after struct name, found {}", token_str);
            let mut err = self.struct_span_err(self.token.span, msg);
            err.span_label(self.token.span, "expected `where`, or `{` after struct name");
            return Err(err);
        }

        Ok((fields, recovered))
    }

    /// Parses the parenthesized field list of a tuple struct/variant.
    fn parse_tuple_struct_body(&mut self) -> PResult<'a, Vec<StructField>> {
        // This is the case where we find `struct Foo<T>(T) where T: Copy;`
        // Unit like structs are handled in parse_item_struct function
        self.parse_paren_comma_seq(|p| {
            let attrs = p.parse_outer_attributes()?;
            let lo = p.token.span;
            let vis = p.parse_visibility(FollowedByType::Yes)?;
            let ty = p.parse_ty()?;

            Ok(StructField {
                span: lo.to(ty.span),
                vis,
                ident: None,
                id: DUMMY_NODE_ID,
                ty,
                attrs,
                is_placeholder: false,
            })
        })
        .map(|(r, _)| r)
    }

    /// Parses an element of a struct declaration.
    fn parse_struct_decl_field(&mut self) -> PResult<'a, StructField> {
        let attrs = self.parse_outer_attributes()?;
        let lo = self.token.span;
        let vis = self.parse_visibility(FollowedByType::No)?;
        self.parse_single_struct_field(lo, vis, attrs)
    }

    /// Parses a structure field declaration.
    fn parse_single_struct_field(
        &mut self,
        lo: Span,
        vis: Visibility,
        attrs: Vec<Attribute>,
    ) -> PResult<'a, StructField> {
        let mut seen_comma: bool = false;
        let a_var = self.parse_name_and_ty(lo, vis, attrs)?;
        if self.token == token::Comma {
            seen_comma = true;
        }
        // After the field, expect `,` or the closing `}`; anything else is
        // diagnosed with a recovery attempt below.
        match self.token.kind {
            token::Comma => {
                self.bump();
            }
            token::CloseDelim(token::Brace) => {}
            token::DocComment(_) => {
                // A doc comment *after* a field documents nothing.
                let previous_span = self.prev_token.span;
                let mut err = self.span_fatal_err(self.token.span, Error::UselessDocComment);
                self.bump(); // consume the doc comment
                let comma_after_doc_seen = self.eat(&token::Comma);
                // `seen_comma` is always false, because we are inside doc block
                // condition is here to make code more readable
                if !seen_comma && comma_after_doc_seen {
                    seen_comma = true;
                }
                if comma_after_doc_seen || self.token == token::CloseDelim(token::Brace) {
                    err.emit();
                } else {
                    if !seen_comma {
                        let sp = self.sess.source_map().next_point(previous_span);
                        err.span_suggestion(
                            sp,
                            "missing comma here",
                            ",".into(),
                            Applicability::MachineApplicable,
                        );
                    }
                    return Err(err);
                }
            }
            _ => {
                let sp = self.prev_token.span.shrink_to_hi();
                let mut err = self.struct_span_err(
                    sp,
                    &format!("expected `,`, or `}}`, found {}", super::token_descr(&self.token)),
                );
                if self.token.is_ident() {
                    // This is likely another field; emit the diagnostic and keep going
                    err.span_suggestion(
                        sp,
                        "try adding a comma",
                        ",".into(),
                        Applicability::MachineApplicable,
                    );
                    err.emit();
                } else {
                    return Err(err);
                }
            }
        }
        Ok(a_var)
    }

    /// Parses a structure field.
    fn parse_name_and_ty(
        &mut self,
        lo: Span,
        vis: Visibility,
        attrs: Vec<Attribute>,
    ) -> PResult<'a, StructField> {
        let name = self.parse_ident()?;
        self.expect(&token::Colon)?;
        let ty = self.parse_ty()?;
        Ok(StructField {
            span: lo.to(self.prev_token.span),
            ident: Some(name),
            vis,
            id: DUMMY_NODE_ID,
            ty,
            attrs,
            is_placeholder: false,
        })
    }

    /// Parses a declarative macro 2.0 definition.
    /// The `macro` keyword has already been parsed.
    /// ```
    /// MacBody = "{" TOKEN_STREAM "}" ;
    /// MacParams = "(" TOKEN_STREAM ")" ;
    /// DeclMac = "macro" Ident MacParams? MacBody ;
    /// ```
    fn parse_item_decl_macro(&mut self, lo: Span) -> PResult<'a, ItemInfo> {
        let ident = self.parse_ident()?;
        let body = if self.check(&token::OpenDelim(token::Brace)) {
            self.parse_mac_args()? // `MacBody`
        } else if self.check(&token::OpenDelim(token::Paren)) {
            let params = self.parse_token_tree(); // `MacParams`
            let pspan = params.span();
            if !self.check(&token::OpenDelim(token::Brace)) {
                return self.unexpected();
            }
            let body = self.parse_token_tree(); // `MacBody`
            // Convert `MacParams MacBody` into `{ MacParams => MacBody }`.
            let bspan = body.span();
            let arrow = TokenTree::token(token::FatArrow, pspan.between(bspan)); // `=>`
            let tokens = TokenStream::new(vec![params.into(), arrow.into(), body.into()]);
            let dspan = DelimSpan::from_pair(pspan.shrink_to_lo(), bspan.shrink_to_hi());
            P(MacArgs::Delimited(dspan, MacDelimiter::Brace, tokens))
        } else {
            return self.unexpected();
        };

        // `macro` items are feature-gated (decl_macro).
        self.sess.gated_spans.gate(sym::decl_macro, lo.to(self.prev_token.span));
        Ok((ident, ItemKind::MacroDef(ast::MacroDef { body, legacy: false })))
    }

    /// Is this unambiguously the start of a `macro_rules! foo` item defnition?
    fn is_macro_rules_item(&mut self) -> bool {
        self.check_keyword(kw::MacroRules)
            && self.look_ahead(1, |t| *t == token::Not)
            && self.look_ahead(2, |t| t.is_ident())
    }

    /// Parses a legacy `macro_rules! foo { ... }` declarative macro.
fn parse_item_macro_rules(&mut self, vis: &Visibility) -> PResult<'a, ItemInfo> { self.expect_keyword(kw::MacroRules)?; // `macro_rules` self.expect(&token::Not)?; // `!` let ident = self.parse_ident()?; let body = self.parse_mac_args()?; self.eat_semi_for_macro_if_needed(&body); self.complain_if_pub_macro(vis, true); Ok((ident, ItemKind::MacroDef(ast::MacroDef { body, legacy: true }))) } /// Item macro invocations or `macro_rules!` definitions need inherited visibility. /// If that's not the case, emit an error. fn complain_if_pub_macro(&self, vis: &Visibility, macro_rules: bool) { if let VisibilityKind::Inherited = vis.node { return; } let vstr = pprust::vis_to_string(vis); let vstr = vstr.trim_end(); if macro_rules { let msg = format!("can't qualify macro_rules invocation with `{}`", vstr); self.struct_span_err(vis.span, &msg) .span_suggestion( vis.span, "try exporting the macro", "#[macro_export]".to_owned(), Applicability::MaybeIncorrect, // speculative ) .emit(); } else { self.struct_span_err(vis.span, "can't qualify macro invocation with `pub`") .span_suggestion( vis.span, "remove the visibility", String::new(), Applicability::MachineApplicable, ) .help(&format!("try adjusting the macro to put `{}` inside the invocation", vstr)) .emit(); } } fn eat_semi_for_macro_if_needed(&mut self, args: &MacArgs) { if args.need_semicolon() && !self.eat(&token::Semi) { self.report_invalid_macro_expansion_item(args); } } fn report_invalid_macro_expansion_item(&self, args: &MacArgs) { let span = args.span().expect("undelimited macro call"); let mut err = self.struct_span_err( span, "macros that expand to items must be delimited with braces or followed by a semicolon", ); if self.unclosed_delims.is_empty() { let DelimSpan { open, close } = match args { MacArgs::Empty | MacArgs::Eq(..) => unreachable!(), MacArgs::Delimited(dspan, ..) 
=> *dspan, }; err.multipart_suggestion( "change the delimiters to curly braces", vec![(open, "{".to_string()), (close, '}'.to_string())], Applicability::MaybeIncorrect, ); } else { err.span_suggestion( span, "change the delimiters to curly braces", " { /* items */ }".to_string(), Applicability::HasPlaceholders, ); } err.span_suggestion( span.shrink_to_hi(), "add a semicolon", ';'.to_string(), Applicability::MaybeIncorrect, ); err.emit(); } /// Checks if current token is one of tokens which cannot be nested like `kw::Enum`. In case /// it is, we try to parse the item and report error about nested types. fn recover_nested_adt_item(&mut self, keyword: Symbol) -> PResult<'a, bool> { if (self.token.is_keyword(kw::Enum) || self.token.is_keyword(kw::Struct) || self.token.is_keyword(kw::Union)) && self.look_ahead(1, |t| t.is_ident()) { let kw_token = self.token.clone(); let kw_str = pprust::token_to_string(&kw_token); let item = self.parse_item()?; self.struct_span_err( kw_token.span, &format!("`{}` definition cannot be nested inside `{}`", kw_str, keyword), ) .span_suggestion( item.unwrap().span, &format!("consider creating a new `{}` definition instead of nesting", kw_str), String::new(), Applicability::MaybeIncorrect, ) .emit(); // We successfully parsed the item but we must inform the caller about nested problem. return Ok(false); } Ok(true) } } /// The parsing configuration used to parse a parameter list (see `parse_fn_params`). /// /// The function decides if, per-parameter `p`, `p` must have a pattern or just a type. type ReqName = fn(Edition) -> bool; /// Parsing of functions and methods. impl<'a> Parser<'a> { /// Parse a function starting from the front matter (`const ...`) to the body `{ ... }` or `;`. fn parse_fn( &mut self, attrs: &mut Vec<Attribute>, req_name: ReqName, ) -> PResult<'a, (Ident, FnSig, Generics, Option<P<Block>>)> { let header = self.parse_fn_front_matter()?; // `const ... 
fn` let ident = self.parse_ident()?; // `foo` let mut generics = self.parse_generics()?; // `<'a, T, ...>` let decl = self.parse_fn_decl(req_name, AllowPlus::Yes)?; // `(p: u8, ...)` generics.where_clause = self.parse_where_clause()?; // `where T: Ord` let body = self.parse_fn_body(attrs)?; // `;` or `{ ... }`. Ok((ident, FnSig { header, decl }, generics, body)) } /// Parse the "body" of a function. /// This can either be `;` when there's no body, /// or e.g. a block when the function is a provided one. fn parse_fn_body(&mut self, attrs: &mut Vec<Attribute>) -> PResult<'a, Option<P<Block>>> { let (inner_attrs, body) = match self.token.kind { token::Semi => { self.bump(); (Vec::new(), None) } token::OpenDelim(token::Brace) => { let (attrs, body) = self.parse_inner_attrs_and_block()?; (attrs, Some(body)) } token::Interpolated(ref nt) => match **nt { token::NtBlock(..) => { let (attrs, body) = self.parse_inner_attrs_and_block()?; (attrs, Some(body)) } _ => return self.expected_semi_or_open_brace(), }, _ => return self.expected_semi_or_open_brace(), }; attrs.extend(inner_attrs); Ok(body) } /// Is the current token the start of an `FnHeader` / not a valid parse? fn check_fn_front_matter(&mut self) -> bool { // We use an over-approximation here. // `const const`, `fn const` won't parse, but we're not stepping over other syntax either. const QUALS: [Symbol; 4] = [kw::Const, kw::Async, kw::Unsafe, kw::Extern]; self.check_keyword(kw::Fn) // Definitely an `fn`. // `$qual fn` or `$qual $qual`: || QUALS.iter().any(|&kw| self.check_keyword(kw)) && self.look_ahead(1, |t| { // ...qualified and then `fn`, e.g. `const fn`. t.is_keyword(kw::Fn) // Two qualifiers. This is enough. Due `async` we need to check that it's reserved. 
|| t.is_non_raw_ident_where(|i| QUALS.contains(&i.name) && i.is_reserved()) }) // `extern ABI fn` || self.check_keyword(kw::Extern) && self.look_ahead(1, |t| t.can_begin_literal_or_bool()) && self.look_ahead(2, |t| t.is_keyword(kw::Fn)) } /// Parses all the "front matter" (or "qualifiers") for a `fn` declaration, /// up to and including the `fn` keyword. The formal grammar is: /// /// ``` /// Extern = "extern" StringLit ; /// FnQual = "const"? "async"? "unsafe"? Extern? ; /// FnFrontMatter = FnQual? "fn" ; /// ``` fn parse_fn_front_matter(&mut self) -> PResult<'a, FnHeader> { let constness = self.parse_constness(); let asyncness = self.parse_asyncness(); let unsafety = self.parse_unsafety(); let ext = self.parse_extern()?; if let Async::Yes { span, .. } = asyncness { self.ban_async_in_2015(span); } if !self.eat_keyword(kw::Fn) { // It is possible for `expect_one_of` to recover given the contents of // `self.expected_tokens`, therefore, do not use `self.unexpected()` which doesn't // account for this. if !self.expect_one_of(&[], &[])? { unreachable!() } } Ok(FnHeader { constness, unsafety, asyncness, ext }) } /// We are parsing `async fn`. If we are on Rust 2015, emit an error. fn ban_async_in_2015(&self, span: Span) { if span.rust_2015() { let diag = self.diagnostic(); struct_span_err!(diag, span, E0670, "`async fn` is not permitted in the 2015 edition") .note("to use `async fn`, switch to Rust 2018") .help("set `edition = \"2018\"` in `Cargo.toml`") .note("for more on editions, read https://doc.rust-lang.org/edition-guide") .emit(); } } /// Parses the parameter list and result type of a function declaration. pub(super) fn parse_fn_decl( &mut self, req_name: ReqName, ret_allow_plus: AllowPlus, ) -> PResult<'a, P<FnDecl>> { Ok(P(FnDecl { inputs: self.parse_fn_params(req_name)?, output: self.parse_ret_ty(ret_allow_plus, RecoverQPath::Yes)?, })) } /// Parses the parameter list of a function, including the `(` and `)` delimiters. 
fn parse_fn_params(&mut self, req_name: ReqName) -> PResult<'a, Vec<Param>> { let mut first_param = true; // Parse the arguments, starting out with `self` being allowed... let (mut params, _) = self.parse_paren_comma_seq(|p| { let param = p.parse_param_general(req_name, first_param).or_else(|mut e| { e.emit(); let lo = p.prev_token.span; // Skip every token until next possible arg or end. p.eat_to_tokens(&[&token::Comma, &token::CloseDelim(token::Paren)]); // Create a placeholder argument for proper arg count (issue #34264). Ok(dummy_arg(Ident::new(kw::Invalid, lo.to(p.prev_token.span)))) }); // ...now that we've parsed the first argument, `self` is no longer allowed. first_param = false; param })?; // Replace duplicated recovered params with `_` pattern to avoid unnecessary errors. self.deduplicate_recovered_params_names(&mut params); Ok(params) } /// Parses a single function parameter. /// /// - `self` is syntactically allowed when `first_param` holds. fn parse_param_general(&mut self, req_name: ReqName, first_param: bool) -> PResult<'a, Param> { let lo = self.token.span; let attrs = self.parse_outer_attributes()?; // Possibly parse `self`. Recover if we parsed it and it wasn't allowed here. if let Some(mut param) = self.parse_self_param()? { param.attrs = attrs.into(); return if first_param { Ok(param) } else { self.recover_bad_self_param(param) }; } let is_name_required = match self.token.kind { token::DotDotDot => false, _ => req_name(self.normalized_token.span.edition()), }; let (pat, ty) = if is_name_required || self.is_named_param() { debug!("parse_param_general parse_pat (is_name_required:{})", is_name_required); let pat = self.parse_fn_param_pat()?; if let Err(mut err) = self.expect(&token::Colon) { return if let Some(ident) = self.parameter_without_type(&mut err, pat, is_name_required, first_param) { err.emit(); Ok(dummy_arg(ident)) } else { Err(err) }; } self.eat_incorrect_doc_comment_for_param_type(); (pat, self.parse_ty_for_param()?) 
} else { debug!("parse_param_general ident_to_pat"); let parser_snapshot_before_ty = self.clone(); self.eat_incorrect_doc_comment_for_param_type(); let mut ty = self.parse_ty_for_param(); if ty.is_ok() && self.token != token::Comma && self.token != token::CloseDelim(token::Paren) { // This wasn't actually a type, but a pattern looking like a type, // so we are going to rollback and re-parse for recovery. ty = self.unexpected(); } match ty { Ok(ty) => { let ident = Ident::new(kw::Invalid, self.prev_token.span); let bm = BindingMode::ByValue(Mutability::Not); let pat = self.mk_pat_ident(ty.span, bm, ident); (pat, ty) } // If this is a C-variadic argument and we hit an error, return the error. Err(err) if self.token == token::DotDotDot => return Err(err), // Recover from attempting to parse the argument as a type without pattern. Err(mut err) => { err.cancel(); mem::replace(self, parser_snapshot_before_ty); self.recover_arg_parse()? } } }; let span = lo.to(self.token.span); Ok(Param { attrs: attrs.into(), id: ast::DUMMY_NODE_ID, is_placeholder: false, pat, span, ty, }) } /// Returns the parsed optional self parameter and whether a self shortcut was used. fn parse_self_param(&mut self) -> PResult<'a, Option<Param>> { // Extract an identifier *after* having confirmed that the token is one. let expect_self_ident = |this: &mut Self| { match this.normalized_token.kind { // Preserve hygienic context. token::Ident(name, _) => { this.bump(); Ident::new(name, this.normalized_prev_token.span) } _ => unreachable!(), } }; // Is `self` `n` tokens ahead? let is_isolated_self = |this: &Self, n| { this.is_keyword_ahead(n, &[kw::SelfLower]) && this.look_ahead(n + 1, |t| t != &token::ModSep) }; // Is `mut self` `n` tokens ahead? let is_isolated_mut_self = |this: &Self, n| this.is_keyword_ahead(n, &[kw::Mut]) && is_isolated_self(this, n + 1); // Parse `self` or `self: TYPE`. We already know the current token is `self`. 
let parse_self_possibly_typed = |this: &mut Self, m| { let eself_ident = expect_self_ident(this); let eself_hi = this.prev_token.span; let eself = if this.eat(&token::Colon) { SelfKind::Explicit(this.parse_ty()?, m) } else { SelfKind::Value(m) }; Ok((eself, eself_ident, eself_hi)) }; // Recover for the grammar `*self`, `*const self`, and `*mut self`. let recover_self_ptr = |this: &mut Self| { let msg = "cannot pass `self` by raw pointer"; let span = this.token.span; this.struct_span_err(span, msg).span_label(span, msg).emit(); Ok((SelfKind::Value(Mutability::Not), expect_self_ident(this), this.prev_token.span)) }; // Parse optional `self` parameter of a method. // Only a limited set of initial token sequences is considered `self` parameters; anything // else is parsed as a normal function parameter list, so some lookahead is required. let eself_lo = self.token.span; let (eself, eself_ident, eself_hi) = match self.normalized_token.kind { token::BinOp(token::And) => { let eself = if is_isolated_self(self, 1) { // `&self` self.bump(); SelfKind::Region(None, Mutability::Not) } else if is_isolated_mut_self(self, 1) { // `&mut self` self.bump(); self.bump(); SelfKind::Region(None, Mutability::Mut) } else if self.look_ahead(1, |t| t.is_lifetime()) && is_isolated_self(self, 2) { // `&'lt self` self.bump(); let lt = self.expect_lifetime(); SelfKind::Region(Some(lt), Mutability::Not) } else if self.look_ahead(1, |t| t.is_lifetime()) && is_isolated_mut_self(self, 2) { // `&'lt mut self` self.bump(); let lt = self.expect_lifetime(); self.bump(); SelfKind::Region(Some(lt), Mutability::Mut) } else { // `&not_self` return Ok(None); }; (eself, expect_self_ident(self), self.prev_token.span) } // `*self` token::BinOp(token::Star) if is_isolated_self(self, 1) => { self.bump(); recover_self_ptr(self)? 
} // `*mut self` and `*const self` token::BinOp(token::Star) if self.look_ahead(1, |t| t.is_mutability()) && is_isolated_self(self, 2) => { self.bump(); self.bump(); recover_self_ptr(self)? } // `self` and `self: TYPE` token::Ident(..) if is_isolated_self(self, 0) => { parse_self_possibly_typed(self, Mutability::Not)? } // `mut self` and `mut self: TYPE` token::Ident(..) if is_isolated_mut_self(self, 0) => { self.bump(); parse_self_possibly_typed(self, Mutability::Mut)? } _ => return Ok(None), }; let eself = source_map::respan(eself_lo.to(eself_hi), eself); Ok(Some(Param::from_self(AttrVec::default(), eself, eself_ident))) } fn is_named_param(&self) -> bool { let offset = match self.token.kind { token::Interpolated(ref nt) => match **nt { token::NtPat(..) => return self.look_ahead(1, |t| t == &token::Colon), _ => 0, }, token::BinOp(token::And) | token::AndAnd => 1, _ if self.token.is_keyword(kw::Mut) => 1, _ => 0, }; self.look_ahead(offset, |t| t.is_ident()) && self.look_ahead(offset + 1, |t| t == &token::Colon) } fn recover_first_param(&mut self) -> &'static str { match self .parse_outer_attributes() .and_then(|_| self.parse_self_param()) .map_err(|mut e| e.cancel()) { Ok(Some(_)) => "method", _ => "function", } } }
41.050662
103
0.529921
e5fec6ea5bd37a78dade5a3b1de28dcee0a82007
2,750
use serde::{Deserialize, Serialize}; /// TODO(doc): @doitian #[derive(Clone, Debug, Copy, Eq, PartialEq, Serialize, Deserialize)] pub enum Module { /// TODO(doc): @doitian Net, /// TODO(doc): @doitian Chain, /// TODO(doc): @doitian Miner, /// TODO(doc): @doitian Pool, /// TODO(doc): @doitian Experiment, /// TODO(doc): @doitian Stats, /// TODO(doc): @doitian Indexer, /// TODO(doc): @doitian IntegrationTest, /// TODO(doc): @doitian Alert, /// TODO(doc): @doitian Subscription, /// TODO(doc): @doitian Debug, } /// TODO(doc): @doitian #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Config { /// TODO(doc): @doitian pub listen_address: String, /// TODO(doc): @doitian #[serde(default)] pub tcp_listen_address: Option<String>, /// TODO(doc): @doitian #[serde(default)] pub ws_listen_address: Option<String>, /// TODO(doc): @doitian pub max_request_body_size: usize, /// TODO(doc): @doitian pub threads: Option<usize>, /// TODO(doc): @doitian pub modules: Vec<Module>, /// Rejects txs with scripts that might trigger known bugs #[serde(default)] pub reject_ill_transactions: bool, /// TODO(doc): @doitian #[serde(default)] pub enable_deprecated_rpc: bool, } impl Config { /// TODO(doc): @doitian pub fn net_enable(&self) -> bool { self.modules.contains(&Module::Net) } /// TODO(doc): @doitian pub fn chain_enable(&self) -> bool { self.modules.contains(&Module::Chain) } /// TODO(doc): @doitian pub fn miner_enable(&self) -> bool { self.modules.contains(&Module::Miner) } /// TODO(doc): @doitian pub fn pool_enable(&self) -> bool { self.modules.contains(&Module::Pool) } /// TODO(doc): @doitian pub fn experiment_enable(&self) -> bool { self.modules.contains(&Module::Experiment) } /// TODO(doc): @doitian pub fn stats_enable(&self) -> bool { self.modules.contains(&Module::Stats) } /// TODO(doc): @doitian pub fn subscription_enable(&self) -> bool { self.modules.contains(&Module::Subscription) } /// TODO(doc): @doitian pub fn indexer_enable(&self) -> bool { 
self.modules.contains(&Module::Indexer) } /// TODO(doc): @doitian pub fn integration_test_enable(&self) -> bool { self.modules.contains(&Module::IntegrationTest) } /// TODO(doc): @doitian pub fn alert_enable(&self) -> bool { self.modules.contains(&Module::Alert) } /// TODO(doc): @doitian pub fn debug_enable(&self) -> bool { self.modules.contains(&Module::Debug) } }
24.774775
68
0.592364
9cf61ea1b329cf5f75bbbcb1c6ab2269d9de8b2d
20,771
use crate::helpers::{rotating_color, ColorScheme, ID}; use crate::render::area::DrawArea; use crate::render::building::DrawBuilding; use crate::render::bus_stop::DrawBusStop; use crate::render::extra_shape::{DrawExtraShape, ExtraShapeID}; use crate::render::intersection::DrawIntersection; use crate::render::lane::DrawLane; use crate::render::road::DrawRoad; use crate::render::{draw_vehicle, DrawPedCrowd, DrawPedestrian, Renderable}; use crate::ui::{Flags, UI}; use aabb_quadtree::QuadTree; use abstutil::{Cloneable, Timer}; use ezgui::{Color, Drawable, EventCtx, GeomBatch, GfxCtx, Prerender}; use geom::{Bounds, Circle, Distance, Duration, FindClosest, Time}; use map_model::{ AreaID, BuildingID, BusStopID, DirectedRoadID, Intersection, IntersectionID, LaneID, Map, Road, RoadID, Traversable, LANE_THICKNESS, }; use sim::{GetDrawAgents, UnzoomedAgent, VehicleType}; use std::borrow::Borrow; use std::cell::RefCell; use std::collections::HashMap; pub struct DrawMap { pub roads: Vec<DrawRoad>, pub lanes: Vec<DrawLane>, pub intersections: Vec<DrawIntersection>, pub buildings: Vec<DrawBuilding>, pub extra_shapes: Vec<DrawExtraShape>, pub bus_stops: HashMap<BusStopID, DrawBusStop>, pub areas: Vec<DrawArea>, // TODO Move? 
pub agents: RefCell<AgentCache>, pub boundary_polygon: Drawable, pub draw_all_thick_roads: Drawable, pub draw_all_unzoomed_intersections: Drawable, pub draw_all_buildings: Drawable, pub draw_all_areas: Drawable, quadtree: QuadTree<ID>, } impl DrawMap { pub fn new( map: &Map, flags: &Flags, cs: &ColorScheme, ctx: &EventCtx, timer: &mut Timer, ) -> DrawMap { let mut roads: Vec<DrawRoad> = Vec::new(); timer.start_iter("make DrawRoads", map.all_roads().len()); for r in map.all_roads() { timer.next(); roads.push(DrawRoad::new(r, map, cs, ctx.prerender)); } timer.start("generate thick roads"); let mut road_refs: Vec<&Road> = map.all_roads().iter().collect(); road_refs.sort_by_key(|r| r.get_zorder()); let mut all_roads = GeomBatch::new(); for r in road_refs { all_roads.push( osm_rank_to_color(cs, r.get_rank()), r.get_thick_polygon().get(timer), ); if false { all_roads.push( cs.get_def("unzoomed outline", Color::BLACK), roads[r.id.0].get_outline(map), ); } } let draw_all_thick_roads = all_roads.upload(ctx); timer.stop("generate thick roads"); let almost_lanes = timer.parallelize("prepare DrawLanes", map.all_lanes().iter().collect(), |l| { DrawLane::new( l, map, flags.draw_lane_markings, cs, // TODO Really parallelize should give us something thread-safe that can at // least take notes. 
&mut Timer::throwaway(), ) }); timer.start_iter("finalize DrawLanes", almost_lanes.len()); let mut lanes: Vec<DrawLane> = Vec::new(); for almost in almost_lanes { timer.next(); lanes.push(almost.finish(ctx.prerender)); } let mut intersections: Vec<DrawIntersection> = Vec::new(); timer.start_iter("make DrawIntersections", map.all_intersections().len()); for i in map.all_intersections() { timer.next(); intersections.push(DrawIntersection::new(i, map, cs, ctx.prerender, timer)); } timer.start("generate unzoomed intersections"); let mut intersection_refs: Vec<&Intersection> = map.all_intersections().iter().collect(); intersection_refs.sort_by_key(|i| i.get_zorder(map)); let mut all_intersections = GeomBatch::new(); for i in intersection_refs { // TODO Would be neat to show closed intersections here, but then edits need to // regenerate this if i.is_stop_sign() { all_intersections.push(osm_rank_to_color(cs, i.get_rank(map)), i.polygon.clone()); if false { all_intersections.push( cs.get("unzoomed outline"), intersections[i.id.0].get_outline(map), ); } } else { all_intersections.push( cs.get_def("unzoomed interesting intersection", Color::BLACK), i.polygon.clone(), ); } } let draw_all_unzoomed_intersections = all_intersections.upload(ctx); timer.stop("generate unzoomed intersections"); let mut buildings: Vec<DrawBuilding> = Vec::new(); let mut all_buildings = GeomBatch::new(); timer.start_iter("make DrawBuildings", map.all_buildings().len()); for b in map.all_buildings() { timer.next(); buildings.push(DrawBuilding::new(b, cs, &mut all_buildings)); } timer.start("upload all buildings"); let draw_all_buildings = all_buildings.upload(ctx); timer.stop("upload all buildings"); let mut extra_shapes: Vec<DrawExtraShape> = Vec::new(); if let Some(ref path) = flags.kml { let raw_shapes = if path.ends_with(".kml") { kml::load(&path, &map.get_gps_bounds(), timer) .expect("Couldn't load extra KML shapes") .shapes } else { let shapes: kml::ExtraShapes = 
abstutil::read_binary(path.clone(), timer); shapes.shapes }; let mut closest: FindClosest<DirectedRoadID> = FindClosest::new(&map.get_bounds()); for r in map.all_roads().iter() { closest.add( r.id.forwards(), r.center_pts.shift_right(LANE_THICKNESS).get(timer).points(), ); closest.add( r.id.backwards(), r.center_pts.shift_left(LANE_THICKNESS).get(timer).points(), ); } let gps_bounds = map.get_gps_bounds(); for s in raw_shapes.into_iter() { if let Some(es) = DrawExtraShape::new( ExtraShapeID(extra_shapes.len()), s, gps_bounds, &closest, ctx.prerender, cs, ) { extra_shapes.push(es); } } } timer.start_iter("make DrawBusStop", map.all_bus_stops().len()); let mut bus_stops: HashMap<BusStopID, DrawBusStop> = HashMap::new(); for s in map.all_bus_stops().values() { timer.next(); bus_stops.insert(s.id, DrawBusStop::new(s, map, cs, ctx.prerender)); } let mut areas: Vec<DrawArea> = Vec::new(); let mut all_areas = GeomBatch::new(); timer.start_iter("make DrawAreas", map.all_areas().len()); for a in map.all_areas() { timer.next(); areas.push(DrawArea::new(a, cs, &mut all_areas)); } timer.start("upload all areas"); let draw_all_areas = all_areas.upload(ctx); timer.stop("upload all areas"); let boundary_polygon = ctx.prerender.upload_borrowed(vec![( cs.get_def("map background", Color::grey(0.87)), map.get_boundary_polygon(), )]); timer.start("create quadtree"); let mut quadtree = QuadTree::default(map.get_bounds().as_bbox()); // TODO use iter chain if everything was boxed as a renderable... 
for obj in &roads { quadtree.insert_with_box(obj.get_id(), obj.get_outline(map).get_bounds().as_bbox()); } for obj in &lanes { quadtree.insert_with_box(obj.get_id(), obj.get_outline(map).get_bounds().as_bbox()); } for obj in &intersections { quadtree.insert_with_box(obj.get_id(), obj.get_outline(map).get_bounds().as_bbox()); } for obj in &buildings { quadtree.insert_with_box(obj.get_id(), obj.get_outline(map).get_bounds().as_bbox()); } for obj in &extra_shapes { quadtree.insert_with_box(obj.get_id(), obj.get_outline(map).get_bounds().as_bbox()); } // Don't put BusStops in the quadtree for obj in &areas { quadtree.insert_with_box(obj.get_id(), obj.get_outline(map).get_bounds().as_bbox()); } timer.stop("create quadtree"); timer.note(format!( "static DrawMap consumes {} MB on the GPU", abstutil::prettyprint_usize(ctx.prerender.get_total_bytes_uploaded() / 1024 / 1024) )); DrawMap { roads, lanes, intersections, buildings, extra_shapes, bus_stops, areas, boundary_polygon, draw_all_thick_roads, draw_all_unzoomed_intersections, draw_all_buildings, draw_all_areas, agents: RefCell::new(AgentCache { time: None, agents_per_on: HashMap::new(), unzoomed: None, }), quadtree, } } // The alt to these is implementing std::ops::Index, but that's way more verbose! 
pub fn get_r(&self, id: RoadID) -> &DrawRoad { &self.roads[id.0] } pub fn get_l(&self, id: LaneID) -> &DrawLane { &self.lanes[id.0] } pub fn get_i(&self, id: IntersectionID) -> &DrawIntersection { &self.intersections[id.0] } pub fn get_b(&self, id: BuildingID) -> &DrawBuilding { &self.buildings[id.0] } pub fn get_es(&self, id: ExtraShapeID) -> &DrawExtraShape { &self.extra_shapes[id.0] } pub fn get_bs(&self, id: BusStopID) -> &DrawBusStop { &self.bus_stops[&id] } pub fn get_a(&self, id: AreaID) -> &DrawArea { &self.areas[id.0] } pub fn get_obj<'a>( &'a self, id: ID, ui: &UI, agents: &'a mut AgentCache, prerender: &Prerender, ) -> Option<&'a dyn Renderable> { let on = match id { ID::Road(id) => { return Some(self.get_r(id)); } ID::Lane(id) => { return Some(self.get_l(id)); } ID::Intersection(id) => { return Some(self.get_i(id)); } ID::Turn(_) => unreachable!(), ID::Building(id) => { return Some(self.get_b(id)); } ID::Car(id) => ui.primary.sim.get_draw_car(id, &ui.primary.map).unwrap().on, ID::Pedestrian(id) => ui.primary.sim.get_draw_ped(id, &ui.primary.map).unwrap().on, ID::PedCrowd(ref members) => { // If the first member has vanished, just give up ui.primary.sim.get_draw_ped(members[0], &ui.primary.map)?.on } ID::ExtraShape(id) => { return Some(self.get_es(id)); } ID::BusStop(id) => { return Some(self.get_bs(id)); } ID::Area(id) => { return Some(self.get_a(id)); } ID::Trip(_) => unreachable!(), }; agents.populate_if_needed(on, &ui.primary.map, &ui.primary.sim, &ui.cs, prerender); // Why might this fail? Pedestrians merge into crowds, and crowds dissipate into // individuals agents.get(on).into_iter().find(|r| r.get_id() == id) } // Unsorted, unexpanded, raw result. pub fn get_matching_objects(&self, bounds: Bounds) -> Vec<ID> { let mut results: Vec<ID> = Vec::new(); for &(id, _, _) in &self.quadtree.query(bounds.as_bbox()) { results.push(id.clone()); } results } } pub struct AgentCache { // This time applies to agents_per_on. 
unzoomed has its own possibly separate Time! time: Option<Time>, agents_per_on: HashMap<Traversable, Vec<Box<dyn Renderable>>>, // cam_zoom and agent radius also matters unzoomed: Option<(Time, f64, Distance, AgentColorScheme, Drawable)>, } impl AgentCache { pub fn get(&self, on: Traversable) -> Vec<&dyn Renderable> { self.agents_per_on[&on] .iter() .map(|obj| obj.borrow()) .collect() } pub fn populate_if_needed( &mut self, on: Traversable, map: &Map, source: &dyn GetDrawAgents, cs: &ColorScheme, prerender: &Prerender, ) { let now = source.time(); if Some(now) == self.time && self.agents_per_on.contains_key(&on) { return; } let step_count = source.step_count(); let mut list: Vec<Box<dyn Renderable>> = Vec::new(); for c in source.get_draw_cars(on, map).into_iter() { list.push(draw_vehicle(c, map, prerender, cs)); } let (loners, crowds) = source.get_draw_peds(on, map); for p in loners { list.push(Box::new(DrawPedestrian::new( p, step_count, map, prerender, cs, ))); } for c in crowds { list.push(Box::new(DrawPedCrowd::new(c, map, prerender, cs))); } if Some(now) != self.time { self.agents_per_on.clear(); self.time = Some(now); } self.agents_per_on.insert(on, list); } // TODO GetDrawAgents indirection added for time traveling, but that's been removed. Maybe // simplify this. pub fn draw_unzoomed_agents( &mut self, source: &dyn GetDrawAgents, map: &Map, acs: &AgentColorScheme, g: &mut GfxCtx, cam_zoom: f64, radius: Distance, ) { let now = source.time(); if let Some((time, z, r, ref orig_acs, ref draw)) = self.unzoomed { if cam_zoom == z && now == time && radius == r && acs == orig_acs { g.redraw(draw); return; } } // TODO The perf is a little slow compared to when we just returned a bunch of Pt2Ds // without the extra data. Try plumbing a callback that directly populates batch. 
let mut batch = GeomBatch::new(); for agent in source.get_unzoomed_agents(map) { if let Some(color) = acs.color(&agent) { batch.push( color, Circle::new(agent.pos, radius / cam_zoom).to_polygon(), ); } } let draw = g.upload(batch); g.redraw(&draw); self.unzoomed = Some((now, cam_zoom, radius, acs.clone(), draw)); } } fn osm_rank_to_color(cs: &ColorScheme, rank: usize) -> Color { if rank >= 16 { cs.get_def("unzoomed highway road", Color::rgb(232, 146, 162)) } else if rank >= 6 { cs.get_def("unzoomed arterial road", Color::rgb(255, 199, 62)) } else { cs.get_def("unzoomed residential road", Color::WHITE) } } // TODO ETA till goal... #[derive(Clone, Copy, PartialEq)] pub enum InnerAgentColorScheme { VehicleTypes, Delay, TripTimeSoFar, DistanceCrossedSoFar, } impl InnerAgentColorScheme { fn data(self, cs: &ColorScheme) -> (&str, Vec<(&str, Color)>) { match self { InnerAgentColorScheme::VehicleTypes => ( "vehicle types", vec![ ("car", cs.get_def("unzoomed car", Color::RED.alpha(0.5))), ("bike", cs.get_def("unzoomed bike", Color::GREEN.alpha(0.5))), ("bus", cs.get_def("unzoomed bus", Color::BLUE.alpha(0.5))), ( "pedestrian", cs.get_def("unzoomed pedestrian", Color::ORANGE.alpha(0.5)), ), ], ), InnerAgentColorScheme::Delay => ( "time spent delayed/blocked", vec![ ("<= 1 minute", Color::BLUE.alpha(0.3)), ("<= 5 minutes", Color::ORANGE.alpha(0.5)), ("> 5 minutes", Color::RED.alpha(0.8)), ], ), InnerAgentColorScheme::TripTimeSoFar => ( "trip time so far", vec![ ("<= 1 minute", Color::BLUE.alpha(0.3)), ("<= 5 minutes", Color::ORANGE.alpha(0.5)), ("> 5 minutes", Color::RED.alpha(0.8)), ], ), InnerAgentColorScheme::DistanceCrossedSoFar => ( "distance crossed to goal so far", vec![ ("<= 10%", rotating_color(0)), ("<= 20%", rotating_color(1)), ("<= 30%", rotating_color(2)), ("<= 40%", rotating_color(3)), ("<= 50%", rotating_color(4)), ("<= 60%", rotating_color(5)), ("<= 70%", rotating_color(6)), ("<= 80%", rotating_color(7)), ("<= 90%", rotating_color(8)), ("> 90%", 
rotating_color(9)), ], ), } } fn classify(self, agent: &UnzoomedAgent) -> String { match self { InnerAgentColorScheme::VehicleTypes => match agent.vehicle_type { Some(VehicleType::Car) => "car".to_string(), Some(VehicleType::Bike) => "bike".to_string(), Some(VehicleType::Bus) => "bus".to_string(), None => "pedestrian".to_string(), }, InnerAgentColorScheme::Delay => classify_delay(agent.metadata.time_spent_blocked), InnerAgentColorScheme::TripTimeSoFar => classify_delay(agent.metadata.trip_time_so_far), InnerAgentColorScheme::DistanceCrossedSoFar => { classify_percent(agent.metadata.percent_dist_crossed) } } } } #[derive(Clone, PartialEq)] pub struct AgentColorScheme { pub acs: InnerAgentColorScheme, pub title: String, pub rows: Vec<(String, Color, bool)>, } impl Cloneable for AgentColorScheme {} impl AgentColorScheme { pub fn new(acs: InnerAgentColorScheme, cs: &ColorScheme) -> AgentColorScheme { let (title, rows) = acs.data(cs); AgentColorScheme { acs, title: title.to_string(), rows: rows .into_iter() .map(|(name, color)| (name.to_string(), color, true)) .collect(), } } pub fn default(cs: &ColorScheme) -> AgentColorScheme { AgentColorScheme::new(InnerAgentColorScheme::VehicleTypes, cs) } pub fn all(cs: &ColorScheme) -> Vec<(AgentColorScheme, String)> { vec![ InnerAgentColorScheme::VehicleTypes, InnerAgentColorScheme::Delay, InnerAgentColorScheme::TripTimeSoFar, InnerAgentColorScheme::DistanceCrossedSoFar, ] .into_iter() .map(|acs| { let x = AgentColorScheme::new(acs, cs); let title = x.title.clone(); (x, title) }) .collect() } pub fn toggle(&mut self, name: String) { for (n, _, enabled) in &mut self.rows { if &name == n { *enabled = !*enabled; return; } } panic!("Can't toggle category {}", name); } fn color(&self, agent: &UnzoomedAgent) -> Option<Color> { let category = self.acs.classify(agent); for (name, color, enabled) in &self.rows { if name == &category { if *enabled { return Some(*color); } return None; } } panic!("Unknown AgentColorScheme category {}", 
category); } } fn classify_delay(delay: Duration) -> String { if delay <= Duration::minutes(1) { return "<= 1 minute".to_string(); } if delay <= Duration::minutes(5) { return "<= 5 minutes".to_string(); } "> 5 minutes".to_string() } fn classify_percent(percent: f64) -> String { if percent > 0.9 { return "> 90%".to_string(); } if percent <= 0.1 { return "<= 10%".to_string(); } format!("<= {}%", ((percent * 10.0).round() as usize) * 10) }
34.676127
100
0.532329
bb75a058314b5750e05e857270bb9962965385d5
2,606
#![cfg(all(feature = "serde", feature = "std"))] extern crate std; use chrono::{DateTime, Utc}; use serde_incl::de::DeserializeOwned; use std::collections::HashMap; use std::prelude::rust_2021::*; use uuid::Uuid; #[derive(serde_derive::Serialize, serde_derive::Deserialize, PartialEq, Debug)] #[serde(crate = "serde_incl")] pub struct MyStruct { name: String, } #[derive(serde_derive::Serialize, serde_derive::Deserialize, PartialEq, Debug)] #[serde(crate = "serde_incl")] pub struct CustomerTest { pub id: Option<Uuid>, pub email_address: Option<String>, pub is_active: Option<bool>, pub date_stamp: Option<DateTime<Utc>>, } #[test] fn test() { let test = MyStruct { name: "Test Value".into(), }; let cache_id = Uuid::nil(); let cache = MemCache::default(); cache.set_data::<MyStruct>(&cache_id, &test, 5).unwrap(); let model = cache.get_data::<MyStruct>(&cache_id).unwrap(); assert_eq!(test, model); let test = CustomerTest { id: Some(Uuid::nil()), email_address: Some("foo@bar".into()), is_active: None, date_stamp: Some(Utc::now()), }; cache.set_data::<CustomerTest>(&cache_id, &test, 5).unwrap(); let model = cache.get_data::<CustomerTest>(&cache_id).unwrap(); assert_eq!(test, model); } #[derive(Default)] struct MemCache { cache: std::sync::RwLock<HashMap<Uuid, CacheItem>>, } impl MemCache { fn set_data<T>( &self, key: &Uuid, cache_data: &T, expire_seconds: i64, ) -> Result<(), bincode::error::EncodeError> where T: Send + Sync + serde_incl::Serialize, { let config = bincode::config::standard().write_fixed_array_length(); let mut guard = self.cache.write().unwrap(); let encoded = bincode::serde::encode_to_vec(&cache_data, config)?; let cache_item = CacheItem::new(encoded, expire_seconds); guard.insert(key.clone(), cache_item); Ok(()) } fn get_data<T>(&self, key: &Uuid) -> Result<T, bincode::error::DecodeError> where T: Send + Sync + DeserializeOwned, { let config = bincode::config::standard().write_fixed_array_length(); let guard = self.cache.read().unwrap(); let cache_item = 
guard.get(key).unwrap(); let (decoded, _len): (T, usize) = bincode::serde::decode_from_slice(&cache_item.payload[..], config)?; Ok(decoded) } } struct CacheItem { payload: Vec<u8>, } impl CacheItem { fn new(payload: Vec<u8>, _expire_seconds: i64) -> Self { Self { payload } } }
26.865979
80
0.618573
1cf0b8356edb48c0779b8f0d28e5ba8c5d341401
6,980
use std::time::Instant; use ike_core::*; use ike_input::{Input, Mouse, TextInput}; use ike_render::*; use winit::{ event::{DeviceEvent, ElementState, Event, KeyboardInput, VirtualKeyCode, WindowEvent}, event_loop::{ControlFlow, EventLoop}, window::Window, }; pub type Key = VirtualKeyCode; pub use winit::event::MouseButton; pub struct WinitRunner; impl AppRunner for WinitRunner { #[inline] fn run(&mut self, mut app: App) { let event_loop = EventLoop::new(); let window = Window::new(&event_loop).unwrap(); let (render_ctx, render_surface) = pollster::block_on(unsafe { wgpu_init(&window) }).unwrap(); let window = crate::Window::from_raw(window); app.world_mut().insert_resource(render_surface); app.world_mut().insert_resource(window); app.world_mut().init_resource::<TextInput>(); app.world_mut().init_resource::<Time>(); app.world_mut().insert_resource(Input::<Key>::default()); app.world_mut() .insert_resource(Input::<MouseButton>::default()); app.world_mut().insert_resource(Mouse::default()); set_render_ctx(render_ctx); app.execute_startup(); let mut last_frame = Instant::now(); event_loop.run(move |event, _, control_flow| match event { Event::RedrawRequested(_) => { let now = Instant::now(); let frame_time = now - last_frame; last_frame = now; app.world() .write_resource::<Time>() .unwrap() .advance_frame(frame_time.as_secs_f32()); app.update_components(); app.execute(); app.world_mut().clear_trackers(); app.world().write_resource::<TextInput>().unwrap().0.clear(); app.world().write_resource::<Input<Key>>().unwrap().update(); app.world() .write_resource::<Input<MouseButton>>() .unwrap() .update(); let mut mouse = app.world().write_resource::<Mouse>().unwrap(); mouse.update(); let window = app.world().read_resource::<crate::Window>().unwrap(); window.get_raw().set_cursor_visible(mouse.visible); window.get_raw().set_cursor_grab(mouse.grabbed).unwrap(); } Event::MainEventsCleared => { let window = app.world().read_resource::<crate::Window>().unwrap(); 
window.get_raw().request_redraw(); } Event::DeviceEvent { event, .. } => match event { DeviceEvent::MouseMotion { delta: (dx, dy) } => { let mut mouse = app.world_mut().write_resource::<Mouse>().unwrap(); mouse.movement.x += dx as f32; mouse.movement.y += dy as f32; } _ => {} }, Event::WindowEvent { event, .. } => match event { WindowEvent::CloseRequested => { *control_flow = ControlFlow::Exit; } WindowEvent::Resized(size) | WindowEvent::ScaleFactorChanged { new_inner_size: &mut size, .. } => { let mut render_surface = app.world().write_resource::<RenderSurface>().unwrap(); render_surface.configure().width = size.width; render_surface.configure().height = size.height; } WindowEvent::KeyboardInput { input: KeyboardInput { virtual_keycode: Some(key), state, .. }, .. } => { let mut input = app.world_mut().write_resource::<Input<Key>>().unwrap(); match state { ElementState::Pressed => { input.press(key); } ElementState::Released => { input.release(key); } } } WindowEvent::MouseInput { button, state, .. } => { let mut input = app .world_mut() .write_resource::<Input<MouseButton>>() .unwrap(); match state { ElementState::Pressed => { input.press(button); } ElementState::Released => { input.release(button); } } } WindowEvent::CursorMoved { position, .. 
} => { let mut mouse = app.world_mut().write_resource::<Mouse>().unwrap(); mouse.position.x = position.x as f32; mouse.position.y = position.y as f32; } WindowEvent::ReceivedCharacter(c) => { app.world().write_resource::<TextInput>().unwrap().0.push(c); } _ => {} }, _ => {} }); } } async unsafe fn wgpu_init( window: &winit::window::Window, ) -> anyhow::Result<(RenderCtx, RenderSurface)> { let instance = ::wgpu::Instance::new(wgpu::Backends::PRIMARY); let surface = unsafe { instance.create_surface(window) }; let adapter = instance .request_adapter(&wgpu::RequestAdapterOptions { force_fallback_adapter: false, power_preference: wgpu::PowerPreference::HighPerformance, compatible_surface: Some(&surface), }) .await .unwrap(); let (device, queue) = adapter .request_device( &wgpu::DeviceDescriptor { label: Some("main device"), features: wgpu::Features::empty(), limits: wgpu::Limits { max_texture_dimension_2d: 16_384, ..Default::default() }, }, None, ) .await?; let size = window.inner_size(); let config = wgpu::SurfaceConfiguration { width: size.width, height: size.height, format: surface.get_preferred_format(&adapter).unwrap(), present_mode: wgpu::PresentMode::Immediate, usage: wgpu::TextureUsages::RENDER_ATTACHMENT, }; surface.configure(&device, &config); Ok(( RenderCtx { device: ike_wgpu::Device::new(device), queue: ike_wgpu::Queue::new(queue), }, RenderSurface::new(ike_wgpu::Surface::new(surface), config), )) }
34.215686
100
0.482092
abb2a4f26b7a78c201026aabad27ee42c2e37c4b
2,828
use std::{ fs, path::{Path, PathBuf}, str::FromStr, }; use abscissa_core::{Command, Options, Runnable}; use ibc::ics24_host::identifier::ChainId; use ibc_relayer::{ config::{ChainConfig, Config}, keyring::{HDPath, KeyEntry, KeyRing, Store}, }; use crate::application::app_config; use crate::conclude::Output; #[derive(Clone, Command, Debug, Options)] pub struct KeysAddCmd { #[options(free, required, help = "identifier of the chain")] chain_id: ChainId, #[options(short = "f", required, help = "path to the key file")] file: PathBuf, #[options( short = "n", help = "name of the key (defaults to the `key_name` defined in the config)" )] name: Option<String>, #[options( short = "p", help = "derivation path for this key", default = "m/44'/118'/0'/0/0" )] hd_path: String, } impl KeysAddCmd { fn options(&self, config: &Config) -> Result<KeysAddOptions, Box<dyn std::error::Error>> { let chain_config = config .find_chain(&self.chain_id) .ok_or_else(|| format!("chain '{}' not found in configuration file", self.chain_id))?; let name = self .name .clone() .unwrap_or_else(|| chain_config.key_name.clone()); let hd_path = HDPath::from_str(&self.hd_path) .map_err(|_| format!("invalid derivation path: {}", self.hd_path))?; Ok(KeysAddOptions { config: chain_config.clone(), file: self.file.clone(), name, hd_path, }) } } #[derive(Clone, Debug)] pub struct KeysAddOptions { pub name: String, pub config: ChainConfig, pub file: PathBuf, pub hd_path: HDPath, } impl Runnable for KeysAddCmd { fn run(&self) { let config = app_config(); let opts = match self.options(&config) { Err(err) => return Output::error(err).exit(), Ok(result) => result, }; let key = add_key(&opts.config, &opts.name, &opts.file, &opts.hd_path); match key { Ok(key) => Output::success_msg(format!( "Added key '{}' ({}) on chain {}", opts.name, key.account, opts.config.id )) .exit(), Err(e) => Output::error(format!("{}", e)).exit(), } } } pub fn add_key( config: &ChainConfig, key_name: &str, file: &Path, hd_path: &HDPath, ) -> 
Result<KeyEntry, Box<dyn std::error::Error>> { let mut keyring = KeyRing::new(Store::Test, &config.account_prefix, &config.id)?; let key_contents = fs::read_to_string(file).map_err(|_| "error reading the key file")?; let key = keyring.key_from_seed_file(&key_contents, hd_path)?; keyring.add_key(key_name, key.clone())?; Ok(key) }
26.429907
98
0.572489
de4e14f7ecb4af932e3013cfded053da5a6747e6
16,202
mod kprobe; mod perf_event; mod perf_event_array; mod raw_tracepoint; mod tracepoint; mod uprobe; use bcc_sys::bccapi::*; pub(crate) use self::kprobe::Kprobe; pub(crate) use self::perf_event::PerfEvent; pub(crate) use self::perf_event_array::PerfEventArray; pub(crate) use self::raw_tracepoint::RawTracepoint; pub(crate) use self::tracepoint::Tracepoint; pub(crate) use self::uprobe::Uprobe; use crate::perf_event::PerfReader; use crate::symbol::SymbolCache; use crate::table::Table; use crate::BccError; use core::ffi::c_void; use core::sync::atomic::{AtomicPtr, Ordering}; use std::collections::{HashMap, HashSet}; use std::convert::TryInto; use std::ffi::CString; use std::fs::File; use std::ops::Drop; use std::os::raw::c_char; use std::os::unix::prelude::*; use std::ptr; const SYSCALL_PREFIXES: [&str; 7] = [ "sys_", "__x64_sys_", "__x32_compat_sys_", "__ia32_compat_sys_", "__arm64_sys_", "__s390x_sys_", "__s390_sys_", ]; #[derive(Debug)] /// The `BPF` struct contains the compiled BPF code, any probes or programs that /// have been attached, and can provide access to a userspace view of the /// results of the running BPF programs. 
pub struct BPF { p: AtomicPtr<c_void>, pub(crate) kprobes: HashSet<Kprobe>, pub(crate) uprobes: HashSet<Uprobe>, pub(crate) tracepoints: HashSet<Tracepoint>, pub(crate) raw_tracepoints: HashSet<RawTracepoint>, pub(crate) perf_events: HashSet<PerfEvent>, pub(crate) perf_events_array: HashSet<PerfEventArray>, perf_readers: Vec<PerfReader>, sym_caches: HashMap<pid_t, SymbolCache>, cflags: Vec<CString>, } // helper function that converts non-alphanumeric characters to underscores pub(crate) fn make_alphanumeric(s: &str) -> String { s.replace( |c| !((c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')), "_", ) } // create a mutable pointer from a vector of bytes fn null_or_mut_ptr<T>(s: &mut Vec<u8>) -> *mut T { if s.capacity() == 0 { ptr::null_mut() } else { s.as_mut_ptr() as *mut T } } /// A builder struct which allows one to initialize a BPF module with additional /// options. pub struct BPFBuilder { code: CString, cflags: Vec<CString>, } impl BPFBuilder { /// Create a new builder with the given code pub fn new(code: &str) -> Result<Self, BccError> { let code = CString::new(code)?; Ok(Self { code, cflags: Vec::new(), }) } /// Set CFLAGS to be used pub fn cflags<T: AsRef<str>>(mut self, cflags: &[T]) -> Result<Self, BccError> { self.cflags.clear(); for f in cflags { let cs = CString::new(f.as_ref())?; self.cflags.push(cs); } Ok(self) } #[cfg(any( feature = "v0_4_0", feature = "v0_5_0", feature = "v0_6_0", feature = "v0_6_1", feature = "v0_7_0", feature = "v0_8_0", ))] /// Try constructing a BPF module from the builder pub fn build(self) -> Result<BPF, BccError> { let cflags_ptr = if self.cflags.is_empty() { ptr::null_mut() } else { self.cflags.as_ptr() as *mut *const c_char }; let ptr = unsafe { bpf_module_create_c_from_string( self.code.as_ptr(), 2, cflags_ptr, self.cflags.len().try_into().unwrap(), ) }; if ptr.is_null() { return Err(BccError::Compilation); } Ok(BPF { p: AtomicPtr::new(ptr), uprobes: HashSet::new(), kprobes: HashSet::new(), 
tracepoints: HashSet::new(), raw_tracepoints: HashSet::new(), perf_events: HashSet::new(), perf_events_array: HashSet::new(), perf_readers: Vec::new(), sym_caches: HashMap::new(), cflags: self.cflags, }) } // 0.9.0 changes the API for bpf_module_create_c_from_string() #[cfg(any(feature = "v0_9_0", feature = "v0_10_0"))] /// Try constructing a BPF module from the builder pub fn build(self) -> Result<BPF, BccError> { let cflags_ptr = if self.cflags.is_empty() { ptr::null_mut() } else { self.cflags.as_ptr() as *mut *const c_char }; let ptr = unsafe { bpf_module_create_c_from_string( self.code.as_ptr(), 2, cflags_ptr, self.cflags.len().try_into().unwrap(), true, ) }; if ptr.is_null() { return Err(BccError::Compilation); } Ok(BPF { p: AtomicPtr::new(ptr), uprobes: HashSet::new(), kprobes: HashSet::new(), tracepoints: HashSet::new(), raw_tracepoints: HashSet::new(), perf_events: HashSet::new(), perf_events_array: HashSet::new(), perf_readers: Vec::new(), sym_caches: HashMap::new(), cflags: self.cflags, }) } // 0.11.0 changes the API for bpf_module_create_c_from_string() #[cfg(any( feature = "v0_11_0", feature = "v0_12_0", feature = "v0_13_0", feature = "v0_14_0", feature = "v0_15_0", not(feature = "specific"), ))] /// Try constructing a BPF module from the builder pub fn build(self) -> Result<BPF, BccError> { let cflags_ptr = if self.cflags.is_empty() { ptr::null_mut() } else { self.cflags.as_ptr() as *mut *const c_char }; let ptr = unsafe { bpf_module_create_c_from_string( self.code.as_ptr(), 2, cflags_ptr, self.cflags.len().try_into().unwrap(), true, ptr::null_mut(), ) }; if ptr.is_null() { return Err(BccError::Compilation); } Ok(BPF { p: AtomicPtr::new(ptr), uprobes: HashSet::new(), kprobes: HashSet::new(), tracepoints: HashSet::new(), raw_tracepoints: HashSet::new(), perf_events: HashSet::new(), perf_events_array: HashSet::new(), perf_readers: Vec::new(), sym_caches: HashMap::new(), cflags: self.cflags, }) } } impl BPF { #[cfg(any( feature = "v0_4_0", feature = 
"v0_5_0", feature = "v0_6_0", feature = "v0_6_1", feature = "v0_7_0", feature = "v0_8_0", ))] /// `code` is a string containing C code. See https://github.com/iovisor/bcc for examples pub fn new(code: &str) -> Result<BPF, BccError> { BPFBuilder::new(code)?.build() } // 0.9.0 changes the API for bpf_module_create_c_from_string() #[cfg(any(feature = "v0_9_0", feature = "v0_10_0",))] /// `code` is a string containing C code. See https://github.com/iovisor/bcc for examples pub fn new(code: &str) -> Result<BPF, BccError> { BPFBuilder::new(code)?.build() } // 0.11.0 changes the API for bpf_module_create_c_from_string() #[cfg(any( feature = "v0_11_0", feature = "v0_12_0", feature = "v0_13_0", feature = "v0_14_0", feature = "v0_15_0", not(feature = "specific"), ))] /// `code` is a string containing C code. See https://github.com/iovisor/bcc for examples pub fn new(code: &str) -> Result<BPF, BccError> { BPFBuilder::new(code)?.build() } // get access to the interal pointer for the bpf module fn ptr(&self) -> *mut c_void { self.p.load(Ordering::SeqCst) } /// Get access to a named table within the running BPF program. 
pub fn table(&self, name: &str) -> Table { // TODO: clean up this unwrap (and all the rest in this file) let cname = CString::new(name).unwrap(); let id = unsafe { bpf_table_id(self.ptr(), cname.as_ptr()) }; Table::new(id, self.ptr()) } // Get the table file descriptor pub(crate) fn table_fd(&self, name: &str) -> i32 { let cname = CString::new(name).unwrap(); unsafe { bpf_table_fd(self.ptr(), cname.as_ptr()) } } /// Load a network traffic-control action which has the provided name within /// the BPF program pub fn load_net(&mut self, name: &str) -> Result<File, BccError> { self.load(name, bpf_prog_type_BPF_PROG_TYPE_SCHED_ACT, 0, 0) } #[cfg(feature = "v0_4_0")] /// load the named BPF program from within the compiled BPF code pub fn load( &mut self, name: &str, prog_type: u32, _log_level: i32, log_size: u32, ) -> Result<File, BccError> { let cname = CString::new(name).unwrap(); unsafe { let start: *mut bpf_insn = bpf_function_start(self.ptr(), cname.as_ptr()) as *mut bpf_insn; let size = bpf_function_size(self.ptr(), cname.as_ptr()) as i32; let license = bpf_module_license(self.ptr()); let version = bpf_module_kern_version(self.ptr()); if start.is_null() { return Err(BccError::Loading { name: name.to_string(), }); } let mut log_buf: Vec<u8> = Vec::with_capacity(log_size as usize); // TODO: we're ignoring any changes bpf_prog_load made to log_buf right now // We should instead do something with this log buffer (I'm not clear on what it's for // yet though) let fd = bpf_prog_load( prog_type, start, size, license, version, null_or_mut_ptr(&mut log_buf), log_buf.capacity() as u32, ); if fd < 0 { return Err(BccError::Loading { name: name.to_string(), }); } Ok(File::from_raw_fd(fd)) } } #[cfg(any( feature = "v0_5_0", feature = "v0_6_0", feature = "v0_6_1", feature = "v0_7_0", feature = "v0_8_0" ))] /// load the named BPF program from within the compiled BPF code pub fn load( &mut self, name: &str, prog_type: u32, log_level: i32, log_size: u32, ) -> Result<File, BccError> 
{ let cname = CString::new(name).unwrap(); unsafe { let start: *mut bpf_insn = bpf_function_start(self.ptr(), cname.as_ptr()) as *mut bpf_insn; let size = bpf_function_size(self.ptr(), cname.as_ptr()) as i32; let license = bpf_module_license(self.ptr()); let version = bpf_module_kern_version(self.ptr()); if start.is_null() { return Err(BccError::Loading { name: name.to_string(), }); } let mut log_buf: Vec<u8> = Vec::with_capacity(log_size as usize); // TODO: we're ignoring any changes bpf_prog_load made to log_buf right now // We should instead do something with this log buffer (I'm not clear on what it's for // yet though) let fd = bpf_prog_load( prog_type, cname.as_ptr(), start, size, license, version, log_level, null_or_mut_ptr(&mut log_buf), log_buf.capacity() as u32, ); if fd < 0 { return Err(BccError::Loading { name: name.to_string(), }); } Ok(File::from_raw_fd(fd)) } } #[cfg(any( feature = "v0_9_0", feature = "v0_10_0", feature = "v0_11_0", feature = "v0_12_0", feature = "v0_13_0", feature = "v0_14_0", feature = "v0_15_0", not(feature = "specific"), ))] /// load the named BPF program from within the compiled BPF code pub fn load( &mut self, name: &str, prog_type: u32, log_level: i32, log_size: u32, ) -> Result<File, BccError> { let cname = CString::new(name).unwrap(); unsafe { let start: *mut bpf_insn = bpf_function_start(self.ptr(), cname.as_ptr()) as *mut bpf_insn; let size = bpf_function_size(self.ptr(), cname.as_ptr()) as i32; let license = bpf_module_license(self.ptr()); let version = bpf_module_kern_version(self.ptr()); if start.is_null() { return Err(BccError::Loading { name: name.to_string(), }); } let mut log_buf: Vec<u8> = Vec::with_capacity(log_size as usize); // TODO: we're ignoring any changes bpf_prog_load made to log_buf right now // We should instead do something with this log buffer (I'm not clear on what it's for // yet though) let fd = bcc_prog_load( prog_type, cname.as_ptr(), start, size, license, version, log_level, null_or_mut_ptr(&mut 
log_buf), log_buf.capacity() as u32, ); if fd < 0 { return Err(BccError::Loading { name: name.to_string(), }); } Ok(File::from_raw_fd(fd)) } } /// Returns the syscall prefix for the running kernel pub fn get_syscall_prefix(&mut self) -> String { for prefix in SYSCALL_PREFIXES.iter() { if self.ksymname(prefix).is_ok() { return (*prefix).to_string(); } } SYSCALL_PREFIXES[0].to_string() } /// Converts a syscall function name to a fully-qualified function name pub fn get_syscall_fnname(&mut self, name: &str) -> String { self.get_syscall_prefix() + name } /// Returns a list of kernel functions matching a provided regular /// expression pub fn get_kprobe_functions(&mut self, event_re: &str) -> Result<Vec<String>, BccError> { crate::kprobe::get_kprobe_functions(event_re) } /// Resulves the name to a kernel symbol pub fn ksymname(&mut self, name: &str) -> Result<u64, BccError> { self.sym_caches .entry(-1) .or_insert_with(|| SymbolCache::new(-1)); let cache = self.sym_caches.get(&-1).unwrap(); cache.resolve_name("", name) } #[cfg(any( feature = "v0_6_0", feature = "v0_6_1", feature = "v0_7_0", feature = "v0_8_0", feature = "v0_9_0", feature = "v0_10_0", feature = "v0_11_0", feature = "v0_12_0", feature = "v0_13_0", feature = "v0_14_0", feature = "v0_15_0", not(feature = "specific"), ))] /// Returns true if raw tracepoints are supported by the running kernel pub fn support_raw_tracepoint(&mut self) -> bool { self.ksymname("bpf_find_raw_tracepoint").is_ok() || self.ksymname("bpf_get_raw_tracepoint").is_ok() } pub fn init_perf_map<F>(&mut self, table: Table, cb: F) -> Result<(), BccError> where F: Fn() -> Box<dyn FnMut(&[u8]) + Send>, { let perf_map = crate::perf_event::init_perf_map(table, cb)?; self.perf_readers.extend(perf_map.readers); Ok(()) } pub fn perf_map_poll(&mut self, timeout: i32) { unsafe { perf_reader_poll( self.perf_readers.len() as i32, self.perf_readers.as_ptr() as *mut *mut perf_reader, timeout, ); }; } } impl Drop for BPF { fn drop(&mut self) { unsafe { 
bpf_module_destroy(self.ptr()); }; } }
31.338491
98
0.535489
bb80e1478027c76d2fae2a3df220dbb3cdc34cc7
42,390
//! Provides an implementation for [B-splines] that uses jets as control points. //! //! Finding control points that adheres some constraints is a non-trivial task. //! One approach is to use a numeric solver to approximate the solution. With jets we //! can calculate the derivatives with respect to the control points. This allows us to //! use very efficient numeric solvers (e.g. [`NewtonsMethod`](crate::newton::NewtonsMethod)). //! //! [B-splines]: https://en.wikipedia.org/wiki/B-spline use std::{cmp::Ordering, fmt::Display, iter::repeat}; use crate::{Dim, Infinitesimal, Jet, NoInfinitesimal, Number}; /// Error that occurred during [`BSpline`] construction. #[derive(Debug, Clone)] pub struct BSplineError<N: Number> { /// The reason why the construction failed. pub reason: BSplineErrorReason, /// The requested degree of the [`BSpline`]. pub degree: usize, /// The knots that were used to construct the [`BSpline`]. pub knots: Vec<N>, } /// The reason why [`BSplineError`] was returned. #[derive(Debug, Clone, PartialEq, Eq)] pub enum BSplineErrorReason { /// There were not enough knots for constructing the [`BSpline`] with the requested degree. NotEnoughKnots, /// The knots needs to be strictly increasing, but there is at least one knot that violates /// this property. NotStrictlyIncreasingKnots { /// The index of the knot that violates this property. 
wrong_knot_index: usize, }, } impl<N: Number> Display for BSplineError<N> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.reason { BSplineErrorReason::NotEnoughKnots => { write!( f, "A B-spline with degree {} needs at least {} knots, but {} knots were given", self.degree, min_knot_count(self.degree), self.knots.len(), ) } BSplineErrorReason::NotStrictlyIncreasingKnots { wrong_knot_index } => { write!( f, "The knots of the B-spline must be strictly increasing, but the knot at index {} is either smaller than or equal to its predecessor", wrong_knot_index, ) } } } } impl<N: Number, I: Infinitesimal<N>> Display for BSplineCurveError<N, I> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let knot_count = self.bspline.knot_count(); write!( f, "A B-spline curve with {} knots needs exactly {} control points, but {} control points were given", knot_count, necessary_control_point_count(self.bspline.degree, knot_count), self.control_points.len(), ) } } /// A [B-spline] that constructed from a set of knots. /// /// Creating a [`BSpline`] is the first step of creating a [`BSplineCurve`]. The knots define /// the borders of the polynomial function pieces. The next step is to choose control points /// to construct a [`BSplineCurve`] that has the desired properties (interpolated points, /// specific derivative, etc.). There are different strategies for choosing the right /// control points, though this library encourage you to use a numeric solver to find a solution. /// /// [B-spline]: https://en.wikipedia.org/wiki/B-spline #[derive(Debug, Clone)] pub struct BSpline<N: Number> { // The degree of the polynomial function pieces. degree: usize, // This consists of // - The original knots passed to the constructor and // - `degree` padding knots at the front and // - `degree` paddings knots at the end. // The paddings knots are necessary for De Boor's algorithm. 
We create them by simply // repeating the first and last original knot. padded_knots: Vec<N>, } impl<N: Number> BSpline<N> { /// Tries to create a B-spline. /// /// The degree parameters defines the maximal degree of the the polynomial function pieces /// and the knots define their borders. There must be at least `degree + 1` knots and /// they must be strictly increasing. pub fn new(degree: usize, knots: Vec<N>) -> Result<Self, BSplineError<N>> { let knots = knots.into_iter().collect::<Vec<_>>(); if knots.len() < min_knot_count(degree) { return Err(BSplineError { reason: BSplineErrorReason::NotEnoughKnots, degree, knots, }); } let strictly_increasing_violation = knots.windows(2).enumerate().find(|(_, ks)| { !matches!( PartialOrd::partial_cmp(&ks[0], &ks[1]), Some(Ordering::Less) ) }); if let Some((i, _)) = strictly_increasing_violation { return Err(BSplineError { reason: BSplineErrorReason::NotStrictlyIncreasingKnots { wrong_knot_index: i + 1, }, degree, knots, }); } let padded_knots = if knots.is_empty() { knots } else { let first = knots.first().unwrap(); let last = knots.last().unwrap(); let padding_init = repeat(first.clone()).take(degree); let padding_tail = repeat(last.clone()).take(degree); padding_init.chain(knots).chain(padding_tail).collect() }; Ok(Self { degree, padded_knots, }) } /// The degree of the polynomial function pieces of the B-spline. pub fn degree(&self) -> usize { self.degree } /// Returns the number of knots that were used to create the B-spline. pub fn knot_count(&self) -> usize { knot_count_without_padding(self.degree, &self.padded_knots) } /// The knots that were used to create the B-spline. pub fn knots(&self) -> &[N] { knots_without_padding(self.degree, &self.padded_knots) } /// The number of control points that are necessary for construction a [`BSplineCurve`]. /// /// The necessary number of control points is `knot_count + max(0, degree - 1)`. 
pub fn necessary_control_point_count(&self) -> usize { necessary_control_point_count(self.degree, self.knot_count()) } } /// Buffer for temporary values during the [`BSplineCurve`] evaluation. /// /// Exists only for performance reasons. #[derive(Debug, Clone)] pub struct BSplineCurveBuffer<N: Number, I: Infinitesimal<N>>(Vec<Jet<N, I>>); impl<N: Number, I: Infinitesimal<N>> BSplineCurveBuffer<N, I> { /// Creates a buffer with an ideal preallocated size for B-spline curves with the given degree. pub fn new(degree: usize) -> Self { Self(Vec::with_capacity(degree + 1)) } } impl<N: Number, I: Infinitesimal<N>> Default for BSplineCurveBuffer<N, I> { fn default() -> Self { Self::new(0) } } /// Error that occurred during [`BSplineCurve`] construction because a wrong number of /// control points was used. #[derive(Debug, Clone)] pub struct BSplineCurveError<N: Number, I: Infinitesimal<N>> { /// The B-spline that was used to construct the [`BSplineCurve`]. pub bspline: BSpline<N>, /// The control points that was used to construct the [`BSplineCurve`]. pub control_points: Vec<Jet<N, I>>, } /// A curve (more specific: a [spline]) that is constructed as a linear combination of a B-spline. /// /// The underlying [`BSpline`] and control points can be either borrowed or owned, hence it has /// either a static lifetime or the lifetime of its underlying data. /// /// [spline]: https://en.wikipedia.org/wiki/Spline_(mathematics) #[derive(Debug, Clone)] pub struct BSplineCurve<N: Number, I: Infinitesimal<N>> { dim: Dim, bspline: BSpline<N>, control_points: Vec<Jet<N, I>>, } impl<N: Number, I: Infinitesimal<N>> BSplineCurve<N, I> { /// Tries to create a B-spline curve. /// /// The number of control points must match the number returned by /// [`BSpline::necessary_control_point_count`]. 
pub fn new( dim: Dim, bspline: BSpline<N>, control_points: Vec<Jet<N, I>>, ) -> Result<Self, BSplineCurveError<N, I>> { if control_points.len() != bspline.necessary_control_point_count() { return Err(BSplineCurveError { bspline, control_points, }); } Ok(Self { dim, bspline, control_points, }) } /// Returns the B-Spline of this curve. pub fn bspline(&self) -> &BSpline<N> { &self.bspline } /// Returns the control points of this curve. pub fn control_points(&self) -> &[Jet<N, I>] { &self.control_points } /// Evaluates the curve for the given x-value. pub fn value(&self, x: &N, buffer: &mut BSplineCurveBuffer<N, I>) -> Jet<N, I> { calc_value( self.bspline.degree, &self.bspline.padded_knots, &self.control_points, x, buffer, ) } /// Evaluates the curve's derivative of the given order and for the given x-value. pub fn derivative( &self, order: usize, x: &N, buffer: &mut BSplineCurveBuffer<N, I>, ) -> Jet<N, I> { calc_derivative( self.dim, self.bspline.degree, &self.bspline.padded_knots, &self.control_points, order, x, buffer, ) } /// Returns a [`BSplineCurveRef`] that uses the underlying data of self. pub fn as_ref(&self) -> BSplineCurveRef<N, I> { BSplineCurveRef { dim: self.dim, bspline: &self.bspline, control_points: &self.control_points, } } } impl<N: Number> BSplineCurve<N, NoInfinitesimal> { /// Tries to create a B-spline curve using control points without an infinitesimal part. /// /// The number of control points must match the number returned by /// [`BSpline::necessary_control_point_count`]. pub fn without_infinitesimal( bspline: BSpline<N>, real_control_points: Vec<N>, ) -> Result<Self, BSplineCurveError<N, NoInfinitesimal>> { let control_points = real_control_points .into_iter() .map(|p| Jet::new(p, NoInfinitesimal)) .collect::<Vec<_>>(); Self::new(Dim(0), bspline, control_points) } } /// Same as [`BSplineCurve`] but it borrows it underlying data. 
#[derive(Debug, Clone)] pub struct BSplineCurveRef<'a, N: Number, I: Infinitesimal<N>> { dim: Dim, bspline: &'a BSpline<N>, control_points: &'a [Jet<N, I>], } impl<'a, N: Number, I: Infinitesimal<N>> BSplineCurveRef<'a, N, I> { /// Tries to create a B-spline curve. /// /// The number of control points must match the number returned by /// [`BSpline::necessary_control_point_count`]. pub fn new(dim: Dim, bspline: &'a BSpline<N>, control_points: &'a [Jet<N, I>]) -> Option<Self> { if control_points.len() != bspline.necessary_control_point_count() { return None; } Some(Self { bspline, dim, control_points, }) } /// Returns the B-Spline of this curve. pub fn bspline(&self) -> &BSpline<N> { self.bspline } /// Returns the control points of this curve. pub fn control_points(&self) -> &[Jet<N, I>] { self.control_points } /// Evaluates the curve for the given x-value. pub fn value(&self, x: &N, buffer: &mut BSplineCurveBuffer<N, I>) -> Jet<N, I> { calc_value( self.bspline.degree, &self.bspline.padded_knots, self.control_points, x, buffer, ) } /// Evaluates the curve's derivative of the given order and for the given x-value. 
pub fn derivative( &self, order: usize, x: &N, buffer: &mut BSplineCurveBuffer<N, I>, ) -> Jet<N, I> { calc_derivative( self.dim, self.bspline.degree, &self.bspline.padded_knots, self.control_points, order, x, buffer, ) } } fn min_knot_count(degree: usize) -> usize { degree + 1 } pub fn knot_count_without_padding<N: Number>(degree: usize, padded_knots: &[N]) -> usize { padded_knots.len() - 2 * degree } pub fn knots_without_padding<N: Number>(degree: usize, padded_knots: &[N]) -> &[N] { &padded_knots[degree..padded_knots.len() - degree] } fn necessary_control_point_count(degree: usize, knot_count: usize) -> usize { knot_count + degree.saturating_sub(1) } fn calc_value<N: Number, I: Infinitesimal<N>>( degree: usize, padded_knots: &[N], control_points: &[Jet<N, I>], x: &N, buffer: &mut BSplineCurveBuffer<N, I>, ) -> Jet<N, I> { // The algorithm is inspired by https://en.wikipedia.org/wiki/De_Boor%27s_algorithm // We assume that the number of control points correct because it was checked before debug_assert_eq!( control_points.len(), necessary_control_point_count(degree, knot_count_without_padding(degree, padded_knots)), ); let interval_index = find_interval(degree, padded_knots, x); // Prepare the buffer with the relevant control points buffer.0.clear(); buffer.0.extend(relevant_control_points( degree, interval_index, control_points, )); // Calculate the value de_boor_algorithm(degree, padded_knots, interval_index, x, buffer) } fn calc_derivative<N: Number, I: Infinitesimal<N>>( dim: Dim, degree: usize, padded_knots: &[N], control_points: &[Jet<N, I>], order: usize, x: &N, buffer: &mut BSplineCurveBuffer<N, I>, ) -> Jet<N, I> { if order == 0 { // We interpret the derivative of order 0 as the original curve return calc_value(degree, padded_knots, control_points, x, buffer); } // We assume that the number of control points correct because it was checked before debug_assert_eq!( control_points.len(), necessary_control_point_count(degree, 
knot_count_without_padding(degree, padded_knots)), ); if order > degree { // If the order is greater than the degree, we knot that derivative is always zero return Jet::new(N::zero(), I::zeros(dim)); } let derivative_degree = degree - order; let interval_index = find_interval(degree, padded_knots, x); // Prepare the buffer with the relevant control points buffer.0.clear(); buffer.0.extend(relevant_control_points( degree, interval_index, control_points, )); // Calculate the control points for the derivative calc_derivative_control_points( degree, derivative_degree, padded_knots, interval_index, buffer, ); // Calculate the derivative de_boor_algorithm(derivative_degree, padded_knots, interval_index, x, buffer) } fn find_interval<N: Number>(degree: usize, padded_knots: &[N], x: &N) -> usize { let knots = knots_without_padding(degree, padded_knots); // Calculate the knots between the intervals let interval_knots = if degree == 0 { // Because `degree` is at least 0, we know that we have at least 1 knot debug_assert!(!knots.is_empty()); // `degree` 0 is asymmetrical and "right-heavy" and we are not allowed to // omit the last knot, therefore omit only the first knot &knots[1..] } else { // Because `degree` is at least 1, we know that we have at least 2 knots debug_assert!(knots.len() >= 2); // We omit the first and last knot &knots[1..knots.len() - 1] }; // Find the interval that contains `x` let interval_knot_index = match interval_knots.binary_search_by(|k| N::total_cmp(k, x)) { Ok(index) => index, Err(index) => index, }; interval_knot_index + degree } fn relevant_control_points<N: Number, I: Infinitesimal<N>>( degree: usize, interval_index: usize, control_points: &[Jet<N, I>], ) -> impl Iterator<Item = Jet<N, I>> + '_ { control_points[interval_index - degree..=interval_index] .iter() .cloned() } // Implements the recursive part of the De Boor's algorithm. // // Expects that the buffer contains `degree + 1` control points. 
// // Inspired by https://en.wikipedia.org/wiki/De_Boor%27s_algorithm fn de_boor_algorithm<N: Number, I: Infinitesimal<N>>( degree: usize, padded_knots: &[N], interval_index: usize, x: &N, buffer: &mut BSplineCurveBuffer<N, I>, ) -> Jet<N, I> { for step in 1..=degree { for i in (step..=degree).rev() { let left_knot = &padded_knots[interval_index + i - degree]; let right_knot = &padded_knots[interval_index + 1 + i - step]; let knot_diff = right_knot.clone() - left_knot.clone(); // `knot_diff` is non-zero because we assume that the knots are strictly increasing // and at most one padding knot is used for the difference debug_assert!(!knot_diff.is_zero()); let alpha = (x.clone() - left_knot.clone()) / knot_diff; let beta = N::one() - alpha.clone(); let left_temp = buffer.0[i - 1].clone(); let right_temp = &mut buffer.0[i]; *right_temp = left_temp * beta + right_temp.clone() * alpha; } } buffer.0.pop().unwrap() } // Calculates the control points for the curve's derivative with the given degree. // // Expects that the buffer contains `degree + 1` control points of the B-spline curve. // After this functions returns the buffer contains `derivative_degree + 1` control points of // the curve's derivative. 
// // Inspired by https://stackoverflow.com/questions/57507696/b-spline-derivative-using-de-boors-algorithm fn calc_derivative_control_points<N: Number, I: Infinitesimal<N>>( degree: usize, derivative_degree: usize, padded_knots: &[N], interval_index: usize, buffer: &mut BSplineCurveBuffer<N, I>, ) { for step in (derivative_degree..degree).rev() { for i in 0..=step { let left_knot = &padded_knots[i + interval_index - step]; let right_knot = &padded_knots[i + interval_index + 1]; let knot_diff = right_knot.clone() - left_knot.clone(); // `knot_diff` is non-zero because we assume that the knots are strictly increasing // and at most one padding knot is used for the difference debug_assert!(!knot_diff.is_zero()); let alpha = N::from_integer((step + 1) as i32) / knot_diff; let right_temp = buffer.0[i + 1].clone(); let left_temp = &mut buffer.0[i]; *left_temp = (right_temp - left_temp.clone()) * alpha; } } // Remove unnecessary entries from buffer buffer.0.drain(derivative_degree + 1..); } #[cfg(test)] mod tests { use std::iter::repeat; use crate::bspline::{BSpline, BSplineCurve, BSplineCurveBuffer, BSplineErrorReason}; use crate::{Dim, Jet, NoInfinitesimal}; #[test] fn bspline_new_examples() { assert_eq!( BSpline::<f32>::new(0, vec![]).unwrap_err().reason, BSplineErrorReason::NotEnoughKnots, ); assert_eq!( BSpline::<f32>::new(0, vec![1.0]).unwrap().padded_knots, [1.0] ); assert_eq!( BSpline::<f32>::new(1, vec![1.0]).unwrap_err().reason, BSplineErrorReason::NotEnoughKnots, ); assert_eq!( BSpline::<f32>::new(1, vec![1.0, 2.0]).unwrap().padded_knots, [1.0, 1.0, 2.0, 2.0] ); assert_eq!( BSpline::<f32>::new(1, vec![1.0, 1.0]).unwrap_err().reason, BSplineErrorReason::NotStrictlyIncreasingKnots { wrong_knot_index: 1 }, ); assert_eq!( BSpline::<f32>::new(1, vec![2.0, 1.0]).unwrap_err().reason, BSplineErrorReason::NotStrictlyIncreasingKnots { wrong_knot_index: 1 }, ); assert_eq!( BSpline::<f32>::new(1, vec![f32::NAN, 1.0]) .unwrap_err() .reason, 
BSplineErrorReason::NotStrictlyIncreasingKnots { wrong_knot_index: 1 }, ); assert_eq!( BSpline::<f32>::new(1, vec![1.0, f32::NAN]) .unwrap_err() .reason, BSplineErrorReason::NotStrictlyIncreasingKnots { wrong_knot_index: 1 }, ); assert_eq!( BSpline::<f32>::new(1, vec![f32::NAN, f32::NAN]) .unwrap_err() .reason, BSplineErrorReason::NotStrictlyIncreasingKnots { wrong_knot_index: 1 }, ); assert_eq!( BSpline::<f32>::new(2, vec![1.0, 2.0, 3.0]) .unwrap() .padded_knots, [1.0, 1.0, 1.0, 2.0, 3.0, 3.0, 3.0] ); assert_eq!( BSpline::<f32>::new(3, vec![1.0, 2.0, 3.0, 4.0]) .unwrap() .padded_knots, [1.0, 1.0, 1.0, 1.0, 2.0, 3.0, 4.0, 4.0, 4.0, 4.0] ); } #[test] fn bspline_curve_new_example() { let dim = Dim(0); let control_points = |len: usize| { repeat(Jet::new(1.0 as f32, NoInfinitesimal)) .take(len) .collect() }; { let bspline = BSpline::<f32>::new(0, vec![1.0]).unwrap(); assert!(BSplineCurve::new(dim, bspline.clone(), control_points(0)).is_err()); assert!(BSplineCurve::new(dim, bspline.clone(), control_points(1)).is_ok()); assert!(BSplineCurve::new(dim, bspline.clone(), control_points(2)).is_err()); } { let bspline = BSpline::<f32>::new(0, vec![1.0, 2.0]).unwrap(); assert!(BSplineCurve::new(dim, bspline.clone(), control_points(1)).is_err()); assert!(BSplineCurve::new(dim, bspline.clone(), control_points(2)).is_ok()); assert!(BSplineCurve::new(dim, bspline.clone(), control_points(3)).is_err()); } { let bspline = BSpline::<f32>::new(1, vec![1.0, 2.0]).unwrap(); assert!(BSplineCurve::new(dim, bspline.clone(), control_points(1)).is_err()); assert!(BSplineCurve::new(dim, bspline.clone(), control_points(2)).is_ok()); assert!(BSplineCurve::new(dim, bspline.clone(), control_points(3)).is_err()); } { let bspline = BSpline::<f32>::new(1, vec![1.0, 2.0, 3.0]).unwrap(); assert!(BSplineCurve::new(dim, bspline.clone(), control_points(2)).is_err()); assert!(BSplineCurve::new(dim, bspline.clone(), control_points(3)).is_ok()); assert!(BSplineCurve::new(dim, bspline.clone(), 
control_points(4)).is_err()); } { let bspline = BSpline::<f32>::new(2, vec![1.0, 2.0, 3.0]).unwrap(); assert!(BSplineCurve::new(dim, bspline.clone(), control_points(3)).is_err()); assert!(BSplineCurve::new(dim, bspline.clone(), control_points(4)).is_ok()); assert!(BSplineCurve::new(dim, bspline.clone(), control_points(5)).is_err()); } { let bspline = BSpline::<f32>::new(3, vec![1.0, 2.0, 3.0, 4.0]).unwrap(); assert!(BSplineCurve::new(dim, bspline.clone(), control_points(5)).is_err()); assert!(BSplineCurve::new(dim, bspline.clone(), control_points(6)).is_ok()); assert!(BSplineCurve::new(dim, bspline.clone(), control_points(7)).is_err()); } } fn create_bspline_example( degree: usize, knots: Vec<f32>, control_points: Vec<f32>, ) -> BSplineCurve<f32, NoInfinitesimal> { let dim = Dim(0); let control_points = control_points .into_iter() .map(|p| Jet::new(p, NoInfinitesimal)) .collect(); let bspline = BSpline::<f32>::new(degree, knots).unwrap(); BSplineCurve::new(dim, bspline, control_points).unwrap() } fn check_bspline_example( curve: BSplineCurve<f32, NoInfinitesimal>, expected_results: Vec<(Option<usize>, f32, f32)>, ) { let mut violations = Vec::new(); let mut buffer = BSplineCurveBuffer::new(curve.bspline().degree); for (order, x, expected_y) in expected_results { let actual_y = match order { None => curve.value(&x, &mut buffer).real, Some(order) => curve.derivative(order, &x, &mut buffer).real, }; if expected_y != actual_y { violations.push((order, x, expected_y, actual_y)) } } let no_violation = violations.is_empty(); let violation_message = move || { let mut message = String::new(); message += "B-spline produces unexpected results:\n"; for (order, x, expected_y, actual_y) in violations { let prefix = match order { None => String::from("f"), Some(order) => format!("f^{order}"), }; message += &format!("\t{prefix}({x}) is {actual_y}, but {expected_y} was expected\n"); } message }; assert!(no_violation, "{}", violation_message()); } // Note: This B-spline curve is 
plotted in `bspline_examples.rs` #[test] fn bspline_example_0_a() { let curve = create_bspline_example(0, vec![1.0], vec![2.0]); check_bspline_example( curve, vec![ (None, 0.5, 2.0), (None, 1.0, 2.0), (None, 1.5, 2.0), (Some(0), 0.5, 2.0), (Some(0), 1.0, 2.0), (Some(0), 1.5, 2.0), (Some(1), 0.5, 0.0), (Some(1), 1.0, 0.0), (Some(1), 1.5, 0.0), (Some(2), 0.5, 0.0), (Some(2), 1.0, 0.0), (Some(2), 1.5, 0.0), ], ); } // Note: This B-spline curve is plotted in `bspline_examples.rs` #[test] fn bspline_example_0_b() { let curve = create_bspline_example(0, vec![1.0, 2.0, 3.0], vec![2.0, 3.0, 1.0]); check_bspline_example( curve, vec![ (None, 0.5, 2.0), (None, 1.0, 2.0), (None, 1.5, 2.0), (None, 2.0, 2.0), (None, 2.5, 3.0), (None, 3.0, 3.0), (None, 3.5, 1.0), (Some(0), 0.5, 2.0), (Some(0), 1.0, 2.0), (Some(0), 1.5, 2.0), (Some(0), 2.0, 2.0), (Some(0), 2.5, 3.0), (Some(0), 3.0, 3.0), (Some(0), 3.5, 1.0), (Some(1), 0.5, 0.0), (Some(1), 1.0, 0.0), (Some(1), 1.5, 0.0), (Some(1), 2.0, 0.0), (Some(1), 2.5, 0.0), (Some(1), 3.0, 0.0), (Some(1), 3.5, 0.0), (Some(2), 0.5, 0.0), (Some(2), 1.0, 0.0), (Some(2), 1.5, 0.0), (Some(2), 2.0, 0.0), (Some(2), 2.5, 0.0), (Some(2), 3.0, 0.0), (Some(2), 3.5, 0.0), ], ); } // Note: This B-spline curve is plotted in `bspline_examples.rs` #[test] fn bspline_example_1_a() { let curve = create_bspline_example(1, vec![1.0, 2.0], vec![1.0, 3.0]); check_bspline_example( curve, vec![ (None, 0.5, 0.0), (None, 1.0, 1.0), (None, 1.5, 2.0), (None, 2.0, 3.0), (None, 2.5, 4.0), (Some(0), 0.5, 0.0), (Some(0), 1.0, 1.0), (Some(0), 1.5, 2.0), (Some(0), 2.0, 3.0), (Some(0), 2.5, 4.0), (Some(1), 0.5, 2.0), (Some(1), 1.0, 2.0), (Some(1), 1.5, 2.0), (Some(1), 2.0, 2.0), (Some(1), 2.5, 2.0), (Some(2), 0.5, 0.0), (Some(2), 1.0, 0.0), (Some(2), 1.5, 0.0), (Some(2), 2.0, 0.0), (Some(2), 2.5, 0.0), (Some(3), 0.5, 0.0), (Some(3), 1.0, 0.0), (Some(3), 1.5, 0.0), (Some(3), 2.0, 0.0), (Some(3), 2.5, 0.0), ], ); } // Note: This B-spline curve is plotted in `bspline_examples.rs` 
#[test] fn bspline_example_1_b() { let curve = create_bspline_example(1, vec![1.0, 2.0, 3.0, 4.0], vec![1.0, 3.0, 2.0, 5.0]); check_bspline_example( curve, vec![ (None, 0.5, 0.0), (None, 1.0, 1.0), (None, 1.5, 2.0), (None, 2.0, 3.0), (None, 2.5, 2.5), (None, 3.0, 2.0), (None, 3.5, 3.5), (None, 4.0, 5.0), (None, 4.5, 6.5), (Some(0), 0.5, 0.0), (Some(0), 1.0, 1.0), (Some(0), 1.5, 2.0), (Some(0), 2.0, 3.0), (Some(0), 2.5, 2.5), (Some(0), 3.0, 2.0), (Some(0), 3.5, 3.5), (Some(0), 4.0, 5.0), (Some(0), 4.5, 6.5), (Some(1), 0.5, 2.0), (Some(1), 1.0, 2.0), (Some(1), 1.5, 2.0), (Some(1), 2.0, 2.0), (Some(1), 2.5, -1.0), (Some(1), 3.0, -1.0), (Some(1), 3.5, 3.0), (Some(1), 4.0, 3.0), (Some(1), 4.5, 3.0), (Some(2), 0.5, 0.0), (Some(2), 1.0, 0.0), (Some(2), 1.5, 0.0), (Some(2), 2.0, 0.0), (Some(2), 2.5, 0.0), (Some(2), 3.0, 0.0), (Some(2), 3.5, 0.0), (Some(2), 4.0, 0.0), (Some(2), 4.5, 0.0), (Some(3), 0.5, 0.0), (Some(3), 1.0, 0.0), (Some(3), 1.5, 0.0), (Some(3), 2.0, 0.0), (Some(3), 2.5, 0.0), (Some(3), 3.0, 0.0), (Some(3), 3.5, 0.0), (Some(3), 4.0, 0.0), (Some(3), 4.5, 0.0), ], ); } // Note: This B-spline curve is plotted in `bspline_examples.rs` #[test] fn bspline_example_2_a() { let curve = create_bspline_example(2, vec![1.0, 2.0, 3.0], vec![1.0, 3.0, 2.0, 4.0]); check_bspline_example( curve, vec![ (None, 0.5, -1.625), (None, 1.0, 1.0), (None, 1.5, 2.375), (None, 2.0, 2.5), (None, 2.5, 2.625), (None, 3.0, 4.0), (None, 3.5, 6.625), (Some(0), 0.5, -1.625), (Some(0), 1.0, 1.0), (Some(0), 1.5, 2.375), (Some(0), 2.0, 2.5), (Some(0), 2.5, 2.625), (Some(0), 3.0, 4.0), (Some(0), 3.5, 6.625), (Some(1), 0.5, 6.5), (Some(1), 1.0, 4.0), (Some(1), 1.5, 1.5), (Some(1), 2.0, -1.0), (Some(1), 2.5, 1.5), (Some(1), 3.0, 4.0), (Some(1), 3.5, 6.5), (Some(2), 0.5, -5.0), (Some(2), 1.0, -5.0), (Some(2), 1.5, -5.0), (Some(2), 2.0, -5.0), (Some(2), 2.5, 5.0), (Some(2), 3.0, 5.0), (Some(2), 3.5, 5.0), (Some(3), 0.5, 0.0), (Some(3), 1.0, 0.0), (Some(3), 1.5, 0.0), (Some(3), 2.0, 0.0), (Some(3), 
2.5, 0.0), (Some(3), 3.0, 0.0), (Some(3), 3.5, 0.0), (Some(4), 0.5, 0.0), (Some(4), 1.0, 0.0), (Some(4), 1.5, 0.0), (Some(4), 2.0, 0.0), (Some(4), 2.5, 0.0), (Some(4), 3.0, 0.0), (Some(4), 3.5, 0.0), ], ); } // Note: This B-spline curve is plotted in `bspline_examples.rs` #[test] fn bspline_example_2_b() { let curve = create_bspline_example( 2, vec![1.0, 2.0, 3.0, 4.0, 5.0], vec![1.0, 3.0, 2.0, 4.0, 1.0, 5.0], ); check_bspline_example( curve, vec![ (None, 0.5, -1.625), (None, 1.0, 1.0), (None, 1.5, 2.375), (None, 2.0, 2.5), (None, 2.5, 2.375), (None, 3.0, 3.0), (None, 3.5, 3.375), (None, 4.0, 2.5), (None, 4.5, 2.375), (None, 5.0, 5.0), (None, 5.5, 10.375), (Some(0), 0.5, -1.625), (Some(0), 1.0, 1.0), (Some(0), 1.5, 2.375), (Some(0), 2.0, 2.5), (Some(0), 2.5, 2.375), (Some(0), 3.0, 3.0), (Some(0), 3.5, 3.375), (Some(0), 4.0, 2.5), (Some(0), 4.5, 2.375), (Some(0), 5.0, 5.0), (Some(0), 5.5, 10.375), (Some(1), 0.5, 6.5), (Some(1), 1.0, 4.0), (Some(1), 1.5, 1.5), (Some(1), 2.0, -1.0), (Some(1), 2.5, 0.5), (Some(1), 3.0, 2.0), (Some(1), 3.5, -0.5), (Some(1), 4.0, -3.0), (Some(1), 4.5, 2.5), (Some(1), 5.0, 8.0), (Some(1), 5.5, 13.5), (Some(2), 0.5, -5.0), (Some(2), 1.0, -5.0), (Some(2), 1.5, -5.0), (Some(2), 2.0, -5.0), (Some(2), 2.5, 3.0), (Some(2), 3.0, 3.0), (Some(2), 3.5, -5.0), (Some(2), 4.0, -5.0), (Some(2), 4.5, 11.0), (Some(2), 5.0, 11.0), (Some(2), 5.5, 11.0), (Some(3), 0.5, 0.0), (Some(3), 1.0, 0.0), (Some(3), 1.5, 0.0), (Some(3), 2.0, 0.0), (Some(3), 2.5, 0.0), (Some(3), 3.0, 0.0), (Some(3), 3.5, 0.0), (Some(3), 4.0, 0.0), (Some(3), 4.5, 0.0), (Some(3), 5.0, 0.0), (Some(3), 5.5, 0.0), (Some(4), 0.5, 0.0), (Some(4), 1.0, 0.0), (Some(4), 1.5, 0.0), (Some(4), 2.0, 0.0), (Some(4), 2.5, 0.0), (Some(4), 3.0, 0.0), (Some(4), 3.5, 0.0), (Some(4), 4.0, 0.0), (Some(4), 4.5, 0.0), (Some(4), 5.0, 0.0), (Some(4), 5.5, 0.0), ], ); } // Note: This B-spline curve is plotted in `bspline_examples.rs` #[test] fn bspline_example_3_a() { let curve = create_bspline_example( 3, 
vec![1.0, 2.0, 3.0, 4.0], vec![1.0, 3.0, 2.0, 4.0, 1.0, 5.0], ); check_bspline_example( curve, vec![ (None, 0.5, -4.260417), (None, 1.0, 1.0), (None, 1.5, 2.5104165), (None, 2.0, 2.5833333), (None, 2.5, 2.9375), (None, 3.0, 2.9166667), (None, 3.5, 2.3020833), (None, 4.0, 5.0), (None, 4.5, 15.947917), (Some(0), 0.5, -4.260417), (Some(0), 1.0, 1.0), (Some(0), 1.5, 2.5104165), (Some(0), 2.0, 2.5833333), (Some(0), 2.5, 2.9375), (Some(0), 3.0, 2.9166667), (Some(0), 3.5, 2.3020833), (Some(0), 4.0, 5.0), (Some(0), 4.5, 15.947917), (Some(1), 0.5, 15.8125), (Some(1), 1.0, 6.0), (Some(1), 1.5, 0.8125), (Some(1), 2.0, 0.25), (Some(1), 2.5, 0.75), (Some(1), 3.0, -1.25), (Some(1), 3.5, 0.4375), (Some(1), 4.0, 12.0), (Some(1), 4.5, 33.4375), (Some(2), 0.5, -24.25), (Some(2), 1.0, -15.0), (Some(2), 1.5, -5.75), (Some(2), 2.0, 3.5), (Some(2), 2.5, -1.5), (Some(2), 3.0, -6.5), (Some(2), 3.5, 13.25), (Some(2), 4.0, 33.0), (Some(2), 4.5, 52.75), (Some(3), 0.5, 18.5), (Some(3), 1.0, 18.50), (Some(3), 1.5, 18.5), (Some(3), 2.0, 18.5), (Some(3), 2.5, -10.0), (Some(3), 3.0, -10.0), (Some(3), 3.5, 39.5), (Some(3), 4.0, 39.5), (Some(3), 4.5, 39.5), (Some(4), 0.5, 0.0), (Some(4), 1.0, 0.0), (Some(4), 1.5, 0.0), (Some(4), 2.0, 0.0), (Some(4), 2.5, 0.0), (Some(4), 3.0, 0.0), (Some(4), 3.5, 0.0), (Some(4), 4.0, 0.0), (Some(4), 4.5, 0.0), (Some(5), 0.5, 0.0), (Some(5), 1.0, 0.0), (Some(5), 1.5, 0.0), (Some(5), 2.0, 0.0), (Some(5), 2.5, 0.0), (Some(5), 3.0, 0.0), (Some(5), 3.5, 0.0), (Some(5), 4.0, 0.0), (Some(5), 4.5, 0.0), ], ); } // Note: This B-spline curve is plotted in `bspline_examples.rs` #[test] fn bspline_example_3_b() { let curve = create_bspline_example( 3, vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0], vec![1.0, 3.0, 2.0, 4.0, 1.0, 5.0, 2.0, 3.0], ); check_bspline_example( curve, vec![ (None, 0.5, -4.260417), (None, 1.0, 1.0), (None, 1.5, 2.5104165), (None, 2.0, 2.5833333), (None, 2.5, 2.96875), (None, 3.0, 3.1666665), (None, 3.5, 2.5416665), (None, 4.0, 2.1666667), (None, 4.5, 2.96875), 
(None, 5.0, 3.5833335), (None, 5.5, 2.8854165), (None, 6.0, 3.0), (None, 6.5, 6.8645835), (Some(0), 0.5, -4.260417), (Some(0), 1.0, 1.0), (Some(0), 1.5, 2.5104165), (Some(0), 2.0, 2.5833333), (Some(0), 2.5, 2.96875), (Some(0), 3.0, 3.1666665), (Some(0), 3.5, 2.5416665), (Some(0), 4.0, 2.1666667), (Some(0), 4.5, 2.96875), (Some(0), 5.0, 3.5833335), (Some(0), 5.5, 2.8854165), (Some(0), 6.0, 3.0), (Some(0), 6.5, 6.8645835), (Some(1), 0.5, 15.8125), (Some(1), 1.0, 6.0), (Some(1), 1.5, 0.8125), (Some(1), 2.0, 0.25), (Some(1), 2.5, 0.9375), (Some(1), 3.0, -0.5), (Some(1), 3.5, -1.5), (Some(1), 4.0, 0.5), (Some(1), 4.5, 2.0625), (Some(1), 5.0, -0.25), (Some(1), 5.5, -1.5625), (Some(1), 6.0, 3.0), (Some(1), 6.5, 13.4375), (Some(2), 0.5, -24.25), (Some(2), 1.0, -15.0), (Some(2), 1.5, -5.75), (Some(2), 2.0, 3.5), (Some(2), 2.5, -0.75), (Some(2), 3.0, -5.0), (Some(2), 3.5, 1.0), (Some(2), 4.0, 7.0), (Some(2), 4.5, -0.75), (Some(2), 5.0, -8.5), (Some(2), 5.5, 3.25), (Some(2), 6.0, 15.0), (Some(2), 6.5, 26.75), (Some(3), 0.5, 18.5), (Some(3), 1.0, 18.5), (Some(3), 1.5, 18.5), (Some(3), 2.0, 18.5), (Some(3), 2.5, -8.5), (Some(3), 3.0, -8.5), (Some(3), 3.5, 12.0), (Some(3), 4.0, 12.0), (Some(3), 4.5, -15.5), (Some(3), 5.0, -15.5), (Some(3), 5.5, 23.5), (Some(3), 6.0, 23.5), (Some(3), 6.5, 23.5), (Some(4), 0.5, 0.0), (Some(4), 1.0, 0.0), (Some(4), 1.5, 0.0), (Some(4), 2.0, 0.0), (Some(4), 2.5, 0.0), (Some(4), 3.0, 0.0), (Some(4), 3.5, 0.0), (Some(4), 4.0, 0.0), (Some(4), 4.5, 0.0), (Some(4), 5.0, 0.0), (Some(4), 5.5, 0.0), (Some(4), 6.0, 0.0), (Some(4), 6.5, 0.0), (Some(5), 0.5, 0.0), (Some(5), 1.0, 0.0), (Some(5), 1.5, 0.0), (Some(5), 2.0, 0.0), (Some(5), 2.5, 0.0), (Some(5), 3.0, 0.0), (Some(5), 3.5, 0.0), (Some(5), 4.0, 0.0), (Some(5), 4.5, 0.0), (Some(5), 5.0, 0.0), (Some(5), 5.5, 0.0), (Some(5), 6.0, 0.0), (Some(5), 6.5, 0.0), ], ); } }
34.020867
153
0.471739
214a579f062c758e9ef93f890536eb567ed2109b
182
fn main() { let inc = | i | i + 1; // type is optional let a = 13; let b = inc( a ); // same as ordinary routine println!( "a : {}", a ); println!( "b : {}", b ); }
13
29
0.450549
56cf7bb0487eba2528a186cdf0a7fe2190246167
7,773
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use byteorder::{ByteOrder, NativeEndian}; use fidl_fuchsia_media::*; use fuchsia_zircon as zx; use itertools::Itertools; use stream_processor_test::*; const PCM_SAMPLE_SIZE: usize = 2; const PCM_MIME_TYPE: &str = "audio/pcm"; #[derive(Clone, Debug)] pub struct PcmAudio { pcm_format: PcmFormat, buffer: Vec<u8>, } impl PcmAudio { pub fn create_saw_wave(pcm_format: PcmFormat, frame_count: usize) -> Self { const FREQUENCY: f32 = 20.0; const AMPLITUDE: f32 = 0.2; let pcm_frame_size = PCM_SAMPLE_SIZE * pcm_format.channel_map.len(); let samples_per_frame = pcm_format.channel_map.len(); let sample_count = frame_count * samples_per_frame; let mut buffer = vec![0; frame_count * pcm_frame_size]; for i in 0..sample_count { let frame = (i / samples_per_frame) as f32; let value = ((frame * FREQUENCY / (pcm_format.frames_per_second as f32)) % 1.0) * AMPLITUDE; let sample = (value * i16::max_value() as f32) as i16; let mut sample_bytes = [0; std::mem::size_of::<i16>()]; NativeEndian::write_i16(&mut sample_bytes, sample); let offset = i * PCM_SAMPLE_SIZE; buffer[offset] = sample_bytes[0]; buffer[offset + 1] = sample_bytes[1]; } Self { pcm_format, buffer } } pub fn frame_size(&self) -> usize { self.pcm_format.channel_map.len() * PCM_SAMPLE_SIZE } } /// Generates timestamps according to a timebase and rate of playback of uncompressed audio. /// /// Since the rate is constant, this can also be used to extrapolate timestamps. 
pub struct TimestampGenerator { bytes_per_second: usize, timebase: u64, } impl TimestampGenerator { pub fn timestamp_at(&self, input_index: usize) -> u64 { let bps = self.bytes_per_second as u64; (input_index as u64) * self.timebase / bps } } #[allow(dead_code)] pub struct PcmAudioStream<I, E> { pub pcm_audio: PcmAudio, pub encoder_settings: E, pub frames_per_packet: I, pub timebase: Option<u64>, } impl<I, E> PcmAudioStream<I, E> where I: Iterator<Item = usize> + Clone, E: Fn() -> EncoderSettings, { pub fn bytes_per_second(&self) -> usize { self.pcm_audio.pcm_format.frames_per_second as usize * std::mem::size_of::<i16>() * self.pcm_audio.pcm_format.channel_map.len() } pub fn timestamp_generator(&self) -> Option<TimestampGenerator> { self.timebase.map(|timebase| TimestampGenerator { bytes_per_second: self.bytes_per_second(), timebase, }) } } impl<I, E> ElementaryStream for PcmAudioStream<I, E> where I: Iterator<Item = usize> + Clone, E: Fn() -> EncoderSettings, { fn format_details(&self, format_details_version_ordinal: u64) -> FormatDetails { FormatDetails { domain: Some(DomainFormat::Audio(AudioFormat::Uncompressed( AudioUncompressedFormat::Pcm(self.pcm_audio.pcm_format.clone()), ))), encoder_settings: Some((self.encoder_settings)()), format_details_version_ordinal: Some(format_details_version_ordinal), mime_type: Some(String::from(PCM_MIME_TYPE)), oob_bytes: None, pass_through_parameters: None, timebase: self.timebase, } } fn is_access_units(&self) -> bool { false } fn stream<'a>(&'a self) -> Box<dyn Iterator<Item = ElementaryStreamChunk> + 'a> { let data = self.pcm_audio.buffer.as_slice(); let frame_size = self.pcm_audio.frame_size(); let mut offset = 0; let mut frames_per_packet = self.frames_per_packet.clone(); let chunks = (0..) 
.map(move |_| { let number_of_frames_for_this_packet = frames_per_packet.next()?; let payload_size = number_of_frames_for_this_packet * frame_size; let payload_size = data .len() .checked_sub(offset) .map(|remaining_bytes| std::cmp::min(remaining_bytes, payload_size)) .filter(|payload_size| *payload_size > 0)?; let range = offset..(offset + payload_size); let result = data.get(range).map(|range| (offset, range))?; offset += payload_size; Some(result) }) .while_some(); Box::new(chunks.map(move |(input_index, data)| { ElementaryStreamChunk { start_access_unit: false, known_end_access_unit: false, data: data.to_vec(), significance: Significance::Audio(AudioSignificance::PcmFrames), timestamp: self .timestamp_generator() .as_ref() .map(|timestamp_generator| timestamp_generator.timestamp_at(input_index)), } })) } } fn dummy_encode_settings() -> EncoderSettings { // Settings are arbitrary; we just need to construct an instance. EncoderSettings::Sbc(SbcEncoderSettings { sub_bands: SbcSubBands::SubBands8, allocation: SbcAllocation::AllocLoudness, block_count: SbcBlockCount::BlockCount16, channel_mode: SbcChannelMode::JointStereo, bit_pool: 59, }) } #[test] fn elementary_chunk_data() { let pcm_format = PcmFormat { pcm_mode: AudioPcmMode::Linear, bits_per_sample: 16, frames_per_second: 44100, channel_map: vec![AudioChannelId::Lf, AudioChannelId::Rf], }; let pcm_audio = PcmAudio::create_saw_wave(pcm_format, /*frame_count=*/ 100); let stream = PcmAudioStream { pcm_audio: pcm_audio.clone(), encoder_settings: dummy_encode_settings, frames_per_packet: (0..).map(|_| 40), timebase: None, }; let actual: Vec<u8> = stream.stream().flat_map(|chunk| chunk.data.clone()).collect(); assert_eq!(pcm_audio.buffer, actual); } #[test] fn saw_wave_matches_hash() { use hex; use mundane::hash::*; /// This was obtained by writing the buffer out to file and inspecting the wave on each channel. 
const GOLDEN_DIGEST: &str = "2bf4f233a179f0cb572b72570a28c07a334e406baa7fb4fc65f641b82d0ae64a"; let pcm_audio = PcmAudio::create_saw_wave( PcmFormat { pcm_mode: AudioPcmMode::Linear, bits_per_sample: 16, frames_per_second: 44100, channel_map: vec![AudioChannelId::Lf, AudioChannelId::Rf], }, /*frame_count=*/ 50000, ); let actual_digest = hex::encode(Sha256::hash(&pcm_audio.buffer).bytes()); assert_eq!(&actual_digest, GOLDEN_DIGEST); } #[test] fn stream_timestamps() { let pcm_format = PcmFormat { pcm_mode: AudioPcmMode::Linear, bits_per_sample: 16, frames_per_second: 50, channel_map: vec![AudioChannelId::Lf, AudioChannelId::Rf], }; let pcm_audio = PcmAudio::create_saw_wave(pcm_format, /*frame_count=*/ 100); let stream = PcmAudioStream { pcm_audio: pcm_audio.clone(), encoder_settings: dummy_encode_settings, frames_per_packet: (0..).map(|_| 50), timebase: Some(zx::Duration::from_seconds(1).into_nanos() as u64), }; let mut chunks = stream.stream(); assert_eq!(chunks.next().and_then(|chunk| chunk.timestamp), Some(0)); assert_eq!( chunks.next().and_then(|chunk| chunk.timestamp), Some(zx::Duration::from_seconds(1).into_nanos() as u64) ); }
33.076596
100
0.623697
f88c684036965ce17a1ae83ea5f00f860e3a4086
13,320
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ use crate::util::PointerAddress; use graphql_ir::{ FragmentDefinition, InlineFragment, LinkedField, OperationDefinition, Program, Selection, }; use graphql_printer::{write_arguments, write_directives}; use schema::{Schema, Type, TypeReference}; use std::fmt::Write; use std::collections::HashMap; use std::sync::Arc; type FlattenedSelectionMap = HashMap<String, Selection>; type SeenLinkedFields = HashMap<PointerAddress, Arc<LinkedField>>; /// /// Transform that flattens inline fragments, fragment spreads, merges linked fields selections. /// /// Inline fragments are inlined (replaced with their selections) when: /// - The fragment type matches the type of its parent. /// - The fragment has an abstract type and the `flattenAbstractTypes` option has /// been set. /// /// pub fn flatten<'s>(program: &'s Program<'s>, should_flatten_abstract_types: bool) -> Program<'s> { let mut next_program = Program::new(program.schema()); let mut transform = FlattenTransform::new(program, should_flatten_abstract_types); for operation in program.operations() { next_program.insert_operation(Arc::new(transform.transform_operation(operation))); } for fragment in program.fragments() { next_program.insert_fragment(Arc::new(transform.transform_fragment(fragment))); } next_program } struct FlattenTransform<'s> { program: &'s Program<'s>, should_flatten_abstract_types: bool, seen_linked_fields: SeenLinkedFields, } impl<'s> FlattenTransform<'s> { fn new(program: &'s Program<'s>, should_flatten_abstract_types: bool) -> Self { Self { program, should_flatten_abstract_types, seen_linked_fields: Default::default(), } } fn transform_operation(&mut self, operation: &OperationDefinition) -> OperationDefinition { OperationDefinition { kind: operation.kind, name: operation.name, type_: operation.type_, directives: 
operation.directives.clone(), variable_definitions: operation.variable_definitions.clone(), selections: self.tranform_selections( &operation.selections, &TypeReference::Named(operation.type_), ), } } fn transform_fragment(&mut self, fragment: &FragmentDefinition) -> FragmentDefinition { FragmentDefinition { name: fragment.name, type_condition: fragment.type_condition, directives: fragment.directives.clone(), variable_definitions: fragment.variable_definitions.clone(), used_global_variables: fragment.used_global_variables.clone(), selections: self.tranform_selections( &fragment.selections, &TypeReference::Named(fragment.type_condition), ), } } fn tranform_selections( &mut self, selections: &[Selection], parent_type: &TypeReference, ) -> Vec<Selection> { let next_selections = selections .iter() .map(|s| self.transform_selection(s, parent_type)) .collect::<Vec<_>>(); let mut flattened_selections_map: FlattenedSelectionMap = Default::default(); self.flatten_selections(&mut flattened_selections_map, &next_selections, parent_type); flattened_selections_map.values().cloned().collect() } fn transform_linked_field(&mut self, linked_field: &Arc<LinkedField>) -> Arc<LinkedField> { let key = PointerAddress::new(Arc::as_ref(linked_field)); if let Some(prev) = self.seen_linked_fields.get(&key) { return Arc::clone(prev); } let result = Arc::new(LinkedField { alias: linked_field.alias, definition: linked_field.definition, arguments: linked_field.arguments.clone(), directives: linked_field.directives.clone(), selections: self.tranform_selections( &linked_field.selections, &self .program .schema() .field(linked_field.definition.item) .type_, ), }); self.seen_linked_fields.insert(key, Arc::clone(&result)); result } fn transform_selection( &mut self, selection: &Selection, parent_type: &TypeReference, ) -> Selection { match selection { Selection::InlineFragment(node) => { let next_parent_type: TypeReference = match node.type_condition { Some(type_condition) => 
TypeReference::Named(type_condition), None => parent_type.clone(), }; Selection::InlineFragment(Arc::new(InlineFragment { type_condition: node.type_condition, directives: node.directives.clone(), selections: self.tranform_selections(&node.selections, &next_parent_type), })) } Selection::LinkedField(node) => { Selection::LinkedField(self.transform_linked_field(node)) } Selection::FragmentSpread(node) => Selection::FragmentSpread(Arc::clone(node)), Selection::ScalarField(node) => Selection::ScalarField(Arc::clone(node)), } } fn flatten_selections( &mut self, flattened_selections_map: &mut FlattenedSelectionMap, selections: &[Selection], parent_type: &TypeReference, ) { for selection in selections { if let Selection::InlineFragment(inline_fragment) = selection { if should_flatten_inline_fragment( self.program.schema(), inline_fragment.type_condition, parent_type, self.should_flatten_abstract_types, ) { self.flatten_selections( flattened_selections_map, &inline_fragment.selections, parent_type, ); continue; } } let node_identifier = get_identifier_for_selection(self.program.schema(), &selection); let flattened_selection_value = flattened_selections_map.get(&node_identifier); match flattened_selection_value { None => { flattened_selections_map.insert(node_identifier, selection.clone()); } Some(flattened_selection) => match flattened_selection { Selection::InlineFragment(flattened_node) => { let type_condition: TypeReference = match flattened_node.type_condition { Some(type_condition) => TypeReference::Named(type_condition), None => parent_type.clone(), }; let node_selections = match selection { Selection::InlineFragment(node) => &node.selections, _ => unreachable!("FlattenTransform: Expected an InlineFragment."), }; let next_selection = Selection::InlineFragment(Arc::new(InlineFragment { type_condition: flattened_node.type_condition, directives: flattened_node.directives.clone(), selections: self.merge_selections( &node_selections, &flattened_node.selections, 
&type_condition, ), })); flattened_selections_map.insert(node_identifier, next_selection); } Selection::LinkedField(flattened_node) => { let node_selections = match selection { Selection::LinkedField(node) => &node.selections, _ => unreachable!("FlattenTransform: Expected a LinkedField."), }; let next_selection = Selection::LinkedField(Arc::new(LinkedField { alias: flattened_node.alias, definition: flattened_node.definition, arguments: flattened_node.arguments.clone(), directives: flattened_node.directives.clone(), selections: self.merge_selections( &node_selections, &flattened_node.selections, &self .program .schema() .field(flattened_node.definition.item) .type_, ), })); flattened_selections_map.insert(node_identifier, next_selection); } Selection::ScalarField(node) => { let next_selection = Selection::ScalarField(Arc::clone(node)); flattened_selections_map.insert(node_identifier, next_selection); } Selection::FragmentSpread(node) => { let next_selection = Selection::FragmentSpread(Arc::clone(node)); flattened_selections_map.insert(node_identifier, next_selection); } }, } } } fn merge_selections( &mut self, selections_a: &[Selection], selections_b: &[Selection], parent_type: &TypeReference, ) -> Vec<Selection> { let mut flattened_selections_map: FlattenedSelectionMap = Default::default(); self.flatten_selections(&mut flattened_selections_map, selections_a, parent_type); self.flatten_selections(&mut flattened_selections_map, selections_b, parent_type); flattened_selections_map.values().cloned().collect() } } fn get_identifier_for_selection<'s>(schema: &'s Schema, selection: &Selection) -> String { let mut writer = String::new(); match selection { Selection::InlineFragment(node) => { write!(writer, "InlineFragment:").unwrap(); if let Some(type_condition) = node.type_condition { write!(writer, "{}", schema.get_type_name(type_condition).lookup()).unwrap(); } if !node.directives.is_empty() { write_directives(schema, &node.directives, &mut writer).unwrap(); } } 
Selection::LinkedField(node) => { write!(writer, "LinkedField:").unwrap(); match node.alias { Some(alias) => write!(writer, "{}", alias.item.lookup()), None => write!( writer, "{}", schema.field(node.definition.item).name.lookup() ), } .unwrap(); if !node.arguments.is_empty() { write_arguments(schema, &node.arguments, &mut writer).unwrap(); } if !node.directives.is_empty() { write_directives(schema, &node.directives, &mut writer).unwrap(); } } Selection::ScalarField(node) => { write!(writer, "ScalarField:").unwrap(); match node.alias { Some(alias) => write!(writer, "{}", alias.item.lookup()), None => write!( writer, "{}", schema.field(node.definition.item).name.lookup() ), } .unwrap(); if !node.arguments.is_empty() { write_arguments(schema, &node.arguments, &mut writer).unwrap(); } if !node.directives.is_empty() { write_directives(schema, &node.directives, &mut writer).unwrap(); } } Selection::FragmentSpread(node) => { write!(writer, "FragmentSpread: {}", node.fragment.item.lookup()).unwrap(); if !node.arguments.is_empty() { write_arguments(schema, &node.arguments, &mut writer).unwrap(); } if !node.directives.is_empty() { write_directives(schema, &node.directives, &mut writer).unwrap(); } } }; writer } fn should_flatten_inline_fragment<'s>( schema: &'s Schema, inline_fragment_type_condition: Option<Type>, parent_type: &TypeReference, should_flatten_abstract_types: bool, ) -> bool { if let Some(type_condition) = inline_fragment_type_condition { return (schema.is_abstract_type(type_condition) && should_flatten_abstract_types) || &TypeReference::Named(type_condition) == parent_type; }; false }
41.111111
98
0.557357
6737861ffe18305eb0d2b131434c9303fb3d28d6
733
// Copyright 2020 AXIA Technologies // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![cfg_attr(not(feature = "std"), no_std)] mod hash; mod uint; pub use ethbloom::{Bloom, BloomRef, Input as BloomInput}; pub use hash::{BigEndianHash, H128, H160, H256, H264, H32, H512, H520, H64}; pub use uint::{FromDecStrErr, FromStrRadixErr, FromStrRadixErrKind, U128, U256, U512, U64}; pub type Address = H160; pub type Secret = H256; pub type Public = H512; pub type Signature = H520;
33.318182
91
0.728513
1ead79eedf4808644946fa0ef2c16a524e8ce718
742
//! Tests auto-converted from "sass-spec/spec/non_conformant/parser/interpolate/10_escaped_backslash/01_inline.hrx" #[test] fn test() { assert_eq!( crate::rsass( ".result {\ \n output: \\\\;\ \n output: #{\\\\};\ \n output: \"[#{\\\\}]\";\ \n output: \"#{\\\\}\";\ \n output: \'#{\\\\}\';\ \n output: \"[\'#{\\\\}\']\";\ \n}\ \n" ) .unwrap(), ".result {\ \n output: \\\\;\ \n output: \\\\;\ \n output: \"[\\\\\\\\]\";\ \n output: \"\\\\\\\\\";\ \n output: \"\\\\\\\\\";\ \n output: \"[\'\\\\\\\\\']\";\ \n}\ \n" ); }
25.586207
115
0.312668
87219511e6ff3a3608649989039983a33babe49e
17,720
use rustc::hir; use rustc::middle::const_val::{ConstEvalErr, ConstVal, ErrKind}; use rustc::middle::const_val::ErrKind::{TypeckError, CheckMatchError}; use rustc::mir; use rustc::ty::{self, TyCtxt, Ty, Instance}; use rustc::ty::layout::{self, LayoutOf}; use rustc::ty::subst::Subst; use syntax::ast::Mutability; use syntax::codemap::Span; use rustc::mir::interpret::{EvalResult, EvalError, EvalErrorKind, GlobalId, Value, MemoryPointer, Pointer, PrimVal, AllocId}; use super::{Place, EvalContext, StackPopCleanup, ValTy, PlaceExtra, Memory}; use std::fmt; use std::error::Error; use rustc_data_structures::sync::Lrc; pub fn mk_borrowck_eval_cx<'a, 'mir, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: Instance<'tcx>, mir: &'mir mir::Mir<'tcx>, span: Span, ) -> EvalResult<'tcx, EvalContext<'a, 'mir, 'tcx, CompileTimeEvaluator>> { debug!("mk_borrowck_eval_cx: {:?}", instance); let param_env = tcx.param_env(instance.def_id()); let mut ecx = EvalContext::new(tcx.at(span), param_env, CompileTimeEvaluator, ()); // insert a stack frame so any queries have the correct substs ecx.push_stack_frame( instance, span, mir, Place::undef(), StackPopCleanup::None, )?; Ok(ecx) } pub fn mk_eval_cx<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: Instance<'tcx>, param_env: ty::ParamEnv<'tcx>, ) -> EvalResult<'tcx, EvalContext<'a, 'tcx, 'tcx, CompileTimeEvaluator>> { debug!("mk_eval_cx: {:?}, {:?}", instance, param_env); let span = tcx.def_span(instance.def_id()); let mut ecx = EvalContext::new(tcx.at(span), param_env, CompileTimeEvaluator, ()); let mir = ecx.load_mir(instance.def)?; // insert a stack frame so any queries have the correct substs ecx.push_stack_frame( instance, mir.span, mir, Place::undef(), StackPopCleanup::None, )?; Ok(ecx) } pub fn eval_body_with_mir<'a, 'mir, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, cid: GlobalId<'tcx>, mir: &'mir mir::Mir<'tcx>, param_env: ty::ParamEnv<'tcx>, ) -> Option<(Value, Pointer, Ty<'tcx>)> { let (res, ecx) = eval_body_and_ecx(tcx, cid, Some(mir), 
param_env); match res { Ok(val) => Some(val), Err(mut err) => { ecx.report(&mut err, true, None); None } } } pub fn eval_body<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, cid: GlobalId<'tcx>, param_env: ty::ParamEnv<'tcx>, ) -> Option<(Value, Pointer, Ty<'tcx>)> { let (res, ecx) = eval_body_and_ecx(tcx, cid, None, param_env); match res { Ok(val) => Some(val), Err(mut err) => { ecx.report(&mut err, true, None); None } } } fn eval_body_and_ecx<'a, 'mir, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, cid: GlobalId<'tcx>, mir: Option<&'mir mir::Mir<'tcx>>, param_env: ty::ParamEnv<'tcx>, ) -> (EvalResult<'tcx, (Value, Pointer, Ty<'tcx>)>, EvalContext<'a, 'mir, 'tcx, CompileTimeEvaluator>) { debug!("eval_body: {:?}, {:?}", cid, param_env); // we start out with the best span we have // and try improving it down the road when more information is available let span = tcx.def_span(cid.instance.def_id()); let mut span = mir.map(|mir| mir.span).unwrap_or(span); let mut ecx = EvalContext::new(tcx.at(span), param_env, CompileTimeEvaluator, ()); let res = (|| { let mut mir = match mir { Some(mir) => mir, None => ecx.load_mir(cid.instance.def)?, }; if let Some(index) = cid.promoted { mir = &mir.promoted[index]; } span = mir.span; let layout = ecx.layout_of(mir.return_ty().subst(tcx, cid.instance.substs))?; let alloc = tcx.interpret_interner.get_cached(cid.instance.def_id()); let is_static = tcx.is_static(cid.instance.def_id()).is_some(); let alloc = match alloc { Some(alloc) => { assert!(cid.promoted.is_none()); assert!(param_env.caller_bounds.is_empty()); alloc }, None => { assert!(!layout.is_unsized()); let ptr = ecx.memory.allocate( layout.size.bytes(), layout.align, None, )?; if is_static { tcx.interpret_interner.cache(cid.instance.def_id(), ptr.alloc_id); } let internally_mutable = !layout.ty.is_freeze(tcx, param_env, mir.span); let mutability = tcx.is_static(cid.instance.def_id()); let mutability = if mutability == Some(hir::Mutability::MutMutable) || internally_mutable { 
Mutability::Mutable } else { Mutability::Immutable }; let cleanup = StackPopCleanup::MarkStatic(mutability); let name = ty::tls::with(|tcx| tcx.item_path_str(cid.instance.def_id())); let prom = cid.promoted.map_or(String::new(), |p| format!("::promoted[{:?}]", p)); trace!("const_eval: pushing stack frame for global: {}{}", name, prom); assert!(mir.arg_count == 0); ecx.push_stack_frame( cid.instance, mir.span, mir, Place::from_ptr(ptr, layout.align), cleanup, )?; while ecx.step()? {} ptr.alloc_id } }; let ptr = MemoryPointer::new(alloc, 0).into(); // always try to read the value and report errors let value = match ecx.try_read_value(ptr, layout.align, layout.ty)? { // if it's a constant (so it needs no address, directly compute its value) Some(val) if !is_static => val, // point at the allocation _ => Value::ByRef(ptr, layout.align), }; Ok((value, ptr, layout.ty)) })(); (res, ecx) } pub struct CompileTimeEvaluator; impl<'tcx> Into<EvalError<'tcx>> for ConstEvalError { fn into(self) -> EvalError<'tcx> { EvalErrorKind::MachineError(self.to_string()).into() } } #[derive(Clone, Debug)] enum ConstEvalError { NeedsRfc(String), NotConst(String), } impl fmt::Display for ConstEvalError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { use self::ConstEvalError::*; match *self { NeedsRfc(ref msg) => { write!( f, "\"{}\" needs an rfc before being allowed inside constants", msg ) } NotConst(ref msg) => write!(f, "{}", msg), } } } impl Error for ConstEvalError { fn description(&self) -> &str { use self::ConstEvalError::*; match *self { NeedsRfc(_) => "this feature needs an rfc before being allowed inside constants", NotConst(_) => "this feature is not compatible with constant evaluation", } } fn cause(&self) -> Option<&dyn Error> { None } } impl<'mir, 'tcx> super::Machine<'mir, 'tcx> for CompileTimeEvaluator { type MemoryData = (); type MemoryKinds = !; fn eval_fn_call<'a>( ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, instance: ty::Instance<'tcx>, destination: 
Option<(Place, mir::BasicBlock)>, args: &[ValTy<'tcx>], span: Span, sig: ty::FnSig<'tcx>, ) -> EvalResult<'tcx, bool> { debug!("eval_fn_call: {:?}", instance); if !ecx.tcx.is_const_fn(instance.def_id()) { let def_id = instance.def_id(); let (op, oflo) = if let Some(op) = ecx.tcx.is_binop_lang_item(def_id) { op } else { return Err( ConstEvalError::NotConst(format!("calling non-const fn `{}`", instance)).into(), ); }; let (dest, bb) = destination.expect("128 lowerings can't diverge"); let dest_ty = sig.output(); if oflo { ecx.intrinsic_with_overflow(op, args[0], args[1], dest, dest_ty)?; } else { ecx.intrinsic_overflowing(op, args[0], args[1], dest, dest_ty)?; } ecx.goto_block(bb); return Ok(true); } let mir = match ecx.load_mir(instance.def) { Ok(mir) => mir, Err(err) => { if let EvalErrorKind::NoMirFor(ref path) = err.kind { return Err( ConstEvalError::NeedsRfc(format!("calling extern function `{}`", path)) .into(), ); } return Err(err); } }; let (return_place, return_to_block) = match destination { Some((place, block)) => (place, StackPopCleanup::Goto(block)), None => (Place::undef(), StackPopCleanup::None), }; ecx.push_stack_frame( instance, span, mir, return_place, return_to_block, )?; Ok(false) } fn call_intrinsic<'a>( ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, instance: ty::Instance<'tcx>, _args: &[ValTy<'tcx>], dest: Place, dest_layout: layout::TyLayout<'tcx>, target: mir::BasicBlock, ) -> EvalResult<'tcx> { let substs = instance.substs; let intrinsic_name = &ecx.tcx.item_name(instance.def_id())[..]; match intrinsic_name { "min_align_of" => { let elem_ty = substs.type_at(0); let elem_align = ecx.layout_of(elem_ty)?.align.abi(); let align_val = PrimVal::from_u128(elem_align as u128); ecx.write_primval(dest, align_val, dest_layout.ty)?; } "size_of" => { let ty = substs.type_at(0); let size = ecx.layout_of(ty)?.size.bytes() as u128; ecx.write_primval(dest, PrimVal::from_u128(size), dest_layout.ty)?; } "type_id" => { let ty = substs.type_at(0); let type_id = 
ecx.tcx.type_id_hash(ty) as u128; ecx.write_primval(dest, PrimVal::from_u128(type_id), dest_layout.ty)?; } name => return Err(ConstEvalError::NeedsRfc(format!("calling intrinsic `{}`", name)).into()), } ecx.goto_block(target); // Since we pushed no stack frame, the main loop will act // as if the call just completed and it's returning to the // current frame. Ok(()) } fn try_ptr_op<'a>( _ecx: &EvalContext<'a, 'mir, 'tcx, Self>, _bin_op: mir::BinOp, left: PrimVal, _left_ty: Ty<'tcx>, right: PrimVal, _right_ty: Ty<'tcx>, ) -> EvalResult<'tcx, Option<(PrimVal, bool)>> { if left.is_bytes() && right.is_bytes() { Ok(None) } else { Err( ConstEvalError::NeedsRfc("Pointer arithmetic or comparison".to_string()).into(), ) } } fn mark_static_initialized<'a>( _mem: &mut Memory<'a, 'mir, 'tcx, Self>, _id: AllocId, _mutability: Mutability, ) -> EvalResult<'tcx, bool> { Ok(false) } fn init_static<'a>( ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, cid: GlobalId<'tcx>, ) -> EvalResult<'tcx, AllocId> { let alloc = ecx .tcx .interpret_interner .get_cached(cid.instance.def_id()); // Don't evaluate when already cached to prevent cycles if let Some(alloc) = alloc { return Ok(alloc) } // ensure the static is computed ecx.const_eval(cid)?; Ok(ecx .tcx .interpret_interner .get_cached(cid.instance.def_id()) .expect("uncached static")) } fn box_alloc<'a>( _ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, _ty: Ty<'tcx>, _dest: Place, ) -> EvalResult<'tcx> { Err( ConstEvalError::NeedsRfc("Heap allocations via `box` keyword".to_string()).into(), ) } fn global_item_with_linkage<'a>( _ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, _instance: ty::Instance<'tcx>, _mutability: Mutability, ) -> EvalResult<'tcx> { Err( ConstEvalError::NotConst("statics with `linkage` attribute".to_string()).into(), ) } } pub fn const_val_field<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, instance: ty::Instance<'tcx>, variant: Option<usize>, field: mir::Field, value: Value, ty: Ty<'tcx>, ) -> 
::rustc::middle::const_val::EvalResult<'tcx> { trace!("const_val_field: {:?}, {:?}, {:?}, {:?}", instance, field, value, ty); let mut ecx = mk_eval_cx(tcx, instance, param_env).unwrap(); let result = (|| { let (mut field, ty) = match value { Value::ByValPair(..) | Value::ByVal(_) => ecx.read_field(value, variant, field, ty)?.expect("const_val_field on non-field"), Value::ByRef(ptr, align) => { let place = Place::Ptr { ptr, align, extra: variant.map_or(PlaceExtra::None, PlaceExtra::DowncastVariant), }; let layout = ecx.layout_of(ty)?; let (place, layout) = ecx.place_field(place, field, layout)?; let (ptr, align) = place.to_ptr_align(); (Value::ByRef(ptr, align), layout.ty) } }; if let Value::ByRef(ptr, align) = field { if let Some(val) = ecx.try_read_value(ptr, align, ty)? { field = val; } } Ok((field, ty)) })(); match result { Ok((field, ty)) => Ok(tcx.mk_const(ty::Const { val: ConstVal::Value(field), ty, })), Err(err) => { let (trace, span) = ecx.generate_stacktrace(None); let err = ErrKind::Miri(err, trace); Err(ConstEvalErr { kind: err.into(), span, }) }, } } pub fn const_discr<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, instance: ty::Instance<'tcx>, value: Value, ty: Ty<'tcx>, ) -> EvalResult<'tcx, u128> { trace!("const_discr: {:?}, {:?}, {:?}", instance, value, ty); let mut ecx = mk_eval_cx(tcx, instance, param_env).unwrap(); let (ptr, align) = match value { Value::ByValPair(..) 
| Value::ByVal(_) => { let layout = ecx.layout_of(ty)?; use super::MemoryKind; let ptr = ecx.memory.allocate(layout.size.bytes(), layout.align, Some(MemoryKind::Stack))?; let ptr: Pointer = ptr.into(); ecx.write_value_to_ptr(value, ptr, layout.align, ty)?; (ptr, layout.align) }, Value::ByRef(ptr, align) => (ptr, align), }; let place = Place::from_primval_ptr(ptr, align); ecx.read_discriminant_value(place, ty) } pub fn const_eval_provider<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>, ) -> ::rustc::middle::const_val::EvalResult<'tcx> { trace!("const eval: {:?}", key); let cid = key.value; let def_id = cid.instance.def.def_id(); if tcx.is_foreign_item(def_id) { let id = tcx.interpret_interner.get_cached(def_id); let id = match id { // FIXME: due to caches this shouldn't happen, add some assertions Some(id) => id, None => { let id = tcx.interpret_interner.reserve(); tcx.interpret_interner.cache(def_id, id); id }, }; let ty = tcx.type_of(def_id); let layout = tcx.layout_of(key.param_env.and(ty)).unwrap(); let ptr = MemoryPointer::new(id, 0); return Ok(tcx.mk_const(ty::Const { val: ConstVal::Value(Value::ByRef(ptr.into(), layout.align)), ty, })) } if let Some(id) = tcx.hir.as_local_node_id(def_id) { let tables = tcx.typeck_tables_of(def_id); let span = tcx.def_span(def_id); // Do match-check before building MIR if tcx.check_match(def_id).is_err() { return Err(ConstEvalErr { kind: Lrc::new(CheckMatchError), span, }); } if let hir::BodyOwnerKind::Const = tcx.hir.body_owner_kind(id) { tcx.mir_const_qualif(def_id); } // Do not continue into miri if typeck errors occurred; it will fail horribly if tables.tainted_by_errors { return Err(ConstEvalErr { kind: Lrc::new(TypeckError), span, }); } }; let (res, ecx) = eval_body_and_ecx(tcx, cid, None, key.param_env); res.map(|(miri_value, _, miri_ty)| { tcx.mk_const(ty::Const { val: ConstVal::Value(miri_value), ty: miri_ty, }) }).map_err(|mut err| { if tcx.is_static(def_id).is_some() { 
ecx.report(&mut err, true, None); } let (trace, span) = ecx.generate_stacktrace(None); let err = ErrKind::Miri(err, trace); ConstEvalErr { kind: err.into(), span, } }) }
33.816794
136
0.52816
fb9a5be6d5af9a227dd49f28cd4a01a659361806
19,540
// Copyright 2020 The Matrix.org Foundation C.I.C. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. mod helpers; mod inner_sas; mod sas_state; use std::sync::{Arc, Mutex}; #[cfg(test)] use std::time::Instant; use inner_sas::InnerSas; use matrix_sdk_common::uuid::Uuid; use ruma::{ api::client::r0::keys::upload_signatures::Request as SignatureUploadRequest, events::{ key::verification::{ accept::{AcceptEventContent, AcceptMethod, AcceptToDeviceEventContent}, cancel::CancelCode, ShortAuthenticationString, }, AnyMessageEventContent, AnyToDeviceEventContent, }, DeviceId, EventId, RoomId, UserId, }; use tracing::trace; use super::{ event_enums::{AnyVerificationContent, OutgoingContent, OwnedAcceptContent, StartContent}, FlowId, IdentitiesBeingVerified, VerificationResult, }; use crate::{ identities::{ReadOnlyDevice, UserIdentities}, olm::PrivateCrossSigningIdentity, requests::{OutgoingVerificationRequest, RoomMessageRequest}, store::{CryptoStore, CryptoStoreError}, ReadOnlyAccount, ToDeviceRequest, }; /// Short authentication string object. #[derive(Clone, Debug)] pub struct Sas { inner: Arc<Mutex<InnerSas>>, account: ReadOnlyAccount, identities_being_verified: IdentitiesBeingVerified, flow_id: Arc<FlowId>, } impl Sas { /// Get our own user id. pub fn user_id(&self) -> &UserId { self.account.user_id() } /// Get our own device id. pub fn device_id(&self) -> &DeviceId { self.account.device_id() } /// Get the user id of the other side. 
pub fn other_user_id(&self) -> &UserId { self.identities_being_verified.other_user_id() } /// Get the device id of the other side. pub fn other_device_id(&self) -> &DeviceId { self.identities_being_verified.other_device_id() } /// Get the device of the other user. pub fn other_device(&self) -> &ReadOnlyDevice { self.identities_being_verified.other_device() } /// Get the unique ID that identifies this SAS verification flow. pub fn flow_id(&self) -> &FlowId { &self.flow_id } /// Does this verification flow support displaying emoji for the short /// authentication string. pub fn supports_emoji(&self) -> bool { self.inner.lock().unwrap().supports_emoji() } /// Did this verification flow start from a verification request. pub fn started_from_request(&self) -> bool { self.inner.lock().unwrap().started_from_request() } /// Is this a verification that is veryfying one of our own devices. pub fn is_self_verification(&self) -> bool { self.identities_being_verified.is_self_verification() } #[cfg(test)] #[allow(dead_code)] pub(crate) fn set_creation_time(&self, time: Instant) { self.inner.lock().unwrap().set_creation_time(time) } fn start_helper( inner_sas: InnerSas, account: ReadOnlyAccount, private_identity: PrivateCrossSigningIdentity, other_device: ReadOnlyDevice, store: Arc<dyn CryptoStore>, other_identity: Option<UserIdentities>, ) -> Sas { let flow_id = inner_sas.verification_flow_id(); let identities = IdentitiesBeingVerified { private_identity, store: store.clone(), device_being_verified: other_device, identity_being_verified: other_identity, }; Sas { inner: Arc::new(Mutex::new(inner_sas)), account, identities_being_verified: identities, flow_id, } } /// Start a new SAS auth flow with the given device. /// /// # Arguments /// /// * `account` - Our own account. /// /// * `other_device` - The other device which we are going to verify. /// /// Returns the new `Sas` object and a `StartEventContent` that needs to be /// sent out through the server to the other device. 
pub(crate) fn start( account: ReadOnlyAccount, private_identity: PrivateCrossSigningIdentity, other_device: ReadOnlyDevice, store: Arc<dyn CryptoStore>, other_identity: Option<UserIdentities>, transaction_id: Option<String>, ) -> (Sas, OutgoingContent) { let (inner, content) = InnerSas::start( account.clone(), other_device.clone(), other_identity.clone(), transaction_id, ); ( Self::start_helper( inner, account, private_identity, other_device, store, other_identity, ), content, ) } /// Start a new SAS auth flow with the given device inside the given room. /// /// # Arguments /// /// * `account` - Our own account. /// /// * `other_device` - The other device which we are going to verify. /// /// Returns the new `Sas` object and a `StartEventContent` that needs to be /// sent out through the server to the other device. pub(crate) fn start_in_room( flow_id: EventId, room_id: RoomId, account: ReadOnlyAccount, private_identity: PrivateCrossSigningIdentity, other_device: ReadOnlyDevice, store: Arc<dyn CryptoStore>, other_identity: Option<UserIdentities>, ) -> (Sas, OutgoingContent) { let (inner, content) = InnerSas::start_in_room( flow_id, room_id, account.clone(), other_device.clone(), other_identity.clone(), ); ( Self::start_helper( inner, account, private_identity, other_device, store, other_identity, ), content, ) } /// Create a new Sas object from a m.key.verification.start request. /// /// # Arguments /// /// * `account` - Our own account. /// /// * `other_device` - The other device which we are going to verify. /// /// * `event` - The m.key.verification.start event that was sent to us by /// the other side. 
#[allow(clippy::too_many_arguments)] pub(crate) fn from_start_event( flow_id: FlowId, content: &StartContent, store: Arc<dyn CryptoStore>, account: ReadOnlyAccount, private_identity: PrivateCrossSigningIdentity, other_device: ReadOnlyDevice, other_identity: Option<UserIdentities>, started_from_request: bool, ) -> Result<Sas, OutgoingContent> { let inner = InnerSas::from_start_event( account.clone(), other_device.clone(), flow_id, content, other_identity.clone(), started_from_request, )?; Ok(Self::start_helper( inner, account, private_identity, other_device, store, other_identity, )) } /// Accept the SAS verification. /// /// This does nothing if the verification was already accepted, otherwise it /// returns an `AcceptEventContent` that needs to be sent out. pub fn accept(&self) -> Option<OutgoingVerificationRequest> { self.accept_with_settings(Default::default()) } /// Accept the SAS verification customizing the accept method. /// /// This does nothing if the verification was already accepted, otherwise it /// returns an `AcceptEventContent` that needs to be sent out. /// /// Specify a function modifying the attributes of the accept request. pub fn accept_with_settings( &self, settings: AcceptSettings, ) -> Option<OutgoingVerificationRequest> { self.inner.lock().unwrap().accept().map(|c| match settings.apply(c) { OwnedAcceptContent::ToDevice(c) => { let content = AnyToDeviceEventContent::KeyVerificationAccept(c); self.content_to_request(content).into() } OwnedAcceptContent::Room(room_id, content) => RoomMessageRequest { room_id, txn_id: Uuid::new_v4(), content: AnyMessageEventContent::KeyVerificationAccept(content), } .into(), }) } /// Confirm the Sas verification. /// /// This confirms that the short auth strings match on both sides. /// /// Does nothing if we're not in a state where we can confirm the short auth /// string, otherwise returns a `MacEventContent` that needs to be sent to /// the server. 
pub async fn confirm( &self, ) -> Result< (Option<OutgoingVerificationRequest>, Option<SignatureUploadRequest>), CryptoStoreError, > { let (content, done) = { let mut guard = self.inner.lock().unwrap(); let sas: InnerSas = (*guard).clone(); let (sas, content) = sas.confirm(); *guard = sas; (content, guard.is_done()) }; let mac_request = content.map(|c| match c { OutgoingContent::ToDevice(c) => self.content_to_request(c).into(), OutgoingContent::Room(r, c) => { RoomMessageRequest { room_id: r, txn_id: Uuid::new_v4(), content: c }.into() } }); if mac_request.is_some() { trace!( user_id = self.other_user_id().as_str(), device_id = self.other_device_id().as_str(), "Confirming SAS verification" ) } if done { match self.mark_as_done().await? { VerificationResult::Cancel(c) => Ok((self.cancel_with_code(c), None)), VerificationResult::Ok => Ok((mac_request, None)), VerificationResult::SignatureUpload(r) => Ok((mac_request, Some(r))), } } else { Ok((mac_request, None)) } } pub(crate) async fn mark_as_done(&self) -> Result<VerificationResult, CryptoStoreError> { self.identities_being_verified .mark_as_done(self.verified_devices().as_deref(), self.verified_identities().as_deref()) .await } /// Cancel the verification. /// /// This cancels the verification with the `CancelCode::User`. /// /// Returns None if the `Sas` object is already in a canceled state, /// otherwise it returns a request that needs to be sent out. 
pub fn cancel(&self) -> Option<OutgoingVerificationRequest> { self.cancel_with_code(CancelCode::User) } pub(crate) fn cancel_with_code(&self, code: CancelCode) -> Option<OutgoingVerificationRequest> { let mut guard = self.inner.lock().unwrap(); let sas: InnerSas = (*guard).clone(); let (sas, content) = sas.cancel(code); *guard = sas; content.map(|c| match c { OutgoingContent::Room(room_id, content) => { RoomMessageRequest { room_id, txn_id: Uuid::new_v4(), content }.into() } OutgoingContent::ToDevice(c) => self.content_to_request(c).into(), }) } pub(crate) fn cancel_if_timed_out(&self) -> Option<OutgoingVerificationRequest> { if self.is_cancelled() || self.is_done() { None } else if self.timed_out() { self.cancel_with_code(CancelCode::Timeout) } else { None } } /// Has the SAS verification flow timed out. pub fn timed_out(&self) -> bool { self.inner.lock().unwrap().timed_out() } /// Are we in a state where we can show the short auth string. pub fn can_be_presented(&self) -> bool { self.inner.lock().unwrap().can_be_presented() } /// Is the SAS flow done. pub fn is_done(&self) -> bool { self.inner.lock().unwrap().is_done() } /// Is the SAS flow canceled. pub fn is_cancelled(&self) -> bool { self.inner.lock().unwrap().is_cancelled() } /// Get the emoji version of the short auth string. /// /// Returns None if we can't yet present the short auth string, otherwise /// seven tuples containing the emoji and description. pub fn emoji(&self) -> Option<[(&'static str, &'static str); 7]> { self.inner.lock().unwrap().emoji() } /// Get the index of the emoji representing the short auth string /// /// Returns None if we can't yet present the short auth string, otherwise /// seven u8 numbers in the range from 0 to 63 inclusive which can be /// converted to an emoji using the /// [relevant spec entry](https://spec.matrix.org/unstable/client-server-api/#sas-method-emoji). 
pub fn emoji_index(&self) -> Option<[u8; 7]> { self.inner.lock().unwrap().emoji_index() } /// Get the decimal version of the short auth string. /// /// Returns None if we can't yet present the short auth string, otherwise a /// tuple containing three 4-digit integers that represent the short auth /// string. pub fn decimals(&self) -> Option<(u16, u16, u16)> { self.inner.lock().unwrap().decimals() } pub(crate) fn receive_any_event( &self, sender: &UserId, content: &AnyVerificationContent, ) -> Option<OutgoingContent> { let mut guard = self.inner.lock().unwrap(); let sas: InnerSas = (*guard).clone(); let (sas, content) = sas.receive_any_event(sender, content); *guard = sas; content } pub(crate) fn verified_devices(&self) -> Option<Arc<[ReadOnlyDevice]>> { self.inner.lock().unwrap().verified_devices() } pub(crate) fn verified_identities(&self) -> Option<Arc<[UserIdentities]>> { self.inner.lock().unwrap().verified_identities() } pub(crate) fn content_to_request(&self, content: AnyToDeviceEventContent) -> ToDeviceRequest { ToDeviceRequest::new(self.other_user_id(), self.other_device_id().to_owned(), content) } } /// Customize the accept-reply for a verification process #[derive(Debug)] pub struct AcceptSettings { allowed_methods: Vec<ShortAuthenticationString>, } impl Default for AcceptSettings { /// All methods are allowed fn default() -> Self { Self { allowed_methods: vec![ ShortAuthenticationString::Decimal, ShortAuthenticationString::Emoji, ], } } } impl AcceptSettings { /// Create settings restricting the allowed SAS methods /// /// # Arguments /// /// * `methods` - The methods this client allows at most pub fn with_allowed_methods(methods: Vec<ShortAuthenticationString>) -> Self { Self { allowed_methods: methods } } fn apply(self, mut content: OwnedAcceptContent) -> OwnedAcceptContent { match &mut content { OwnedAcceptContent::ToDevice(AcceptToDeviceEventContent { method: AcceptMethod::SasV1(c), .. 
}) | OwnedAcceptContent::Room( _, AcceptEventContent { method: AcceptMethod::SasV1(c), .. }, ) => { c.short_authentication_string.retain(|sas| self.allowed_methods.contains(sas)); content } _ => content, } } } #[cfg(test)] mod test { use std::{convert::TryFrom, sync::Arc}; use ruma::{DeviceId, UserId}; use super::Sas; use crate::{ olm::PrivateCrossSigningIdentity, store::{CryptoStore, MemoryStore}, verification::event_enums::{ AcceptContent, KeyContent, MacContent, OutgoingContent, StartContent, }, ReadOnlyAccount, ReadOnlyDevice, }; fn alice_id() -> UserId { UserId::try_from("@alice:example.org").unwrap() } fn alice_device_id() -> Box<DeviceId> { "JLAFKJWSCS".into() } fn bob_id() -> UserId { UserId::try_from("@bob:example.org").unwrap() } fn bob_device_id() -> Box<DeviceId> { "BOBDEVCIE".into() } #[tokio::test] async fn sas_wrapper_full() { let alice = ReadOnlyAccount::new(&alice_id(), &alice_device_id()); let alice_device = ReadOnlyDevice::from_account(&alice).await; let bob = ReadOnlyAccount::new(&bob_id(), &bob_device_id()); let bob_device = ReadOnlyDevice::from_account(&bob).await; let alice_store: Arc<dyn CryptoStore> = Arc::new(MemoryStore::new()); let bob_store = MemoryStore::new(); bob_store.save_devices(vec![alice_device.clone()]).await; let bob_store: Arc<dyn CryptoStore> = Arc::new(bob_store); let (alice, content) = Sas::start( alice, PrivateCrossSigningIdentity::empty(alice_id()), bob_device, alice_store, None, None, ); let flow_id = alice.flow_id().to_owned(); let content = StartContent::try_from(&content).unwrap(); let bob = Sas::from_start_event( flow_id, &content, bob_store, bob, PrivateCrossSigningIdentity::empty(bob_id()), alice_device, None, false, ) .unwrap(); let request = bob.accept().unwrap(); let content = OutgoingContent::try_from(request).unwrap(); let content = AcceptContent::try_from(&content).unwrap(); let content = alice.receive_any_event(bob.user_id(), &content.into()).unwrap(); assert!(!alice.can_be_presented()); 
assert!(!bob.can_be_presented()); let content = KeyContent::try_from(&content).unwrap(); let content = bob.receive_any_event(alice.user_id(), &content.into()).unwrap(); assert!(bob.can_be_presented()); let content = KeyContent::try_from(&content).unwrap(); alice.receive_any_event(bob.user_id(), &content.into()); assert!(alice.can_be_presented()); assert_eq!(alice.emoji().unwrap(), bob.emoji().unwrap()); assert_eq!(alice.decimals().unwrap(), bob.decimals().unwrap()); let request = alice.confirm().await.unwrap().0.unwrap(); let content = OutgoingContent::try_from(request).unwrap(); let content = MacContent::try_from(&content).unwrap(); bob.receive_any_event(alice.user_id(), &content.into()); let request = bob.confirm().await.unwrap().0.unwrap(); let content = OutgoingContent::try_from(request).unwrap(); let content = MacContent::try_from(&content).unwrap(); alice.receive_any_event(bob.user_id(), &content.into()); assert!(alice.verified_devices().unwrap().contains(alice.other_device())); assert!(bob.verified_devices().unwrap().contains(bob.other_device())); } }
32.566667
100
0.601177
330865f877e8a81c71ca5bef5c3ed1d6262b56c1
748
use crate::formatter_traits::FormatTokenAndNode; use crate::{ format_elements, space_token, FormatElement, FormatResult, Formatter, ToFormatElement, }; use rome_js_syntax::TsPredicateReturnType; use rome_js_syntax::TsPredicateReturnTypeFields; impl ToFormatElement for TsPredicateReturnType { fn to_format_element(&self, formatter: &Formatter) -> FormatResult<FormatElement> { let TsPredicateReturnTypeFields { parameter_name, is_token, ty, } = self.as_fields(); Ok(format_elements![ parameter_name.format(formatter)?, space_token(), is_token.format(formatter)?, space_token(), ty.format(formatter)? ]) } }
31.166667
90
0.656417
e9d4466e798d2c398ce3b708a1bb5ee09727fba0
1,860
/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ use aws_config::meta::region::RegionProviderChain; use aws_sdk_s3::{Client, Error, Region}; use uuid::Uuid; #[ignore] #[tokio::test] async fn test_it_runs() { let (region, client, bucket_name, file_name, key, target_key) = setup().await; match run_s3_operations(region, client, bucket_name, file_name, key, target_key).await { Err(_e) => assert!(false), _ => assert!(true), } } async fn run_s3_operations( region: Region, client: Client, bucket_name: String, file_name: String, key: String, target_key: String, ) -> Result<(), Error> { s3_service::create_bucket(&client, &bucket_name, region.as_ref()).await?; s3_service::upload_object(&client, &bucket_name, &file_name, &key).await?; s3_service::download_object(&client, &bucket_name, &key).await?; s3_service::copy_object(&client, &bucket_name, &key, &target_key).await?; s3_service::list_objects(&client, &bucket_name).await?; s3_service::delete_objects(&client, &bucket_name).await?; s3_service::delete_bucket(&client, &bucket_name).await?; Ok(()) } async fn setup() -> (Region, Client, String, String, String, String) { let region_provider = RegionProviderChain::first_try(Region::new("us-west-2")); let region = region_provider.region().await.unwrap(); let shared_config = aws_config::from_env().region(region_provider).load().await; let client = Client::new(&shared_config); let bucket_name = format!("{}{}", "doc-example-bucket-", Uuid::new_v4().to_string()); let file_name = "../s3/testfile.txt".to_string(); let key = "test file key name".to_string(); let target_key = "target_key".to_string(); (region, client, bucket_name, file_name, key, target_key) }
35.09434
92
0.683333
26bc06eee9bbdca1d99826a8a9a240a825007d66
4,497
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files.git) // DO NOT EDIT use glib::object::Cast; use glib::object::IsA; use glib::signal::connect_raw; use glib::signal::SignalHandlerId; use glib::translate::*; use std::boxed::Box as Box_; use std::fmt; use std::mem::transmute; use std::ptr; glib::wrapper! { #[doc(alias = "GskRenderer")] pub struct Renderer(Object<ffi::GskRenderer, ffi::GskRendererClass>); match fn { type_ => || ffi::gsk_renderer_get_type(), } } impl Renderer { #[doc(alias = "gsk_renderer_new_for_surface")] #[doc(alias = "new_for_surface")] pub fn for_surface(surface: &gdk::Surface) -> Option<Renderer> { assert_initialized_main_thread!(); unsafe { from_glib_full(ffi::gsk_renderer_new_for_surface(surface.to_glib_none().0)) } } } impl Renderer { pub const NONE: Option<&'static Renderer> = None; } pub trait GskRenderer: 'static { #[doc(alias = "gsk_renderer_get_surface")] #[doc(alias = "get_surface")] fn surface(&self) -> Option<gdk::Surface>; #[doc(alias = "gsk_renderer_is_realized")] fn is_realized(&self) -> bool; #[doc(alias = "gsk_renderer_realize")] fn realize(&self, surface: Option<&gdk::Surface>) -> Result<(), glib::Error>; #[doc(alias = "gsk_renderer_unrealize")] fn unrealize(&self); #[doc(alias = "realized")] fn connect_realized_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; #[doc(alias = "surface")] fn connect_surface_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; } impl<O: IsA<Renderer>> GskRenderer for O { fn surface(&self) -> Option<gdk::Surface> { unsafe { from_glib_none(ffi::gsk_renderer_get_surface( self.as_ref().to_glib_none().0, )) } } fn is_realized(&self) -> bool { unsafe { from_glib(ffi::gsk_renderer_is_realized( self.as_ref().to_glib_none().0, )) } } fn realize(&self, surface: Option<&gdk::Surface>) -> Result<(), glib::Error> { unsafe { let mut error = ptr::null_mut(); let _ = ffi::gsk_renderer_realize( 
self.as_ref().to_glib_none().0, surface.to_glib_none().0, &mut error, ); if error.is_null() { Ok(()) } else { Err(from_glib_full(error)) } } } fn unrealize(&self) { unsafe { ffi::gsk_renderer_unrealize(self.as_ref().to_glib_none().0); } } fn connect_realized_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_realized_trampoline<P: IsA<Renderer>, F: Fn(&P) + 'static>( this: *mut ffi::GskRenderer, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(Renderer::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::realized\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_realized_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } fn connect_surface_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_surface_trampoline<P: IsA<Renderer>, F: Fn(&P) + 'static>( this: *mut ffi::GskRenderer, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(Renderer::from_glib_borrow(this).unsafe_cast_ref()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::surface\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_surface_trampoline::<Self, F> as *const (), )), Box_::into_raw(f), ) } } } impl fmt::Display for Renderer { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("Renderer") } }
30.591837
95
0.541917
e453a0516376c80e915464d608db12820f8c65ac
2,831
use crate::prelude::*; use flex_error::{define_error, TraceError}; use crate::ics24_host::error::ValidationError; define_error! { #[derive(Debug, PartialEq, Eq)] Error { InvalidTrustingPeriod { reason: String } | _ | { "invalid trusting period" }, InvalidUnboundingPeriod { reason: String } | _ | { "invalid unbonding period" }, InvalidAddress | _ | { "invalid address" }, InvalidHeader { reason: String } [ tendermint::Error ] | _ | { "invalid header, failed basic validation" }, InvalidTrustThreshold { reason: String } | e | { format_args!("invalid client state trust threshold: {}", e.reason) }, MissingSignedHeader | _ | { "missing signed header" }, Validation { reason: String } | _ | { "invalid header, failed basic validation" }, InvalidRawClientState { reason: String } | _ | { "invalid raw client state" }, MissingValidatorSet | _ | { "missing validator set" }, MissingTrustedValidatorSet | _ | { "missing trusted validator set" }, MissingTrustedHeight | _ | { "missing trusted height" }, MissingTrustingPeriod | _ | { "missing trusting period" }, MissingUnbondingPeriod | _ | { "missing unbonding period" }, InvalidChainIdentifier [ ValidationError ] | _ | { "Invalid chain identifier" }, NegativeTrustingPeriod | _ | { "negative trusting period" }, NegativeUnbondingPeriod | _ | { "negative unbonding period" }, MissingMaxClockDrift | _ | { "missing max clock drift" }, NegativeMaxClockDrift | _ | { "negative max clock drift" }, MissingLatestHeight | _ | { "missing latest height" }, MissingFrozenHeight | _ | { "missing frozen height" }, InvalidChainId { raw_value: String } [ ValidationError ] | e | { format_args!("invalid chain identifier: raw value {0}", e.raw_value) }, InvalidRawHeight | _ | { "invalid raw height" }, InvalidRawConsensusState { reason: String } | _ | { "invalid raw client consensus state" }, InvalidRawHeader [ tendermint::Error ] | _ | { "invalid raw header" }, InvalidRawMisbehaviour { reason: String } | _ | { "invalid raw misbehaviour" }, Decode [ 
TraceError<prost::DecodeError> ] | _ | { "decode error" }, } }
26.457944
91
0.515719
094ae0bdb8db3d66d2bf2a46162c75e395b6f771
2,225
use anyhow::Result; use crate::common::{ Solution, Day }; use regex::Regex; use lazy_static::lazy_static; use super::YEAR; pub type TheDay = Day<YEAR, 2>; #[derive(Debug, PartialEq, Eq)] enum Command { Up(i32), Down(i32), Forward(i32), } impl Command { fn from_string(s: &str) -> Command { lazy_static! { static ref PATTERN: Regex = Regex::new(r"^(forward|down|up)\s(\d+)$").unwrap(); } let caps = PATTERN.captures(s).unwrap(); let arg = caps.get(2).map_or(0, |m| m.as_str().parse::<i32>().unwrap()); match caps.get(1).map_or("", |m| m.as_str()) { "forward" => Command::Forward(arg), "up" => Command::Up(arg), "down" => Command::Down(arg), _ => panic!("Unexpected input"), } } } fn parse_input(input: &str) -> Vec<Command> { input.lines().into_iter() .map(|s| Command::from_string(s)) .collect() } #[cfg(test)] mod tests { use super::*; #[test] pub fn test_command_parsing() { assert_eq!(Command::from_string("forward 3"), Command::Forward(3)); assert_eq!(Command::from_string("up 10"), Command::Up(10)); assert_eq!(Command::from_string("down 5"), Command::Down(5)); } } impl Solution for TheDay { type Output = i32; fn part1(&self, input: &str) -> Result<Self::Output> { let mut horiz = 0; let mut depth = 0; for cmd in parse_input(input).iter() { match cmd { Command::Forward(n) => horiz += n, Command::Up(n) => depth -= n, Command::Down(n) => depth += n, } } Ok(horiz * depth) } fn part2(&self, input: &str) -> Result<Self::Output> { let mut horiz = 0; let mut depth = 0; let mut aim = 0; for cmd in parse_input(input).iter() { match cmd { Command::Forward(n) => { horiz += n; depth += aim * n; }, Command::Up(n) => aim -= n, Command::Down(n) => aim += n, } } Ok(horiz * depth) } }
25
91
0.485393
9157f9b9a5b521585f463ae6d983c5ededbbf4a3
6,108
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT use crate::Display; use crate::KeymapKey; use crate::ModifierIntent; use crate::ModifierType; use glib::object::ObjectType as ObjectType_; use glib::signal::connect_raw; use glib::signal::SignalHandlerId; use glib::translate::*; use std::boxed::Box as Box_; use std::fmt; use std::mem; use std::mem::transmute; glib::wrapper! { pub struct Keymap(Object<ffi::GdkKeymap>); match fn { get_type => || ffi::gdk_keymap_get_type(), } } impl Keymap { pub fn get_caps_lock_state(&self) -> bool { unsafe { from_glib(ffi::gdk_keymap_get_caps_lock_state(self.to_glib_none().0)) } } pub fn get_direction(&self) -> pango::Direction { unsafe { from_glib(ffi::gdk_keymap_get_direction(self.to_glib_none().0)) } } pub fn get_modifier_mask(&self, intent: ModifierIntent) -> ModifierType { unsafe { from_glib(ffi::gdk_keymap_get_modifier_mask( self.to_glib_none().0, intent.to_glib(), )) } } pub fn get_modifier_state(&self) -> u32 { unsafe { ffi::gdk_keymap_get_modifier_state(self.to_glib_none().0) } } pub fn get_num_lock_state(&self) -> bool { unsafe { from_glib(ffi::gdk_keymap_get_num_lock_state(self.to_glib_none().0)) } } #[cfg(any(feature = "v3_18", feature = "dox"))] #[cfg_attr(feature = "dox", doc(cfg(feature = "v3_18")))] pub fn get_scroll_lock_state(&self) -> bool { unsafe { from_glib(ffi::gdk_keymap_get_scroll_lock_state(self.to_glib_none().0)) } } pub fn have_bidi_layouts(&self) -> bool { unsafe { from_glib(ffi::gdk_keymap_have_bidi_layouts(self.to_glib_none().0)) } } pub fn lookup_key(&self, key: &KeymapKey) -> u32 { unsafe { ffi::gdk_keymap_lookup_key(self.to_glib_none().0, key.to_glib_none().0) } } pub fn translate_keyboard_state( &self, hardware_keycode: u32, state: ModifierType, group: i32, ) -> Option<(u32, i32, i32, ModifierType)> { unsafe { let mut keyval = mem::MaybeUninit::uninit(); let mut effective_group = mem::MaybeUninit::uninit(); let 
mut level = mem::MaybeUninit::uninit(); let mut consumed_modifiers = mem::MaybeUninit::uninit(); let ret = from_glib(ffi::gdk_keymap_translate_keyboard_state( self.to_glib_none().0, hardware_keycode, state.to_glib(), group, keyval.as_mut_ptr(), effective_group.as_mut_ptr(), level.as_mut_ptr(), consumed_modifiers.as_mut_ptr(), )); let keyval = keyval.assume_init(); let effective_group = effective_group.assume_init(); let level = level.assume_init(); let consumed_modifiers = consumed_modifiers.assume_init(); if ret { Some(( keyval, effective_group, level, from_glib(consumed_modifiers), )) } else { None } } } #[cfg_attr(feature = "v3_22", deprecated)] pub fn get_default() -> Option<Keymap> { assert_initialized_main_thread!(); unsafe { from_glib_none(ffi::gdk_keymap_get_default()) } } pub fn get_for_display(display: &Display) -> Option<Keymap> { skip_assert_initialized!(); unsafe { from_glib_none(ffi::gdk_keymap_get_for_display(display.to_glib_none().0)) } } pub fn connect_direction_changed<F: Fn(&Keymap) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn direction_changed_trampoline<F: Fn(&Keymap) + 'static>( this: *mut ffi::GdkKeymap, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"direction-changed\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( direction_changed_trampoline::<F> as *const (), )), Box_::into_raw(f), ) } } pub fn connect_keys_changed<F: Fn(&Keymap) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn keys_changed_trampoline<F: Fn(&Keymap) + 'static>( this: *mut ffi::GdkKeymap, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"keys-changed\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( keys_changed_trampoline::<F> as *const (), )), 
Box_::into_raw(f), ) } } pub fn connect_state_changed<F: Fn(&Keymap) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn state_changed_trampoline<F: Fn(&Keymap) + 'static>( this: *mut ffi::GdkKeymap, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"state-changed\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( state_changed_trampoline::<F> as *const (), )), Box_::into_raw(f), ) } } } impl fmt::Display for Keymap { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("Keymap") } }
33.195652
96
0.541912
ac0731a3f456b44cbdaffd1c3f3adf1359d27fd6
3,957
use crate::messages::raw_command::{try_parse_raw_command, RawCommand}; use crate::util::bytevec_to_str; use anyhow::Result; #[derive(Debug)] pub enum ClientCommand { Send { message: Vec<u8>, }, PrivateMessage { target: String, message: Vec<u8>, }, Join { channel: String, }, HostGame { game_name: String, password_or_guid: Vec<u8>, }, JoinGame { game_name: String, password: Vec<u8>, }, NoOp, Unknown { command: String, }, Malformed { reason: String, }, } fn concat_params(params: &[Vec<u8>]) -> Vec<u8> { let mut result = Vec::new(); for (i, param) in params.iter().enumerate() { if i != 0 { result.push(0x20); // space separator } result.extend_from_slice(&param); } result } fn send_from_raw(raw: &RawCommand) -> ClientCommand { if raw.params.is_empty() { return ClientCommand::Malformed { reason: "Missing parameters for /send".to_string(), }; } ClientCommand::Send { message: concat_params(&raw.params[..]), } } fn msg_from_raw(raw: &RawCommand) -> ClientCommand { if raw.params.len() < 2 { return ClientCommand::Malformed { reason: "Missing parameters for /msg".to_string(), }; } ClientCommand::PrivateMessage { target: bytevec_to_str(&raw.params[0]), message: concat_params(&raw.params[1..]), } } fn join_from_raw(raw: &RawCommand) -> ClientCommand { if raw.params.is_empty() { return ClientCommand::Malformed { reason: "Missing parameters for /join".to_string(), }; } ClientCommand::Join { channel: String::from_utf8_lossy(&concat_params(&raw.params[..])).to_string(), } } fn hostgame_from_raw(raw: &RawCommand) -> ClientCommand { if raw.params.len() < 3 { return ClientCommand::Malformed { reason: "Missing parameters for /plays".to_string(), }; } ClientCommand::HostGame { game_name: String::from_utf8_lossy(&raw.params[1]).to_string(), password_or_guid: raw.params[2].to_vec(), } } fn joingame_from_raw(raw: &RawCommand) -> ClientCommand { if raw.params.len() < 3 { return ClientCommand::Malformed { reason: "Missing parameters for /playc".to_string(), }; } ClientCommand::JoinGame { 
game_name: String::from_utf8_lossy(&raw.params[1]).to_string(), password: raw.params[2].to_vec(), } } fn match_raw_command(raw: RawCommand) -> ClientCommand { match raw.command.as_ref() { "send" => send_from_raw(&raw), "msg" => msg_from_raw(&raw), "join" => join_from_raw(&raw), "plays" => hostgame_from_raw(&raw), "playc" => joingame_from_raw(&raw), "playv" => ClientCommand::NoOp, "playd" => ClientCommand::NoOp, "playi" => ClientCommand::NoOp, "nop" => ClientCommand::NoOp, _ => ClientCommand::Unknown { command: raw.command, }, } } impl ClientCommand { pub fn try_parse(data: &mut Vec<u8>) -> Result<Option<ClientCommand>> { if let Some(position) = data.iter().position(|c| *c == 0) { let message_bytes = data.drain(..position + 1); log::debug!( "Received message: {}", bytevec_to_str(message_bytes.as_slice()) ); return match try_parse_raw_command(&message_bytes.as_slice()[..position]) { Ok(raw) => Ok(Some(match_raw_command(raw))), Err(_) => Ok(Some(ClientCommand::Malformed { reason: "Received message is invalid".to_string(), })), }; } match data.len() { n if n > 1024 => Err(anyhow::anyhow!("Message too long")), _ => Ok(None), } } }
27.866197
87
0.561031
3a6bd59b3acaa44133f76487ecb5377b2872c097
575
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // error-pattern:explicit panic fn main() { let _x = match true { false => { 0 } true => { panic!() } }; }
41.071429
74
0.709565
f53dc8d1032bff65db3304b5657e1365960975ff
295
// run-pass #![allow(non_camel_case_types)] #![feature(box_syntax)] trait double { fn double(self: Box<Self>) -> usize; } impl double for usize { fn double(self: Box<usize>) -> usize { *self * 2 } } pub fn main() { let x: Box<Box<_>> = box box 3; assert_eq!(x.double(), 6); }
17.352941
54
0.59661
90cac3191a153dff56350df3958988f0ee770ced
2,172
use crate::*; use core::{cmp, fmt, ops}; /// Length disassembler iterator. /// /// Instances are created by the [`Isa::iter`](trait.Isa.html#method.iter) method. pub struct Iter<'a, X: Isa> { /// The remaining bytes to length disassemble. pub bytes: &'a [u8], /// The current virtual address. pub va: X::Va, } impl<'a, X: Isa> Clone for Iter<'a, X> { fn clone(&self) -> Self { Iter { bytes: self.bytes, va: self.va, } } } impl<'a, X: Isa> Iter<'a, X> { /// Consumes a number of bytes from the input. pub fn consume(&mut self, n: usize) { let n = cmp::min(n, self.bytes.len()); self.bytes = &self.bytes[n..]; self.va += X::as_va(n); } } impl<'a, X: Isa> Iterator for Iter<'a, X> { type Item = Inst<'a, X>; fn next(&mut self) -> Option<Inst<'a, X>> { let inst_len = X::inst_len(self.bytes); if inst_len.total_len > 0 { let n = cmp::min(inst_len.total_len as usize, self.bytes.len()); let inst = Inst::new(&self.bytes[..n], self.va, inst_len); self.consume(n); Some(inst) } else { None } } } impl<'a, X: Isa> ops::Deref for Iter<'a, X> { type Target = [u8]; fn deref(&self) -> &[u8] { self.bytes } } /// Debug formatter. /// /// Single line, opcodes grouped with square brackets. /// Alternate flag to put spaces between the bytes. impl<'a, X: Isa> fmt::Debug for Iter<'a, X> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut iter = self.clone(); while let Some(inst) = iter.next() { f.write_str("[")?; fmt_bytes(inst.bytes(), b'a', f)?; f.write_str("] ")?; } fmt_bytes(iter.bytes, b'a', f) } } /// Display formatter. /// /// One line per opcode. /// Alternate flag to put spaces between the bytes. impl<'a, X: Isa> fmt::Display for Iter<'a, X> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { for inst in self.clone() { fmt_bytes(inst.bytes(), b'a', f)?; f.write_str("\n")?; } Ok(()) } }
26.168675
82
0.520258
d6d2f546cda91cc43710784a5297c7bf2a5ddbbf
1,988
//! Handles COM initialization and cleanup. use super::IoError; use std::marker::PhantomData; use std::ptr; use super::winapi::shared::winerror::{HRESULT, RPC_E_CHANGED_MODE, SUCCEEDED}; use super::winapi::um::combaseapi::{CoInitializeEx, CoUninitialize}; use super::winapi::um::objbase::COINIT_APARTMENTTHREADED; thread_local!(static COM_INITIALIZED: ComInitialized = { unsafe { // Try to initialize COM with STA by default to avoid compatibility issues with the ASIO // backend (where CoInitialize() is called by the ASIO SDK) or winit (where drag and drop // requires STA). // This call can fail with RPC_E_CHANGED_MODE if another library initialized COM with MTA. // That's OK though since COM ensures thread-safety/compatibility through marshalling when // necessary. let result = CoInitializeEx(ptr::null_mut(), COINIT_APARTMENTTHREADED); if SUCCEEDED(result) || result == RPC_E_CHANGED_MODE { ComInitialized { result, _ptr: PhantomData, } } else { // COM initialization failed in another way, something is really wrong. panic!("Failed to initialize COM: {}", IoError::from_raw_os_error(result)); } } }); /// RAII object that guards the fact that COM is initialized. /// // We store a raw pointer because it's the only way at the moment to remove `Send`/`Sync` from the // object. struct ComInitialized { result: HRESULT, _ptr: PhantomData<*mut ()>, } impl Drop for ComInitialized { #[inline] fn drop(&mut self) { // Need to avoid calling CoUninitialize() if CoInitializeEx failed since it may have // returned RPC_E_MODE_CHANGED - which is OK, see above. if SUCCEEDED(self.result) { unsafe { CoUninitialize() }; } } } /// Ensures that COM is initialized in this thread. #[inline] pub fn com_initialized() { COM_INITIALIZED.with(|_| {}); }
34.877193
98
0.65996
9b2b048749e10a96f55f804a852a761b07b51963
1,184
#[doc = "Reader of register TCD9_NBYTES_MLNO"] pub type R = crate::R<u32, super::TCD9_NBYTES_MLNO>; #[doc = "Writer for register TCD9_NBYTES_MLNO"] pub type W = crate::W<u32, super::TCD9_NBYTES_MLNO>; #[doc = "Register TCD9_NBYTES_MLNO `reset()`'s with value 0"] impl crate::ResetValue for super::TCD9_NBYTES_MLNO { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Reader of field `NBYTES`"] pub type NBYTES_R = crate::R<u32, u32>; #[doc = "Write proxy for field `NBYTES`"] pub struct NBYTES_W<'a> { w: &'a mut W, } impl<'a> NBYTES_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u32) -> &'a mut W { self.w.bits = (self.w.bits & !0xffff_ffff) | ((value as u32) & 0xffff_ffff); self.w } } impl R { #[doc = "Bits 0:31 - Minor Byte Transfer Count"] #[inline(always)] pub fn nbytes(&self) -> NBYTES_R { NBYTES_R::new((self.bits & 0xffff_ffff) as u32) } } impl W { #[doc = "Bits 0:31 - Minor Byte Transfer Count"] #[inline(always)] pub fn nbytes(&mut self) -> NBYTES_W { NBYTES_W { w: self } } }
28.878049
84
0.602196
8a4223d1b4632f15e8f043b25e87851fed0912b5
771
//! [POST /_matrix/client/r0/account/password](https://matrix.org/docs/spec/client_server/r0.6.0#post-matrix-client-r0-account-password) use ruma_api::ruma_api; use crate::r0::uiaa::{AuthData, UiaaResponse}; ruma_api! { metadata { description: "Change the password of the current user's account.", method: POST, name: "change_password", path: "/_matrix/client/r0/account/password", rate_limited: true, requires_authentication: true, } request { /// The new password for the account. pub new_password: String, /// Additional authentication information for the user-interactive authentication API. pub auth: Option<AuthData>, } response {} error: UiaaResponse }
26.586207
136
0.657588
21c9014e6baa580fc912294c7a70c707117818cd
5,397
// TryFrom is a simple and safe type conversion that may fail in a controlled way under some circumstances. // Basically, this is the same as From. The main difference is that this should return a Result type // instead of the target type itself. // You can read more about it at https://doc.rust-lang.org/std/convert/trait.TryFrom.html use std::convert::{TryFrom, TryInto}; use std::error; #[derive(Debug, PartialEq)] struct Color { red: u8, green: u8, blue: u8, } // Your task is to complete this implementation // and return an Ok result of inner type Color. // You need to create an implementation for a tuple of three integers, // an array of three integers and a slice of integers. // // Note that the implementation for tuple and array will be checked at compile time, // but the slice implementation needs to check the slice length! // Also note that correct RGB color values must be integers in the 0..=255 range. fn validate_rgb_single(c: i16) -> Result<u8, Box<dyn error::Error>> { if c < 0 || c > 255 { Err("bad value")?; } match u8::try_from(c) { Ok(v) => Ok(v), Err(v) => Err(v)? 
} } // Tuple implementation impl TryFrom<(i16, i16, i16)> for Color { type Error = Box<dyn error::Error>; fn try_from(tuple: (i16, i16, i16)) -> Result<Self, Self::Error> { let r = validate_rgb_single(tuple.0)?; let g = validate_rgb_single(tuple.1)?; let b = validate_rgb_single(tuple.2)?; Ok(Color {red: r, green: g, blue: b}) } } // Array implementation impl TryFrom<[i16; 3]> for Color { type Error = Box<dyn error::Error>; fn try_from(arr: [i16; 3]) -> Result<Self, Self::Error> { let r = validate_rgb_single(arr[0])?; let g = validate_rgb_single(arr[1])?; let b = validate_rgb_single(arr[2])?; Ok(Color {red: r, green: g, blue: b}) } } // Slice implementation impl TryFrom<&[i16]> for Color { type Error = Box<dyn error::Error>; fn try_from(slice: &[i16]) -> Result<Self, Self::Error> { if slice.len() != 3 { Err("slice len")?; } let r = validate_rgb_single(slice[0])?; let g = validate_rgb_single(slice[1])?; let b = validate_rgb_single(slice[2])?; Ok(Color {red: r, green: g, blue: b}) } } fn main() { // Use the `from` function let c1 = Color::try_from((183, 65, 14)); println!("{:?}", c1); // Since From is implemented for Color, we should be able to use Into let c2: Result<Color, _> = [183, 65, 14].try_into(); println!("{:?}", c2); let v = vec![183, 65, 14]; // With slice we should use `from` function let c3 = Color::try_from(&v[..]); println!("{:?}", c3); // or take slice within round brackets and use Into let c4: Result<Color, _> = (&v[..]).try_into(); println!("{:?}", c4); } #[cfg(test)] mod tests { use super::*; #[test] fn test_tuple_out_of_range_positive() { assert!(Color::try_from((256, 1000, 10000)).is_err()); } #[test] fn test_tuple_out_of_range_negative() { assert!(Color::try_from((-1, -10, -256)).is_err()); } #[test] fn test_tuple_sum() { assert!(Color::try_from((-1, 255, 255)).is_err()); } #[test] fn test_tuple_correct() { let c: Result<Color, _> = (183, 65, 14).try_into(); assert!(c.is_ok()); assert_eq!( c.unwrap(), Color { red: 183, green: 65, blue: 14 } ); } 
#[test] fn test_array_out_of_range_positive() { let c: Result<Color, _> = [1000, 10000, 256].try_into(); assert!(c.is_err()); } #[test] fn test_array_out_of_range_negative() { let c: Result<Color, _> = [-10, -256, -1].try_into(); assert!(c.is_err()); } #[test] fn test_array_sum() { let c: Result<Color, _> = [-1, 255, 255].try_into(); assert!(c.is_err()); } #[test] fn test_array_correct() { let c: Result<Color, _> = [183, 65, 14].try_into(); assert!(c.is_ok()); assert_eq!( c.unwrap(), Color { red: 183, green: 65, blue: 14 } ); } #[test] fn test_slice_out_of_range_positive() { let arr = [10000, 256, 1000]; assert!(Color::try_from(&arr[..]).is_err()); } #[test] fn test_slice_out_of_range_negative() { let arr = [-256, -1, -10]; assert!(Color::try_from(&arr[..]).is_err()); } #[test] fn test_slice_sum() { let arr = [-1, 255, 255]; assert!(Color::try_from(&arr[..]).is_err()); } #[test] fn test_slice_correct() { let v = vec![183, 65, 14]; let c: Result<Color, _> = Color::try_from(&v[..]); assert!(c.is_ok()); assert_eq!( c.unwrap(), Color { red: 183, green: 65, blue: 14 } ); } #[test] fn test_slice_excess_length() { let v = vec![0, 0, 0, 0]; assert!(Color::try_from(&v[..]).is_err()); } #[test] fn test_slice_insufficient_length() { let v = vec![0, 0]; assert!(Color::try_from(&v[..]).is_err()); } }
28.555556
107
0.538077
64f3c5e3c8266f1b67e6c54937eb41b1f24eac11
63,453
#[macro_use]
mod u05_impl;
#[macro_use]
mod u10_impl;
#[macro_use]
mod u15_impl;
#[macro_use]
mod u35_impl;

/// Instantiates the double-precision SIMD math kernel for one vector width.
///
/// Parameters:
/// - `$size`: number of f64 lanes in the vector type.
/// - `$uint` / `$int` / `$mask`: the per-lane unsigned/signed/mask element
///   types used for the narrower companion vectors `Ux` / `Ix` / `Mx`.
///
/// The macro expands to: type aliases for the lane-parameterized vectors, the
/// `crate::Sleef` trait impl (dispatching to the precision-tiered submodules),
/// shared constants, the `u05`/`u10`/`u15`/`u35` submodules (named after their
/// maximum error bound in ULP), test drivers, and the low-level helpers
/// (argument reduction, `ldexp`/`ilogb` kernels, double-double `log`/`exp`
/// cores, etc.) that the submodule macros rely on.
macro_rules! impl_math_f64 {
    ($size:expr, $uint:ty, $int:ty, $mask:ty) => {
        use crate::common::*;
        use doubled::*;

        // Vector type aliases, all `$size` lanes wide.
        type F64x = packed_simd::Simd<[f64; $size]>;
        type U64x = packed_simd::Simd<[u64; $size]>;
        type I64x = packed_simd::Simd<[i64; $size]>;
        type M64x = packed_simd::Simd<[packed_simd::m64; $size]>;
        // Narrow (32-bit element) companions used for exponent/quadrant math.
        type Ux = packed_simd::Simd<[$uint; $size]>;
        type Ix = packed_simd::Simd<[$int; $size]>;
        type Mx = packed_simd::Simd<[$mask; $size]>;

        impl BaseType for F64x {
            type Base = f64;
        }
        impl BaseType for U64x {
            type Base = u64;
        }
        impl BaseType for I64x {
            type Base = i64;
        }
        /*
        impl BaseType for M64x {
            type Base = m64;
        }
        */
        impl MaskType for F64x {
            type Mask = M64x;
        }
        impl BitsType for F64x {
            type Bits = U64x;
        }
        impl MaskType for Doubled<F64x> {
            type Mask = M64x;
        }

        // Public API: each method forwards to the implementation from the
        // submodule whose ULP error bound matches the SLEEF default for that
        // function (u35 for trig, u10 for exp/log family, u05/u15 for the
        // few functions specified at those bounds).
        impl crate::Sleef for F64x {
            type Int = Ix;
            #[inline]
            fn sin(self) -> Self {
                u35::sin(self)
            }
            #[inline]
            fn cos(self) -> Self {
                u35::cos(self)
            }
            #[inline]
            fn sin_cos(self) -> (Self, Self) {
                u35::sincos(self)
            }
            #[inline]
            fn tan(self) -> Self {
                u35::tan(self)
            }
            #[inline]
            fn asin(self) -> Self {
                u35::asin(self)
            }
            #[inline]
            fn acos(self) -> Self {
                u35::acos(self)
            }
            #[inline]
            fn atan(self) -> Self {
                u35::atan(self)
            }
            #[inline]
            fn atan2(self, other: Self) -> Self {
                u35::atan2(self, other)
            }
            #[inline]
            fn ln(self) -> Self {
                u35::log(self)
            }
            #[inline]
            fn cbrt(self) -> Self {
                u35::cbrt(self)
            }
            #[inline]
            fn exp(self) -> Self {
                u10::exp(self)
            }
            #[inline]
            fn pow(self, other: Self) -> Self {
                u10::pow(self, other)
            }
            #[inline]
            fn sinh(self) -> Self {
                u10::sinh(self)
            }
            #[inline]
            fn cosh(self) -> Self {
                u10::cosh(self)
            }
            #[inline]
            fn tanh(self) -> Self {
                u10::tanh(self)
            }
            #[inline]
            fn asinh(self) -> Self {
                u10::asinh(self)
            }
            #[inline]
            fn acosh(self) -> Self {
                u10::acosh(self)
            }
            #[inline]
            fn atanh(self) -> Self {
                u10::atanh(self)
            }
            #[inline]
            fn exp2(self) -> Self {
                u10::exp2(self)
            }
            #[inline]
            fn exp10(self) -> Self {
                u10::exp10(self)
            }
            #[inline]
            fn exp_m1(self) -> Self {
                u10::expm1(self)
            }
            #[inline]
            fn log10(self) -> Self {
                u10::log10(self)
            }
            #[inline]
            fn log2(self) -> Self {
                u10::log2(self)
            }
            #[inline]
            fn log_1p(self) -> Self {
                u10::log1p(self)
            }
            #[inline]
            fn ldexp(self, other: Self::Int) -> Self {
                ldexp(self, other)
            }
            #[inline]
            fn ilogb(self) -> Self::Int {
                ilogb(self)
            }
            #[inline]
            fn fma(self, y: Self, z: Self) -> Self {
                fma(self, y, z)
            }
            #[inline]
            fn sqrt(self) -> Self {
                sqrt(self)
            }
            #[inline]
            fn abs(self) -> Self {
                fabs(self)
            }
            #[inline]
            fn copy_sign(self, other: Self) -> Self {
                copysign(self, other)
            }
            #[inline]
            fn max(self, other: Self) -> Self {
                fmax(self, other)
            }
            #[inline]
            fn min(self, other: Self) -> Self {
                fmin(self, other)
            }
            #[inline]
            fn fdim(self, other: Self) -> Self {
                fdim(self, other)
            }
            #[inline]
            fn truncate(self) -> Self {
                trunc(self)
            }
            #[inline]
            fn round(self) -> Self {
                rint(self)
            }
            #[inline]
            fn next_after(self, other: Self) -> Self {
                nextafter(self, other)
            }
            #[inline]
            fn frfrexp(self) -> Self {
                frfrexp(self)
            }
            #[inline]
            fn expfrexp(self) -> Self::Int {
                expfrexp(self)
            }
            #[inline]
            fn fmod(self, other: Self) -> Self {
                fmod(self, other)
            }
            #[inline]
            fn modf(self) -> (Self, Self) {
                modf(self)
            }
            #[inline]
            fn sin_cos_pi(self) -> (Self, Self) {
                u35::sincospi(self)
            }
            #[inline]
            fn sin_pi(self) -> Self {
                u05::sinpi(self)
            }
            #[inline]
            fn cos_pi(self) -> Self {
                u05::cospi(self)
            }
            #[inline]
            fn hypot(self, other: Self) -> Self {
                u35::hypot(self, other)
            }
            #[inline]
            fn lgamma(self) -> Self {
                u10::lgamma(self)
            }
            #[inline]
            fn erf(self) -> Self {
                u10::erf(self)
            }
            #[inline]
            fn erfc(self) -> Self {
                u15::erfc(self)
            }
        }

        // Common splat constants.
        const ZERO: F64x = F64x::splat(0.);
        const NEG_ZERO: F64x = F64x::splat(-0.);
        const ONE: F64x = F64x::splat(1.);
        const HALF: F64x = F64x::splat(0.5);
        // Powers of two as f64 vectors: D1_NX == 2^N (names from SLEEF).
        const D1_63X: F64x = F64x::splat((1u64 << 63) as f64);
        const D1_60X: F64x = F64x::splat((1u64 << 60) as f64);
        const D1_54X: F64x = F64x::splat((1u64 << 54) as f64);
        const D1_53X: F64x = F64x::splat((1u64 << 53) as f64);
        const D1_52X: F64x = F64x::splat((1u64 << 52) as f64);
        const D1_32X: F64x = F64x::splat((1u64 << 32) as f64);
        const D1_31X: F64x = F64x::splat((1u64 << 31) as f64);
        const D1_28X: F64x = F64x::splat((1u64 << 28) as f64);
        const D1_24X: F64x = F64x::splat((1u64 << 24) as f64);
        const D1_23X: F64x = F64x::splat((1u64 << 23) as f64);

        // π split into four non-overlapping pieces (PI_A + PI_B + PI_C + PI_D ≈ π)
        // for exact Cody-Waite style argument reduction.
        const PI_A: F64x = F64x::splat(3.141_592_621_803_283_691_4);
        const PI_B: F64x = F64x::splat(3.178_650_942_459_171_346_9_e-8);
        const PI_C: F64x = F64x::splat(1.224_646_786_410_718_850_2_e-16);
        const PI_D: F64x = F64x::splat(1.273_663_432_702_189_981_6_e-24);
        const TRIGRANGEMAX: F64x = F64x::splat(1e+14);

        // Two-piece split of π for the fast-path reduction.
        const PI_A2: F64x = F64x::splat(3.141_592_653_589_793_116);
        const PI_B2: F64x = F64x::splat(1.224_646_799_147_353_207_2_e-16);
        const TRIGRANGEMAX2: F64x = F64x::splat(15.);

        // ilogb results for 0 and NaN (C99 FP_ILOGB0 / FP_ILOGBNAN values).
        const SLEEF_FP_ILOGB0: F64x = F64x::splat(-2_147_483_648.);
        const SLEEF_FP_ILOGBNAN: F64x = F64x::splat(2_147_483_647.);
        const SQRT_DBL_MAX: F64x = F64x::splat(1.340_780_792_994_259_635_5_e+154);

        // 2/π as a hi/lo pair.
        const M_2_PI_H: F64x = F64x::splat(0.636_619_772_367_581_382_43);
        const M_2_PI_L: F64x = F64x::splat(-3.935_735_335_036_497_176_4_e-17);
        const TRIGRANGEMAX3: F64x = F64x::splat(1e+9);

        // ln(2) split into hi/lo parts, and 1/ln(2).
        const L2U: F64x = F64x::splat(0.693_147_180_559_662_956_511_601_805_686_950_683_593_75);
        const L2L: F64x =
            F64x::splat(0.282_352_905_630_315_771_225_884_481_750_134_360_255_254_120_68_e-12);
        const R_LN2: F64x = F64x::splat(
            1.442_695_040_888_963_407_359_924_681_001_892_137_426_645_954_152_985_934_135_449_406_931,
        );
        const L10U: F64x = F64x::splat(0.301_029_995_663_839_144_98); // log 2 / log 10
        const L10L: F64x = F64x::splat(1.420_502_322_726_609_941_8_e-13);
        const LOG10_2: F64x =
            F64x::splat(3.321_928_094_887_362_347_870_319_429_489_390_175_864_831_393);

        mod u05 {
            //! Functions with 0.5 ULP error bound
            impl_math_f64_u05!();
        }
        pub use u05::{
            sincospi as sincospi_u05,
            sqrt as sqrt_u05,
            hypot as hypot_u05,
            sinpi as sinpi_u05,
            cospi as cospi_u05,
        };

        mod u10 {
            //! Functions with 1.0 ULP error bound
            impl_math_f64_u10!();
        }
        pub use u10::{
            sin as sin_u10,
            cos as cos_u10,
            sincos as sincos_u10,
            tan as tan_u10,
            atan2 as atan2_u10,
            asin as asin_u10,
            acos as acos_u10,
            atan as atan_u10,
            exp as exp_u10,
            cbrt as cbrt_u10,
            log as log_u10,
            pow as pow_u10,
            sinh as sinh_u10,
            cosh as cosh_u10,
            tanh as tanh_u10,
            asinh as asinh_u10,
            acosh as acosh_u10,
            atanh as atanh_u10,
            exp10 as exp10_u10,
            expm1 as expm1_u10,
            log10 as log10_u10,
            log2 as log2_u10,
            tgamma as tgamma_u10,
            lgamma as lgamma_u10,
            erf as erf_u10,
            log1p as log1p_u10,
            exp2 as exp2_u10,
        };

        mod u15 {
            //! Functions with 1.5 ULP error bound
            impl_math_f64_u15!();
        }
        pub use u15::{
            erfc as erfc_u15,
        };

        mod u35 {
            //! Functions with 3.5 ULP error bound
            impl_math_f64_u35!();
        }
        pub use u35::{
            sin as sin_u35,
            cos as cos_u35,
            tan as tan_u35,
            sincos as sincos_u35,
            sincospi as sincospi_u35,
            atan as atan_u35,
            atan2 as atan2_u35,
            asin as asin_u35,
            acos as acos_u35,
            log as log_u35,
            sqrt as sqrt_u35,
            cbrt as cbrt_u35,
            sinh as sinh_u35,
            cosh as cosh_u35,
            tanh as tanh_u35,
            hypot as hypot_u35,
            exp2 as exp2_u35,
            exp10 as exp10_u35,
            log2 as log2_u35,
        };

        /// Test driver for unary f64 -> f64 functions: feeds random lanes in
        /// `[mn, mx)` and checks each lane against the scalar reference
        /// `fun_f` within `ulp` units in the last place (measured as a raw
        /// bit-pattern difference).
        #[cfg(test)]
        fn test_f_f(fun_fx: fn(F64x) -> F64x, fun_f: fn(f64) -> f64, mn: f64, mx: f64, ulp: f64) {
            use rand::Rng;
            let mut rng = rand::thread_rng();
            for _ in 0..crate::TEST_REPEAT {
                let mut in_f = [0_f64; $size];
                for v in in_f.iter_mut() {
                    *v = rng.gen_range(mn, mx);
                }
                let in_fx = F64x::from_slice_unaligned(&in_f);
                let out_fx = fun_fx(in_fx);
                for i in 0..$size {
                    let input = in_f[i];
                    let expected = fun_f(input);
                    let output = out_fx.extract(i);
                    // NaN == NaN counts as a match for these tests.
                    if expected.is_nan() && output.is_nan() {
                        continue;
                    }
                    // ULP distance via the ordered bit-pattern difference.
                    let diff = (expected.to_bits() as i64).wrapping_sub(output.to_bits() as i64)
                        as f64;
                    #[cfg(not(feature = "std"))]
                    assert!(libm::fabs(diff) <= ulp);
                    #[cfg(feature = "std")]
                    assert!(
                        diff.abs() <= ulp,
                        format!(
                            "Position: {}, Input: {:e}, Output: {}, Expected: {}, ULP: {}",
                            i,
                            input,
                            output,
                            expected,
                            diff.abs()
                        )
                    );
                }
            }
        }

        /// Test driver for unary functions returning a pair (e.g. sincos):
        /// both outputs must be within `ulp` of the scalar reference.
        #[cfg(test)]
        fn test_f_ff(
            fun_fx: fn(F64x) -> (F64x, F64x),
            fun_f: fn(f64) -> (f64, f64),
            mn: f64,
            mx: f64,
            ulp: f64,
        ) {
            use rand::Rng;
            let mut rng = rand::thread_rng();
            for _ in 0..crate::TEST_REPEAT {
                let mut in_f = [0_f64; $size];
                for v in in_f.iter_mut() {
                    *v = rng.gen_range(mn, mx);
                }
                let in_fx = F64x::from_slice_unaligned(&in_f);
                let (out_fx1, out_fx2) = fun_fx(in_fx);
                for i in 0..$size {
                    let input = in_f[i];
                    let (expected1, expected2) = fun_f(input);
                    let output1 = out_fx1.extract(i);
                    let output2 = out_fx2.extract(i);
                    if (expected1.is_nan() && output1.is_nan())
                        || (expected2.is_nan() && output2.is_nan())
                    {
                        continue;
                    }
                    let diff1 = (expected1.to_bits() as i64).wrapping_sub(output1.to_bits() as i64)
                        as f64;
                    let diff2 = (expected2.to_bits() as i64).wrapping_sub(output2.to_bits() as i64)
                        as f64;
                    #[cfg(not(feature = "std"))]
                    assert!(libm::fabs(diff1) <= ulp && libm::fabs(diff2) <= ulp);
                    #[cfg(feature = "std")]
                    assert!(
                        diff1.abs() <= ulp && diff2.abs() <= ulp,
                        format!(
                            "Position: {}, Input: {:e}, Output: ({}, {}), Expected: ({}, {}), ULP: ({}, {})",
                            i,
                            input,
                            output1,
                            output2,
                            expected1,
                            expected2,
                            diff1.abs(),
                            diff2.abs(),
                        )
                    );
                }
            }
        }

        /// Test driver for binary f64 x f64 -> f64 functions (e.g. atan2,
        /// hypot); both operands drawn from `[mn, mx)`.
        #[cfg(test)]
        fn test_ff_f(
            fun_fx: fn(F64x, F64x) -> (F64x),
            fun_f: fn(f64, f64) -> f64,
            mn: f64,
            mx: f64,
            ulp: f64,
        ) {
            use rand::Rng;
            let mut rng = rand::thread_rng();
            for _ in 0..crate::TEST_REPEAT {
                let mut in_f1 = [0_f64; $size];
                let mut in_f2 = [0_f64; $size];
                for v in in_f1.iter_mut() {
                    *v = rng.gen_range(mn, mx);
                }
                for v in in_f2.iter_mut() {
                    *v = rng.gen_range(mn, mx);
                }
                let in_fx1 = F64x::from_slice_unaligned(&in_f1);
                let in_fx2 = F64x::from_slice_unaligned(&in_f2);
                let out_fx = fun_fx(in_fx1, in_fx2);
                for i in 0..$size {
                    let input1 = in_f1[i];
                    let input2 = in_f2[i];
                    let expected = fun_f(input1, input2);
                    let output = out_fx.extract(i);
                    if expected.is_nan() && output.is_nan() {
                        continue;
                    }
                    let diff = (expected.to_bits() as i64).wrapping_sub(output.to_bits() as i64)
                        as f64;
                    #[cfg(not(feature = "std"))]
                    assert!(libm::fabs(diff) <= ulp);
                    #[cfg(feature = "std")]
                    // NOTE(review): the std-path bound is `1. + ulp`, one ULP
                    // looser than the no_std path and the unary drivers above
                    // — confirm this extra slack is intentional.
                    assert!(
                        diff.abs() <= 1. + ulp,
                        format!(
                            "Position: {}, Input: ({:e}, {:e}), Output: {}, Expected: {}, ULP:{}",
                            i,
                            input1,
                            input2,
                            output,
                            expected,
                            diff.abs()
                        )
                    );
                }
            }
        }

        /// Gathers one f64 per lane from `ptr`, indexed by the lanes of `vi`.
        // NOTE(review): uses deprecated `core::mem::uninitialized()`; every
        // slot is written before the transmute, but `MaybeUninit` would be
        // the sound modern replacement.
        #[inline]
        fn from_slice_offset(ptr: &[f64], vi: Ix) -> F64x {
            const L: usize = F64x::lanes();
            let mut ar: [f64; L] = unsafe { core::mem::uninitialized() };
            for i in 0..L {
                ar[i] = ptr[vi.extract(i) as usize];
            }
            unsafe { core::mem::transmute(ar) }
        }

        /// Swaps the upper and lower 32-bit halves of each 64-bit lane.
        #[inline]
        fn swap_upper_lower(i: I64x) -> I64x {
            const L: usize = I64x::lanes();
            let mut r: [i32; L * 2] = unsafe { core::mem::transmute(i) };
            for i in 0..L {
                r.swap(i * 2, i * 2 + 1);
            }
            unsafe { core::mem::transmute(r) }
        }

        impl Round for F64x {
            type Int = Ix;
            #[inline]
            fn trunc(self) -> Self {
                Self::from_cast(self.trunci())
            }
            #[inline]
            fn trunci(self) -> Self::Int {
                Self::Int::from_cast(self)
            }
            #[inline]
            fn round(self) -> Self {
                rint(self)
            }
            #[inline]
            fn roundi(self) -> Self::Int {
                Self::Int::from_cast(self.round())
            }
        }

        impl MulAdd for F64x {
            #[inline]
            fn mul_add(self, y: Self, z: Self) -> Self {
                self.mul_add(y, z)
            }
        }

        impl MulSub for F64x {
            #[inline]
            fn mul_sub(self, y: Self, z: Self) -> Self {
                self.mul_add(y, -z)
            }
        }

        impl NegMulAdd for F64x {
            #[inline]
            fn neg_mul_add(self, y: Self, z: Self) -> Self {
                (-self).mul_add(y, z)
            }
        }

        impl SqrtAsDoubled for F64x {
            // One Newton-style refinement of the hardware sqrt, returned as a
            // hi/lo (double-double) pair for extra precision.
            #[inline]
            fn sqrt_as_doubled(self) -> Doubled<Self> {
                let t = self.sqrt();
                ((self + t.mul_as_doubled(t)) * t.recpre_as_doubled()).scale(Self::splat(0.5))
            }
        }

        impl VectorizedSelect<f64> for M64x {
            type Output = F64x;
            // Per-lane select between two scalar constants.
            fn select_splat(self, l: f64, r: f64) -> Self::Output {
                self.select(Self::Output::splat(l), Self::Output::splat(r))
            }
        }

        impl DoubledSelect<F64x> for M64x {
            // Lane-wise select applied to both halves of a Doubled value.
            fn select_doubled(self, l: Doubled<F64x>, r: Doubled<F64x>) -> Doubled<F64x> {
                Doubled::new(self.select(l.0, r.0), self.select(l.1, r.1))
            }
        }

        impl SelectSeveral<f64> for F64x {
            // Three-way select: o0 ? d0 : (o1 ? d1 : d2).
            #[inline]
            fn select3(o0: Self::Mask, o1: Self::Mask, d0: f64, d1: f64, d2: f64) -> Self {
                o0.select(Self::splat(d0), o1.select_splat(d1, d2))
            }
            // Four-way select: o0 ? d0 : (o1 ? d1 : (o2 ? d2 : d3)).
            fn select4(
                o0: Self::Mask,
                o1: Self::Mask,
                o2: Self::Mask,
                d0: f64,
                d1: f64,
                d2: f64,
                d3: f64,
            ) -> Self {
                o0.select(
                    Self::splat(d0),
                    o1.select(Self::splat(d1), o2.select_splat(d2, d3)),
                )
            }
        }

        impl Poly for F64x {
            fn c2v(c: Self::Base) -> Self {
                F64x::splat(c)
            }
        }

        // return d0 < d1 ? x : y
        #[inline]
        fn vsel_vi_vd_vd_vi_vi(d0: F64x, d1: F64x, x: Ix, y: Ix) -> Ix {
            d0.lt(d1).select(x, y)
        }

        // return d0 < 0 ? x : 0
        #[inline]
        fn vsel_vi_vd_vi(d: F64x, x: Ix) -> Ix {
            Ix::from_bits(Mx::from_cast(d.is_sign_negative())) & x
        }

        impl Sign for F64x {
            #[inline]
            fn is_sign_negative(self) -> Self::Mask {
                self.sign_bit().ne(Self::Bits::splat(0))
            }
            #[inline]
            fn is_sign_positive(self) -> Self::Mask {
                !self.is_sign_negative()
            }
            // Isolates the IEEE-754 sign bit of each lane.
            #[inline]
            fn sign_bit(self) -> Self::Bits {
                Self::Bits::from_bits(self) & Self::Bits::from_bits(NEG_ZERO)
            }
            #[inline]
            fn sign(self) -> Self {
                ONE.mul_sign(self)
            }
            // Flips self's sign wherever `other` is negative (bitwise XOR of
            // sign bits; cheaper than a multiply).
            #[inline]
            fn mul_sign(self, other: Self) -> Self {
                Self::from_bits(Self::Bits::from_bits(self) ^ other.sign_bit())
            }
            #[inline]
            fn copy_sign(self, other: Self) -> Self {
                Self::from_bits(
                    (!Self::Bits::from_bits(NEG_ZERO) & Self::Bits::from_bits(self))
                        ^ (other.sign_bit()),
                )
            }
        }

        impl IsNegZero for F64x {
            // Exact bit-pattern comparison against -0.0.
            #[inline]
            fn is_neg_zero(self) -> Self::Mask {
                U64x::from_bits(self).eq(U64x::from_bits(NEG_ZERO))
            }
        }

        impl IsInt for F64x {
            // Integer test via two-step reduction; values >= 2^53 are all
            // representable integers, so they pass unconditionally.
            #[inline]
            fn is_integer(self) -> Self::Mask {
                let mut x = (self * (ONE / D1_31X)).trunc();
                x = (-D1_31X).mul_add(x, self);
                x.trunc().eq(x) | self.abs().gt(D1_53X)
            }
        }

        /// Widens each 32-bit lane of `q` into the upper half of a 64-bit
        /// lane (lower half zeroed).
        #[inline]
        fn cast_into_upper(q: Ix) -> I64x {
            const L: usize = Ix::lanes();
            let mut a: [<Ix as BaseType>::Base; L * 2] = unsafe { core::mem::uninitialized() };
            for i in 0..L {
                a[i * 2] = 0;
                a[i * 2 + 1] = q.extract(i);
            }
            unsafe { core::mem::transmute(a) }
        }

        /// Extracts the upper 32 bits of each 64-bit lane.
        #[inline]
        fn cast_from_upper(q: U64x) -> Ix {
            Ix::from_cast(q >> 32) // TODO: optimize
        }

        /// Builds 2^q by placing the biased exponent directly into the f64
        /// bit pattern.
        #[inline]
        fn pow2i(q: Ix) -> F64x {
            let q = Ix::splat(0x3ff) + q;
            let r = cast_into_upper(q);
            F64x::from_bits(r << 20)
        }

        /// x * 2^q, robust over the full exponent range: the scale is applied
        /// in several factors so intermediate values cannot over/underflow.
        #[inline]
        fn ldexpk(x: F64x, q: Ix) -> F64x {
            let mut m = q >> 31;
            m = (((m + q) >> 9) - m) << 7;
            let q = q - (m << 2);
            m = Ix::splat(0x3ff) + m;
            // Clamp the biased exponent to [0, 0x7ff].
            m = !Ix::from_bits(Ix::splat(0).gt(m)) & m;
            m = m.gt(Ix::splat(0x7ff)).select(Ix::splat(0x7ff), m);
            let r = cast_into_upper(m);
            let y = F64x::from_bits(r << 20);
            x * y * y * y * y * pow2i(q)
        }

        /// d * 2^e via two half-sized power-of-two factors (faster than
        /// `ldexpk`, valid for moderate exponents).
        #[inline]
        fn ldexp2k(d: F64x, e: Ix) -> F64x {
            d * pow2i(e >> 1) * pow2i(e - (e >> 1))
        }

        /// d * 2^q by adding q directly to the exponent field; assumes the
        /// result stays within the normal range.
        #[inline]
        fn ldexp3k(d: F64x, q: Ix) -> F64x {
            F64x::from_bits(I64x::from_bits(d) + (cast_into_upper(q) << 20))
        }

        /*#[cfg(all(
            not(feature = "enable_avx512f"),
            not(feature = "enable_avx512fnofma")
        ))]*/
        /// Unbiased exponent of d, handling subnormals by pre-scaling with
        /// 2^300 (the 2.037e90 constant) and compensating afterwards.
        #[inline]
        fn ilogbk(mut d: F64x) -> Ix {
            let o = d.lt(F64x::splat(4.909_093_465_297_726_6_e-91));
            d = o.select(F64x::splat(2.037_035_976_334_486_e90) * d, d);
            let mut q = cast_from_upper(U64x::from_bits(d));
            q &= Ix::splat(((1 << 12) - 1) << 20);
            q = Ix::from_bits(Ux::from_bits(q) >> 20);
            q - Mx::from_cast(o).select(Ix::splat(300 + 0x3ff), Ix::splat(0x3ff))
        }

        /*#[cfg(all(
            not(feature = "enable_avx512f"),
            not(feature = "enable_avx512fnofma")
        ))]*/
        /// Fast unbiased exponent: reads the exponent field directly; no
        /// subnormal handling.
        #[inline]
        fn ilogb2k(d: F64x) -> Ix {
            let mut q = cast_from_upper(U64x::from_bits(d));
            q = Ix::from_bits(Ux::from_bits(q) >> 20);
            q &= Ix::splat(0x7ff);
            q - Ix::splat(0x3ff)
        }

        impl IsOdd for F64x {
            // Odd-integer test; values >= 2^53 have no fractional bits and
            // cannot be classified, so they report false.
            #[inline]
            fn is_odd(self) -> Self::Mask {
                let mut x = (self * (ONE / D1_31X)).trunc();
                x = (-D1_31X).mul_add(x, self);
                M64x::from_cast((x.trunci() & Ix::splat(1)).eq(Ix::splat(1)))
                    & self.abs().lt(D1_53X)
            }
        }

        /// Public ldexp: x * 2^q.
        pub fn ldexp(x: F64x, q: Ix) -> F64x {
            ldexpk(x, q)
        }

        /// Public ilogb with C99 special-case results for 0 / NaN / inf.
        pub fn ilogb(d: F64x) -> Ix {
            let mut e = F64x::from_cast(ilogbk(d.abs()));
            e = d.eq(ZERO).select(SLEEF_FP_ILOGB0, e);
            e = d.is_nan().select(SLEEF_FP_ILOGBNAN, e);
            e = d.is_infinite().select(F64x::splat(f64::MAX), e);
            e.roundi()
        }

        /// Helper for the payne-hanek style reduction in `rempi`: splits x
        /// into a fractional remainder and an octant index.
        #[inline]
        fn rempisub(x: F64x) -> (F64x, Ix) {
            if cfg!(feature = "full_fp_rounding") {
                let y = (x * F64x::splat(4.)).round();
                let vi = (y - x.round() * F64x::splat(4.)).trunci();
                (x - y * F64x::splat(0.25), vi)
            } else {
                // Emulated rounding path for targets without exact FP
                // rounding support.
                let mut fr = x - D1_28X * (x * (ONE / D1_28X)).trunc();
                let mut vi = Mx::from_cast(x.gt(ZERO)).select(Ix::splat(4), Ix::splat(3))
                    + (fr * F64x::splat(8.)).trunci();
                vi = ((Ix::splat(7) & vi) - Ix::splat(3)) >> 1;
                fr = fr - F64x::splat(0.25) * fr.mul_add(F64x::splat(4.), HALF.mul_sign(x)).trunc();
                fr = fr
                    .abs()
                    .gt(F64x::splat(0.25))
                    .select(fr - HALF.mul_sign(x), fr);
                fr = fr.abs().gt(F64x::splat(1e+10)).select(ZERO, fr);
                let o = x.abs().eq(F64x::splat(0.124_999_999_999_999_986_12));
                fr = o.select(x, fr);
                vi = Mx::from_cast(o).select(Ix::splat(0), vi);
                (fr, vi)
            }
        }

        /// Large-argument trig reduction: returns (remainder as a hi/lo pair,
        /// quadrant) using the precomputed 2/π table `REMPITABDP`.
        #[inline]
        fn rempi(mut a: F64x) -> (Doubled<F64x>, Ix) {
            let mut ex = ilogb2k(a);
            /*if cfg!(feature = "enable_avx512f") || cfg!(feature = "enable_avx512fnofma") {
                ex = !(ex >> 31) & ex;
                ex = ex & Ix::splat(1023);
            }*/
            ex -= Ix::splat(55);
            let mut q = Ix::from_bits(ex.gt(Ix::splat(700 - 55))) & Ix::splat(-64);
            a = ldexp3k(a, q);
            ex = !(ex >> 31) & ex;
            ex <<= 2;
            // Accumulate a * (table entries) in double-double precision,
            // folding completed octants into q at each step.
            let mut x = a.mul_as_doubled(from_slice_offset(&crate::tables::REMPITABDP, ex));
            let (did, dii) = rempisub(x.0);
            q = dii;
            x.0 = did;
            x = x.normalize();
            let mut y = a.mul_as_doubled(from_slice_offset(&crate::tables::REMPITABDP[1..], ex));
            x += y;
            let (did, dii) = rempisub(x.0);
            q += dii;
            x.0 = did;
            x = x.normalize();
            y = Doubled::new(
                from_slice_offset(&crate::tables::REMPITABDP[2..], ex),
                from_slice_offset(&crate::tables::REMPITABDP[3..], ex),
            );
            y *= a;
            x += y;
            x = x.normalize();
            // Scale back to radians: multiply by 2π (hi/lo split).
            x *= Doubled::from((
                3.141_592_653_589_793_116 * 2.,
                1.224_646_799_147_353_207_2_e-16 * 2.,
            ));
            // Small arguments bypass the reduction entirely.
            let o = a.abs().lt(F64x::splat(0.7));
            x.0 = o.select(a, x.0);
            x.1 = F64x::from_bits(!U64x::from_bits(o) & U64x::from_bits(x.1));
            (x, q)
        }

        /// cos(π d) in double-double precision (polynomial kernel shared by
        /// the sincospi implementations).
        #[inline]
        fn cospik(d: F64x) -> Doubled<F64x> {
            let u = d * F64x::splat(4.);
            let mut q = u.trunci();
            q = (q + (Ix::from_bits(Ux::from_bits(q) >> 31) ^ Ix::splat(1))) & Ix::splat(!1);
            let o = M64x::from_cast((q & Ix::splat(2)).eq(Ix::splat(0)));
            let s = u - F64x::from_cast(q);
            let t = s;
            let s = s * s;
            let s2 = t.mul_as_doubled(t);
            // Minimax coefficients; each select_splat picks the sin-branch or
            // cos-branch coefficient per lane.
            let u = o
                .select_splat(
                    9.944_803_876_268_437_740_902_08_e-16,
                    -2.024_611_207_851_823_992_958_68_e-14,
                )
                .mul_add(
                    s,
                    o.select_splat(
                        -3.897_962_260_629_327_991_640_47_e-13,
                        6.948_218_305_801_794_613_277_84_e-12,
                    ),
                )
                .mul_add(
                    s,
                    o.select_splat(
                        1.150_115_825_399_960_352_669_01_e-10,
                        -1.757_247_499_528_531_799_526_64_e-9,
                    ),
                )
                .mul_add(
                    s,
                    o.select_splat(
                        -2.461_136_950_104_469_749_535_9_e-8,
                        3.133_616_889_668_683_928_784_22_e-7,
                    ),
                )
                .mul_add(
                    s,
                    o.select_splat(
                        3.590_860_448_590_527_540_050_62_e-6,
                        -3.657_620_418_216_155_192_036_1_e-5,
                    ),
                )
                .mul_add(
                    s,
                    o.select_splat(
                        -0.000_325_991_886_927_389_905_997_954,
                        0.002_490_394_570_192_718_502_743_560,
                    ),
                );
            let mut x = u * s
                + o.select_doubled(
                    Doubled::new(
                        F64x::splat(0.015_854_344_243_815_501_891_425_9),
                        F64x::splat(-1.046_932_722_806_315_219_088_45_e-18),
                    ),
                    Doubled::new(
                        F64x::splat(-0.080_745_512_188_280_785_248_473_1),
                        F64x::splat(3.618_524_750_670_371_048_499_87_e-18),
                    ),
                );
            x = s2 * x
                + o.select_doubled(
                    Doubled::new(
                        F64x::splat(-0.308_425_137_534_042_437_259_529),
                        F64x::splat(-1.956_984_921_336_335_503_383_45_e-17),
                    ),
                    Doubled::new(
                        F64x::splat(0.785_398_163_397_448_278_999_491),
                        F64x::splat(3.062_871_137_271_550_026_071_05_e-17),
                    ),
                );
            x *= o.select_doubled(s2, Doubled::new(t, ZERO));
            x = o.select_doubled(x + ONE, x);
            // Negate in quadrants where cos is negative.
            let o = M64x::from_cast((q + Ix::splat(2) & Ix::splat(4)).eq(Ix::splat(4)));
            x.0 = F64x::from_bits(
                (U64x::from_bits(o) & U64x::from_bits(NEG_ZERO)) ^ U64x::from_bits(x.0),
            );
            x.1 = F64x::from_bits(
                (U64x::from_bits(o) & U64x::from_bits(NEG_ZERO)) ^ U64x::from_bits(x.1),
            );
            x
        }

        /// atan2 kernel: quadrant bookkeeping in q plus a degree-19 minimax
        /// polynomial on the reduced ratio.
        #[inline]
        fn atan2k(y: F64x, x: F64x) -> F64x {
            let q = vsel_vi_vd_vi(x, Ix::splat(-2));
            let x = x.abs();
            let q = vsel_vi_vd_vd_vi_vi(x, y, q + Ix::splat(1), q);
            let p = x.lt(y);
            let s = p.select(-x, y);
            let mut t = x.max(y);
            let s = s / t;
            t = s * s;
            let t2 = t * t;
            let t4 = t2 * t2;
            let t8 = t4 * t4;
            let t16 = t8 * t8;
            let u = F64x::poly19(
                t,
                t2,
                t4,
                t8,
                t16,
                -1.887_960_084_630_734_965_637_46_e-05,
                0.000_209_850_076_645_816_976_906_797,
                -0.001_106_118_314_866_724_825_634_71,
                0.003_700_267_441_887_131_192_324_03,
                -0.008_898_961_958_876_554_917_408_09,
                0.016_599_329_773_529_201_970_117,
                -0.025_451_762_493_231_264_161_686_1,
                0.033_785_258_000_135_306_999_389_7,
                -0.040_762_919_127_683_650_000_193_4,
                0.046_666_715_007_784_062_563_267_5,
                -0.052_367_485_230_348_245_761_611_3,
                0.058_766_639_292_667_358_085_431_3,
                -0.066_657_357_936_108_052_598_456_2,
                0.076_921_953_831_176_961_835_502_9,
                -0.090_908_995_008_245_008_229_153,
                0.111_111_105_648_261_418_443_745,
                -0.142_857_142_667_713_293_837_65,
                0.199_999_999_996_591_265_594_148,
                -0.333_333_333_333_311_110_369_124,
            );
            t = s.mul_add(t * u, s);
            F64x::from_cast(q).mul_add(F64x::FRAC_PI_2, t)
        }

        /// Where d is infinite, returns m with d's sign bit; elsewhere 0.
        #[inline]
        fn visinf2_vd_vd_vd(d: F64x, m: F64x) -> F64x {
            F64x::from_bits(
                U64x::from_bits(d.is_infinite())
                    & ((U64x::from_bits(d) & U64x::from_bits(NEG_ZERO)) | U64x::from_bits(m)),
            )
        }

        /// exp(d) - 1 kernel (single-width result).
        #[inline]
        fn expm1k(d: F64x) -> F64x {
            // Reduce: d = q*ln(2) + s with |s| <= ln(2)/2 (L2U/L2L split).
            let mut u = (d * R_LN2).round();
            let q = u.roundi();
            let s = u.mul_add(-L2U, d);
            let s = u.mul_add(-L2L, s);
            let s2 = s * s;
            let s4 = s2 * s2;
            let s8 = s4 * s4;
            u = F64x::poly10(
                s,
                s2,
                s4,
                s8,
                2.088_606_211_072_836_875_363_41_e-9,
                2.511_129_308_928_765_186_106_61_e-8,
                2.755_739_112_349_004_718_933_38_e-7,
                2.755_723_629_119_288_276_294_23_e-6,
                2.480_158_715_923_547_299_879_1_e-5,
                0.000_198_412_698_960_509_205_564_975,
                0.001_388_888_888_897_744_922_079_62,
                0.008_333_333_333_316_527_216_649_84,
                0.041_666_666_666_666_504_759_142_2,
                0.166_666_666_666_666_851_703_837,
            );
            u = s2.mul_add(HALF, s2 * s * u) + s;
            // Reconstruct: (e^s - 1) * 2^q - ... ; q == 0 avoids the scaling.
            M64x::from_cast(q.eq(Ix::splat(0))).select(u, ldexp2k(u + ONE, q) - ONE)
        }

        /// log(d) in double-double precision (kernel for pow/log functions).
        #[inline]
        fn logk(mut d: F64x) -> Doubled<F64x> {
            let m: F64x;
            let mut s =
            /*if !cfg!(feature = "enable_avx512f") && !cfg!(feature = "enable_avx512fnofma")*/ {
                // Normalize subnormals by scaling with 2^64, then split off
                // the exponent e and mantissa m in [0.75, 1.5).
                let o = d.lt(F64x::splat(f64::MIN_POSITIVE));
                d = o.select(d * (D1_32X * D1_32X), d);
                let mut e = ilogb2k(d * F64x::splat(1. / 0.75));
                m = ldexp3k(d, -e);
                e = Mx::from_cast(o).select(e - Ix::splat(64), e);
                Doubled::from((0.693_147_180_559_945_286_226_764, 2.319_046_813_846_299_558_417_771_e-17))
                    * F64x::from_cast(e)
            }/* else {
                let mut e = vgetexp_vd_vd(d * F64x::splat(1. / 0.75));
                e = e.eq(F64x::INFINITY).select(F64x::splat(1024.), e);
                m = vgetmant_vd_vd(d);
                Doubled::new(
                    F64x::splat(0.693_147_180_559_945_286_226_764),
                    F64x::splat(2.319_046_813_846_299_558_417_771_e-17),
                ) * e
            }*/;

            // atanh series: x = (m-1)/(m+1), log(m) = 2x + x^3 * P(x^2).
            let x = F64x::splat(-1.).add_as_doubled(m) / ONE.add_as_doubled(m);
            let x2 = x.square();
            let x4 = x2.0 * x2.0;
            let x8 = x4 * x4;
            let x16 = x8 * x8;
            let t = F64x::poly9(
                x2.0,
                x4,
                x8,
                x16,
                0.116_255_524_079_935_043_668_677,
                0.103_239_680_901_072_952_701_192,
                0.117_754_809_412_463_995_466_069,
                0.133_329_810_868_462_739_215_09,
                0.153_846_227_114_512_262_845_736,
                0.181_818_180_850_050_775_676_507,
                0.222_222_222_230_083_560_345_903,
                0.285_714_285_714_249_172_087_875,
                0.400_000_000_000_000_077_715_612,
            );
            let c = Doubled::from((
                0.666_666_666_666_666_629_659_233,
                3.805_549_625_424_120_563_366_16_e-17,
            ));
            s = s.add_checked(x.scale(F64x::splat(2.)));
            s.add_checked(x2 * x * (x2 * t + c))
        }

        /// exp of a double-double argument, single-width result.
        #[inline]
        fn expk(d: Doubled<F64x>) -> F64x {
            let mut u = (d.0 + d.1) * R_LN2;
            let dq = u.round();
            let q = dq.roundi();
            let mut s = d + dq * (-L2U);
            s += dq * (-L2L);
            s = s.normalize();
            let s2 = s.0 * s.0;
            let s4 = s2 * s2;
            let s8 = s4 * s4;
            u = F64x::poly10(
                s.0,
                s2,
                s4,
                s8,
                2.510_696_834_209_504_195_271_39_e-8,
                2.762_861_667_702_706_491_168_55_e-7,
                2.755_724_967_250_235_741_438_64_e-6,
                2.480_149_739_898_197_941_141_53_e-5,
                0.000_198_412_698_809_069_797_676_111,
                0.001_388_888_893_997_712_896_052_9,
                0.008_333_333_333_323_714_176_010_81,
                0.041_666_666_666_540_952_412_844_9,
                0.166_666_666_666_666_740_681_535,
                0.500_000_000_000_000_999_200_722,
            );
            let mut t = ONE.add_checked(s);
            t = t.add_checked(s.square() * u);
            u = t.0 + t.1;
            u = ldexp2k(u, q);
            // Flush to zero for very negative arguments (result underflows).
            F64x::from_bits(!U64x::from_bits(d.0.lt(F64x::splat(-1000.))) & U64x::from_bits(u))
        }

        /// exp of a double-double argument, double-double result.
        #[inline]
        fn expk2(d: Doubled<F64x>) -> Doubled<F64x> {
            let u = (d.0 + d.1) * R_LN2;
            let dq = u.round();
            let q = dq.roundi();
            let s = d + dq * (-L2U) + dq * (-L2L);
            let s2 = s.square();
            let s4 = s2.square();
            let s8 = s4.0 * s4.0;
            let u = F64x::poly10(
                s.0,
                s2.0,
                s4.0,
                s8,
                0.160_247_221_970_993_207_2_e-9,
                0.209_225_518_356_315_700_7_e-8,
                0.250_523_002_378_264_446_5_e-7,
                0.275_572_480_090_213_530_3_e-6,
                0.275_573_189_238_604_437_3_e-5,
                0.248_015_873_560_581_506_5_e-4,
                0.198_412_698_414_807_185_8_e-3,
                0.138_888_888_888_676_325_5_e-2,
                0.833_333_333_333_334_709_5_e-2,
                0.416_666_666_666_666_990_5_e-1,
            );
            let mut t = HALF.add_checked(s * F64x::splat(0.166_666_666_666_666_657_4));
            t = ONE.add_checked(t * s);
            t = ONE.add_checked(t * s);
            t = t.add_checked(s4 * u);
            t.0 = ldexp2k(t.0, q);
            t.1 = ldexp2k(t.1, q);
            // Flush both halves to zero for very negative arguments.
            t.0 = F64x::from_bits(
                !U64x::from_bits(d.0.lt(F64x::splat(-1000.))) & U64x::from_bits(t.0),
            );
            t.1 = F64x::from_bits(
                !U64x::from_bits(d.0.lt(F64x::splat(-1000.))) & U64x::from_bits(t.1),
            );
            t
        }

        /// log of a double-double argument, double-double result.
        #[inline]
        fn logk2(d: Doubled<F64x>) -> Doubled<F64x> {
            let e = ilogbk(d.0 * F64x::splat(1. / 0.75));
            let m = Doubled::new(ldexp2k(d.0, -e), ldexp2k(d.1, -e));
            let x = (m + F64x::splat(-1.)) / (m + ONE);
            let x2 = x.square();
            let x4 = x2.0 * x2.0;
            let x8 = x4 * x4;
            let t = F64x::poly7(
                x2.0,
                x4,
                x8,
                0.138_604_363_904_671_679_108_56,
                0.131_699_838_841_615_374_240_845,
                0.153_914_168_346_271_945_653_214,
                0.181_816_523_941_564_611_721_589,
                0.222_222_246_326_620_354_039_96,
                0.285_714_285_511_134_091_777_308,
                0.400_000_000_000_914_013_309_483,
            )
            .mul_add(x2.0, F64x::splat(0.666_666_666_666_664_853_302_393));
            let mut s = Doubled::from((
                0.693_147_180_559_945_286_226_764,
                2.319_046_813_846_299_558_417_771_e-17,
            )) * F64x::from_cast(e);
            s = s.add_checked(x.scale(F64x::splat(2.)));
            s.add_checked(x2 * x * t)
        }

        /// Splats an i64 built from two i32 halves (hi, lo) into every lane.
        #[inline]
        const fn splat2i(i0: i32, i1: i32) -> I64x {
            I64x::splat(((i0 as i64) << 32) + (i1 as i64))
        }

        /// Like `splat2i` but from unsigned halves, as I64x.
        #[inline]
        const fn splat2u(u0: u32, u1: u32) -> I64x {
            I64x::splat((((u0 as u64) << 32) + (u1 as u64)) as i64)
        }

        /// Like `splat2u` but as U64x.
        #[inline]
        const fn splat2uu(u0: u32, u1: u32) -> U64x {
            U64x::splat(((u0 as u64) << 32) + (u1 as u64))
        }

        #[inline]
        pub fn fabs(x: F64x) -> F64x {
            x.abs()
        }

        #[inline]
        pub fn copysign(x: F64x, y: F64x) -> F64x {
            x.copy_sign(y)
        }

        // On x86 the native max/min propagate the SECOND operand on NaN, so
        // only y needs the explicit NaN check.
        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] // && !defined(ENABLE_VECEXT) && !defined(ENABLE_PUREC)
        pub fn fmax(x: F64x, y: F64x) -> F64x {
            y.is_nan().select(x, x.max(y))
        }
        #[cfg(all(not(target_arch = "x86"), not(target_arch = "x86_64")))]
        pub fn fmax(x: F64x, y: F64x) -> F64x {
            y.is_nan().select(x, x.gt(y).select(x, y))
        }
        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] // && !defined(ENABLE_VECEXT) && !defined(ENABLE_PUREC)
        pub fn fmin(x: F64x, y: F64x) -> F64x {
            y.is_nan().select(x, x.min(y))
        }
        #[cfg(all(not(target_arch = "x86"), not(target_arch = "x86_64")))]
        pub fn fmin(x: F64x, y: F64x) -> F64x {
            y.is_nan().select(x, y.gt(x).select(x, y))
        }

        /// Positive difference: max(x - y, 0), with x == y forced to +0.
        pub fn fdim(x: F64x, y: F64x) -> F64x {
            let ret = x - y;
            (ret.lt(ZERO) | x.eq(y)).select(ZERO, ret)
        }

        /// Round toward zero. The two-step D1_31X reduction keeps the
        /// fractional part exactly representable; |x| >= 2^52 (or inf) is
        /// already integral and passes through.
        pub fn trunc(x: F64x) -> F64x {
            let mut fr = x - D1_31X * F64x::from_cast((x * (ONE / D1_31X)).trunci());
            fr -= F64x::from_cast(fr.trunci());
            (x.is_infinite() | x.abs().ge(D1_52X)).select(x, (x - fr).copy_sign(x))
        }

        /// Round toward negative infinity.
        pub fn floor(x: F64x) -> F64x {
            let mut fr = x - D1_31X * F64x::from_cast((x * (ONE / D1_31X)).trunci());
            fr -= F64x::from_cast(fr.trunci());
            fr = fr.lt(ZERO).select(fr + ONE, fr);
            (x.is_infinite() | x.abs().ge(D1_52X)).select(x, (x - fr).copy_sign(x))
        }

        /// Round toward positive infinity.
        pub fn ceil(x: F64x) -> F64x {
            let mut fr = x - D1_31X * F64x::from_cast((x * (ONE / D1_31X)).trunci());
            fr -= F64x::from_cast(fr.trunci());
            fr = fr.le(ZERO).select(fr, fr - ONE);
            (x.is_infinite() | x.abs().ge(D1_52X)).select(x, (x - fr).copy_sign(x))
        }

        /// Round half away from zero. The 0.499...944_49 constant is the
        /// largest double below 0.5, handled specially so it rounds to 0.
        pub fn round(d: F64x) -> F64x {
            let mut x = d + HALF;
            let mut fr = x - D1_31X * F64x::from_cast((x * (ONE / D1_31X)).trunci());
            fr -= F64x::from_cast(fr.trunci());
            x = (x.le(ZERO) & fr.eq(ZERO)).select(x - ONE, x);
            fr = fr.lt(ZERO).select(fr + ONE, fr);
            x = d
                .eq(F64x::splat(0.499_999_999_999_999_944_49))
                .select(ZERO, x);
            (d.is_infinite() | d.abs().ge(D1_52X)).select(d, (x - fr).copy_sign(d))
        }

        /// Round half to even (banker's rounding).
        pub fn rint(d: F64x) -> F64x {
            let mut x = d + HALF;
            let mut fr = x - D1_31X * F64x::from_cast((x * (ONE / D1_31X)).trunci());
            let isodd = M64x::from_cast((Ix::splat(1) & fr.trunci()).eq(Ix::splat(1)));
            fr -= F64x::from_cast(fr.trunci());
            fr = (fr.lt(ZERO) | (fr.eq(ZERO) & isodd)).select(fr + ONE, fr);
            x = d
                .eq(F64x::splat(0.500_000_000_000_000_111_02))
                .select(ZERO, x);
            (d.is_infinite() | d.abs().ge(D1_52X)).select(d, (x - fr).copy_sign(d))
        }

        /// Next representable value after x toward y, implemented by
        /// incrementing/decrementing the ordered integer bit pattern (the
        /// `splat2*` masks handle the carry between 32-bit halves).
        pub fn nextafter(x: F64x, y: F64x) -> F64x {
            let x = x.eq(ZERO).select(ZERO.mul_sign(y), x);
            let mut xi2 = I64x::from_bits(x);
            let c = x.is_sign_negative() ^ y.ge(x);
            let mut t = (xi2 ^ splat2u(0x_7fff_ffff, 0x_ffff_ffff)) + splat2i(0, 1);
            t += swap_upper_lower(splat2i(0, 1) & I64x::from_bits(t.eq(splat2i(-1, 0))));
            xi2 = I64x::from_bits(c.select(F64x::from_bits(t), F64x::from_bits(xi2)));
            xi2 -= I64x::from_cast(U64x::from_bits(x.ne(y)) & splat2uu(0, 1));
            xi2 = I64x::from_bits(x.ne(y).select(
                F64x::from_bits(
                    xi2 + swap_upper_lower(splat2i(0, -1) & I64x::from_bits(xi2.eq(splat2i(0, -1)))),
                ),
                F64x::from_bits(xi2),
            ));
            let mut t = (xi2 ^ splat2u(0x_7fff_ffff, 0x_ffff_ffff)) + splat2i(0, 1);
            t += swap_upper_lower(splat2i(0, 1) & I64x::from_bits(t.eq(splat2i(-1, 0))));
            xi2 = I64x::from_bits(c.select(F64x::from_bits(t), F64x::from_bits(xi2)));
            let mut ret = F64x::from_bits(xi2);
            ret = (ret.eq(ZERO) & x.ne(ZERO)).select(ZERO.mul_sign(x), ret);
            ret = (x.eq(ZERO) & y.eq(ZERO)).select(y, ret);
            (x.is_nan() | y.is_nan()).select(F64x::NAN, ret)
        }

        /// frexp fraction part: the significand of x scaled into [0.5, 1),
        /// built by overwriting the exponent field with a fixed 0x3fe.
        pub fn frfrexp(x: F64x) -> F64x {
            let x = x
                .abs()
                .lt(F64x::splat(f64::MIN_POSITIVE))
                .select(x * D1_63X, x);
            let mut xm = U64x::from_bits(x);
            xm &= splat2uu(!0x_7ff0_0000, !0);
            xm |= splat2uu(0x_3fe0_0000, 0);
            let ret = F64x::from_bits(xm);
            let ret = x.is_infinite().select(F64x::INFINITY.mul_sign(x), ret);
            x.eq(ZERO).select(x, ret)
        }

        /// frexp exponent part (matching `frfrexp` above); 0 for 0/NaN/inf.
        // NOTE(review): unlike `expfrexp`'s subnormal pre-scale by 2^63, the
        // returned exponent is NOT compensated here — confirm against scalar
        // frexp for subnormal inputs.
        pub fn expfrexp(x: F64x) -> Ix {
            let x = x
                .abs()
                .lt(F64x::splat(f64::MIN_POSITIVE))
                .select(x * D1_63X, x);
            let mut ret = cast_from_upper(U64x::from_bits(x));
            ret = (Ix::from_bits(Ux::from_bits(ret) >> 20) & Ix::splat(0x7ff)) - Ix::splat(0x3fe);
            (x.eq(ZERO) | x.is_nan() | x.is_infinite()).select(Ix::splat(0), ret)
        }

        /// Correctly rounded fused multiply-add emulated with double-double
        /// arithmetic; pre-scales tiny/huge intermediates to avoid spurious
        /// under/overflow, then undoes the scale via q.
        pub fn fma(mut x: F64x, mut y: F64x, mut z: F64x) -> F64x {
            let mut h2 = x * y + z;
            let mut q = ONE;
            const C0: F64x = D1_54X;
            let c1: F64x = C0 * C0;
            let c2: F64x = c1 * c1;
            let o = h2.abs().lt(F64x::splat(1e-300));
            {
                x = o.select(x * c1, x);
                y = o.select(y * c1, y);
                z = o.select(z * c2, z);
                q = o.select(ONE / c2, q);
            }
            let o = h2.abs().gt(F64x::splat(1e+300));
            {
                x = o.select(x * (ONE / c1), x);
                y = o.select(y * (ONE / c1), y);
                z = o.select(z * (ONE / c2), z);
                q = o.select(c2, q);
            }
            let d = x.mul_as_doubled(y) + z;
            let ret = (x.eq(ZERO) | y.eq(ZERO)).select(z, d.0 + d.1);
            // Finite x*y plus infinite z yields z.
            let mut o = z.is_infinite();
            o = !x.is_infinite() & o;
            o = !x.is_nan() & o;
            o = !y.is_infinite() & o;
            o = !y.is_nan() & o;
            h2 = o.select(z, h2);
            let o = h2.is_infinite() | h2.is_nan();
            o.select(h2, ret * q)
        }

        //#[cfg(feature = "accurate_sqrt")]
        pub fn sqrt(d: F64x) -> F64x {
            d.sqrt()
        }
        // fall back to approximation if ACCURATE_SQRT is undefined
        /*#[cfg(not(feature = "accurate_sqrt"))]
        pub fn xsqrt(d: F64x) -> F64x {
            u05::sqrt(d)
        }*/

        /* TODO AArch64: potential optimization by using `vfmad_lane_f64` */
        /// Floating-point remainder by iterative subtraction of quotient
        /// estimates in double-double precision.
        pub fn fmod(x: F64x, y: F64x) -> F64x {
            #[inline]
            fn toward0(x: F64x) -> F64x {
                // returns nextafter(x, 0)
                let t = F64x::from_bits(U64x::from_bits(x) + U64x::from_bits(splat2i(-1, -1)));
                x.eq(ZERO).select(ZERO, t)
            }

            #[cfg(feature = "full_fp_rounding")]
            #[inline]
            fn trunc_positive(x: F64x) -> F64x {
                // round to integer toward 0, positive argument only
                x.trunc()
            }
            #[cfg(not(feature = "full_fp_rounding"))]
            #[inline]
            fn trunc_positive(x: F64x) -> F64x {
                let mut fr = (-D1_31X).mul_add(F64x::from_cast((x * (ONE / D1_31X)).trunci()), x);
                fr -= F64x::from_cast(fr.trunci());
                x.abs().ge(D1_52X).select(x, x - fr)
            }

            let nu = x.abs();
            let de = y.abs();
            let s = ONE;
            // Scale up a subnormal divisor so its reciprocal is finite.
            let o = de.lt(F64x::splat(f64::MIN_POSITIVE));
            let nu = o.select(nu * D1_54X, nu);
            let de = o.select(de * D1_54X, de);
            let s = o.select(s * (ONE / D1_54X), s);
            let rde = toward0(de.recpre());
            let mut r = Doubled::new(nu, ZERO);
            for _ in 0..21 {
                // ceil(log2(DBL_MAX) / 51) + 1
                let q = ((de + de).gt(r.0) & r.0.ge(de)).select(ONE, toward0(r.0) * rde);
                let q = F64x::from_bits(
                    U64x::from_bits(trunc_positive(q)) & splat2uu(0x_ffff_ffff, 0x_ffff_fffe),
                );
                r = (r + q.mul_as_doubled(-de)).normalize();
                if r.0.lt(de).all() {
                    break;
                }
            }
            let mut ret = r.0 * s;
            ret = (r.0 + r.1).eq(de).select(ZERO, ret);
            ret = ret.mul_sign(x);
            ret = nu.lt(de).select(x, ret);
            de.eq(ZERO).select(F64x::NAN, ret)
        }

        /* TODO AArch64: potential optimization by using `vfmad_lane_f64` */
        /// Gamma kernel: returns (log-gamma part, gamma ratio part) as
        /// double-double pairs, combined by the public tgamma/lgamma.
        /// Branch masks: `otiny` = |a| tiny, `oref` = reflection (a < 0.5),
        /// `o0`/`o2` pick between the three coefficient sets per range.
        fn gammak(a: F64x) -> (Doubled<F64x>, Doubled<F64x>) {
            let mut clln = Doubled::from((1., 0.));
            let mut clld = Doubled::from((1., 0.));
            let otiny = a.abs().lt(F64x::splat(1e-306));
            let oref = a.lt(HALF);
            let mut x = otiny.select_doubled(
                Doubled::from((0., 0.)),
                oref.select_doubled(ONE.add_as_doubled(-a), Doubled::new(a, ZERO)),
            );
            let o0 = HALF.le(x.0) & x.0.le(F64x::splat(1.1));
            let o2 = F64x::splat(2.3).le(x.0);
            // Shift small arguments up by 5 via the recurrence
            // gamma(x+1) = x*gamma(x), accumulating the product in clln.
            let mut y = ((x + ONE) * x).normalize();
            y = ((x + F64x::splat(2.)) * y).normalize();
            y = ((x + F64x::splat(3.)) * y).normalize();
            y = ((x + F64x::splat(4.)) * y).normalize();
            let o = o2 & x.0.le(F64x::splat(7.));
            clln = o.select_doubled(y, clln);
            x = o.select_doubled(x + F64x::splat(5.), x);
            let t = o2.select(x.0.recpre(), (x + o0.select_splat(-1., -2.)).normalize().0);
            // Per-range minimax coefficients: (o2-branch, o0-branch, else).
            let u = F64x::select3(
                o2,
                o0,
                -156.801_412_704_022_726_379_848_862,
                0.294_791_677_282_761_419_6_e+2,
                0.707_481_600_086_460_927_9_e-7,
            )
            .mul_add(
                t,
                F64x::select3(
                    o2,
                    o0,
                    1.120_804_464_289_911_606_838_558_16,
                    0.128_145_969_182_782_010_9_e+3,
                    0.400_924_433_300_873_044_3_e-6,
                ),
            )
            .mul_add(
                t,
                F64x::select3(
                    o2,
                    o0,
                    13.397_985_455_142_589_218_333_060_2,
                    0.261_754_402_578_451_504_3_e+3,
                    0.104_011_464_162_824_694_6_e-5,
                ),
            )
            .mul_add(
                t,
                F64x::select3(
                    o2,
                    o0,
                    -0.116_546_276_599_463_200_848_033_357,
                    0.328_702_285_568_579_043_2_e+3,
                    0.150_834_915_073_332_916_7_e-5,
                ),
            )
            .mul_add(
                t,
                F64x::select3(
                    o2,
                    o0,
                    -1.391_801_093_265_337_481_495_562_41,
                    0.281_814_586_773_034_818_6_e+3,
                    0.128_814_307_493_390_102_e-5,
                ),
            )
            .mul_add(
                t,
                F64x::select3(
                    o2,
                    o0,
                    0.015_056_113_040_026_424_412_918_973_4,
                    0.172_867_041_467_355_960_5_e+3,
                    0.474_416_774_988_499_393_7_e-6,
                ),
            )
            .mul_add(
                t,
                F64x::select3(
                    o2,
                    o0,
                    0.179_540_117_061_234_856_098_844_714,
                    0.774_873_576_403_041_681_7_e+2,
                    -0.655_481_630_654_248_990_2_e-7,
                ),
            )
            .mul_add(
                t,
                F64x::select3(
                    o2,
                    o0,
                    -0.002_481_743_600_264_997_730_942_489_28,
                    0.251_285_664_308_093_075_2_e+2,
                    -0.318_925_247_145_259_984_4_e-6,
                ),
            )
            .mul_add(
                t,
                F64x::select3(
                    o2,
                    o0,
                    -0.029_527_880_945_699_120_504_851_034_1,
                    0.576_679_210_614_007_686_8_e+1,
                    0.135_888_382_147_035_537_7_e-6,
                ),
            )
            .mul_add(
                t,
                F64x::select3(
                    o2,
                    o0,
                    0.000_540_164_767_892_604_515_196_325_186,
                    0.727_027_547_399_618_057_1,
                    -0.434_393_127_715_733_604_e-6,
                ),
            )
            .mul_add(
                t,
                F64x::select3(
                    o2,
                    o0,
                    0.006_403_362_833_808_069_794_787_256_2,
                    0.839_670_912_457_914_780_9_e-1,
                    0.972_478_589_740_677_955_5_e-6,
                ),
            )
            .mul_add(
                t,
                F64x::select3(
                    o2,
                    o0,
                    -0.000_162_516_262_783_915_816_896_611_252,
                    -0.821_155_866_974_680_459_5_e-1,
                    -0.203_688_605_722_596_601_1_e-5,
                ),
            )
            .mul_add(
                t,
                F64x::select3(
                    o2,
                    o0,
                    -0.001_914_438_498_565_477_526_465_972_39,
                    0.682_883_182_834_188_445_8_e-1,
                    0.437_336_314_181_972_581_5_e-5,
                ),
            )
            .mul_add(
                t,
                F64x::select3(
                    o2,
                    o0,
                    7.204_895_416_020_010_558_983_115_17_e-5,
                    -0.771_248_133_996_167_151_1_e-1,
                    -0.943_995_126_830_400_867_7_e-5,
                ),
            )
            .mul_add(
                t,
                F64x::select3(
                    o2,
                    o0,
                    0.000_839_498_720_672_087_279_971_000_786,
                    0.833_749_202_301_731_495_7_e-1,
                    0.205_072_703_037_638_980_4_e-4,
                ),
            )
            .mul_add(
                t,
                F64x::select3(
                    o2,
                    o0,
                    -5.171_790_908_260_592_193_293_944_22_e-5,
                    -0.909_496_493_145_624_251_8_e-1,
                    -0.449_262_018_343_118_401_8_e-4,
                ),
            )
            .mul_add(
                t,
                F64x::select3(
                    o2,
                    o0,
                    -0.000_592_166_437_353_693_882_857_342_347,
                    0.100_099_631_357_592_935_8,
                    0.994_575_123_607_187_593_1_e-4,
                ),
            )
            .mul_add(
                t,
                F64x::select3(
                    o2,
                    o0,
                    6.972_813_758_365_857_774_037_435_39_e-5,
                    -0.111_334_286_154_420_772_4,
                    -0.223_154_759_903_498_319_6_e-3,
                ),
            )
            .mul_add(
                t,
                F64x::select3(
                    o2,
                    o0,
                    0.000_784_039_221_720_066_627_493_314_301,
                    0.125_509_667_321_302_087_5,
                    0.509_669_524_710_196_762_2_e-3,
                ),
            )
            .mul_add(
                t,
                F64x::select3(
                    o2,
                    o0,
                    -0.000_229_472_093_621_399_176_949_318_732,
                    -0.144_049_896_784_305_436_8,
                    -0.119_275_391_166_788_697_1_e-2,
                ),
            )
            .mul_add(
                t,
                F64x::select3(
                    o2,
                    o0,
                    -0.002_681_327_160_493_827_160_473_958_490,
                    0.169_557_177_004_194_981_1,
                    0.289_051_033_074_221_031_e-2,
                ),
            )
            .mul_add(
                t,
                F64x::select3(
                    o2,
                    o0,
                    0.003_472_222_222_222_222_222_175_164_840,
                    -0.207_385_551_028_409_276_2,
                    -0.738_555_102_867_446_185_8_e-2,
                ),
            )
            .mul_add(
                t,
                F64x::select3(
                    o2,
                    o0,
                    0.083_333_333_333_333_333_335_592_087_900,
                    0.270_580_808_427_781_593_9,
                    0.205_808_084_277_845_533_5_e-1,
                ),
            );
            // Stirling branch (o2): lgamma(x) ≈ (x-0.5)·log(x) - x + 0.5·log(2π) + series.
            let mut y = (x + F64x::splat(-0.5)) * logk2(x);
            y += -x;
            y += Doubled::from((
                0.918_938_533_204_672_780_56,
                -3.878_294_158_067_241_449_8_e-17,
            )); // 0.5*log(2*M_PI)
            let mut z = u.mul_as_doubled(t)
                + o0.select_splat(
                    -0.400_685_634_386_531_486_2,
                    -0.673_523_010_531_981_020_1_e-1,
                );
            z = z * t + o0.select_splat(0.822_467_033_424_113_203, 0.322_467_033_424_113_203);
            z = z * t + o0.select_splat(-0.577_215_664_901_532_865_5, 0.422_784_335_098_467_134_5);
            z *= t;
            let mut clc = o2.select_doubled(y, z);
            clld = o2.select_doubled(u.mul_as_doubled(t) + ONE, clld);
            y = clln;
            clc = otiny.select_doubled(
                Doubled::from((
                    83.177_661_667_193_433_459_033_3,
                    3.671_034_596_315_685_072_218_78_e-15,
                )), // log(2^120)
                oref.select_doubled(
                    Doubled::<F64x>::from((
                        1.144_729_885_849_400_163_9,
                        1.026_595_116_270_782_638_e-17,
                    )) + (-clc),
                    clc,
                ),
            ); // log(M_PI)
            clln = otiny.select_doubled(Doubled::from((1., 0.)), oref.select_doubled(clln, clld));
            // Reflection formula needs sin(πa); reduce a mod 2^28 first.
            if !(!oref).all() {
                let t = a - D1_28X * F64x::from_cast((a * (ONE / D1_28X)).trunci());
                x = clld * sinpik(t);
            }
            clld = otiny.select_doubled(
                Doubled::new(a * (D1_60X * D1_60X), ZERO),
                oref.select_doubled(x, y),
            );
            (clc, clln / clld)
        }

        /// sin(π d) in double-double precision (same polynomial layout as
        /// `cospik`, different quadrant handling).
        #[inline]
        fn sinpik(d: F64x) -> Doubled<F64x> {
            let u = d * F64x::splat(4.);
            let mut q = u.trunci();
            q = (q + (Ix::from_bits(Ux::from_bits(q) >> 31) ^ Ix::splat(1))) & Ix::splat(!1);
            let o = M64x::from_cast((q & Ix::splat(2)).eq(Ix::splat(2)));
            let s = u - F64x::from_cast(q);
            let t = s;
            let s = s * s;
            let s2 = t.mul_as_doubled(t); // t^2 as a hi/lo pair
            let u = o
                .select_splat(
                    9.944_803_876_268_437_740_902_08_e-16,
                    -2.024_611_207_851_823_992_958_68_e-14,
                )
                .mul_add(
                    s,
                    o.select_splat(
                        -3.897_962_260_629_327_991_640_47_e-13,
                        6.948_218_305_801_794_613_277_84_e-12,
                    ),
                )
                .mul_add(
                    s,
                    o.select_splat(
                        1.150_115_825_399_960_352_669_01_e-10,
                        -1.757_247_499_528_531_799_526_64_e-9,
                    ),
                )
                .mul_add(
                    s,
                    o.select_splat(
                        -2.461_136_950_104_469_749_535_9_e-8,
                        3.133_616_889_668_683_928_784_22_e-7,
                    ),
                )
                .mul_add(
                    s,
                    o.select_splat(
                        3.590_860_448_590_527_540_050_62_e-6,
                        -3.657_620_418_216_155_192_036_1_e-5,
                    ),
                )
                .mul_add(
                    s,
                    o.select_splat(
                        -0.000_325_991_886_927_389_905_997_954,
                        0.002_490_394_570_192_718_502_743_56,
                    ),
                );
            let mut x = u * s
                + o.select_doubled(
                    Doubled::new(
                        F64x::splat(0.015_854_344_243_815_501_891_425_9),
                        F64x::splat(-1.046_932_722_806_315_219_088_45_e-18),
                    ),
                    Doubled::new(
                        F64x::splat(-0.080_745_512_188_280_785_248_473_1),
                        F64x::splat(3.618_524_750_670_371_048_499_87_e-18),
                    ),
                );
            x = s2 * x
                + o.select_doubled(
                    Doubled::new(
                        F64x::splat(-0.308_425_137_534_042_437_259_529),
                        F64x::splat(-1.956_984_921_336_335_503_383_45_e-17),
                    ),
                    Doubled::new(
                        F64x::splat(0.785_398_163_397_448_278_999_491),
                        F64x::splat(3.062_871_137_271_550_026_071_05_e-17),
                    ),
                );
            x *= o.select_doubled(s2, Doubled::new(t, ZERO));
            x = o.select_doubled(x + ONE, x);
            // Negate in quadrants where sin is negative.
            let o = M64x::from_cast((q & Ix::splat(4)).eq(Ix::splat(4)));
            x.0 = F64x::from_bits(
                (U64x::from_bits(o) & U64x::from_bits(NEG_ZERO)) ^ U64x::from_bits(x.0),
            );
            x.1 = F64x::from_bits(
                (U64x::from_bits(o) & U64x::from_bits(NEG_ZERO)) ^ U64x::from_bits(x.1),
            );
            x
        }

        /// Splits x into (fractional part, integral part), both carrying
        /// x's sign (C `modf` semantics).
        pub fn modf(x: F64x) -> (F64x, F64x) {
            let mut fr = x - D1_31X * F64x::from_cast((x * (ONE / D1_31X)).trunci());
            fr -= F64x::from_cast(fr.trunci());
            fr = x.abs().gt(D1_52X).select(ZERO, fr);
            (fr.copy_sign(x), (x - fr).copy_sign(x))
        }
    };
}
34.864286
121
0.425827
edfa5f492be97d2ddff80b24683c7c0d1ce4dbbc
3,060
use std::convert::{From}; use std::collections::btree_map::BTreeMap; use rustc_serialize::json::{ToJson, Json}; use url::{Url}; use chrono::{DateTime,FixedOffset}; use primitive::{Dec, Time}; pub mod value; pub use element::value::{Value,ValueType}; pub struct Element { pub name: String, pub value: Value } impl Element { pub fn id(mut self, id: &str) -> Self { let v = self.value; self.value = v.id(id); self } pub fn valid_extension(&self) -> bool { match self.name.as_ref() { "Coding" => true, "CodeableConcept" => true, "Attachment" => true, "Identifier" => true, "Quantity" => true, "Range" => true, "Period" => true, "Ratio" => true, "HumanName" => true, "Address" => true, "ContactPoint" => true, "Timing" => true, "Signature" => true, "Reference" => true, _ => false } } pub fn extension_name(&self) -> String { format!("value{}",self.name) } } trait InternalToJson { fn _to_json(&self) -> Json; } impl InternalToJson for Vec<Element> { fn _to_json(&self) -> Json { let mut o: BTreeMap<String,Json> = BTreeMap::new(); for e in self.iter() { let mut keys = e.value.keys(&e.name); while let Some((name, json)) = keys.pop() { o.insert(name,json); } } Json::Object(o) } } impl InternalToJson for Element { fn _to_json(&self) -> Json { self.value.to_json() } } pub trait NamedFrom<T> { fn with(name: &str, val: T) -> Self; } macro_rules! 
gen_named { ($t:ty) => { impl NamedFrom<$t> for Element { fn with(name: &str, v: $t) -> Self { Element { name: name.to_string(), value: Value::from(v) } } } } } gen_named!(bool); gen_named!(i32); gen_named!(u32); gen_named!(Dec); gen_named!(String); gen_named!(Time); gen_named!(Url); gen_named!(DateTime<FixedOffset>); impl<'a> NamedFrom<&'a str> for Element { fn with(name: &str, val: &'a str) -> Self { Element { name: String::from(name), value: Value::from(val) } } } impl NamedFrom<Vec<Element>> for Element { fn with(name: &str, val: Vec<Element>) -> Self { Element { name: name.to_string(), value: Value { value: ValueType::Elt(val), id: None, extension: Vec::new()} } } } impl NamedFrom<Vec<Value>> for Element { fn with(name: &str, val: Vec<Value>) -> Self { Element { name: name.to_string(), value: Value { value: ValueType::List(val), id: None, extension: Vec::new()} } } } fn make_test_elt() -> Element { let e1 = Element::with("foo",false) .id("quux"); let e2 = Element::with("bar",false); let e3 = Element::with("baz",23u32); let e_second = Element::with("second", vec![e3]); let e_list = Element::with("list", vec![ Value::from(true), Value::from(true).id("abc123")]); let e_top = Element::with("top", vec![e1,e2,e_second,e_list]); e_top } #[test] fn test_compound_elt() { let expected = Json::from_str(r#"{"foo": false, "_foo": {"id": "quux"}, "bar": false, "second": { "baz": 23 }, "list": [true,true], "_list": [null, {"id":"abc123"}]}"#).unwrap(); assert_eq!(expected, make_test_elt()._to_json()); }
21.103448
179
0.609804
768a09ba3b9249620430982b9d387d5a00a62a43
308
use super::State; impl State { pub fn create_shader(device: &wgpu::Device) -> wgpu::ShaderModule { device.create_shader_module(&wgpu::ShaderModuleDescriptor { label: Some("Shader"), source: wgpu::ShaderSource::Wgsl(include_str!("shader.wgsl").into()), }) } }
28
81
0.616883
dd17d1b8c44ae9e1ed18e5cdfd312a51d83f4d30
6,852
use ggez::{Context, GameResult};
use ggez::audio::{Source};
use ggez::event::{MouseButton};
use nalgebra::{Point2};

use {
    rivr::input::{PcInputHandler},
    spacegame_game::{
        state::{
            ship::{Ship, Task},
            normalize_area, BuildState, BuildDrag, BuildChoice, Camera,
        },
    },
};

/// Translates mouse input into ship build/destroy actions, tracking the
/// hover/drag selection in `BuildState` and queueing a placement sound
/// effect when an action actually changes something.
pub struct BuildInputHandler {
    /// Last in-bounds tile the cursor was over; `None` while the cursor is
    /// over UI or outside the ship's tile grid.
    last_tile_position: Option<Point2<i32>>,
    /// Set by the event handlers when an action succeeded; consumed (and
    /// played at most once per frame) by `update`.
    build_sound_queued: bool,
    /// Sound played when a build/destroy action takes effect.
    place_sound: Source,
}

impl BuildInputHandler {
    /// Loads the placement sound effect from the resource path.
    ///
    /// Returns an error if the audio file cannot be loaded.
    pub fn new(
        ctx: &mut Context,
    ) -> GameResult<Self> {
        let mut place_sound = Source::new(ctx, "/object_placed.ogg")?;
        place_sound.set_volume(0.2);

        Ok(BuildInputHandler {
            last_tile_position: None,
            build_sound_queued: false,
            place_sound,
        })
    }

    /// Per-frame tick: plays the placement sound if any handler queued one
    /// since the last update, then clears the flag so a whole drag of many
    /// tiles produces a single sound.
    pub fn update(
        &mut self
    ) -> GameResult<()> {
        if self.build_sound_queued {
            self.place_sound.play()?;
            self.build_sound_queued = false;
        }

        Ok(())
    }

    /// Left-button press: begins a drag selection from the currently
    /// hovered tile. Ignored when no build tool is selected.
    pub fn handle_mouse_down(&mut self, button: MouseButton, state: &mut BuildState) {
        if button != MouseButton::Left || state.choice == BuildChoice::None {
            return
        }

        // If we were currently hovering, switch over to dragging
        if let BuildDrag::Hovering { position: Some(hovered_tile) } = state.drag {
            state.drag = BuildDrag::Dragging {
                start: hovered_tile,
                end: hovered_tile,
            }
        }
    }

    /// Button release: left applies the pending drag action, right cancels
    /// the drag and deselects the tool. Ignored when no tool is selected.
    pub fn handle_mouse_up(
        &mut self, button: MouseButton, state: &mut BuildState, ship: &mut Ship
    ) -> GameResult<()> {
        if state.choice == BuildChoice::None {
            return Ok(())
        }

        match button {
            MouseButton::Left => self.handle_build_up(state, ship),
            MouseButton::Right => self.handle_cancel_up(state)?,
            _ => {},
        }

        Ok(())
    }

    /// Applies the selected build tool to every tile in the dragged
    /// rectangle, then returns the selection to hover mode.
    ///
    /// Raises `ship.tiles.changed` once at the end if any tile data was
    /// mutated, so listeners rebuild at most once per drag.
    fn handle_build_up(&mut self, state: &mut BuildState, ship: &mut Ship) {
        // If we were currently dragging, switch back to hovering
        if let BuildDrag::Dragging { start, end } = state.drag {
            let mut world_changed = false;

            // This also means we finished a build, so let's apply it
            // NOTE(review): the `..` ranges below rely on normalize_area
            // returning an inclusive start / exclusive end — confirm against
            // its definition.
            let (start, end) = normalize_area(start, end);
            for y in start.y..end.y {
                for x in start.x..end.x {
                    let tile_pos = Point2::new(x, y);
                    match state.choice {
                        // Guarded against at both call sites above.
                        BuildChoice::None => unreachable!(),
                        BuildChoice::Floor => {
                            // Lay floor immediately; only count it as a
                            // change (and a sound) if the tile had none.
                            let tile = ship.tiles.get_mut(tile_pos).unwrap();
                            if !tile.floor {
                                tile.floor = true;
                                world_changed = true;
                                self.build_sound_queued = true;
                            }
                        },
                        BuildChoice::Object(id) => {
                            // Objects are not placed directly: a build task
                            // is queued, and only on a floored tile that has
                            // neither an object nor a pending task already.
                            let tile = ship.tiles.get_mut(tile_pos).unwrap();
                            let has_tile = tile.floor;
                            let has_object = tile.object.is_some();
                            let has_task = ship.task_queue.get_at(tile_pos).is_some();
                            if has_tile && !has_object && !has_task {
                                let task = Task::new(tile_pos, id, 1.0);
                                ship.task_queue.queue(task).unwrap();
                                self.build_sound_queued = true;
                            }
                        },
                        BuildChoice::Destroy => {
                            // Removes the object (floor stays) and cancels
                            // any pending build task on the tile.
                            let tile = ship.tiles.get_mut(tile_pos).unwrap();
                            if tile.object.is_some() {
                                world_changed = true;
                                self.build_sound_queued = true;
                            }
                            tile.object = None;

                            if let Some(task_id) = ship.task_queue.get_at(tile_pos) {
                                ship.task_queue.dequeue(task_id).unwrap();
                                self.build_sound_queued = true;
                            }
                        },
                        BuildChoice::DestroyAll => {
                            // Like Destroy, but clears the floor as well.
                            let tile = ship.tiles.get_mut(tile_pos).unwrap();
                            if tile.floor || tile.object.is_some() {
                                world_changed = true;
                                self.build_sound_queued = true;
                            }
                            tile.floor = false;
                            tile.object = None;

                            if let Some(task_id) = ship.task_queue.get_at(tile_pos) {
                                ship.task_queue.dequeue(task_id).unwrap();
                                self.build_sound_queued = true;
                            }
                        },
                    }
                }
            }

            // Actually switch back to hovering now
            state.drag = BuildDrag::Hovering { position: self.last_tile_position };

            if world_changed {
                ship.tiles.changed.raise();
            }
        }
    }

    /// Right-button release: abandons the drag (back to hovering at the
    /// last known tile) and deselects the build tool.
    fn handle_cancel_up(&mut self, state: &mut BuildState) -> GameResult<()> {
        state.drag = BuildDrag::Hovering { position: self.last_tile_position };
        state.choice = BuildChoice::None;

        Ok(())
    }

    /// Tracks the cursor: converts the screen position to a tile and keeps
    /// the hover position / drag end point in `BuildState` up to date.
    /// Positions over UI or outside the ship grid clear the hover but leave
    /// an in-progress drag's end point untouched.
    pub fn handle_mouse_move(
        &mut self,
        mouse_position: Point2<i32>,
        ui_input: &PcInputHandler,
        state: &mut BuildState,
        camera: &mut Camera,
        ship: &Ship,
    ) {
        // Get the position of the cursor in-world
        let world_position = camera.screen_to_world(mouse_position);
        // floor() (not truncation) so negative world coordinates still map
        // to the correct tile.
        let tile_position = Point2::new(
            world_position.x.floor() as i32,
            world_position.y.floor() as i32,
        );

        // Make sure we're not over UI, and the tile we're hovering over is valid
        if !ui_input.is_cursor_over_ui() && ship.tiles.is_in_bounds(tile_position) {
            self.last_tile_position = Some(tile_position);
            match state.drag {
                BuildDrag::Hovering { ref mut position } => *position = Some(tile_position),
                BuildDrag::Dragging { start: _, ref mut end } => *end = tile_position,
            }
        } else {
            self.last_tile_position = None;

            // If this is an invalid tile, the dragging isn't affected, but the
            // hover should be set to None so it won't show up previewed
            if let &mut BuildDrag::Hovering { ref mut position } = &mut state.drag {
                *position = None;
            }
        }
    }
}
34.781726
99
0.479714
23a065a38622d56e3518d72a69a81d1850988989
197,880
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. #[derive(Debug)] pub(crate) struct Handle< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { client: aws_smithy_client::Client<C, M, R>, conf: crate::Config, } /// Client for Amazon MemoryDB /// /// Client for invoking operations on Amazon MemoryDB. Each operation on Amazon MemoryDB is a method on this /// this struct. `.send()` MUST be invoked on the generated operations to dispatch the request to the service. /// /// # Examples /// **Constructing a client and invoking an operation** /// ```rust,no_run /// # async fn docs() { /// // create a shared configuration. This can be used & shared between multiple service clients. /// let shared_config = aws_config::load_from_env().await; /// let client = aws_sdk_memorydb::Client::new(&shared_config); /// // invoke an operation /// /* let rsp = client /// .<operationname>(). /// .<param>("some value") /// .send().await; */ /// # } /// ``` /// **Constructing a client with custom configuration** /// ```rust,no_run /// use aws_config::RetryConfig; /// # async fn docs() { /// let shared_config = aws_config::load_from_env().await; /// let config = aws_sdk_memorydb::config::Builder::from(&shared_config) /// .retry_config(RetryConfig::disabled()) /// .build(); /// let client = aws_sdk_memorydb::Client::from_conf(config); /// # } #[derive(std::fmt::Debug)] pub struct Client< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<Handle<C, M, R>>, } impl<C, M, R> std::clone::Clone for Client<C, M, R> { fn clone(&self) -> Self { Self { handle: self.handle.clone(), } } } #[doc(inline)] pub use aws_smithy_client::Builder; impl<C, M, R> From<aws_smithy_client::Client<C, M, R>> for Client<C, M, R> { fn from(client: aws_smithy_client::Client<C, M, R>) -> Self { Self::with_config(client, 
crate::Config::builder().build()) } } impl<C, M, R> Client<C, M, R> { /// Creates a client with the given service configuration. pub fn with_config(client: aws_smithy_client::Client<C, M, R>, conf: crate::Config) -> Self { Self { handle: std::sync::Arc::new(Handle { client, conf }), } } /// Returns the client's configuration. pub fn conf(&self) -> &crate::Config { &self.handle.conf } } impl<C, M, R> Client<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Constructs a fluent builder for the `BatchUpdateCluster` operation. /// /// See [`BatchUpdateCluster`](crate::client::fluent_builders::BatchUpdateCluster) for more information about the /// operation and its arguments. pub fn batch_update_cluster(&self) -> fluent_builders::BatchUpdateCluster<C, M, R> { fluent_builders::BatchUpdateCluster::new(self.handle.clone()) } /// Constructs a fluent builder for the `CopySnapshot` operation. /// /// See [`CopySnapshot`](crate::client::fluent_builders::CopySnapshot) for more information about the /// operation and its arguments. pub fn copy_snapshot(&self) -> fluent_builders::CopySnapshot<C, M, R> { fluent_builders::CopySnapshot::new(self.handle.clone()) } /// Constructs a fluent builder for the `CreateACL` operation. /// /// See [`CreateACL`](crate::client::fluent_builders::CreateACL) for more information about the /// operation and its arguments. pub fn create_acl(&self) -> fluent_builders::CreateACL<C, M, R> { fluent_builders::CreateACL::new(self.handle.clone()) } /// Constructs a fluent builder for the `CreateCluster` operation. /// /// See [`CreateCluster`](crate::client::fluent_builders::CreateCluster) for more information about the /// operation and its arguments. 
pub fn create_cluster(&self) -> fluent_builders::CreateCluster<C, M, R> { fluent_builders::CreateCluster::new(self.handle.clone()) } /// Constructs a fluent builder for the `CreateParameterGroup` operation. /// /// See [`CreateParameterGroup`](crate::client::fluent_builders::CreateParameterGroup) for more information about the /// operation and its arguments. pub fn create_parameter_group(&self) -> fluent_builders::CreateParameterGroup<C, M, R> { fluent_builders::CreateParameterGroup::new(self.handle.clone()) } /// Constructs a fluent builder for the `CreateSnapshot` operation. /// /// See [`CreateSnapshot`](crate::client::fluent_builders::CreateSnapshot) for more information about the /// operation and its arguments. pub fn create_snapshot(&self) -> fluent_builders::CreateSnapshot<C, M, R> { fluent_builders::CreateSnapshot::new(self.handle.clone()) } /// Constructs a fluent builder for the `CreateSubnetGroup` operation. /// /// See [`CreateSubnetGroup`](crate::client::fluent_builders::CreateSubnetGroup) for more information about the /// operation and its arguments. pub fn create_subnet_group(&self) -> fluent_builders::CreateSubnetGroup<C, M, R> { fluent_builders::CreateSubnetGroup::new(self.handle.clone()) } /// Constructs a fluent builder for the `CreateUser` operation. /// /// See [`CreateUser`](crate::client::fluent_builders::CreateUser) for more information about the /// operation and its arguments. pub fn create_user(&self) -> fluent_builders::CreateUser<C, M, R> { fluent_builders::CreateUser::new(self.handle.clone()) } /// Constructs a fluent builder for the `DeleteACL` operation. /// /// See [`DeleteACL`](crate::client::fluent_builders::DeleteACL) for more information about the /// operation and its arguments. pub fn delete_acl(&self) -> fluent_builders::DeleteACL<C, M, R> { fluent_builders::DeleteACL::new(self.handle.clone()) } /// Constructs a fluent builder for the `DeleteCluster` operation. 
/// /// See [`DeleteCluster`](crate::client::fluent_builders::DeleteCluster) for more information about the /// operation and its arguments. pub fn delete_cluster(&self) -> fluent_builders::DeleteCluster<C, M, R> { fluent_builders::DeleteCluster::new(self.handle.clone()) } /// Constructs a fluent builder for the `DeleteParameterGroup` operation. /// /// See [`DeleteParameterGroup`](crate::client::fluent_builders::DeleteParameterGroup) for more information about the /// operation and its arguments. pub fn delete_parameter_group(&self) -> fluent_builders::DeleteParameterGroup<C, M, R> { fluent_builders::DeleteParameterGroup::new(self.handle.clone()) } /// Constructs a fluent builder for the `DeleteSnapshot` operation. /// /// See [`DeleteSnapshot`](crate::client::fluent_builders::DeleteSnapshot) for more information about the /// operation and its arguments. pub fn delete_snapshot(&self) -> fluent_builders::DeleteSnapshot<C, M, R> { fluent_builders::DeleteSnapshot::new(self.handle.clone()) } /// Constructs a fluent builder for the `DeleteSubnetGroup` operation. /// /// See [`DeleteSubnetGroup`](crate::client::fluent_builders::DeleteSubnetGroup) for more information about the /// operation and its arguments. pub fn delete_subnet_group(&self) -> fluent_builders::DeleteSubnetGroup<C, M, R> { fluent_builders::DeleteSubnetGroup::new(self.handle.clone()) } /// Constructs a fluent builder for the `DeleteUser` operation. /// /// See [`DeleteUser`](crate::client::fluent_builders::DeleteUser) for more information about the /// operation and its arguments. pub fn delete_user(&self) -> fluent_builders::DeleteUser<C, M, R> { fluent_builders::DeleteUser::new(self.handle.clone()) } /// Constructs a fluent builder for the `DescribeACLs` operation. /// /// See [`DescribeACLs`](crate::client::fluent_builders::DescribeACLs) for more information about the /// operation and its arguments. 
pub fn describe_ac_ls(&self) -> fluent_builders::DescribeACLs<C, M, R> { fluent_builders::DescribeACLs::new(self.handle.clone()) } /// Constructs a fluent builder for the `DescribeClusters` operation. /// /// See [`DescribeClusters`](crate::client::fluent_builders::DescribeClusters) for more information about the /// operation and its arguments. pub fn describe_clusters(&self) -> fluent_builders::DescribeClusters<C, M, R> { fluent_builders::DescribeClusters::new(self.handle.clone()) } /// Constructs a fluent builder for the `DescribeEngineVersions` operation. /// /// See [`DescribeEngineVersions`](crate::client::fluent_builders::DescribeEngineVersions) for more information about the /// operation and its arguments. pub fn describe_engine_versions(&self) -> fluent_builders::DescribeEngineVersions<C, M, R> { fluent_builders::DescribeEngineVersions::new(self.handle.clone()) } /// Constructs a fluent builder for the `DescribeEvents` operation. /// /// See [`DescribeEvents`](crate::client::fluent_builders::DescribeEvents) for more information about the /// operation and its arguments. pub fn describe_events(&self) -> fluent_builders::DescribeEvents<C, M, R> { fluent_builders::DescribeEvents::new(self.handle.clone()) } /// Constructs a fluent builder for the `DescribeParameterGroups` operation. /// /// See [`DescribeParameterGroups`](crate::client::fluent_builders::DescribeParameterGroups) for more information about the /// operation and its arguments. pub fn describe_parameter_groups(&self) -> fluent_builders::DescribeParameterGroups<C, M, R> { fluent_builders::DescribeParameterGroups::new(self.handle.clone()) } /// Constructs a fluent builder for the `DescribeParameters` operation. /// /// See [`DescribeParameters`](crate::client::fluent_builders::DescribeParameters) for more information about the /// operation and its arguments. 
pub fn describe_parameters(&self) -> fluent_builders::DescribeParameters<C, M, R> { fluent_builders::DescribeParameters::new(self.handle.clone()) } /// Constructs a fluent builder for the `DescribeServiceUpdates` operation. /// /// See [`DescribeServiceUpdates`](crate::client::fluent_builders::DescribeServiceUpdates) for more information about the /// operation and its arguments. pub fn describe_service_updates(&self) -> fluent_builders::DescribeServiceUpdates<C, M, R> { fluent_builders::DescribeServiceUpdates::new(self.handle.clone()) } /// Constructs a fluent builder for the `DescribeSnapshots` operation. /// /// See [`DescribeSnapshots`](crate::client::fluent_builders::DescribeSnapshots) for more information about the /// operation and its arguments. pub fn describe_snapshots(&self) -> fluent_builders::DescribeSnapshots<C, M, R> { fluent_builders::DescribeSnapshots::new(self.handle.clone()) } /// Constructs a fluent builder for the `DescribeSubnetGroups` operation. /// /// See [`DescribeSubnetGroups`](crate::client::fluent_builders::DescribeSubnetGroups) for more information about the /// operation and its arguments. pub fn describe_subnet_groups(&self) -> fluent_builders::DescribeSubnetGroups<C, M, R> { fluent_builders::DescribeSubnetGroups::new(self.handle.clone()) } /// Constructs a fluent builder for the `DescribeUsers` operation. /// /// See [`DescribeUsers`](crate::client::fluent_builders::DescribeUsers) for more information about the /// operation and its arguments. pub fn describe_users(&self) -> fluent_builders::DescribeUsers<C, M, R> { fluent_builders::DescribeUsers::new(self.handle.clone()) } /// Constructs a fluent builder for the `FailoverShard` operation. /// /// See [`FailoverShard`](crate::client::fluent_builders::FailoverShard) for more information about the /// operation and its arguments. 
pub fn failover_shard(&self) -> fluent_builders::FailoverShard<C, M, R> { fluent_builders::FailoverShard::new(self.handle.clone()) } /// Constructs a fluent builder for the `ListAllowedNodeTypeUpdates` operation. /// /// See [`ListAllowedNodeTypeUpdates`](crate::client::fluent_builders::ListAllowedNodeTypeUpdates) for more information about the /// operation and its arguments. pub fn list_allowed_node_type_updates( &self, ) -> fluent_builders::ListAllowedNodeTypeUpdates<C, M, R> { fluent_builders::ListAllowedNodeTypeUpdates::new(self.handle.clone()) } /// Constructs a fluent builder for the `ListTags` operation. /// /// See [`ListTags`](crate::client::fluent_builders::ListTags) for more information about the /// operation and its arguments. pub fn list_tags(&self) -> fluent_builders::ListTags<C, M, R> { fluent_builders::ListTags::new(self.handle.clone()) } /// Constructs a fluent builder for the `ResetParameterGroup` operation. /// /// See [`ResetParameterGroup`](crate::client::fluent_builders::ResetParameterGroup) for more information about the /// operation and its arguments. pub fn reset_parameter_group(&self) -> fluent_builders::ResetParameterGroup<C, M, R> { fluent_builders::ResetParameterGroup::new(self.handle.clone()) } /// Constructs a fluent builder for the `TagResource` operation. /// /// See [`TagResource`](crate::client::fluent_builders::TagResource) for more information about the /// operation and its arguments. pub fn tag_resource(&self) -> fluent_builders::TagResource<C, M, R> { fluent_builders::TagResource::new(self.handle.clone()) } /// Constructs a fluent builder for the `UntagResource` operation. /// /// See [`UntagResource`](crate::client::fluent_builders::UntagResource) for more information about the /// operation and its arguments. pub fn untag_resource(&self) -> fluent_builders::UntagResource<C, M, R> { fluent_builders::UntagResource::new(self.handle.clone()) } /// Constructs a fluent builder for the `UpdateACL` operation. 
/// /// See [`UpdateACL`](crate::client::fluent_builders::UpdateACL) for more information about the /// operation and its arguments. pub fn update_acl(&self) -> fluent_builders::UpdateACL<C, M, R> { fluent_builders::UpdateACL::new(self.handle.clone()) } /// Constructs a fluent builder for the `UpdateCluster` operation. /// /// See [`UpdateCluster`](crate::client::fluent_builders::UpdateCluster) for more information about the /// operation and its arguments. pub fn update_cluster(&self) -> fluent_builders::UpdateCluster<C, M, R> { fluent_builders::UpdateCluster::new(self.handle.clone()) } /// Constructs a fluent builder for the `UpdateParameterGroup` operation. /// /// See [`UpdateParameterGroup`](crate::client::fluent_builders::UpdateParameterGroup) for more information about the /// operation and its arguments. pub fn update_parameter_group(&self) -> fluent_builders::UpdateParameterGroup<C, M, R> { fluent_builders::UpdateParameterGroup::new(self.handle.clone()) } /// Constructs a fluent builder for the `UpdateSubnetGroup` operation. /// /// See [`UpdateSubnetGroup`](crate::client::fluent_builders::UpdateSubnetGroup) for more information about the /// operation and its arguments. pub fn update_subnet_group(&self) -> fluent_builders::UpdateSubnetGroup<C, M, R> { fluent_builders::UpdateSubnetGroup::new(self.handle.clone()) } /// Constructs a fluent builder for the `UpdateUser` operation. /// /// See [`UpdateUser`](crate::client::fluent_builders::UpdateUser) for more information about the /// operation and its arguments. pub fn update_user(&self) -> fluent_builders::UpdateUser<C, M, R> { fluent_builders::UpdateUser::new(self.handle.clone()) } } pub mod fluent_builders { //! //! Utilities to ergonomically construct a request to the service. //! //! Fluent builders are created through the [`Client`](crate::client::Client) by calling //! one if its operation methods. After parameters are set using the builder methods, //! 
the `send` method can be called to initiate the request. //! /// Fluent builder constructing a request to `BatchUpdateCluster`. /// /// <p>Apply the service update to a list of clusters supplied. For more information on service updates and applying them, see <a href="https://docs.aws.amazon.com/MemoryDB/latest/devguide/managing-updates.html#applying-updates">Applying the service updates</a>.</p> #[derive(std::fmt::Debug)] pub struct BatchUpdateCluster< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::batch_update_cluster_input::Builder, } impl<C, M, R> BatchUpdateCluster<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `BatchUpdateCluster`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::BatchUpdateClusterOutput, aws_smithy_http::result::SdkError<crate::error::BatchUpdateClusterError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::BatchUpdateClusterInputOperationOutputAlias, crate::output::BatchUpdateClusterOutput, crate::error::BatchUpdateClusterError, crate::input::BatchUpdateClusterInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// Appends an item to `ClusterNames`. /// /// To override the contents of this collection use [`set_cluster_names`](Self::set_cluster_names). /// /// <p>The cluster names to apply the updates.</p> pub fn cluster_names(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.cluster_names(inp); self } /// <p>The cluster names to apply the updates.</p> pub fn set_cluster_names( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.inner = self.inner.set_cluster_names(input); self } /// <p>The unique ID of the service update</p> pub fn service_update(mut self, inp: crate::model::ServiceUpdateRequest) -> Self { self.inner = self.inner.service_update(inp); self } /// <p>The unique ID of the service update</p> pub fn set_service_update( mut self, input: std::option::Option<crate::model::ServiceUpdateRequest>, ) -> Self { self.inner = self.inner.set_service_update(input); self } } /// Fluent builder constructing a request to `CopySnapshot`. 
/// /// <p>Makes a copy of an existing snapshot.</p> #[derive(std::fmt::Debug)] pub struct CopySnapshot< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::copy_snapshot_input::Builder, } impl<C, M, R> CopySnapshot<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `CopySnapshot`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::CopySnapshotOutput, aws_smithy_http::result::SdkError<crate::error::CopySnapshotError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::CopySnapshotInputOperationOutputAlias, crate::output::CopySnapshotOutput, crate::error::CopySnapshotError, crate::input::CopySnapshotInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of an existing snapshot from which to make a copy.</p> pub fn source_snapshot_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.source_snapshot_name(inp); self } /// <p>The name of an existing snapshot from which to make a copy.</p> pub fn set_source_snapshot_name( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_source_snapshot_name(input); self } /// <p>A name for the snapshot copy. MemoryDB does not permit overwriting a snapshot, therefore this name must be unique within its context - MemoryDB or an Amazon S3 bucket if exporting.</p> pub fn target_snapshot_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.target_snapshot_name(inp); self } /// <p>A name for the snapshot copy. MemoryDB does not permit overwriting a snapshot, therefore this name must be unique within its context - MemoryDB or an Amazon S3 bucket if exporting.</p> pub fn set_target_snapshot_name( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_target_snapshot_name(input); self } /// <p>The Amazon S3 bucket to which the snapshot is exported. This parameter is used only when exporting a snapshot for external access. 
/// /// When using this parameter to export a snapshot, be sure MemoryDB has the needed permissions to this S3 bucket. For more information, see /// /// <a href="https://docs.aws.amazon.com/MemoryDB/latest/devguide/snapshots-exporting.html">Step 2: Grant MemoryDB Access to Your Amazon S3 Bucket</a>. /// /// </p> pub fn target_bucket(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.target_bucket(inp); self } /// <p>The Amazon S3 bucket to which the snapshot is exported. This parameter is used only when exporting a snapshot for external access. /// /// When using this parameter to export a snapshot, be sure MemoryDB has the needed permissions to this S3 bucket. For more information, see /// /// <a href="https://docs.aws.amazon.com/MemoryDB/latest/devguide/snapshots-exporting.html">Step 2: Grant MemoryDB Access to Your Amazon S3 Bucket</a>. /// /// </p> pub fn set_target_bucket( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_target_bucket(input); self } /// <p>The ID of the KMS key used to encrypt the target snapshot.</p> pub fn kms_key_id(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.kms_key_id(inp); self } /// <p>The ID of the KMS key used to encrypt the target snapshot.</p> pub fn set_kms_key_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_kms_key_id(input); self } /// Appends an item to `Tags`. /// /// To override the contents of this collection use [`set_tags`](Self::set_tags). /// /// <p>A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted.</p> pub fn tags(mut self, inp: impl Into<crate::model::Tag>) -> Self { self.inner = self.inner.tags(inp); self } /// <p>A list of tags to be added to this resource. A tag is a key-value pair. 
A tag key must be accompanied by a tag value, although null is accepted.</p>
    pub fn set_tags(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    ) -> Self {
        self.inner = self.inner.set_tags(input);
        self
    }
}
/// Fluent builder constructing a request to `CreateACL`.
///
/// <p>Creates an Access Control List. For more information, see <a href="https://docs.aws.amazon.com/MemoryDB/latest/devguide/clusters.acls.html">Authenticating users with Access Control Lists (ACLs)</a>.</p>
#[derive(std::fmt::Debug)]
pub struct CreateACL<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::create_acl_input::Builder,
}
impl<C, M, R> CreateACL<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `CreateACL`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
pub async fn send( self, ) -> std::result::Result< crate::output::CreateAclOutput, aws_smithy_http::result::SdkError<crate::error::CreateACLError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::CreateAclInputOperationOutputAlias, crate::output::CreateAclOutput, crate::error::CreateACLError, crate::input::CreateAclInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the Access Control List.</p> pub fn acl_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.acl_name(inp); self } /// <p>The name of the Access Control List.</p> pub fn set_acl_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_acl_name(input); self } /// Appends an item to `UserNames`. /// /// To override the contents of this collection use [`set_user_names`](Self::set_user_names). /// /// <p>The list of users that belong to the Access Control List.</p> pub fn user_names(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.user_names(inp); self } /// <p>The list of users that belong to the Access Control List.</p> pub fn set_user_names( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.inner = self.inner.set_user_names(input); self } /// Appends an item to `Tags`. /// /// To override the contents of this collection use [`set_tags`](Self::set_tags). /// /// <p>A list of tags to be added to this resource. A tag is a key-value pair. 
A tag key must be accompanied by a tag value, although null is accepted.</p> pub fn tags(mut self, inp: impl Into<crate::model::Tag>) -> Self { self.inner = self.inner.tags(inp); self } /// <p>A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted.</p> pub fn set_tags( mut self, input: std::option::Option<std::vec::Vec<crate::model::Tag>>, ) -> Self { self.inner = self.inner.set_tags(input); self } } /// Fluent builder constructing a request to `CreateCluster`. /// /// <p>Creates a cluster. All nodes in the cluster run the same protocol-compliant engine software.</p> #[derive(std::fmt::Debug)] pub struct CreateCluster< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::create_cluster_input::Builder, } impl<C, M, R> CreateCluster<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `CreateCluster`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::CreateClusterOutput, aws_smithy_http::result::SdkError<crate::error::CreateClusterError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::CreateClusterInputOperationOutputAlias, crate::output::CreateClusterOutput, crate::error::CreateClusterError, crate::input::CreateClusterInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the cluster. This value must be unique as it also serves as the cluster identifier.</p> pub fn cluster_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.cluster_name(inp); self } /// <p>The name of the cluster. This value must be unique as it also serves as the cluster identifier.</p> pub fn set_cluster_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_cluster_name(input); self } /// <p>The compute and memory capacity of the nodes in the cluster.</p> pub fn node_type(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.node_type(inp); self } /// <p>The compute and memory capacity of the nodes in the cluster.</p> pub fn set_node_type(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_node_type(input); self } /// <p>The name of the parameter group associated with the cluster.</p> pub fn parameter_group_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.parameter_group_name(inp); self } /// <p>The name of the parameter group associated with the cluster.</p> pub fn set_parameter_group_name( mut self, input: std::option::Option<std::string::String>, ) -> Self { 
self.inner = self.inner.set_parameter_group_name(input); self } /// <p>An optional description of the cluster.</p> pub fn description(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.description(inp); self } /// <p>An optional description of the cluster.</p> pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_description(input); self } /// <p>The number of shards the cluster will contain. The default value is 1. </p> pub fn num_shards(mut self, inp: i32) -> Self { self.inner = self.inner.num_shards(inp); self } /// <p>The number of shards the cluster will contain. The default value is 1. </p> pub fn set_num_shards(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_num_shards(input); self } /// <p>The number of replicas to apply to each shard. The default value is 1. The maximum is 5. </p> pub fn num_replicas_per_shard(mut self, inp: i32) -> Self { self.inner = self.inner.num_replicas_per_shard(inp); self } /// <p>The number of replicas to apply to each shard. The default value is 1. The maximum is 5. </p> pub fn set_num_replicas_per_shard(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_num_replicas_per_shard(input); self } /// <p>The name of the subnet group to be used for the cluster.</p> pub fn subnet_group_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.subnet_group_name(inp); self } /// <p>The name of the subnet group to be used for the cluster.</p> pub fn set_subnet_group_name( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_subnet_group_name(input); self } /// Appends an item to `SecurityGroupIds`. /// /// To override the contents of this collection use [`set_security_group_ids`](Self::set_security_group_ids). 
/// /// <p>A list of security group names to associate with this cluster.</p> pub fn security_group_ids(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.security_group_ids(inp); self } /// <p>A list of security group names to associate with this cluster.</p> pub fn set_security_group_ids( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.inner = self.inner.set_security_group_ids(input); self } /// <p>Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format <code>ddd:hh24:mi-ddd:hh24:mi</code> (24H Clock UTC). The minimum maintenance window is a 60 minute period.</p> pub fn maintenance_window(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.maintenance_window(inp); self } /// <p>Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format <code>ddd:hh24:mi-ddd:hh24:mi</code> (24H Clock UTC). 
The minimum maintenance window is a 60 minute period.</p> pub fn set_maintenance_window( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_maintenance_window(input); self } /// <p>The port number on which each of the nodes accepts connections.</p> pub fn port(mut self, inp: i32) -> Self { self.inner = self.inner.port(inp); self } /// <p>The port number on which each of the nodes accepts connections.</p> pub fn set_port(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_port(input); self } /// <p>The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent.</p> pub fn sns_topic_arn(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.sns_topic_arn(inp); self } /// <p>The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent.</p> pub fn set_sns_topic_arn( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_sns_topic_arn(input); self } /// <p>A flag to enable in-transit encryption on the cluster.</p> pub fn tls_enabled(mut self, inp: bool) -> Self { self.inner = self.inner.tls_enabled(inp); self } /// <p>A flag to enable in-transit encryption on the cluster.</p> pub fn set_tls_enabled(mut self, input: std::option::Option<bool>) -> Self { self.inner = self.inner.set_tls_enabled(input); self } /// <p>The ID of the KMS key used to encrypt the cluster.</p> pub fn kms_key_id(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.kms_key_id(inp); self } /// <p>The ID of the KMS key used to encrypt the cluster.</p> pub fn set_kms_key_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_kms_key_id(input); self } /// Appends an item to `SnapshotArns`. 
/// /// To override the contents of this collection use [`set_snapshot_arns`](Self::set_snapshot_arns). /// /// <p>A list of Amazon Resource Names (ARN) that uniquely identify the RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new cluster. The Amazon S3 object name in the ARN cannot contain any commas.</p> pub fn snapshot_arns(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.snapshot_arns(inp); self } /// <p>A list of Amazon Resource Names (ARN) that uniquely identify the RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new cluster. The Amazon S3 object name in the ARN cannot contain any commas.</p> pub fn set_snapshot_arns( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.inner = self.inner.set_snapshot_arns(input); self } /// <p>The name of a snapshot from which to restore data into the new cluster. The snapshot status changes to restoring while the new cluster is being created.</p> pub fn snapshot_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.snapshot_name(inp); self } /// <p>The name of a snapshot from which to restore data into the new cluster. The snapshot status changes to restoring while the new cluster is being created.</p> pub fn set_snapshot_name( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_snapshot_name(input); self } /// <p>The number of days for which MemoryDB retains automatic snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted.</p> pub fn snapshot_retention_limit(mut self, inp: i32) -> Self { self.inner = self.inner.snapshot_retention_limit(inp); self } /// <p>The number of days for which MemoryDB retains automatic snapshots before deleting them. 
For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted.</p> pub fn set_snapshot_retention_limit(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_snapshot_retention_limit(input); self } /// Appends an item to `Tags`. /// /// To override the contents of this collection use [`set_tags`](Self::set_tags). /// /// <p>A list of tags to be added to this resource. Tags are comma-separated key,value pairs (e.g. Key=myKey, Value=myKeyValue. You can include multiple tags as shown following: Key=myKey, Value=myKeyValue Key=mySecondKey, Value=mySecondKeyValue.</p> pub fn tags(mut self, inp: impl Into<crate::model::Tag>) -> Self { self.inner = self.inner.tags(inp); self } /// <p>A list of tags to be added to this resource. Tags are comma-separated key,value pairs (e.g. Key=myKey, Value=myKeyValue. You can include multiple tags as shown following: Key=myKey, Value=myKeyValue Key=mySecondKey, Value=mySecondKeyValue.</p> pub fn set_tags( mut self, input: std::option::Option<std::vec::Vec<crate::model::Tag>>, ) -> Self { self.inner = self.inner.set_tags(input); self } /// <p>The daily time range (in UTC) during which MemoryDB begins taking a daily snapshot of your shard.</p> /// /// <p> Example: 05:00-09:00</p> /// /// <p> If you do not specify this parameter, MemoryDB automatically chooses an appropriate time range.</p> pub fn snapshot_window(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.snapshot_window(inp); self } /// <p>The daily time range (in UTC) during which MemoryDB begins taking a daily snapshot of your shard.</p> /// /// <p> Example: 05:00-09:00</p> /// /// <p> If you do not specify this parameter, MemoryDB automatically chooses an appropriate time range.</p> pub fn set_snapshot_window( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_snapshot_window(input); self } /// <p>The name of 
the Access Control List to associate with the cluster.</p> pub fn acl_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.acl_name(inp); self } /// <p>The name of the Access Control List to associate with the cluster.</p> pub fn set_acl_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_acl_name(input); self } /// <p>The version number of the Redis engine to be used for the cluster.</p> pub fn engine_version(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.engine_version(inp); self } /// <p>The version number of the Redis engine to be used for the cluster.</p> pub fn set_engine_version( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_engine_version(input); self } /// <p>When set to true, the cluster will automatically receive minor engine version upgrades after launch.</p> pub fn auto_minor_version_upgrade(mut self, inp: bool) -> Self { self.inner = self.inner.auto_minor_version_upgrade(inp); self } /// <p>When set to true, the cluster will automatically receive minor engine version upgrades after launch.</p> pub fn set_auto_minor_version_upgrade(mut self, input: std::option::Option<bool>) -> Self { self.inner = self.inner.set_auto_minor_version_upgrade(input); self } } /// Fluent builder constructing a request to `CreateParameterGroup`. /// /// <p>Creates a new MemoryDB parameter group. A parameter group is a collection of parameters and their values that are applied to all of the nodes in any cluster. For /// more information, see <a href="https://docs.aws.amazon.com/MemoryDB/latest/devguide/parametergroups.html">Configuring engine parameters using parameter groups</a>. 
/// /// </p> #[derive(std::fmt::Debug)] pub struct CreateParameterGroup< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::create_parameter_group_input::Builder, } impl<C, M, R> CreateParameterGroup<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `CreateParameterGroup`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::CreateParameterGroupOutput, aws_smithy_http::result::SdkError<crate::error::CreateParameterGroupError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::CreateParameterGroupInputOperationOutputAlias, crate::output::CreateParameterGroupOutput, crate::error::CreateParameterGroupError, crate::input::CreateParameterGroupInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the parameter group.</p> pub fn parameter_group_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.parameter_group_name(inp); self } /// <p>The name of the parameter group.</p> pub fn set_parameter_group_name( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_parameter_group_name(input); self } /// <p>The name of the parameter group family that the parameter group can be used with.</p> pub fn family(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.family(inp); self } /// <p>The name of the parameter group family that the parameter group can be used with.</p> pub fn set_family(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_family(input); self } /// <p>An optional description of the parameter group.</p> pub fn description(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.description(inp); self } /// <p>An optional description of the parameter group.</p> pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_description(input); self } /// Appends an item to 
`Tags`. /// /// To override the contents of this collection use [`set_tags`](Self::set_tags). /// /// <p>A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted.</p> pub fn tags(mut self, inp: impl Into<crate::model::Tag>) -> Self { self.inner = self.inner.tags(inp); self } /// <p>A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted.</p> pub fn set_tags( mut self, input: std::option::Option<std::vec::Vec<crate::model::Tag>>, ) -> Self { self.inner = self.inner.set_tags(input); self } } /// Fluent builder constructing a request to `CreateSnapshot`. /// /// <p>Creates a copy of an entire cluster at a specific moment in time.</p> #[derive(std::fmt::Debug)] pub struct CreateSnapshot< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::create_snapshot_input::Builder, } impl<C, M, R> CreateSnapshot<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `CreateSnapshot`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::CreateSnapshotOutput, aws_smithy_http::result::SdkError<crate::error::CreateSnapshotError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::CreateSnapshotInputOperationOutputAlias, crate::output::CreateSnapshotOutput, crate::error::CreateSnapshotError, crate::input::CreateSnapshotInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The snapshot is created from this cluster.</p> pub fn cluster_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.cluster_name(inp); self } /// <p>The snapshot is created from this cluster.</p> pub fn set_cluster_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_cluster_name(input); self } /// <p>A name for the snapshot being created.</p> pub fn snapshot_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.snapshot_name(inp); self } /// <p>A name for the snapshot being created.</p> pub fn set_snapshot_name( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_snapshot_name(input); self } /// <p>The ID of the KMS key used to encrypt the snapshot.</p> pub fn kms_key_id(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.kms_key_id(inp); self } /// <p>The ID of the KMS key used to encrypt the snapshot.</p> pub fn set_kms_key_id(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_kms_key_id(input); self } /// Appends an item to `Tags`. /// /// To override the contents of this collection use [`set_tags`](Self::set_tags). 
/// /// <p>A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted.</p> pub fn tags(mut self, inp: impl Into<crate::model::Tag>) -> Self { self.inner = self.inner.tags(inp); self } /// <p>A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted.</p> pub fn set_tags( mut self, input: std::option::Option<std::vec::Vec<crate::model::Tag>>, ) -> Self { self.inner = self.inner.set_tags(input); self } } /// Fluent builder constructing a request to `CreateSubnetGroup`. /// /// <p>Creates a subnet group. A subnet group is a collection of subnets (typically private) that you can designate for your clusters running in an Amazon Virtual Private Cloud (VPC) environment. /// /// When you create a cluster in an Amazon VPC, you must specify a subnet group. MemoryDB uses that subnet group to choose a subnet and IP addresses within that subnet to associate with your nodes. /// For more information, see <a href="https://docs.aws.amazon.com/MemoryDB/latest/devguide/subnetgroups.html">Subnets and subnet groups</a>.</p> #[derive(std::fmt::Debug)] pub struct CreateSubnetGroup< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::create_subnet_group_input::Builder, } impl<C, M, R> CreateSubnetGroup<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `CreateSubnetGroup`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. 
/// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::CreateSubnetGroupOutput, aws_smithy_http::result::SdkError<crate::error::CreateSubnetGroupError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::CreateSubnetGroupInputOperationOutputAlias, crate::output::CreateSubnetGroupOutput, crate::error::CreateSubnetGroupError, crate::input::CreateSubnetGroupInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the subnet group.</p> pub fn subnet_group_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.subnet_group_name(inp); self } /// <p>The name of the subnet group.</p> pub fn set_subnet_group_name( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_subnet_group_name(input); self } /// <p>A description for the subnet group.</p> pub fn description(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.description(inp); self } /// <p>A description for the subnet group.</p> pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_description(input); self } /// Appends an item to `SubnetIds`. /// /// To override the contents of this collection use [`set_subnet_ids`](Self::set_subnet_ids). 
/// /// <p>A list of VPC subnet IDs for the subnet group.</p> pub fn subnet_ids(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.subnet_ids(inp); self } /// <p>A list of VPC subnet IDs for the subnet group.</p> pub fn set_subnet_ids( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.inner = self.inner.set_subnet_ids(input); self } /// Appends an item to `Tags`. /// /// To override the contents of this collection use [`set_tags`](Self::set_tags). /// /// <p>A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted.</p> pub fn tags(mut self, inp: impl Into<crate::model::Tag>) -> Self { self.inner = self.inner.tags(inp); self } /// <p>A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted.</p> pub fn set_tags( mut self, input: std::option::Option<std::vec::Vec<crate::model::Tag>>, ) -> Self { self.inner = self.inner.set_tags(input); self } } /// Fluent builder constructing a request to `CreateUser`. /// /// <p>Creates a MemoryDB user. For more information, see <a href="https://docs.aws.amazon.com/MemoryDB/latest/devguide/clusters.acls.html">Authenticating users with Access Contol Lists (ACLs)</a>.</p> #[derive(std::fmt::Debug)] pub struct CreateUser< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::create_user_input::Builder, } impl<C, M, R> CreateUser<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `CreateUser`. 
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::CreateUserOutput, aws_smithy_http::result::SdkError<crate::error::CreateUserError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::CreateUserInputOperationOutputAlias, crate::output::CreateUserOutput, crate::error::CreateUserError, crate::input::CreateUserInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the user. This value must be unique as it also serves as the user identifier.</p> pub fn user_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.user_name(inp); self } /// <p>The name of the user. 
This value must be unique as it also serves as the user identifier.</p> pub fn set_user_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_user_name(input); self } /// <p>Denotes the user's authentication properties, such as whether it requires a password to authenticate.</p> pub fn authentication_mode(mut self, inp: crate::model::AuthenticationMode) -> Self { self.inner = self.inner.authentication_mode(inp); self } /// <p>Denotes the user's authentication properties, such as whether it requires a password to authenticate.</p> pub fn set_authentication_mode( mut self, input: std::option::Option<crate::model::AuthenticationMode>, ) -> Self { self.inner = self.inner.set_authentication_mode(input); self } /// <p>Access permissions string used for this user.</p> pub fn access_string(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.access_string(inp); self } /// <p>Access permissions string used for this user.</p> pub fn set_access_string( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_access_string(input); self } /// Appends an item to `Tags`. /// /// To override the contents of this collection use [`set_tags`](Self::set_tags). /// /// <p>A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted.</p> pub fn tags(mut self, inp: impl Into<crate::model::Tag>) -> Self { self.inner = self.inner.tags(inp); self } /// <p>A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted.</p> pub fn set_tags( mut self, input: std::option::Option<std::vec::Vec<crate::model::Tag>>, ) -> Self { self.inner = self.inner.set_tags(input); self } } /// Fluent builder constructing a request to `DeleteACL`. /// /// <p>Deletes an Access Control List. 
The ACL must first be disassociated from the cluster before it can be deleted. For more information, see <a href="https://docs.aws.amazon.com/MemoryDB/latest/devguide/clusters.acls.html">Authenticating users with Access Contol Lists (ACLs)</a>.</p> #[derive(std::fmt::Debug)] pub struct DeleteACL< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::delete_acl_input::Builder, } impl<C, M, R> DeleteACL<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DeleteACL`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::DeleteAclOutput, aws_smithy_http::result::SdkError<crate::error::DeleteACLError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DeleteAclInputOperationOutputAlias, crate::output::DeleteAclOutput, crate::error::DeleteACLError, crate::input::DeleteAclInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the Access Control List to delete</p> pub fn acl_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.acl_name(inp); self } /// <p>The name of the Access Control List to delete</p> pub fn set_acl_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_acl_name(input); self } } /// Fluent builder constructing a request to `DeleteCluster`. /// /// <p>Deletes a cluster. It also deletes all associated nodes and node endpoints</p> #[derive(std::fmt::Debug)] pub struct DeleteCluster< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::delete_cluster_input::Builder, } impl<C, M, R> DeleteCluster<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DeleteCluster`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. 
/// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::DeleteClusterOutput, aws_smithy_http::result::SdkError<crate::error::DeleteClusterError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DeleteClusterInputOperationOutputAlias, crate::output::DeleteClusterOutput, crate::error::DeleteClusterError, crate::input::DeleteClusterInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the cluster to be deleted</p> pub fn cluster_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.cluster_name(inp); self } /// <p>The name of the cluster to be deleted</p> pub fn set_cluster_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_cluster_name(input); self } /// <p>The user-supplied name of a final cluster snapshot. This is the unique name that identifies the snapshot. MemoryDB creates the snapshot, and then deletes the cluster immediately afterward.</p> pub fn final_snapshot_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.final_snapshot_name(inp); self } /// <p>The user-supplied name of a final cluster snapshot. This is the unique name that identifies the snapshot. 
MemoryDB creates the snapshot, and then deletes the cluster immediately afterward.</p> pub fn set_final_snapshot_name( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_final_snapshot_name(input); self } } /// Fluent builder constructing a request to `DeleteParameterGroup`. /// /// <p>Deletes the specified parameter group. You cannot delete a parameter group if it is associated with any clusters. /// You cannot delete the default parameter groups in your account.</p> #[derive(std::fmt::Debug)] pub struct DeleteParameterGroup< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::delete_parameter_group_input::Builder, } impl<C, M, R> DeleteParameterGroup<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DeleteParameterGroup`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::DeleteParameterGroupOutput, aws_smithy_http::result::SdkError<crate::error::DeleteParameterGroupError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DeleteParameterGroupInputOperationOutputAlias, crate::output::DeleteParameterGroupOutput, crate::error::DeleteParameterGroupError, crate::input::DeleteParameterGroupInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the parameter group to delete.</p> pub fn parameter_group_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.parameter_group_name(inp); self } /// <p>The name of the parameter group to delete.</p> pub fn set_parameter_group_name( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_parameter_group_name(input); self } } /// Fluent builder constructing a request to `DeleteSnapshot`. /// /// <p>Deletes an existing snapshot. When you receive a successful response from this operation, MemoryDB immediately begins deleting the snapshot; you cannot cancel or revert this operation.</p> #[derive(std::fmt::Debug)] pub struct DeleteSnapshot< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::delete_snapshot_input::Builder, } impl<C, M, R> DeleteSnapshot<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DeleteSnapshot`. 
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::DeleteSnapshotOutput, aws_smithy_http::result::SdkError<crate::error::DeleteSnapshotError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DeleteSnapshotInputOperationOutputAlias, crate::output::DeleteSnapshotOutput, crate::error::DeleteSnapshotError, crate::input::DeleteSnapshotInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the snapshot to delete</p> pub fn snapshot_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.snapshot_name(inp); self } /// <p>The name of the snapshot to delete</p> pub fn set_snapshot_name( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_snapshot_name(input); self } } /// Fluent builder constructing a request to `DeleteSubnetGroup`. /// /// <p>Deletes a subnet group. 
You cannot delete a default subnet group or one that is associated with any clusters.</p> #[derive(std::fmt::Debug)] pub struct DeleteSubnetGroup< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::delete_subnet_group_input::Builder, } impl<C, M, R> DeleteSubnetGroup<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DeleteSubnetGroup`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::DeleteSubnetGroupOutput, aws_smithy_http::result::SdkError<crate::error::DeleteSubnetGroupError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DeleteSubnetGroupInputOperationOutputAlias, crate::output::DeleteSubnetGroupOutput, crate::error::DeleteSubnetGroupError, crate::input::DeleteSubnetGroupInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the subnet group to delete</p> pub fn subnet_group_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.subnet_group_name(inp); self } /// <p>The name of the subnet group to delete</p> pub fn set_subnet_group_name( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_subnet_group_name(input); self } } /// Fluent builder constructing a request to `DeleteUser`. /// /// <p>Deletes a user. The user will be removed from all ACLs and in turn removed from all clusters.</p> #[derive(std::fmt::Debug)] pub struct DeleteUser< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::delete_user_input::Builder, } impl<C, M, R> DeleteUser<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DeleteUser`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. 
/// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::DeleteUserOutput, aws_smithy_http::result::SdkError<crate::error::DeleteUserError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DeleteUserInputOperationOutputAlias, crate::output::DeleteUserOutput, crate::error::DeleteUserError, crate::input::DeleteUserInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the user to delete</p> pub fn user_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.user_name(inp); self } /// <p>The name of the user to delete</p> pub fn set_user_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_user_name(input); self } } /// Fluent builder constructing a request to `DescribeACLs`. /// /// <p>Returns a list of ACLs</p> #[derive(std::fmt::Debug)] pub struct DescribeACLs< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::describe_ac_ls_input::Builder, } impl<C, M, R> DescribeACLs<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DescribeACLs`. 
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::DescribeAcLsOutput, aws_smithy_http::result::SdkError<crate::error::DescribeACLsError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DescribeAcLsInputOperationOutputAlias, crate::output::DescribeAcLsOutput, crate::error::DescribeACLsError, crate::input::DescribeAcLsInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the ACL</p> pub fn acl_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.acl_name(inp); self } /// <p>The name of the ACL</p> pub fn set_acl_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_acl_name(input); self } /// <p>The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.</p> pub fn max_results(mut self, inp: i32) -> Self { self.inner = self.inner.max_results(inp); self } /// <p>The maximum number of records to include in the response. 
If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.</p> pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_max_results(input); self } /// <p>An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. </p> pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_token(inp); self } /// <p>An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. </p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_token(input); self } } /// Fluent builder constructing a request to `DescribeClusters`. /// /// <p>Returns information about all provisioned clusters if no cluster identifier is specified, or about a specific cluster if a cluster name is supplied.</p> #[derive(std::fmt::Debug)] pub struct DescribeClusters< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::describe_clusters_input::Builder, } impl<C, M, R> DescribeClusters<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DescribeClusters`. 
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::DescribeClustersOutput, aws_smithy_http::result::SdkError<crate::error::DescribeClustersError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DescribeClustersInputOperationOutputAlias, crate::output::DescribeClustersOutput, crate::error::DescribeClustersError, crate::input::DescribeClustersInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the cluster</p> pub fn cluster_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.cluster_name(inp); self } /// <p>The name of the cluster</p> pub fn set_cluster_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_cluster_name(input); self } /// <p>The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.</p> pub fn max_results(mut self, inp: i32) -> Self { self.inner = self.inner.max_results(inp); self } /// <p>The maximum number of records to include in the response. 
If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.</p> pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_max_results(input); self } /// <p>An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. </p> pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_token(inp); self } /// <p>An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. </p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_token(input); self } /// <p>An optional flag that can be included in the request to retrieve information about the individual shard(s).</p> pub fn show_shard_details(mut self, inp: bool) -> Self { self.inner = self.inner.show_shard_details(inp); self } /// <p>An optional flag that can be included in the request to retrieve information about the individual shard(s).</p> pub fn set_show_shard_details(mut self, input: std::option::Option<bool>) -> Self { self.inner = self.inner.set_show_shard_details(input); self } } /// Fluent builder constructing a request to `DescribeEngineVersions`. 
/// /// <p>Returns a list of the available Redis engine versions.</p> #[derive(std::fmt::Debug)] pub struct DescribeEngineVersions< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::describe_engine_versions_input::Builder, } impl<C, M, R> DescribeEngineVersions<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DescribeEngineVersions`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::DescribeEngineVersionsOutput, aws_smithy_http::result::SdkError<crate::error::DescribeEngineVersionsError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DescribeEngineVersionsInputOperationOutputAlias, crate::output::DescribeEngineVersionsOutput, crate::error::DescribeEngineVersionsError, crate::input::DescribeEngineVersionsInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The Redis engine version</p> pub fn engine_version(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.engine_version(inp); self } /// <p>The Redis engine version</p> pub fn set_engine_version( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_engine_version(input); self } /// <p>The name of a specific parameter group family to return details for.</p> pub fn parameter_group_family(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.parameter_group_family(inp); self } /// <p>The name of a specific parameter group family to return details for.</p> pub fn set_parameter_group_family( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_parameter_group_family(input); self } /// <p>The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.</p> pub fn max_results(mut self, inp: i32) -> Self { self.inner = self.inner.max_results(inp); self } /// <p>The maximum number of records to include in the response. 
If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.</p> pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_max_results(input); self } /// <p>An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. </p> pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_token(inp); self } /// <p>An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. </p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_token(input); self } /// <p>If true, specifies that only the default version of the specified engine or engine and major version combination is to be returned.</p> pub fn default_only(mut self, inp: bool) -> Self { self.inner = self.inner.default_only(inp); self } /// <p>If true, specifies that only the default version of the specified engine or engine and major version combination is to be returned.</p> pub fn set_default_only(mut self, input: std::option::Option<bool>) -> Self { self.inner = self.inner.set_default_only(input); self } } /// Fluent builder constructing a request to `DescribeEvents`. /// /// <p>Returns events related to clusters, security groups, and parameter groups. 
You can obtain events specific to a particular cluster, security group, or parameter group by providing the name as a parameter. /// /// By default, only the events occurring within the last hour are returned; however, you can retrieve up to 14 days' worth of events if necessary.</p> #[derive(std::fmt::Debug)] pub struct DescribeEvents< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::describe_events_input::Builder, } impl<C, M, R> DescribeEvents<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DescribeEvents`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::DescribeEventsOutput, aws_smithy_http::result::SdkError<crate::error::DescribeEventsError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DescribeEventsInputOperationOutputAlias, crate::output::DescribeEventsOutput, crate::error::DescribeEventsError, crate::input::DescribeEventsInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The identifier of the event source for which events are returned. If not specified, all sources are included in the response.</p> pub fn source_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.source_name(inp); self } /// <p>The identifier of the event source for which events are returned. If not specified, all sources are included in the response.</p> pub fn set_source_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_source_name(input); self } /// <p>The event source to retrieve events for. If no value is specified, all events are returned.</p> pub fn source_type(mut self, inp: crate::model::SourceType) -> Self { self.inner = self.inner.source_type(inp); self } /// <p>The event source to retrieve events for. If no value is specified, all events are returned.</p> pub fn set_source_type( mut self, input: std::option::Option<crate::model::SourceType>, ) -> Self { self.inner = self.inner.set_source_type(input); self } /// <p>The beginning of the time interval to retrieve events for, specified in ISO 8601 format. 
/// /// Example: 2017-03-30T07:03:49.555Z</p> pub fn start_time(mut self, inp: aws_smithy_types::DateTime) -> Self { self.inner = self.inner.start_time(inp); self } /// <p>The beginning of the time interval to retrieve events for, specified in ISO 8601 format. /// /// Example: 2017-03-30T07:03:49.555Z</p> pub fn set_start_time( mut self, input: std::option::Option<aws_smithy_types::DateTime>, ) -> Self { self.inner = self.inner.set_start_time(input); self } /// <p>The end of the time interval for which to retrieve events, specified in ISO 8601 format. /// /// Example: 2017-03-30T07:03:49.555Z</p> pub fn end_time(mut self, inp: aws_smithy_types::DateTime) -> Self { self.inner = self.inner.end_time(inp); self } /// <p>The end of the time interval for which to retrieve events, specified in ISO 8601 format. /// /// Example: 2017-03-30T07:03:49.555Z</p> pub fn set_end_time( mut self, input: std::option::Option<aws_smithy_types::DateTime>, ) -> Self { self.inner = self.inner.set_end_time(input); self } /// <p>The number of minutes worth of events to retrieve.</p> pub fn duration(mut self, inp: i32) -> Self { self.inner = self.inner.duration(inp); self } /// <p>The number of minutes worth of events to retrieve.</p> pub fn set_duration(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_duration(input); self } /// <p>The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.</p> pub fn max_results(mut self, inp: i32) -> Self { self.inner = self.inner.max_results(inp); self } /// <p>The maximum number of records to include in the response. 
If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.</p> pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_max_results(input); self } /// <p>An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. </p> pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_token(inp); self } /// <p>An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. </p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_token(input); self } } /// Fluent builder constructing a request to `DescribeParameterGroups`. /// /// <p>Returns a list of parameter group descriptions. 
If a parameter group name is specified, the list contains only the descriptions for that group.</p> #[derive(std::fmt::Debug)] pub struct DescribeParameterGroups< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::describe_parameter_groups_input::Builder, } impl<C, M, R> DescribeParameterGroups<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DescribeParameterGroups`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::DescribeParameterGroupsOutput, aws_smithy_http::result::SdkError<crate::error::DescribeParameterGroupsError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DescribeParameterGroupsInputOperationOutputAlias, crate::output::DescribeParameterGroupsOutput, crate::error::DescribeParameterGroupsError, crate::input::DescribeParameterGroupsInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of a specific parameter group to return details for.</p> pub fn parameter_group_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.parameter_group_name(inp); self } /// <p>The name of a specific parameter group to return details for.</p> pub fn set_parameter_group_name( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_parameter_group_name(input); self } /// <p>The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.</p> pub fn max_results(mut self, inp: i32) -> Self { self.inner = self.inner.max_results(inp); self } /// <p>The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.</p> pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_max_results(input); self } /// <p>An optional argument to pass in case the total number of records exceeds the value of MaxResults. 
If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. </p> pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_token(inp); self } /// <p>An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. </p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_token(input); self } } /// Fluent builder constructing a request to `DescribeParameters`. /// /// <p>Returns the detailed parameter list for a particular parameter group.</p> #[derive(std::fmt::Debug)] pub struct DescribeParameters< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::describe_parameters_input::Builder, } impl<C, M, R> DescribeParameters<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DescribeParameters`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. 
Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DescribeParametersOutput,
        aws_smithy_http::result::SdkError<crate::error::DescribeParametersError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DescribeParametersInputOperationOutputAlias,
            crate::output::DescribeParametersOutput,
            crate::error::DescribeParametersError,
            crate::input::DescribeParametersInputOperationRetryAlias,
        >,
    {
        let input = self.inner.build().map_err(|err| {
            aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
        })?;
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>The name of a specific parameter group to return details for.</p>
    pub fn parameter_group_name(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.parameter_group_name(inp);
        self
    }
    /// <p>The name of a specific parameter group to return details for.</p>
    pub fn set_parameter_group_name(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_parameter_group_name(input);
        self
    }
    /// <p>The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.</p>
    pub fn max_results(mut self, inp: i32) -> Self {
        self.inner = self.inner.max_results(inp);
        self
    }
    /// <p>The maximum number of records to include in the response.
If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.</p> pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_max_results(input); self } /// <p>An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. </p> pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_token(inp); self } /// <p>An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. </p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_token(input); self } } /// Fluent builder constructing a request to `DescribeServiceUpdates`. /// /// <p>Returns details of the service updates</p> #[derive(std::fmt::Debug)] pub struct DescribeServiceUpdates< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::describe_service_updates_input::Builder, } impl<C, M, R> DescribeServiceUpdates<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DescribeServiceUpdates`. 
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::DescribeServiceUpdatesOutput, aws_smithy_http::result::SdkError<crate::error::DescribeServiceUpdatesError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DescribeServiceUpdatesInputOperationOutputAlias, crate::output::DescribeServiceUpdatesOutput, crate::error::DescribeServiceUpdatesError, crate::input::DescribeServiceUpdatesInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The unique ID of the service update to describe.</p> pub fn service_update_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.service_update_name(inp); self } /// <p>The unique ID of the service update to describe.</p> pub fn set_service_update_name( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_service_update_name(input); self } /// Appends an item to `ClusterNames`. /// /// To override the contents of this collection use [`set_cluster_names`](Self::set_cluster_names). 
/// /// <p>The list of cluster names to identify service updates to apply</p> pub fn cluster_names(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.cluster_names(inp); self } /// <p>The list of cluster names to identify service updates to apply</p> pub fn set_cluster_names( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.inner = self.inner.set_cluster_names(input); self } /// Appends an item to `Status`. /// /// To override the contents of this collection use [`set_status`](Self::set_status). /// /// <p>The status(es) of the service updates to filter on</p> pub fn status(mut self, inp: impl Into<crate::model::ServiceUpdateStatus>) -> Self { self.inner = self.inner.status(inp); self } /// <p>The status(es) of the service updates to filter on</p> pub fn set_status( mut self, input: std::option::Option<std::vec::Vec<crate::model::ServiceUpdateStatus>>, ) -> Self { self.inner = self.inner.set_status(input); self } /// <p>The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.</p> pub fn max_results(mut self, inp: i32) -> Self { self.inner = self.inner.max_results(inp); self } /// <p>The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.</p> pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_max_results(input); self } /// <p>An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. 
</p> pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_token(inp); self } /// <p>An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. </p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_token(input); self } } /// Fluent builder constructing a request to `DescribeSnapshots`. /// /// <p>Returns information about cluster snapshots. By default, DescribeSnapshots lists all of your snapshots; it can optionally describe a single snapshot, /// or just the snapshots associated with a particular cluster.</p> #[derive(std::fmt::Debug)] pub struct DescribeSnapshots< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::describe_snapshots_input::Builder, } impl<C, M, R> DescribeSnapshots<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DescribeSnapshots`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::DescribeSnapshotsOutput, aws_smithy_http::result::SdkError<crate::error::DescribeSnapshotsError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DescribeSnapshotsInputOperationOutputAlias, crate::output::DescribeSnapshotsOutput, crate::error::DescribeSnapshotsError, crate::input::DescribeSnapshotsInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>A user-supplied cluster identifier. If this parameter is specified, only snapshots associated with that specific cluster are described.</p> pub fn cluster_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.cluster_name(inp); self } /// <p>A user-supplied cluster identifier. If this parameter is specified, only snapshots associated with that specific cluster are described.</p> pub fn set_cluster_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_cluster_name(input); self } /// <p>A user-supplied name of the snapshot. If this parameter is specified, only this named snapshot is described.</p> pub fn snapshot_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.snapshot_name(inp); self } /// <p>A user-supplied name of the snapshot. If this parameter is specified, only this named snapshot is described.</p> pub fn set_snapshot_name( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_snapshot_name(input); self } /// <p>If set to system, the output shows snapshots that were automatically created by MemoryDB. If set to user the output shows snapshots that were manually created. 
If omitted, the output shows both automatically and manually created snapshots.</p> pub fn source(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.source(inp); self } /// <p>If set to system, the output shows snapshots that were automatically created by MemoryDB. If set to user the output shows snapshots that were manually created. If omitted, the output shows both automatically and manually created snapshots.</p> pub fn set_source(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_source(input); self } /// <p>An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. </p> pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_token(inp); self } /// <p>An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. </p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_token(input); self } /// <p>The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.</p> pub fn max_results(mut self, inp: i32) -> Self { self.inner = self.inner.max_results(inp); self } /// <p>The maximum number of records to include in the response. 
If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.</p> pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_max_results(input); self } /// <p>A Boolean value which if true, the shard configuration is included in the snapshot description.</p> pub fn show_detail(mut self, inp: bool) -> Self { self.inner = self.inner.show_detail(inp); self } /// <p>A Boolean value which if true, the shard configuration is included in the snapshot description.</p> pub fn set_show_detail(mut self, input: std::option::Option<bool>) -> Self { self.inner = self.inner.set_show_detail(input); self } } /// Fluent builder constructing a request to `DescribeSubnetGroups`. /// /// <p>Returns a list of subnet group descriptions. If a subnet group name is specified, the list contains only the description of that group.</p> #[derive(std::fmt::Debug)] pub struct DescribeSubnetGroups< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::describe_subnet_groups_input::Builder, } impl<C, M, R> DescribeSubnetGroups<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DescribeSubnetGroups`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::DescribeSubnetGroupsOutput, aws_smithy_http::result::SdkError<crate::error::DescribeSubnetGroupsError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::DescribeSubnetGroupsInputOperationOutputAlias, crate::output::DescribeSubnetGroupsOutput, crate::error::DescribeSubnetGroupsError, crate::input::DescribeSubnetGroupsInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the subnet group to return details for.</p> pub fn subnet_group_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.subnet_group_name(inp); self } /// <p>The name of the subnet group to return details for.</p> pub fn set_subnet_group_name( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_subnet_group_name(input); self } /// <p>The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.</p> pub fn max_results(mut self, inp: i32) -> Self { self.inner = self.inner.max_results(inp); self } /// <p>The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.</p> pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_max_results(input); self } /// <p>An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. 
The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. </p> pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.next_token(inp); self } /// <p>An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. </p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_next_token(input); self } } /// Fluent builder constructing a request to `DescribeUsers`. /// /// <p>Returns a list of users.</p> #[derive(std::fmt::Debug)] pub struct DescribeUsers< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::describe_users_input::Builder, } impl<C, M, R> DescribeUsers<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `DescribeUsers`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
    // NOTE(review): smithy-rs generated code — edits here are documentation-only so
    // the file stays byte-compatible with the code generator's output.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::DescribeUsersOutput,
        aws_smithy_http::result::SdkError<crate::error::DescribeUsersError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::DescribeUsersInputOperationOutputAlias,
            crate::output::DescribeUsersOutput,
            crate::error::DescribeUsersError,
            crate::input::DescribeUsersInputOperationRetryAlias,
        >,
    {
        // Builder validation errors surface as `ConstructionFailure` before any
        // request is dispatched on the wire.
        let input = self.inner.build().map_err(|err| {
            aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
        })?;
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>The name of the user</p>
    pub fn user_name(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.user_name(inp);
        self
    }
    /// <p>The name of the user</p>
    pub fn set_user_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_user_name(input);
        self
    }
    /// Appends an item to `Filters`.
    ///
    /// To override the contents of this collection use [`set_filters`](Self::set_filters).
    ///
    /// <p>Filter to determine the list of users to return.</p>
    pub fn filters(mut self, inp: impl Into<crate::model::Filter>) -> Self {
        self.inner = self.inner.filters(inp);
        self
    }
    /// <p>Filter to determine the list of users to return.</p>
    pub fn set_filters(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::Filter>>,
    ) -> Self {
        self.inner = self.inner.set_filters(input);
        self
    }
    /// <p>The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.</p>
    pub fn max_results(mut self, inp: i32) -> Self {
        self.inner = self.inner.max_results(inp);
        self
    }
    /// <p>The maximum number of records to include in the response. If more records exist than the specified MaxResults value, a token is included in the response so that the remaining results can be retrieved.</p>
    pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
        self.inner = self.inner.set_max_results(input);
        self
    }
    /// <p>An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. </p>
    pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.next_token(inp);
        self
    }
    /// <p>An optional argument to pass in case the total number of records exceeds the value of MaxResults. If nextToken is returned, there are more results available. The value of nextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. </p>
    pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_next_token(input);
        self
    }
}
/// Fluent builder constructing a request to `FailoverShard`.
///
/// <p>Used to failover a shard</p>
#[derive(std::fmt::Debug)]
pub struct FailoverShard<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::failover_shard_input::Builder,
}
impl<C, M, R> FailoverShard<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `FailoverShard`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. pub async fn send( self, ) -> std::result::Result< crate::output::FailoverShardOutput, aws_smithy_http::result::SdkError<crate::error::FailoverShardError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::FailoverShardInputOperationOutputAlias, crate::output::FailoverShardOutput, crate::error::FailoverShardError, crate::input::FailoverShardInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The cluster being failed over</p> pub fn cluster_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.cluster_name(inp); self } /// <p>The cluster being failed over</p> pub fn set_cluster_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_cluster_name(input); self } /// <p>The name of the shard</p> pub fn shard_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.shard_name(inp); self } /// <p>The name of the shard</p> pub fn set_shard_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_shard_name(input); self } } /// Fluent builder constructing a request to `ListAllowedNodeTypeUpdates`. 
///
/// <p>Lists all available node types that you can scale to from your cluster's current node type.
///
/// When you use the UpdateCluster operation to scale your cluster, the value of the NodeType parameter must be one of the node types returned by this operation.</p>
#[derive(std::fmt::Debug)]
pub struct ListAllowedNodeTypeUpdates<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::list_allowed_node_type_updates_input::Builder,
}
impl<C, M, R> ListAllowedNodeTypeUpdates<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `ListAllowedNodeTypeUpdates`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        // The shared `Handle` carries the client configuration and HTTP connector.
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListAllowedNodeTypeUpdatesOutput,
        aws_smithy_http::result::SdkError<crate::error::ListAllowedNodeTypeUpdatesError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::ListAllowedNodeTypeUpdatesInputOperationOutputAlias,
            crate::output::ListAllowedNodeTypeUpdatesOutput,
            crate::error::ListAllowedNodeTypeUpdatesError,
            crate::input::ListAllowedNodeTypeUpdatesInputOperationRetryAlias,
        >,
    {
        let input = self.inner.build().map_err(|err| {
            aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
        })?;
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>The name of the cluster you want to scale. MemoryDB uses the cluster name to identify the current node type being used by this cluster, and from that to create a list of node types
    /// you can scale up to.</p>
    pub fn cluster_name(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.cluster_name(inp);
        self
    }
    /// <p>The name of the cluster you want to scale. MemoryDB uses the cluster name to identify the current node type being used by this cluster, and from that to create a list of node types
    /// you can scale up to.</p>
    pub fn set_cluster_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_cluster_name(input);
        self
    }
}
/// Fluent builder constructing a request to `ListTags`.
///
/// <p>Lists all tags currently on a named resource.
///
/// A tag is a key-value pair where the key and value are case-sensitive. You can use tags to categorize and track your MemoryDB resources.
/// For more information, see <a href="https://docs.aws.amazon.com/MemoryDB/latest/devguide/Tagging-Resources.html">Tagging your MemoryDB resources</a>
/// </p>
#[derive(std::fmt::Debug)]
pub struct ListTags<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::list_tags_input::Builder,
}
impl<C, M, R> ListTags<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `ListTags`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListTagsOutput,
        aws_smithy_http::result::SdkError<crate::error::ListTagsError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::ListTagsInputOperationOutputAlias,
            crate::output::ListTagsOutput,
            crate::error::ListTagsError,
            crate::input::ListTagsInputOperationRetryAlias,
        >,
    {
        // Same construction/dispatch pipeline as every generated fluent builder.
        let input = self.inner.build().map_err(|err| {
            aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
        })?;
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>The Amazon Resource Name (ARN) of the resource for which you want the list of tags</p>
    pub fn resource_arn(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.resource_arn(inp);
        self
    }
    /// <p>The Amazon Resource Name (ARN) of the resource for which you want the list of tags</p>
    pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_resource_arn(input);
        self
    }
}
/// Fluent builder constructing a request to `ResetParameterGroup`.
///
/// <p>Modifies the parameters of a parameter group to the engine or system default value. You can reset specific parameters by submitting a list of parameter names. To reset the entire parameter group, specify the AllParameters and ParameterGroupName parameters.</p>
#[derive(std::fmt::Debug)]
pub struct ResetParameterGroup<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::reset_parameter_group_input::Builder,
}
impl<C, M, R> ResetParameterGroup<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `ResetParameterGroup`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ResetParameterGroupOutput,
        aws_smithy_http::result::SdkError<crate::error::ResetParameterGroupError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::ResetParameterGroupInputOperationOutputAlias,
            crate::output::ResetParameterGroupOutput,
            crate::error::ResetParameterGroupError,
            crate::input::ResetParameterGroupInputOperationRetryAlias,
        >,
    {
        let input = self.inner.build().map_err(|err| {
            aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
        })?;
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>The name of the parameter group to reset.</p>
    pub fn parameter_group_name(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.parameter_group_name(inp);
        self
    }
    /// <p>The name of the parameter group to reset.</p>
    pub fn set_parameter_group_name(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_parameter_group_name(input);
        self
    }
    /// <p>If true, all parameters in the parameter group are reset to their default values. If false, only the parameters listed by ParameterNames are reset to their default values.</p>
    pub fn all_parameters(mut self, inp: bool) -> Self {
        self.inner = self.inner.all_parameters(inp);
        self
    }
    /// <p>If true, all parameters in the parameter group are reset to their default values. If false, only the parameters listed by ParameterNames are reset to their default values.</p>
    pub fn set_all_parameters(mut self, input: std::option::Option<bool>) -> Self {
        self.inner = self.inner.set_all_parameters(input);
        self
    }
    /// Appends an item to `ParameterNames`.
    ///
    /// To override the contents of this collection use [`set_parameter_names`](Self::set_parameter_names).
    ///
    /// <p>An array of parameter names to reset to their default values. If AllParameters is true, do not use ParameterNames. If AllParameters is false, you must specify the name of at least one parameter to reset.</p>
    pub fn parameter_names(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.parameter_names(inp);
        self
    }
    /// <p>An array of parameter names to reset to their default values. If AllParameters is true, do not use ParameterNames. If AllParameters is false, you must specify the name of at least one parameter to reset.</p>
    pub fn set_parameter_names(
        mut self,
        input: std::option::Option<std::vec::Vec<std::string::String>>,
    ) -> Self {
        self.inner = self.inner.set_parameter_names(input);
        self
    }
}
/// Fluent builder constructing a request to `TagResource`.
///
/// <p>A tag is a key-value pair where the key and value are case-sensitive. You can use tags to categorize and track all your MemoryDB resources.
///
/// When you add or remove tags on clusters, those actions will be replicated to all nodes in the cluster. For more information, see
///
/// <a href="https://docs.aws.amazon.com/MemoryDB/latest/devguide/iam.resourcelevelpermissions.html">Resource-level permissions</a>.</p>
///
/// <p>For example, you can use cost-allocation tags to your MemoryDB resources, Amazon generates a cost allocation report as a comma-separated value
/// (CSV) file with your usage and costs aggregated by your tags. You can apply tags that represent business categories
/// (such as cost centers, application names, or owners) to organize your costs across multiple services.
///
/// For more information, see <a href="https://docs.aws.amazon.com/MemoryDB/latest/devguide/tagging.html">Using Cost Allocation Tags</a>.</p>
#[derive(std::fmt::Debug)]
pub struct TagResource<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::tag_resource_input::Builder,
}
impl<C, M, R> TagResource<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `TagResource`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::TagResourceOutput,
        aws_smithy_http::result::SdkError<crate::error::TagResourceError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::TagResourceInputOperationOutputAlias,
            crate::output::TagResourceOutput,
            crate::error::TagResourceError,
            crate::input::TagResourceInputOperationRetryAlias,
        >,
    {
        // Builder validation errors surface as `ConstructionFailure` before dispatch.
        let input = self.inner.build().map_err(|err| {
            aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
        })?;
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>The Amazon Resource Name (ARN) of the resource to which the tags are to be added</p>
    pub fn resource_arn(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.resource_arn(inp);
        self
    }
    /// <p>The Amazon Resource Name (ARN) of the resource to which the tags are to be added</p>
    pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_resource_arn(input);
        self
    }
    /// Appends an item to `Tags`.
    ///
    /// To override the contents of this collection use [`set_tags`](Self::set_tags).
    ///
    /// <p>A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted.</p>
    pub fn tags(mut self, inp: impl Into<crate::model::Tag>) -> Self {
        self.inner = self.inner.tags(inp);
        self
    }
    /// <p>A list of tags to be added to this resource. A tag is a key-value pair. A tag key must be accompanied by a tag value, although null is accepted.</p>
    pub fn set_tags(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
    ) -> Self {
        self.inner = self.inner.set_tags(input);
        self
    }
}
/// Fluent builder constructing a request to `UntagResource`.
///
/// <p>Use this operation to remove tags on a resource</p>
#[derive(std::fmt::Debug)]
pub struct UntagResource<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::untag_resource_input::Builder,
}
impl<C, M, R> UntagResource<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `UntagResource`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::UntagResourceOutput,
        aws_smithy_http::result::SdkError<crate::error::UntagResourceError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::UntagResourceInputOperationOutputAlias,
            crate::output::UntagResourceOutput,
            crate::error::UntagResourceError,
            crate::input::UntagResourceInputOperationRetryAlias,
        >,
    {
        // Builder validation errors surface as `ConstructionFailure` before dispatch.
        let input = self.inner.build().map_err(|err| {
            aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
        })?;
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>The Amazon Resource Name (ARN) of the resource to which the tags are to be removed</p>
    pub fn resource_arn(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.resource_arn(inp);
        self
    }
    /// <p>The Amazon Resource Name (ARN) of the resource to which the tags are to be removed</p>
    pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_resource_arn(input);
        self
    }
    /// Appends an item to `TagKeys`.
    ///
    /// To override the contents of this collection use [`set_tag_keys`](Self::set_tag_keys).
    ///
    /// <p>The list of keys of the tags that are to be removed</p>
    pub fn tag_keys(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.tag_keys(inp);
        self
    }
    /// <p>The list of keys of the tags that are to be removed</p>
    pub fn set_tag_keys(
        mut self,
        input: std::option::Option<std::vec::Vec<std::string::String>>,
    ) -> Self {
        self.inner = self.inner.set_tag_keys(input);
        self
    }
}
/// Fluent builder constructing a request to `UpdateACL`.
///
/// <p>Changes the list of users that belong to the Access Control List.</p>
#[derive(std::fmt::Debug)]
pub struct UpdateACL<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::update_acl_input::Builder,
}
impl<C, M, R> UpdateACL<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `UpdateACL`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::UpdateAclOutput,
        aws_smithy_http::result::SdkError<crate::error::UpdateACLError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::UpdateAclInputOperationOutputAlias,
            crate::output::UpdateAclOutput,
            crate::error::UpdateACLError,
            crate::input::UpdateAclInputOperationRetryAlias,
        >,
    {
        // Note the mixed casing: input/output aliases use `UpdateAcl`, the error
        // type uses `UpdateACL` — both come from the code generator.
        let input = self.inner.build().map_err(|err| {
            aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
        })?;
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>The name of the Access Control List</p>
    pub fn acl_name(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.acl_name(inp);
        self
    }
    /// <p>The name of the Access Control List</p>
    pub fn set_acl_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_acl_name(input);
        self
    }
    /// Appends an item to `UserNamesToAdd`.
    ///
    /// To override the contents of this collection use [`set_user_names_to_add`](Self::set_user_names_to_add).
    ///
    /// <p>The list of users to add to the Access Control List</p>
    pub fn user_names_to_add(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.user_names_to_add(inp);
        self
    }
    /// <p>The list of users to add to the Access Control List</p>
    pub fn set_user_names_to_add(
        mut self,
        input: std::option::Option<std::vec::Vec<std::string::String>>,
    ) -> Self {
        self.inner = self.inner.set_user_names_to_add(input);
        self
    }
    /// Appends an item to `UserNamesToRemove`.
    ///
    /// To override the contents of this collection use [`set_user_names_to_remove`](Self::set_user_names_to_remove).
    ///
    /// <p>The list of users to remove from the Access Control List</p>
    pub fn user_names_to_remove(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.user_names_to_remove(inp);
        self
    }
    /// <p>The list of users to remove from the Access Control List</p>
    pub fn set_user_names_to_remove(
        mut self,
        input: std::option::Option<std::vec::Vec<std::string::String>>,
    ) -> Self {
        self.inner = self.inner.set_user_names_to_remove(input);
        self
    }
}
/// Fluent builder constructing a request to `UpdateCluster`.
///
/// <p>Modifies the settings for a cluster. You can use this operation to change one or more cluster configuration settings by specifying the settings and the new values.</p>
#[derive(std::fmt::Debug)]
pub struct UpdateCluster<
    C = aws_smithy_client::erase::DynConnector,
    M = aws_hyper::AwsMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    inner: crate::input::update_cluster_input::Builder,
}
impl<C, M, R> UpdateCluster<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `UpdateCluster`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            // Input builder starts empty; required fields are checked at `send` time.
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::UpdateClusterOutput,
        aws_smithy_http::result::SdkError<crate::error::UpdateClusterError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::UpdateClusterInputOperationOutputAlias,
            crate::output::UpdateClusterOutput,
            crate::error::UpdateClusterError,
            crate::input::UpdateClusterInputOperationRetryAlias,
        >,
    {
        let input = self.inner.build().map_err(|err| {
            aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
        })?;
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        self.handle.client.call(op).await
    }
    /// <p>The name of the cluster to update</p>
    pub fn cluster_name(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.cluster_name(inp);
        self
    }
    /// <p>The name of the cluster to update</p>
    pub fn set_cluster_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_cluster_name(input);
        self
    }
    /// <p>The description of the cluster to update</p>
    pub fn description(mut self, inp: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.description(inp);
        self
    }
    /// <p>The description of the cluster to update</p>
    pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_description(input);
        self
    }
    /// Appends an item to `SecurityGroupIds`.
    ///
    /// To override the contents of this collection use [`set_security_group_ids`](Self::set_security_group_ids).
/// /// <p>The SecurityGroupIds to update</p> pub fn security_group_ids(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.security_group_ids(inp); self } /// <p>The SecurityGroupIds to update</p> pub fn set_security_group_ids( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.inner = self.inner.set_security_group_ids(input); self } /// <p>The maintenance window to update</p> pub fn maintenance_window(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.maintenance_window(inp); self } /// <p>The maintenance window to update</p> pub fn set_maintenance_window( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_maintenance_window(input); self } /// <p>The SNS topic ARN to update</p> pub fn sns_topic_arn(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.sns_topic_arn(inp); self } /// <p>The SNS topic ARN to update</p> pub fn set_sns_topic_arn( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_sns_topic_arn(input); self } /// <p>The status of the Amazon SNS notification topic. Notifications are sent only if the status is active.</p> pub fn sns_topic_status(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.sns_topic_status(inp); self } /// <p>The status of the Amazon SNS notification topic. 
Notifications are sent only if the status is active.</p> pub fn set_sns_topic_status( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_sns_topic_status(input); self } /// <p>The name of the parameter group to update</p> pub fn parameter_group_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.parameter_group_name(inp); self } /// <p>The name of the parameter group to update</p> pub fn set_parameter_group_name( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_parameter_group_name(input); self } /// <p>The daily time range (in UTC) during which MemoryDB begins taking a daily snapshot of your cluster.</p> pub fn snapshot_window(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.snapshot_window(inp); self } /// <p>The daily time range (in UTC) during which MemoryDB begins taking a daily snapshot of your cluster.</p> pub fn set_snapshot_window( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_snapshot_window(input); self } /// <p>The number of days for which MemoryDB retains automatic cluster snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted.</p> pub fn snapshot_retention_limit(mut self, inp: i32) -> Self { self.inner = self.inner.snapshot_retention_limit(inp); self } /// <p>The number of days for which MemoryDB retains automatic cluster snapshots before deleting them. 
For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted.</p> pub fn set_snapshot_retention_limit(mut self, input: std::option::Option<i32>) -> Self { self.inner = self.inner.set_snapshot_retention_limit(input); self } /// <p>A valid node type that you want to scale this cluster up or down to.</p> pub fn node_type(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.node_type(inp); self } /// <p>A valid node type that you want to scale this cluster up or down to.</p> pub fn set_node_type(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_node_type(input); self } /// <p>The upgraded version of the engine to be run on the nodes. You can upgrade to a newer engine version, but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster and create it anew with the earlier engine version.</p> pub fn engine_version(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.engine_version(inp); self } /// <p>The upgraded version of the engine to be run on the nodes. You can upgrade to a newer engine version, but you cannot downgrade to an earlier engine version. 
If you want to use an earlier engine version, you must delete the existing cluster and create it anew with the earlier engine version.</p> pub fn set_engine_version( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_engine_version(input); self } /// <p>The number of replicas that will reside in each shard</p> pub fn replica_configuration( mut self, inp: crate::model::ReplicaConfigurationRequest, ) -> Self { self.inner = self.inner.replica_configuration(inp); self } /// <p>The number of replicas that will reside in each shard</p> pub fn set_replica_configuration( mut self, input: std::option::Option<crate::model::ReplicaConfigurationRequest>, ) -> Self { self.inner = self.inner.set_replica_configuration(input); self } /// <p>The number of shards in the cluster</p> pub fn shard_configuration(mut self, inp: crate::model::ShardConfigurationRequest) -> Self { self.inner = self.inner.shard_configuration(inp); self } /// <p>The number of shards in the cluster</p> pub fn set_shard_configuration( mut self, input: std::option::Option<crate::model::ShardConfigurationRequest>, ) -> Self { self.inner = self.inner.set_shard_configuration(input); self } /// <p>The Access Control List that is associated with the cluster</p> pub fn acl_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.acl_name(inp); self } /// <p>The Access Control List that is associated with the cluster</p> pub fn set_acl_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_acl_name(input); self } } /// Fluent builder constructing a request to `UpdateParameterGroup`. /// /// <p>Updates the parameters of a parameter group. 
You can modify up to 20 parameters in a single request by submitting a list parameter name and value pairs.</p> #[derive(std::fmt::Debug)] pub struct UpdateParameterGroup< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::update_parameter_group_input::Builder, } impl<C, M, R> UpdateParameterGroup<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `UpdateParameterGroup`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::UpdateParameterGroupOutput, aws_smithy_http::result::SdkError<crate::error::UpdateParameterGroupError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::UpdateParameterGroupInputOperationOutputAlias, crate::output::UpdateParameterGroupOutput, crate::error::UpdateParameterGroupError, crate::input::UpdateParameterGroupInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the parameter group to update.</p> pub fn parameter_group_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.parameter_group_name(inp); self } /// <p>The name of the parameter group to update.</p> pub fn set_parameter_group_name( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_parameter_group_name(input); self } /// Appends an item to `ParameterNameValues`. /// /// To override the contents of this collection use [`set_parameter_name_values`](Self::set_parameter_name_values). /// /// <p>An array of parameter names and values for the parameter update. You must supply at least one parameter name and value; subsequent arguments are optional. A maximum of 20 parameters may be updated per request.</p> pub fn parameter_name_values( mut self, inp: impl Into<crate::model::ParameterNameValue>, ) -> Self { self.inner = self.inner.parameter_name_values(inp); self } /// <p>An array of parameter names and values for the parameter update. You must supply at least one parameter name and value; subsequent arguments are optional. 
A maximum of 20 parameters may be updated per request.</p> pub fn set_parameter_name_values( mut self, input: std::option::Option<std::vec::Vec<crate::model::ParameterNameValue>>, ) -> Self { self.inner = self.inner.set_parameter_name_values(input); self } } /// Fluent builder constructing a request to `UpdateSubnetGroup`. /// /// <p>Updates a subnet group. For more information, see <a href="https://docs.aws.amazon.com/MemoryDB/latest/devguide/ubnetGroups.Modifying.html">Updating a subnet group</a> /// </p> #[derive(std::fmt::Debug)] pub struct UpdateSubnetGroup< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::update_subnet_group_input::Builder, } impl<C, M, R> UpdateSubnetGroup<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `UpdateSubnetGroup`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::UpdateSubnetGroupOutput, aws_smithy_http::result::SdkError<crate::error::UpdateSubnetGroupError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::UpdateSubnetGroupInputOperationOutputAlias, crate::output::UpdateSubnetGroupOutput, crate::error::UpdateSubnetGroupError, crate::input::UpdateSubnetGroupInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the subnet group</p> pub fn subnet_group_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.subnet_group_name(inp); self } /// <p>The name of the subnet group</p> pub fn set_subnet_group_name( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_subnet_group_name(input); self } /// <p>A description of the subnet group</p> pub fn description(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.description(inp); self } /// <p>A description of the subnet group</p> pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_description(input); self } /// Appends an item to `SubnetIds`. /// /// To override the contents of this collection use [`set_subnet_ids`](Self::set_subnet_ids). 
/// /// <p>The EC2 subnet IDs for the subnet group.</p> pub fn subnet_ids(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.subnet_ids(inp); self } /// <p>The EC2 subnet IDs for the subnet group.</p> pub fn set_subnet_ids( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.inner = self.inner.set_subnet_ids(input); self } } /// Fluent builder constructing a request to `UpdateUser`. /// /// <p>Changes user password(s) and/or access string.</p> #[derive(std::fmt::Debug)] pub struct UpdateUser< C = aws_smithy_client::erase::DynConnector, M = aws_hyper::AwsMiddleware, R = aws_smithy_client::retry::Standard, > { handle: std::sync::Arc<super::Handle<C, M, R>>, inner: crate::input::update_user_input::Builder, } impl<C, M, R> UpdateUser<C, M, R> where C: aws_smithy_client::bounds::SmithyConnector, M: aws_smithy_client::bounds::SmithyMiddleware<C>, R: aws_smithy_client::retry::NewRequestPolicy, { /// Creates a new `UpdateUser`. pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self { Self { handle, inner: Default::default(), } } /// Sends the request and returns the response. /// /// If an error occurs, an `SdkError` will be returned with additional details that /// can be matched against. /// /// By default, any retryable failures will be retried twice. Retry behavior /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be /// set when configuring the client. 
pub async fn send( self, ) -> std::result::Result< crate::output::UpdateUserOutput, aws_smithy_http::result::SdkError<crate::error::UpdateUserError>, > where R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy< crate::input::UpdateUserInputOperationOutputAlias, crate::output::UpdateUserOutput, crate::error::UpdateUserError, crate::input::UpdateUserInputOperationRetryAlias, >, { let input = self.inner.build().map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; let op = input .make_operation(&self.handle.conf) .await .map_err(|err| { aws_smithy_http::result::SdkError::ConstructionFailure(err.into()) })?; self.handle.client.call(op).await } /// <p>The name of the user</p> pub fn user_name(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.user_name(inp); self } /// <p>The name of the user</p> pub fn set_user_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.inner = self.inner.set_user_name(input); self } /// <p>Denotes the user's authentication properties, such as whether it requires a password to authenticate.</p> pub fn authentication_mode(mut self, inp: crate::model::AuthenticationMode) -> Self { self.inner = self.inner.authentication_mode(inp); self } /// <p>Denotes the user's authentication properties, such as whether it requires a password to authenticate.</p> pub fn set_authentication_mode( mut self, input: std::option::Option<crate::model::AuthenticationMode>, ) -> Self { self.inner = self.inner.set_authentication_mode(input); self } /// <p>Access permissions string used for this user.</p> pub fn access_string(mut self, inp: impl Into<std::string::String>) -> Self { self.inner = self.inner.access_string(inp); self } /// <p>Access permissions string used for this user.</p> pub fn set_access_string( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.inner = self.inner.set_access_string(input); self } } } impl<C> Client<C, 
aws_hyper::AwsMiddleware, aws_smithy_client::retry::Standard> { /// Creates a client with the given service config and connector override. pub fn from_conf_conn(conf: crate::Config, conn: C) -> Self { let retry_config = conf.retry_config.as_ref().cloned().unwrap_or_default(); let timeout_config = conf.timeout_config.as_ref().cloned().unwrap_or_default(); let sleep_impl = conf.sleep_impl.clone(); let mut client = aws_hyper::Client::new(conn) .with_retry_config(retry_config.into()) .with_timeout_config(timeout_config); client.set_sleep_impl(sleep_impl); Self { handle: std::sync::Arc::new(Handle { client, conf }), } } } impl Client< aws_smithy_client::erase::DynConnector, aws_hyper::AwsMiddleware, aws_smithy_client::retry::Standard, > { /// Creates a new client from a shared config. #[cfg(any(feature = "rustls", feature = "native-tls"))] pub fn new(config: &aws_types::config::Config) -> Self { Self::from_conf(config.into()) } /// Creates a new client from the service [`Config`](crate::Config). #[cfg(any(feature = "rustls", feature = "native-tls"))] pub fn from_conf(conf: crate::Config) -> Self { let retry_config = conf.retry_config.as_ref().cloned().unwrap_or_default(); let timeout_config = conf.timeout_config.as_ref().cloned().unwrap_or_default(); let sleep_impl = conf.sleep_impl.clone(); let mut client = aws_hyper::Client::https() .with_retry_config(retry_config.into()) .with_timeout_config(timeout_config); client.set_sleep_impl(sleep_impl); Self { handle: std::sync::Arc::new(Handle { client, conf }), } } }
47.44186
352
0.601698
69a31518d1fab96960b5c5536394be6b71891002
5,687
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use lazy_static; use libra_metrics::{Histogram, IntGauge, OpMetrics}; use prometheus::{HistogramVec, IntCounterVec, IntGaugeVec}; lazy_static::lazy_static! { pub static ref LIBRA_NETWORK_PEERS: IntGaugeVec = register_int_gauge_vec!( // metric name "libra_network_peers", // metric description "Libra network peers counter", // metric labels (dimensions) &["role_type", "state"] ).unwrap(); pub static ref LIBRA_NETWORK_RPC_MESSAGES: IntCounterVec = register_int_counter_vec!( "libra_network_rpc_messages", "Libra network rpc messages counter", &["type", "state"] ).unwrap(); pub static ref LIBRA_NETWORK_RPC_BYTES: HistogramVec = register_histogram_vec!( "libra_network_rpc_bytes", "Libra network rpc bytes histogram", &["type", "state"] ).unwrap(); pub static ref LIBRA_NETWORK_RPC_LATENCY: Histogram = register_histogram!( "libra_network_rpc_latency_seconds", "Libra network rpc latency histogram" ).unwrap(); pub static ref LIBRA_NETWORK_DIRECT_SEND_MESSAGES: IntCounterVec = register_int_counter_vec!( "libra_network_direct_send_messages", "Libra network direct send messages counter", &["state"] ).unwrap(); pub static ref LIBRA_NETWORK_DIRECT_SEND_BYTES: HistogramVec = register_histogram_vec!( "libra_network_direct_send_bytes", "Libra network direct send bytes histogram", &["state"] ).unwrap(); } lazy_static::lazy_static! { pub static ref OP_COUNTERS: OpMetrics = OpMetrics::new_and_registered("network"); } lazy_static::lazy_static! 
{ /// /// Channel Counters /// /// Counter of pending requests in Network Provider pub static ref PENDING_NETWORK_REQUESTS: IntGauge = OP_COUNTERS.gauge("pending_network_requests"); /// Counter of pending network events to Mempool pub static ref PENDING_MEMPOOL_NETWORK_EVENTS: IntGauge = OP_COUNTERS.gauge("pending_mempool_network_events"); /// Counter of pending network events to Consensus pub static ref PENDING_CONSENSUS_NETWORK_EVENTS: IntGauge = OP_COUNTERS.gauge("pending_consensus_network_events"); /// Counter of pending network events to State Synchronizer pub static ref PENDING_STATE_SYNCHRONIZER_NETWORK_EVENTS: IntGauge = OP_COUNTERS.gauge("pending_state_sync_network_events"); /// Counter of pending network events to Admission Control pub static ref PENDING_ADMISSION_CONTROL_NETWORK_EVENTS: IntGauge = OP_COUNTERS.gauge("pending_admission_control_network_events"); /// Counter of pending requests in Peer Manager pub static ref PENDING_PEER_MANAGER_REQUESTS: IntGauge = OP_COUNTERS.gauge("pending_peer_manager_requests"); /// Counter of pending Peer Manager notifications in Network Provider pub static ref PENDING_PEER_MANAGER_NET_NOTIFICATIONS: IntGauge = OP_COUNTERS.gauge("pending_peer_manager_net_notifications"); /// Counter of pending requests in Direct Send pub static ref PENDING_DIRECT_SEND_REQUESTS: IntGauge = OP_COUNTERS.gauge("pending_direct_send_requests"); /// Counter of pending Direct Send notifications to Network Provider pub static ref PENDING_DIRECT_SEND_NOTIFICATIONS: IntGauge = OP_COUNTERS.gauge("pending_direct_send_notifications"); /// Counter of pending requests in Connectivity Manager pub static ref PENDING_CONNECTIVITY_MANAGER_REQUESTS: IntGauge = OP_COUNTERS.gauge("pending_connectivity_manager_requests"); /// Counter of pending requests in RPC pub static ref PENDING_RPC_REQUESTS: IntGauge = OP_COUNTERS.gauge("pending_rpc_requests"); /// Counter of pending RPC notifications to Network Provider pub static ref PENDING_RPC_NOTIFICATIONS: IntGauge 
= OP_COUNTERS.gauge("pending_rpc_notifications"); /// Counter of pending Peer Manager notifications to Direct Send pub static ref PENDING_PEER_MANAGER_DIRECT_SEND_NOTIFICATIONS: IntGauge = OP_COUNTERS.gauge("pending_peer_manager_direct_send_notifications"); /// Counter of pending Peer Manager notifications to RPC pub static ref PENDING_PEER_MANAGER_RPC_NOTIFICATIONS: IntGauge = OP_COUNTERS.gauge("pending_peer_manager_rpc_notifications"); /// Counter of pending Peer Manager notifications to Discovery pub static ref PENDING_PEER_MANAGER_DISCOVERY_NOTIFICATIONS: IntGauge = OP_COUNTERS.gauge("pending_peer_manager_discovery_notifications"); /// Counter of pending Peer Manager notifications to Ping pub static ref PENDING_PEER_MANAGER_PING_NOTIFICATIONS: IntGauge = OP_COUNTERS.gauge("pending_peer_manager_ping_notifications"); /// Counter of pending Peer Manager notifications to Connectivity Manager pub static ref PENDING_PEER_MANAGER_CONNECTIVITY_MANAGER_NOTIFICATIONS: IntGauge = OP_COUNTERS.gauge("pending_peer_manager_connectivity_manager_notifications"); /// Counter of pending internal events in Peer Manager pub static ref PENDING_PEER_MANAGER_INTERNAL_EVENTS: IntGauge = OP_COUNTERS.gauge("pending_peer_manager_internal_events"); /// Counter of pending dial requests in Peer Manager pub static ref PENDING_PEER_MANAGER_DIAL_REQUESTS: IntGauge = OP_COUNTERS.gauge("pending_peer_manager_dial_requests"); /// Counter of pending requests for each remote peer pub static ref PENDING_PEER_REQUESTS: &'static str = "pending_peer_requests"; /// Counter of pending outbound messages in Direct Send for each remote peer pub static ref PENDING_DIRECT_SEND_OUTBOUND_MESSAGES: &'static str = "pending_direct_send_outbound_messages"; }
47.391667
164
0.767188
91cdaaed27222788c68eaae36550ebb20371b230
56
pub mod decode; pub mod encode; #[cfg(test)] mod tests;
11.2
15
0.696429
4b484e374c1b90587c9fdb7921c2800fdec6ed87
12,749
//! The non-connection-oriented interface to send and receive messages //! (whether they be "clients" or "servers"). //! //! ISteamNetworkingSockets is connection-oriented (like TCP), meaning you //! need to listen and connect, and then you send messages using a connection //! handle. ISteamNetworkingMessages is more like UDP, in that you can just send //! messages to arbitrary peers at any time. The underlying connections are //! established implicitly. //! //! Under the hood ISteamNetworkingMessages works on top of the ISteamNetworkingSockets //! code, so you get the same routing and messaging efficiency. The difference is //! mainly in your responsibility to explicitly establish a connection and //! the type of feedback you get about the state of the connection. Both //! interfaces can do "P2P" communications, and both support both unreliable //! and reliable messages, fragmentation and reassembly. //! //! The primary purpose of this interface is to be "like UDP", so that UDP-based code //! can be ported easily to take advantage of relayed connections. If you find //! yourself needing more low level information or control, or to be able to better //! handle failure, then you probably need to use ISteamNetworkingSockets directly. //! Also, note that if your main goal is to obtain a connection between two peers //! without concerning yourself with assigning roles of "client" and "server", //! you may find the symmetric connection mode of ISteamNetworkingSockets useful. //! (See k_ESteamNetworkingConfig_SymmetricConnect.) 
// TODO: examples here use crate::networking_types::{ NetConnectionInfo, NetworkingIdentity, NetworkingMessage, SendFlags, }; use crate::{register_callback, Callback, Inner, SteamError}; use std::ffi::c_void; use std::sync::{Arc, Weak}; use gamenetworkingsockets_sys as sys; /// Access to the steam networking messages interface pub struct NetworkingMessages<Manager> { pub(crate) net: *mut sys::ISteamNetworkingMessages, pub(crate) inner: Arc<Inner<Manager>>, } unsafe impl<Manager> Sync for NetworkingMessages<Manager> {} unsafe impl<Manager> Send for NetworkingMessages<Manager> {} impl<Manager: 'static> NetworkingMessages<Manager> { /// Sends a message to the specified host. /// /// If we don't already have a session with that user, a session is implicitly created. /// There might be some handshaking that needs to happen before we can actually begin sending message data. /// If this handshaking fails and we can't get through, an error will be posted via the callback /// SteamNetworkingMessagesSessionFailed_t. /// There is no notification when the operation succeeds. (You should have the peer send a reply /// for this purpose.) /// /// Sending a message to a host will also implicitly accept any incoming connection from that host. /// /// `channel` is a routing number you can use to help route message to different systems. /// You'll have to call ReceiveMessagesOnChannel() with the same channel number in order to retrieve /// the data on the other end. /// /// Using different channels to talk to the same user will still use the same underlying /// connection, saving on resources. If you don't need this feature, use 0. /// Otherwise, small integers are the most efficient. /// /// It is guaranteed that reliable messages to the same host on the same channel /// will be be received by the remote host (if they are received at all) exactly once, /// and in the same order that they were sent. /// /// NO other order guarantees exist! 
In particular, unreliable messages may be dropped, /// received out of order with respect to each other and with respect to reliable data, /// or may be received multiple times. Messages on different channels are *not* guaranteed /// to be received in the order they were sent. /// /// A note for those familiar with TCP/IP ports, or converting an existing codebase that /// opened multiple sockets: You might notice that there is only one channel, and with /// TCP/IP each endpoint has a port number. You can think of the channel number as the /// *destination* port. If you need each message to also include a "source port" (so the /// recipient can route the reply), then just put that in your message. That is essentially /// how UDP works! /// /// Returns: /// - k_EREsultOK on success. /// - k_EResultNoConnection will be returned if the session has failed or was closed by the peer, /// and k_nSteamNetworkingSend_AutoRestartBrokenSession is not used. (You can use /// GetSessionConnectionInfo to get the details.) In order to acknowledge the broken session /// and start a new one, you must call CloseSessionWithUser /// - See ISteamNetworkingSockets::SendMessageToConnection for more possible return values pub fn send_message_to_user( &self, user: NetworkingIdentity, send_type: SendFlags, data: &[u8], channel: u32, ) -> Result<(), SteamError> { let result = unsafe { sys::SteamAPI_ISteamNetworkingMessages_SendMessageToUser( self.net, user.as_ptr(), data.as_ptr() as *const c_void, data.len() as u32, send_type.bits(), channel as i32, ) }; if result == sys::EResult::k_EResultOK { return Ok(()); } Err(result.into()) } /// Reads the next message that has been sent from another user on the given channel. /// /// `batch_size` is the maximum number of messages that can be received at once. 
/// /// # Example /// ``` /// # use steamworks::Client; /// # use std::time::Duration; /// let (client, single) = Client::init().unwrap(); /// /// // run_callbacks must be called regularly, or no incoming connections can be received /// let callback_loop = std::thread::spawn(move || loop { /// single.run_callbacks(); /// std::thread::sleep(Duration::from_millis(10)); /// }); /// let networking_messages = client.networking_messages(); /// /// // Accept all new connections /// networking_messages.session_request_callback(|request| request.accept()); /// /// let _received = networking_messages.receive_messages_on_channel(0, 10); /// ``` pub fn receive_messages_on_channel( &self, channel: u32, batch_size: usize, ) -> Vec<NetworkingMessage<Manager>> { let mut buffer = Vec::with_capacity(batch_size); unsafe { let message_count = sys::SteamAPI_ISteamNetworkingMessages_ReceiveMessagesOnChannel( self.net, channel as i32, buffer.as_mut_ptr(), batch_size as _, ); buffer.set_len(message_count as usize); } buffer .into_iter() .map(|x| NetworkingMessage { message: x, _inner: self.inner.clone(), }) .collect() } /// Register a callback that will be called whenever a peer requests a connection. /// /// Use the [`SessionRequest`](../networking_messages/struct.SessionRequest.html) to accept or reject the connection. /// /// Requires regularly calling [`SingleClient.run_callbacks()`](../struct.SingleClient.html#method.run_callbacks). /// Calling this function more than once will replace the previous callback. 
/// /// # Example /// ``` /// # use steamworks::Client; /// # use std::time::Duration; /// let (client, single) = Client::init().unwrap(); /// /// // run_callbacks must be called regularly, or no incoming connections can be received /// let callback_loop = std::thread::spawn(move || loop { /// single.run_callbacks(); /// std::thread::sleep(Duration::from_millis(10)); /// }); /// let messages = client.networking_messages(); /// /// // Accept all incoming connections /// messages.session_request_callback(|request| { /// request.accept(); /// }); /// ``` pub fn session_request_callback( &self, mut callback: impl FnMut(SessionRequest<Manager>) + Send + 'static, ) { let builder = SessionRequestBuilder { message: self.net, inner: Arc::downgrade(&self.inner), }; unsafe { register_callback( &self.inner, move |request: NetworkingMessagesSessionRequest| { if let Some(request) = builder.build_request(request.remote) { callback(request); } }, ); } } /// Register a callback that will be called whenever a connection fails to be established. /// /// Requires regularly calling [`SingleClient.run_callbacks()`](../struct.SingleClient.html#method.run_callbacks). /// Calling this function more than once will replace the previous callback. pub fn session_failed_callback( &self, mut callback: impl FnMut(NetConnectionInfo) + Send + 'static, ) { unsafe { register_callback( &self.inner, move |failed: NetworkingMessagesSessionFailed| { callback(failed.info); }, ); } } } /// A helper for creating SessionRequests. /// /// It's Send and Sync, so it can be moved into the callback. 
struct SessionRequestBuilder<Manager> {
    // Raw pointer to the Steam networking-messages interface.
    message: *mut sys::ISteamNetworkingMessages,
    // Once the builder is in the callback, it creates a cyclic reference, so this has to be Weak
    inner: Weak<Inner<Manager>>,
}

// NOTE(review): these impls assert that the raw interface pointer may be
// shared across threads. The pointer is only read here, never mutated, but
// thread-safety ultimately rests on the Steamworks API contract — confirm.
unsafe impl<Manager> Sync for SessionRequestBuilder<Manager> {}
unsafe impl<Manager> Send for SessionRequestBuilder<Manager> {}

impl<Manager> SessionRequestBuilder<Manager> {
    /// Builds a `SessionRequest` for `remote`, or `None` when the client
    /// state has already been dropped (the `Weak` no longer upgrades).
    pub fn build_request(&self, remote: NetworkingIdentity) -> Option<SessionRequest<Manager>> {
        self.inner.upgrade().map(|inner| SessionRequest {
            remote,
            messages: self.message,
            _inner: inner,
        })
    }
}

/// Payload of the Steam "session requested" callback: the peer asking to connect.
struct NetworkingMessagesSessionRequest {
    remote: NetworkingIdentity,
}

unsafe impl Callback for NetworkingMessagesSessionRequest {
    const ID: i32 = sys::SteamNetworkingMessagesSessionRequest_t_k_iCallback as _;
    const SIZE: i32 = std::mem::size_of::<sys::SteamNetworkingMessagesSessionRequest_t>() as _;

    unsafe fn from_raw(raw: *mut c_void) -> Self {
        // SAFETY contract (caller): `raw` must point to a valid, initialized
        // `SteamNetworkingMessagesSessionRequest_t` of at least `SIZE` bytes.
        let remote = *(raw as *mut sys::SteamNetworkingMessagesSessionRequest_t);
        let remote = remote.m_identityRemote.into();
        Self { remote }
    }
}

/// Payload of the Steam "session failed" callback: details of the failed connection.
struct NetworkingMessagesSessionFailed {
    pub info: NetConnectionInfo,
}

unsafe impl Callback for NetworkingMessagesSessionFailed {
    const ID: i32 = sys::SteamNetworkingMessagesSessionFailed_t_k_iCallback as _;
    const SIZE: i32 = std::mem::size_of::<sys::SteamNetworkingMessagesSessionFailed_t>() as _;

    unsafe fn from_raw(raw: *mut c_void) -> Self {
        // SAFETY contract (caller): `raw` must point to a valid, initialized
        // `SteamNetworkingMessagesSessionFailed_t` of at least `SIZE` bytes.
        let remote = *(raw as *mut sys::SteamNetworkingMessagesSessionFailed_t);
        let remote = remote.m_info.into();
        Self { info: remote }
    }
}

/// A request for a new connection.
///
/// Use this to accept or reject the connection.
/// Letting this struct go out of scope will reject the connection.
pub struct SessionRequest<Manager> { remote: NetworkingIdentity, messages: *mut sys::ISteamNetworkingMessages, _inner: Arc<Inner<Manager>>, } unsafe impl<Manager> Sync for SessionRequest<Manager> {} unsafe impl<Manager> Send for SessionRequest<Manager> {} impl<Manager> SessionRequest<Manager> { /// The remote peer requesting the connection. pub fn remote(&self) -> &NetworkingIdentity { &self.remote } /// Accept the connection. pub fn accept(self) { unsafe { sys::SteamAPI_ISteamNetworkingMessages_AcceptSessionWithUser( self.messages, self.remote.as_ptr(), ); } } /// Reject the connection. pub fn reject(mut self) { self.reject_inner(); } /// Reject the connection without consuming self, useful for implementing [`Drop`] fn reject_inner(&mut self) { unsafe { sys::SteamAPI_ISteamNetworkingMessages_CloseSessionWithUser( self.messages, self.remote.as_ptr(), ); } } } impl<Manager> Drop for SessionRequest<Manager> { fn drop(&mut self) { self.reject_inner(); } }
39.227692
121
0.652522
71cacfc7cb1a4bef62a44290d87f6f893a5afba0
877
// ================================================================= // // * WARNING * // // This file is generated! // // Changes made to this file will be overwritten. If changes are // required to the generated code, the service_crategen project // must be updated to generate the changes. // // ================================================================= //! <fullname>Amazon AppStream 2.0</fullname> <p>API documentation for Amazon AppStream 2.0.</p> //! //! If you're using the service, you're probably looking for [AppStreamClient](struct.AppStreamClient.html) and [AppStream](trait.AppStream.html). extern crate hyper; extern crate rusoto_core; extern crate serde; #[macro_use] extern crate serde_derive; extern crate serde_json; mod generated; mod custom; pub use generated::*; pub use custom::*;
29.233333
146
0.572406
d620dc491e6789e5ce0b17a2b907c5f8f5ac6443
22,814
#![allow(deprecated)] use pyo3::{ create_exception, exceptions::PyException, prelude::*, pyclass::CompareOp, types::PyBytes, }; use serde::{Deserialize, Serialize}; use solana_sdk::{ pubkey::Pubkey as PubkeyOriginal, sanitize::{Sanitize, SanitizeError as SanitizeErrorOriginal}, signature::Signature as SignatureOriginal, transaction::{ get_nonce_pubkey_from_instruction, uses_durable_nonce, Transaction as TransactionOriginal, TransactionError as TransactionErrorOriginal, }, }; use crate::{ convert_instructions, convert_optional_pubkey, handle_py_err, signer::SignerVec, CompiledInstruction, Instruction, Message, Pubkey, PyErrWrapper, RichcmpEqualityOnly, Signature, Signer, SolderHash, }; create_exception!( solders, TransactionError, PyException, "Umbrella error for the ``Transaction`` object." ); impl From<TransactionErrorOriginal> for PyErrWrapper { fn from(e: TransactionErrorOriginal) -> Self { Self(TransactionError::new_err(e.to_string())) } } create_exception!( solders, SanitizeError, PyException, "Raised when an error is encountered during transaction sanitization." ); impl From<SanitizeErrorOriginal> for PyErrWrapper { fn from(e: SanitizeErrorOriginal) -> Self { Self(SanitizeError::new_err(e.to_string())) } } #[pyclass(module = "solders.transactionav", subclass)] #[derive(Debug, PartialEq, Default, Eq, Clone, Serialize, Deserialize)] /// An atomically-commited sequence of instructions. /// /// While :class:`~solders.instruction.Instruction`\s are the basic unit of computation in Solana, /// they are submitted by clients in :class:`~solders.transaction.Transaction`\s containing one or /// more instructions, and signed by one or more signers. /// /// /// See the `Rust module documentation <https://docs.rs/solana-sdk/latest/solana_sdk/transaction/index.html>`_ for more details about transactions. /// /// Some constructors accept an optional ``payer``, the account responsible for /// paying the cost of executing a transaction. 
In most cases, callers should /// specify the payer explicitly in these constructors. In some cases though, /// the caller is not *required* to specify the payer, but is still allowed to: /// in the :class:`~solders.message.Message` object, the first account is always the fee-payer, so /// if the caller has knowledge that the first account of the constructed /// transaction's ``Message`` is both a signer and the expected fee-payer, then /// redundantly specifying the fee-payer is not strictly required. /// /// The main ``Transaction()`` constructor creates a fully-signed transaction from a ``Message``. /// /// Args: /// from_keypairs (Sequence[Keypair | Presigner]): The keypairs that are to sign the transaction. /// message (Message): The message to sign. /// recent_blockhash (Hash): The id of a recent ledger entry. /// /// Example: /// >>> from solders.message import Message /// >>> from solders.keypair import Keypair /// >>> from solders.instruction import Instruction /// >>> from solders.hash import Hash /// >>> from solders.transaction import Transaction /// >>> from solders.pubkey import Pubkey /// >>> program_id = Pubkey.default() /// >>> arbitrary_instruction_data = bytes([1]) /// >>> accounts = [] /// >>> instruction = Instruction(program_id, arbitrary_instruction_data, accounts) /// >>> payer = Keypair() /// >>> message = Message([instruction], payer.pubkey()) /// >>> blockhash = Hash.default() # replace with a real blockhash /// >>> tx = Transaction([payer], message, blockhash) /// pub struct Transaction(TransactionOriginal); #[pymethods] impl Transaction { #[new] pub fn new( from_keypairs: Vec<Signer>, message: &Message, recent_blockhash: SolderHash, ) -> Self { TransactionOriginal::new( &SignerVec(from_keypairs), message.into(), recent_blockhash.into(), ) .into() } #[getter] /// list[Signature]: A set of signatures of a serialized :class:`~solders.message.Message`, /// signed by the first keys of the message's 
:attr:`~solders.message.Message.account_keys`, /// where the number of signatures is equal to ``num_required_signatures`` of the `Message`'s /// :class:`~solders.message.MessageHeader`. pub fn signatures(&self) -> Vec<Signature> { self.0 .signatures .clone() .into_iter() .map(Signature::from) .collect() } #[getter] /// Message: The message to sign. pub fn message(&self) -> Message { self.0.message.clone().into() } #[staticmethod] /// Create an unsigned transaction from a :class:`~solders.message.Message`. /// /// Args: /// message (Message): The transaction's message. /// /// Returns: /// Transaction: The unsigned transaction. /// /// Example: /// >>> from typing import List /// >>> from solders.message import Message /// >>> from solders.keypair import Keypair /// >>> from solders.pubkey import Pubkey /// >>> from solders.instruction import Instruction, AccountMeta /// >>> from solders.hash import Hash /// >>> from solders.transaction import Transaction /// >>> program_id = Pubkey.default() /// >>> blockhash = Hash.default() # replace with a real blockhash /// >>> arbitrary_instruction_data = bytes([1]) /// >>> accounts: List[AccountMeta] = [] /// >>> instruction = Instruction(program_id, arbitrary_instruction_data, accounts) /// >>> payer = Keypair() /// >>> message = Message.new_with_blockhash([instruction], payer.pubkey(), blockhash) /// >>> tx = Transaction.new_unsigned(message) /// >>> tx.sign([payer], tx.message.recent_blockhash) /// pub fn new_unsigned(message: Message) -> Self { TransactionOriginal::new_unsigned(message.into()).into() } #[staticmethod] /// Create an unsigned transaction from a list of :class:`~solders.instruction.Instruction`\s. /// /// Args: /// instructions (Sequence[Instruction]): The instructions to include in the transaction message. /// payer (Optional[Pubkey], optional): The transaction fee payer. Defaults to None. /// /// Returns: /// Transaction: The unsigned transaction. 
/// /// Example: /// >>> from solders.keypair import Keypair /// >>> from solders.instruction import Instruction /// >>> from solders.transaction import Transaction /// >>> from solders.pubkey import Pubkey /// >>> program_id = Pubkey.default() /// >>> arbitrary_instruction_data = bytes([1]) /// >>> accounts = [] /// >>> instruction = Instruction(program_id, arbitrary_instruction_data, accounts) /// >>> payer = Keypair() /// >>> tx = Transaction.new_with_payer([instruction], payer.pubkey()) /// pub fn new_with_payer(instructions: Vec<Instruction>, payer: Option<&Pubkey>) -> Self { TransactionOriginal::new_with_payer( &convert_instructions(instructions), convert_optional_pubkey(payer), ) .into() } #[staticmethod] /// Create a fully-signed transaction from a list of :class:`~solders.instruction.Instruction`\s. /// /// Args: /// instructions (Sequence[Instruction]): The instructions to include in the transaction message. /// payer (Optional[Pubkey], optional): The transaction fee payer. /// signing_keypairs (Sequence[Keypair | Presigner]): The keypairs that will sign the transaction. /// recent_blockhash (Hash): The id of a recent ledger entry. /// /// Returns: /// Transaction: The signed transaction. 
/// /// /// Example: /// >>> from solders.keypair import Keypair /// >>> from solders.instruction import Instruction /// >>> from solders.transaction import Transaction /// >>> from solders.pubkey import Pubkey /// >>> program_id = Pubkey.default() /// >>> arbitrary_instruction_data = bytes([1]) /// >>> accounts = [] /// >>> instruction = Instruction(program_id, arbitrary_instruction_data, accounts) /// >>> payer = Keypair() /// >>> blockhash = Hash.default() # replace with a real blockhash /// >>> tx = Transaction.new_signed_with_payer([instruction], payer.pubkey(), [payer], blockhash); /// pub fn new_signed_with_payer( instructions: Vec<Instruction>, payer: Option<&Pubkey>, signing_keypairs: Vec<Signer>, recent_blockhash: SolderHash, ) -> Self { TransactionOriginal::new_signed_with_payer( &convert_instructions(instructions), convert_optional_pubkey(payer), &SignerVec(signing_keypairs), recent_blockhash.into(), ) .into() } #[staticmethod] /// Create a fully-signed transaction from pre-compiled instructions. /// /// Args: /// from_keypairs (Sequence[Keypair | Presigner]): The keys used to sign the transaction. /// keys (Sequence[Pubkey]): The keys for the transaction. These are the program state /// instances or lamport recipient keys. /// recent_blockhash (Hash): The PoH hash. /// program_ids (Sequence[Pubkey]): The keys that identify programs used in the `instruction` vector. /// instructions (Sequence[Instruction]): Instructions that will be executed atomically. /// /// Returns: /// Transaction: The signed transaction. 
/// pub fn new_with_compiled_instructions( from_keypairs: Vec<Signer>, keys: Vec<Pubkey>, recent_blockhash: SolderHash, program_ids: Vec<Pubkey>, instructions: Vec<CompiledInstruction>, ) -> Self { let converted_keys: Vec<PubkeyOriginal> = keys.into_iter().map(PubkeyOriginal::from).collect(); let converted_program_ids: Vec<PubkeyOriginal> = program_ids.into_iter().map(PubkeyOriginal::from).collect(); let converted_instructions = instructions .into_iter() .map(solana_sdk::instruction::CompiledInstruction::from) .collect(); TransactionOriginal::new_with_compiled_instructions( &SignerVec(from_keypairs), &converted_keys, recent_blockhash.into(), converted_program_ids, converted_instructions, ) .into() } #[staticmethod] /// Create a fully-signed transaction from a message and its signatures. /// /// Args: /// message (Message): The transaction message. /// signatures (Sequence[Signature]): The message's signatures. /// /// Returns: /// Message: The signed transaction. /// /// Example: /// /// >>> from solders.keypair import Keypair /// >>> from solders.instruction import Instruction /// >>> from solders.transaction import Transaction /// >>> from solders.pubkey import Pubkey /// >>> program_id = Pubkey.default() /// >>> arbitrary_instruction_data = bytes([1]) /// >>> accounts = [] /// >>> instruction = Instruction(program_id, arbitrary_instruction_data, accounts) /// >>> payer = Keypair() /// >>> blockhash = Hash.default() # replace with a real blockhash /// >>> tx = Transaction.new_signed_with_payer([instruction], payer.pubkey(), [payer], blockhash); /// >>> assert tx == Transaction.populate(tx.message, tx.signatures) /// pub fn populate(message: Message, signatures: Vec<Signature>) -> Self { (TransactionOriginal { message: message.into(), signatures: signatures .into_iter() .map(SignatureOriginal::from) .collect(), }) .into() } /// Get the data for an instruction at the given index. 
/// /// Args: /// instruction_index (int): index into the ``instructions`` vector of the transaction's ``message``. /// /// Returns: /// bytes: The instruction data. /// pub fn data(&self, instruction_index: usize) -> &[u8] { self.0.data(instruction_index) } /// Get the :class:`~solders.pubkey.Pubkey` of an account required by one of the instructions in /// the transaction. /// /// Returns ``None`` if `instruction_index` is greater than or equal to the /// number of instructions in the transaction; or if `accounts_index` is /// greater than or equal to the number of accounts in the instruction. /// /// Args: /// instruction_index (int): index into the ``instructions`` vector of the transaction's ``message``. /// account_index (int): index into the ``acounts`` list of the message's ``compiled_instructions``. /// /// Returns: /// Optional[Pubkey]: The account key. /// pub fn key(&self, instruction_index: usize, accounts_index: usize) -> Option<Pubkey> { self.0 .key(instruction_index, accounts_index) .map(Pubkey::from) } /// Get the :class:`~solders.pubkey.Pubkey` of a signing account required by one of the /// instructions in the transaction. /// /// The transaction does not need to be signed for this function to return a /// signing account's pubkey. /// /// Returns ``None`` if the indexed account is not required to sign the /// transaction. Returns ``None`` if the [`signatures`] field does not contain /// enough elements to hold a signature for the indexed account (this should /// only be possible if `Transaction` has been manually constructed). /// /// Returns `None` if `instruction_index` is greater than or equal to the /// number of instructions in the transaction; or if `accounts_index` is /// greater than or equal to the number of accounts in the instruction. /// /// Args: /// instruction_index (int): index into the ``instructions`` vector of the transaction's ``message``. 
/// account_index (int): index into the ``acounts`` list of the message's ``compiled_instructions``. /// /// Returns: /// Optional[Pubkey]: The account key. /// pub fn signer_key(&self, instruction_index: usize, accounts_index: usize) -> Option<Pubkey> { self.0 .signer_key(instruction_index, accounts_index) .map(Pubkey::from) } /// Return the serialized message data to sign. /// /// Returns: /// bytes: The serialized message data. /// pub fn message_data<'a>(&self, py: Python<'a>) -> &'a PyBytes { PyBytes::new(py, &self.0.message_data()) } /// Sign the transaction, returning any errors. /// /// This method fully signs a transaction with all required signers, which /// must be present in the ``keypairs`` list. To sign with only some of the /// required signers, use :meth:`Transaction.partial_sign`. /// /// If ``recent_blockhash`` is different than recorded in the transaction message's /// ``recent_blockhash``] field, then the message's ``recent_blockhash`` will be updated /// to the provided ``recent_blockhash``, and any prior signatures will be cleared. /// /// /// **Errors:** /// /// Signing will fail if some required signers are not provided in /// ``keypairs``; or, if the transaction has previously been partially signed, /// some of the remaining required signers are not provided in ``keypairs``. /// In other words, the transaction must be fully signed as a result of /// calling this function. /// /// Signing will fail for any of the reasons described in the documentation /// for :meth:`Transaction.partial_sign`. /// /// Args: /// keypairs (Sequence[Keypair | Presigner]): The signers for the transaction. /// recent_blockhash (Hash): The id of a recent ledger entry. /// pub fn sign(&mut self, keypairs: Vec<Signer>, recent_blockhash: SolderHash) -> PyResult<()> { handle_py_err( self.0 .try_sign(&SignerVec(keypairs), recent_blockhash.into()), ) } /// Sign the transaction with a subset of required keys, returning any errors. 
/// /// Unlike :meth:`Transaction.sign`, this method does not require all /// keypairs to be provided, allowing a transaction to be signed in multiple /// steps. /// /// It is permitted to sign a transaction with the same keypair multiple /// times. /// /// If ``recent_blockhash`` is different than recorded in the transaction message's /// ``recent_blockhash`` field, then the message's ``recent_blockhash`` will be updated /// to the provided ``recent_blockhash``, and any prior signatures will be cleared. /// /// **Errors:** /// /// Signing will fail if /// /// - The transaction's :class:`~solders.message.Message` is malformed such that the number of /// required signatures recorded in its header /// (``num_required_signatures``) is greater than the length of its /// account keys (``account_keys``). /// - Any of the provided signers in ``keypairs`` is not a required signer of /// the message. /// - Any of the signers is a :class:`~solders.presigner.Presigner`, and its provided signature is /// incorrect. /// /// Args: /// keypairs (Sequence[Keypair | Presigner]): The signers for the transaction. /// recent_blockhash (Hash): The id of a recent ledger entry. /// pub fn partial_sign( &mut self, keypairs: Vec<Signer>, recent_blockhash: SolderHash, ) -> PyResult<()> { handle_py_err( self.0 .try_partial_sign(&SignerVec(keypairs), recent_blockhash.into()), ) } /// Verifies that all signers have signed the message. /// /// Raises: /// TransactionError: if the check fails. pub fn verify(&self) -> PyResult<()> { handle_py_err(self.0.verify()) } /// Verify the transaction and hash its message. /// /// Returns: /// Hash: The blake3 hash of the message. /// /// Raises: /// TransactionError: if the check fails. pub fn verify_and_hash_message(&self) -> PyResult<SolderHash> { handle_py_err(self.0.verify_and_hash_message()) } /// Verifies that all signers have signed the message. 
/// /// Returns: /// list[bool]: a list with the length of required signatures, where each element is either ``True`` if that signer has signed, or ``False`` if not. /// pub fn verify_with_results(&self) -> Vec<bool> { self.0.verify_with_results() } /// Get the positions of the pubkeys in account_keys associated with signing keypairs. /// /// Args: /// pubkeys (Sequence[Pubkey]): The pubkeys to find. /// /// Returns: /// list[Optional[int]]: The pubkey positions. /// pub fn get_signing_keypair_positions( &self, pubkeys: Vec<Pubkey>, ) -> PyResult<Vec<Option<usize>>> { let converted_pubkeys: Vec<PubkeyOriginal> = pubkeys.into_iter().map(PubkeyOriginal::from).collect(); handle_py_err(self.0.get_signing_keypair_positions(&converted_pubkeys)) } /// Replace all the signatures and pubkeys. /// /// Args: /// signers (Sequence[Tuple[Pubkey, Signature]]): The replacement pubkeys and signatures. /// pub fn replace_signatures(&mut self, signers: Vec<(Pubkey, Signature)>) -> PyResult<()> { let converted_signers: Vec<(PubkeyOriginal, SignatureOriginal)> = signers .into_iter() .map(|(pubkey, signature)| { ( PubkeyOriginal::from(pubkey), SignatureOriginal::from(signature), ) }) .collect(); handle_py_err(self.0.replace_signatures(&converted_signers)) } /// Check if the transaction has been signed. /// /// Returns: /// bool: True if the transaction has been signed. /// pub fn is_signed(&self) -> bool { self.0.is_signed() } /// See https://docs.rs/solana-sdk/latest/solana_sdk/transaction/fn.uses_durable_nonce.html pub fn uses_durable_nonce(&self) -> Option<CompiledInstruction> { uses_durable_nonce(&self.0).map(|x| CompiledInstruction::from(x.clone())) } /// Sanity checks the Transaction properties. 
pub fn sanitize(&self) -> PyResult<()> { handle_py_err(self.0.sanitize()) } pub fn __bytes__<'a>(&self, py: Python<'a>) -> PyResult<&'a PyBytes> { let as_vec: Vec<u8> = handle_py_err(bincode::serialize(&self.0))?; Ok(PyBytes::new(py, &as_vec)) } #[staticmethod] #[pyo3(name = "default")] /// Return a new default transaction. /// /// Returns: /// Transaction: The default transaction. pub fn new_default() -> Self { Self::default() } #[staticmethod] /// Deserialize a serialized ``Transaction`` object. /// /// Args: /// data (bytes): the serialized ``Transaction``. /// /// Returns: /// Transaction: the deserialized ``Transaction``. /// /// Example: /// >>> from solders.transaction import Transaction /// >>> tx = Transaction.default() /// >>> assert Transaction.from_bytes(bytes(tx)) == tx /// pub fn from_bytes(data: &[u8]) -> PyResult<Self> { handle_py_err(bincode::deserialize::<TransactionOriginal>(data)) } pub fn __richcmp__(&self, other: &Self, op: CompareOp) -> PyResult<bool> { self.richcmp(other, op) } pub fn __repr__(&self) -> String { format!("{:#?}", self) } pub fn __str__(&self) -> String { format!("{:?}", self) } /// Deprecated in the Solana Rust SDK, expose here only for testing. pub fn get_nonce_pubkey_from_instruction(&self, ix: &CompiledInstruction) -> Option<Pubkey> { get_nonce_pubkey_from_instruction(ix.as_ref(), self.as_ref()).map(Pubkey::from) } } impl RichcmpEqualityOnly for Transaction {} impl From<TransactionOriginal> for Transaction { fn from(tx: TransactionOriginal) -> Self { Self(tx) } } impl AsRef<TransactionOriginal> for Transaction { fn as_ref(&self) -> &TransactionOriginal { &self.0 } }
38.342857
156
0.621548
71f25f6a3f8fd717bf3bbe3522a961bad29f0e3c
1,806
use std::cmp::max;

/// Largest product of lengths over all pairs of words that share no common
/// letter (LeetCode 318). Returns 0 when no such pair exists, including for
/// empty or single-word input. Words are assumed to be lowercase ASCII.
fn max_product(words: Vec<String>) -> i32 {
    // Encode each word as a 26-bit set: bit (b - b'a') is set iff the word
    // contains letter `b`. Two words are letter-disjoint exactly when the
    // bitwise AND of their masks is zero.
    let masks: Vec<u32> = words
        .iter()
        .map(|word| word.bytes().fold(0u32, |mask, b| mask | (1u32 << (b - b'a'))))
        .collect();

    // Scan every unordered pair. With zero or one word the inner loop never
    // runs, so the result falls out as 0 without a special case.
    let mut best = 0usize;
    for i in 0..words.len() {
        for j in (i + 1)..words.len() {
            if masks[i] & masks[j] == 0 {
                best = max(best, words[i].len() * words[j].len());
            }
        }
    }
    best as i32
}

fn main() {
    let input: Vec<String> = ["abcw", "baz", "foo", "bar", "xtfn", "abcdef"]
        .iter()
        .map(|s| s.to_string())
        .collect();
    let ret = max_product(input.clone());
    println!("max_product({:?}) = {}", input, ret);
}

#[test]
fn test_max_product() {
    let owned = |ws: &[&str]| ws.iter().map(|s| s.to_string()).collect::<Vec<String>>();

    // "abcw" * "xtfn" = 4 * 4 = 16
    assert_eq!(
        max_product(owned(&["abcw", "baz", "foo", "bar", "xtfn", "abcdef"])),
        16
    );
    // "ab" * "cd" = 2 * 2 = 4
    assert_eq!(
        max_product(owned(&["a", "ab", "abc", "d", "cd", "bcd", "abcd"])),
        4
    );
    // Every pair shares 'a'.
    assert_eq!(max_product(owned(&["a", "aa", "aaa", "aaaa"])), 0);
    // No pairs at all.
    assert_eq!(max_product(vec![]), 0);
}
22.296296
69
0.422481
56def0260523fac9300337d0ae73cb30b06762da
29,439
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::SC1 { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = "Possible values of the field `ADCH`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum ADCHR { #[doc = "When DIFF=0, DADP0 is selected as input; when DIFF=1, DAD0 is selected as input."] _00000, #[doc = "When DIFF=0, DADP1 is selected as input; when DIFF=1, DAD1 is selected as input."] _00001, #[doc = "When DIFF=0, DADP2 is selected as input; when DIFF=1, DAD2 is selected as input."] _00010, #[doc = "When DIFF=0, DADP3 is selected as input; when DIFF=1, DAD3 is selected as input."] _00011, #[doc = "When DIFF=0, AD4 is selected as input; when DIFF=1, it is reserved."] _00100, #[doc = "When DIFF=0, AD5 is selected as input; when DIFF=1, it is reserved."] _00101, #[doc = "When DIFF=0, AD6 is selected as input; when DIFF=1, it is reserved."] _00110, #[doc = "When DIFF=0, AD7 is selected as input; when DIFF=1, it is reserved."] _00111, #[doc = "When DIFF=0, AD8 is selected as input; when DIFF=1, it is reserved."] _01000, #[doc = "When DIFF=0, AD9 is selected as input; when DIFF=1, it is reserved."] _01001, #[doc = "When DIFF=0, AD10 is selected as input; when DIFF=1, it is reserved."] _01010, #[doc = "When DIFF=0, AD11 is selected 
as input; when DIFF=1, it is reserved."] _01011, #[doc = "When DIFF=0, AD12 is selected as input; when DIFF=1, it is reserved."] _01100, #[doc = "When DIFF=0, AD13 is selected as input; when DIFF=1, it is reserved."] _01101, #[doc = "When DIFF=0, AD14 is selected as input; when DIFF=1, it is reserved."] _01110, #[doc = "When DIFF=0, AD15 is selected as input; when DIFF=1, it is reserved."] _01111, #[doc = "When DIFF=0, AD16 is selected as input; when DIFF=1, it is reserved."] _10000, #[doc = "When DIFF=0, AD17 is selected as input; when DIFF=1, it is reserved."] _10001, #[doc = "When DIFF=0, AD18 is selected as input; when DIFF=1, it is reserved."] _10010, #[doc = "When DIFF=0, AD19 is selected as input; when DIFF=1, it is reserved."] _10011, #[doc = "When DIFF=0, AD20 is selected as input; when DIFF=1, it is reserved."] _10100, #[doc = "When DIFF=0, AD21 is selected as input; when DIFF=1, it is reserved."] _10101, #[doc = "When DIFF=0, AD22 is selected as input; when DIFF=1, it is reserved."] _10110, #[doc = "When DIFF=0, AD23 is selected as input; when DIFF=1, it is reserved."] _10111, #[doc = "When DIFF=0, Temp Sensor (single-ended) is selected as input; when DIFF=1, Temp Sensor (differential) is selected as input."] _11010, #[doc = "When DIFF=0, Bandgap (single-ended) is selected as input; when DIFF=1, Bandgap (differential) is selected as input."] _11011, #[doc = "When DIFF=0,VREFSH is selected as input; when DIFF=1, -VREFSH (differential) is selected as input. Voltage reference selected is determined by SC2[REFSEL]."] _11101, #[doc = "When DIFF=0,VREFSL is selected as input; when DIFF=1, it is reserved. 
Voltage reference selected is determined by SC2[REFSEL]."] _11110, #[doc = "Module is disabled."] _11111, #[doc = r" Reserved"] _Reserved(u8), } impl ADCHR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { match *self { ADCHR::_00000 => 0, ADCHR::_00001 => 1, ADCHR::_00010 => 2, ADCHR::_00011 => 3, ADCHR::_00100 => 4, ADCHR::_00101 => 5, ADCHR::_00110 => 6, ADCHR::_00111 => 7, ADCHR::_01000 => 8, ADCHR::_01001 => 9, ADCHR::_01010 => 10, ADCHR::_01011 => 11, ADCHR::_01100 => 12, ADCHR::_01101 => 13, ADCHR::_01110 => 14, ADCHR::_01111 => 15, ADCHR::_10000 => 16, ADCHR::_10001 => 17, ADCHR::_10010 => 18, ADCHR::_10011 => 19, ADCHR::_10100 => 20, ADCHR::_10101 => 21, ADCHR::_10110 => 22, ADCHR::_10111 => 23, ADCHR::_11010 => 26, ADCHR::_11011 => 27, ADCHR::_11101 => 29, ADCHR::_11110 => 30, ADCHR::_11111 => 31, ADCHR::_Reserved(bits) => bits, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: u8) -> ADCHR { match value { 0 => ADCHR::_00000, 1 => ADCHR::_00001, 2 => ADCHR::_00010, 3 => ADCHR::_00011, 4 => ADCHR::_00100, 5 => ADCHR::_00101, 6 => ADCHR::_00110, 7 => ADCHR::_00111, 8 => ADCHR::_01000, 9 => ADCHR::_01001, 10 => ADCHR::_01010, 11 => ADCHR::_01011, 12 => ADCHR::_01100, 13 => ADCHR::_01101, 14 => ADCHR::_01110, 15 => ADCHR::_01111, 16 => ADCHR::_10000, 17 => ADCHR::_10001, 18 => ADCHR::_10010, 19 => ADCHR::_10011, 20 => ADCHR::_10100, 21 => ADCHR::_10101, 22 => ADCHR::_10110, 23 => ADCHR::_10111, 26 => ADCHR::_11010, 27 => ADCHR::_11011, 29 => ADCHR::_11101, 30 => ADCHR::_11110, 31 => ADCHR::_11111, i => ADCHR::_Reserved(i), } } #[doc = "Checks if the value of the field is `_00000`"] #[inline] pub fn is_00000(&self) -> bool { *self == ADCHR::_00000 } #[doc = "Checks if the value of the field is `_00001`"] #[inline] pub fn is_00001(&self) -> bool { *self == ADCHR::_00001 } #[doc = "Checks if the value of the field is `_00010`"] #[inline] pub fn is_00010(&self) -> bool { *self == ADCHR::_00010 } #[doc = 
"Checks if the value of the field is `_00011`"] #[inline] pub fn is_00011(&self) -> bool { *self == ADCHR::_00011 } #[doc = "Checks if the value of the field is `_00100`"] #[inline] pub fn is_00100(&self) -> bool { *self == ADCHR::_00100 } #[doc = "Checks if the value of the field is `_00101`"] #[inline] pub fn is_00101(&self) -> bool { *self == ADCHR::_00101 } #[doc = "Checks if the value of the field is `_00110`"] #[inline] pub fn is_00110(&self) -> bool { *self == ADCHR::_00110 } #[doc = "Checks if the value of the field is `_00111`"] #[inline] pub fn is_00111(&self) -> bool { *self == ADCHR::_00111 } #[doc = "Checks if the value of the field is `_01000`"] #[inline] pub fn is_01000(&self) -> bool { *self == ADCHR::_01000 } #[doc = "Checks if the value of the field is `_01001`"] #[inline] pub fn is_01001(&self) -> bool { *self == ADCHR::_01001 } #[doc = "Checks if the value of the field is `_01010`"] #[inline] pub fn is_01010(&self) -> bool { *self == ADCHR::_01010 } #[doc = "Checks if the value of the field is `_01011`"] #[inline] pub fn is_01011(&self) -> bool { *self == ADCHR::_01011 } #[doc = "Checks if the value of the field is `_01100`"] #[inline] pub fn is_01100(&self) -> bool { *self == ADCHR::_01100 } #[doc = "Checks if the value of the field is `_01101`"] #[inline] pub fn is_01101(&self) -> bool { *self == ADCHR::_01101 } #[doc = "Checks if the value of the field is `_01110`"] #[inline] pub fn is_01110(&self) -> bool { *self == ADCHR::_01110 } #[doc = "Checks if the value of the field is `_01111`"] #[inline] pub fn is_01111(&self) -> bool { *self == ADCHR::_01111 } #[doc = "Checks if the value of the field is `_10000`"] #[inline] pub fn is_10000(&self) -> bool { *self == ADCHR::_10000 } #[doc = "Checks if the value of the field is `_10001`"] #[inline] pub fn is_10001(&self) -> bool { *self == ADCHR::_10001 } #[doc = "Checks if the value of the field is `_10010`"] #[inline] pub fn is_10010(&self) -> bool { *self == ADCHR::_10010 } #[doc = "Checks if the 
value of the field is `_10011`"] #[inline] pub fn is_10011(&self) -> bool { *self == ADCHR::_10011 } #[doc = "Checks if the value of the field is `_10100`"] #[inline] pub fn is_10100(&self) -> bool { *self == ADCHR::_10100 } #[doc = "Checks if the value of the field is `_10101`"] #[inline] pub fn is_10101(&self) -> bool { *self == ADCHR::_10101 } #[doc = "Checks if the value of the field is `_10110`"] #[inline] pub fn is_10110(&self) -> bool { *self == ADCHR::_10110 } #[doc = "Checks if the value of the field is `_10111`"] #[inline] pub fn is_10111(&self) -> bool { *self == ADCHR::_10111 } #[doc = "Checks if the value of the field is `_11010`"] #[inline] pub fn is_11010(&self) -> bool { *self == ADCHR::_11010 } #[doc = "Checks if the value of the field is `_11011`"] #[inline] pub fn is_11011(&self) -> bool { *self == ADCHR::_11011 } #[doc = "Checks if the value of the field is `_11101`"] #[inline] pub fn is_11101(&self) -> bool { *self == ADCHR::_11101 } #[doc = "Checks if the value of the field is `_11110`"] #[inline] pub fn is_11110(&self) -> bool { *self == ADCHR::_11110 } #[doc = "Checks if the value of the field is `_11111`"] #[inline] pub fn is_11111(&self) -> bool { *self == ADCHR::_11111 } } #[doc = "Possible values of the field `DIFF`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DIFFR { #[doc = "Single-ended conversions and input channels are selected."] _0, #[doc = "Differential conversions and input channels are selected."] _1, } impl DIFFR { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { DIFFR::_0 => false, DIFFR::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> DIFFR { match value { false => DIFFR::_0, true => DIFFR::_1, } } #[doc = 
"Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == DIFFR::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == DIFFR::_1 } } #[doc = "Possible values of the field `AIEN`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum AIENR { #[doc = "Conversion complete interrupt is disabled."] _0, #[doc = "Conversion complete interrupt is enabled."] _1, } impl AIENR { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { AIENR::_0 => false, AIENR::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> AIENR { match value { false => AIENR::_0, true => AIENR::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == AIENR::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == AIENR::_1 } } #[doc = "Possible values of the field `COCO`"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum COCOR { #[doc = "Conversion is not completed."] _0, #[doc = "Conversion is completed."] _1, } impl COCOR { #[doc = r" Returns `true` if the bit is clear (0)"] #[inline] pub fn bit_is_clear(&self) -> bool { !self.bit() } #[doc = r" Returns `true` if the bit is set (1)"] #[inline] pub fn bit_is_set(&self) -> bool { self.bit() } #[doc = r" Value of the field as raw bits"] #[inline] pub fn bit(&self) -> bool { match *self { COCOR::_0 => false, COCOR::_1 => true, } } #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _from(value: bool) -> COCOR { match value { false => COCOR::_0, true => COCOR::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline] pub fn is_0(&self) -> bool { *self == COCOR::_0 } 
#[doc = "Checks if the value of the field is `_1`"] #[inline] pub fn is_1(&self) -> bool { *self == COCOR::_1 } } #[doc = "Values that can be written to the field `ADCH`"] pub enum ADCHW { #[doc = "When DIFF=0, DADP0 is selected as input; when DIFF=1, DAD0 is selected as input."] _00000, #[doc = "When DIFF=0, DADP1 is selected as input; when DIFF=1, DAD1 is selected as input."] _00001, #[doc = "When DIFF=0, DADP2 is selected as input; when DIFF=1, DAD2 is selected as input."] _00010, #[doc = "When DIFF=0, DADP3 is selected as input; when DIFF=1, DAD3 is selected as input."] _00011, #[doc = "When DIFF=0, AD4 is selected as input; when DIFF=1, it is reserved."] _00100, #[doc = "When DIFF=0, AD5 is selected as input; when DIFF=1, it is reserved."] _00101, #[doc = "When DIFF=0, AD6 is selected as input; when DIFF=1, it is reserved."] _00110, #[doc = "When DIFF=0, AD7 is selected as input; when DIFF=1, it is reserved."] _00111, #[doc = "When DIFF=0, AD8 is selected as input; when DIFF=1, it is reserved."] _01000, #[doc = "When DIFF=0, AD9 is selected as input; when DIFF=1, it is reserved."] _01001, #[doc = "When DIFF=0, AD10 is selected as input; when DIFF=1, it is reserved."] _01010, #[doc = "When DIFF=0, AD11 is selected as input; when DIFF=1, it is reserved."] _01011, #[doc = "When DIFF=0, AD12 is selected as input; when DIFF=1, it is reserved."] _01100, #[doc = "When DIFF=0, AD13 is selected as input; when DIFF=1, it is reserved."] _01101, #[doc = "When DIFF=0, AD14 is selected as input; when DIFF=1, it is reserved."] _01110, #[doc = "When DIFF=0, AD15 is selected as input; when DIFF=1, it is reserved."] _01111, #[doc = "When DIFF=0, AD16 is selected as input; when DIFF=1, it is reserved."] _10000, #[doc = "When DIFF=0, AD17 is selected as input; when DIFF=1, it is reserved."] _10001, #[doc = "When DIFF=0, AD18 is selected as input; when DIFF=1, it is reserved."] _10010, #[doc = "When DIFF=0, AD19 is selected as input; when DIFF=1, it is reserved."] _10011, #[doc = 
"When DIFF=0, AD20 is selected as input; when DIFF=1, it is reserved."] _10100, #[doc = "When DIFF=0, AD21 is selected as input; when DIFF=1, it is reserved."] _10101, #[doc = "When DIFF=0, AD22 is selected as input; when DIFF=1, it is reserved."] _10110, #[doc = "When DIFF=0, AD23 is selected as input; when DIFF=1, it is reserved."] _10111, #[doc = "When DIFF=0, Temp Sensor (single-ended) is selected as input; when DIFF=1, Temp Sensor (differential) is selected as input."] _11010, #[doc = "When DIFF=0, Bandgap (single-ended) is selected as input; when DIFF=1, Bandgap (differential) is selected as input."] _11011, #[doc = "When DIFF=0,VREFSH is selected as input; when DIFF=1, -VREFSH (differential) is selected as input. Voltage reference selected is determined by SC2[REFSEL]."] _11101, #[doc = "When DIFF=0,VREFSL is selected as input; when DIFF=1, it is reserved. Voltage reference selected is determined by SC2[REFSEL]."] _11110, #[doc = "Module is disabled."] _11111, } impl ADCHW { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> u8 { match *self { ADCHW::_00000 => 0, ADCHW::_00001 => 1, ADCHW::_00010 => 2, ADCHW::_00011 => 3, ADCHW::_00100 => 4, ADCHW::_00101 => 5, ADCHW::_00110 => 6, ADCHW::_00111 => 7, ADCHW::_01000 => 8, ADCHW::_01001 => 9, ADCHW::_01010 => 10, ADCHW::_01011 => 11, ADCHW::_01100 => 12, ADCHW::_01101 => 13, ADCHW::_01110 => 14, ADCHW::_01111 => 15, ADCHW::_10000 => 16, ADCHW::_10001 => 17, ADCHW::_10010 => 18, ADCHW::_10011 => 19, ADCHW::_10100 => 20, ADCHW::_10101 => 21, ADCHW::_10110 => 22, ADCHW::_10111 => 23, ADCHW::_11010 => 26, ADCHW::_11011 => 27, ADCHW::_11101 => 29, ADCHW::_11110 => 30, ADCHW::_11111 => 31, } } } #[doc = r" Proxy"] pub struct _ADCHW<'a> { w: &'a mut W, } impl<'a> _ADCHW<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: ADCHW) -> &'a mut W { unsafe { self.bits(variant._bits()) } } #[doc = "When DIFF=0, DADP0 is selected as input; when DIFF=1, DAD0 is selected 
as input."] #[inline] pub fn _00000(self) -> &'a mut W { self.variant(ADCHW::_00000) } #[doc = "When DIFF=0, DADP1 is selected as input; when DIFF=1, DAD1 is selected as input."] #[inline] pub fn _00001(self) -> &'a mut W { self.variant(ADCHW::_00001) } #[doc = "When DIFF=0, DADP2 is selected as input; when DIFF=1, DAD2 is selected as input."] #[inline] pub fn _00010(self) -> &'a mut W { self.variant(ADCHW::_00010) } #[doc = "When DIFF=0, DADP3 is selected as input; when DIFF=1, DAD3 is selected as input."] #[inline] pub fn _00011(self) -> &'a mut W { self.variant(ADCHW::_00011) } #[doc = "When DIFF=0, AD4 is selected as input; when DIFF=1, it is reserved."] #[inline] pub fn _00100(self) -> &'a mut W { self.variant(ADCHW::_00100) } #[doc = "When DIFF=0, AD5 is selected as input; when DIFF=1, it is reserved."] #[inline] pub fn _00101(self) -> &'a mut W { self.variant(ADCHW::_00101) } #[doc = "When DIFF=0, AD6 is selected as input; when DIFF=1, it is reserved."] #[inline] pub fn _00110(self) -> &'a mut W { self.variant(ADCHW::_00110) } #[doc = "When DIFF=0, AD7 is selected as input; when DIFF=1, it is reserved."] #[inline] pub fn _00111(self) -> &'a mut W { self.variant(ADCHW::_00111) } #[doc = "When DIFF=0, AD8 is selected as input; when DIFF=1, it is reserved."] #[inline] pub fn _01000(self) -> &'a mut W { self.variant(ADCHW::_01000) } #[doc = "When DIFF=0, AD9 is selected as input; when DIFF=1, it is reserved."] #[inline] pub fn _01001(self) -> &'a mut W { self.variant(ADCHW::_01001) } #[doc = "When DIFF=0, AD10 is selected as input; when DIFF=1, it is reserved."] #[inline] pub fn _01010(self) -> &'a mut W { self.variant(ADCHW::_01010) } #[doc = "When DIFF=0, AD11 is selected as input; when DIFF=1, it is reserved."] #[inline] pub fn _01011(self) -> &'a mut W { self.variant(ADCHW::_01011) } #[doc = "When DIFF=0, AD12 is selected as input; when DIFF=1, it is reserved."] #[inline] pub fn _01100(self) -> &'a mut W { self.variant(ADCHW::_01100) } #[doc = "When DIFF=0, 
AD13 is selected as input; when DIFF=1, it is reserved."] #[inline] pub fn _01101(self) -> &'a mut W { self.variant(ADCHW::_01101) } #[doc = "When DIFF=0, AD14 is selected as input; when DIFF=1, it is reserved."] #[inline] pub fn _01110(self) -> &'a mut W { self.variant(ADCHW::_01110) } #[doc = "When DIFF=0, AD15 is selected as input; when DIFF=1, it is reserved."] #[inline] pub fn _01111(self) -> &'a mut W { self.variant(ADCHW::_01111) } #[doc = "When DIFF=0, AD16 is selected as input; when DIFF=1, it is reserved."] #[inline] pub fn _10000(self) -> &'a mut W { self.variant(ADCHW::_10000) } #[doc = "When DIFF=0, AD17 is selected as input; when DIFF=1, it is reserved."] #[inline] pub fn _10001(self) -> &'a mut W { self.variant(ADCHW::_10001) } #[doc = "When DIFF=0, AD18 is selected as input; when DIFF=1, it is reserved."] #[inline] pub fn _10010(self) -> &'a mut W { self.variant(ADCHW::_10010) } #[doc = "When DIFF=0, AD19 is selected as input; when DIFF=1, it is reserved."] #[inline] pub fn _10011(self) -> &'a mut W { self.variant(ADCHW::_10011) } #[doc = "When DIFF=0, AD20 is selected as input; when DIFF=1, it is reserved."] #[inline] pub fn _10100(self) -> &'a mut W { self.variant(ADCHW::_10100) } #[doc = "When DIFF=0, AD21 is selected as input; when DIFF=1, it is reserved."] #[inline] pub fn _10101(self) -> &'a mut W { self.variant(ADCHW::_10101) } #[doc = "When DIFF=0, AD22 is selected as input; when DIFF=1, it is reserved."] #[inline] pub fn _10110(self) -> &'a mut W { self.variant(ADCHW::_10110) } #[doc = "When DIFF=0, AD23 is selected as input; when DIFF=1, it is reserved."] #[inline] pub fn _10111(self) -> &'a mut W { self.variant(ADCHW::_10111) } #[doc = "When DIFF=0, Temp Sensor (single-ended) is selected as input; when DIFF=1, Temp Sensor (differential) is selected as input."] #[inline] pub fn _11010(self) -> &'a mut W { self.variant(ADCHW::_11010) } #[doc = "When DIFF=0, Bandgap (single-ended) is selected as input; when DIFF=1, Bandgap (differential) is 
selected as input."] #[inline] pub fn _11011(self) -> &'a mut W { self.variant(ADCHW::_11011) } #[doc = "When DIFF=0,VREFSH is selected as input; when DIFF=1, -VREFSH (differential) is selected as input. Voltage reference selected is determined by SC2[REFSEL]."] #[inline] pub fn _11101(self) -> &'a mut W { self.variant(ADCHW::_11101) } #[doc = "When DIFF=0,VREFSL is selected as input; when DIFF=1, it is reserved. Voltage reference selected is determined by SC2[REFSEL]."] #[inline] pub fn _11110(self) -> &'a mut W { self.variant(ADCHW::_11110) } #[doc = "Module is disabled."] #[inline] pub fn _11111(self) -> &'a mut W { self.variant(ADCHW::_11111) } #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 31; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `DIFF`"] pub enum DIFFW { #[doc = "Single-ended conversions and input channels are selected."] _0, #[doc = "Differential conversions and input channels are selected."] _1, } impl DIFFW { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { DIFFW::_0 => false, DIFFW::_1 => true, } } } #[doc = r" Proxy"] pub struct _DIFFW<'a> { w: &'a mut W, } impl<'a> _DIFFW<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: DIFFW) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "Single-ended conversions and input channels are selected."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(DIFFW::_0) } #[doc = "Differential conversions and input channels are selected."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(DIFFW::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] 
pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 5; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } #[doc = "Values that can be written to the field `AIEN`"] pub enum AIENW { #[doc = "Conversion complete interrupt is disabled."] _0, #[doc = "Conversion complete interrupt is enabled."] _1, } impl AIENW { #[allow(missing_docs)] #[doc(hidden)] #[inline] pub fn _bits(&self) -> bool { match *self { AIENW::_0 => false, AIENW::_1 => true, } } } #[doc = r" Proxy"] pub struct _AIENW<'a> { w: &'a mut W, } impl<'a> _AIENW<'a> { #[doc = r" Writes `variant` to the field"] #[inline] pub fn variant(self, variant: AIENW) -> &'a mut W { { self.bit(variant._bits()) } } #[doc = "Conversion complete interrupt is disabled."] #[inline] pub fn _0(self) -> &'a mut W { self.variant(AIENW::_0) } #[doc = "Conversion complete interrupt is enabled."] #[inline] pub fn _1(self) -> &'a mut W { self.variant(AIENW::_1) } #[doc = r" Sets the field bit"] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r" Clears the field bit"] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r" Writes raw bits to the field"] #[inline] pub fn bit(self, value: bool) -> &'a mut W { const MASK: bool = true; const OFFSET: u8 = 6; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bits 0:4 - Input channel select"] #[inline] pub fn adch(&self) -> ADCHR { ADCHR::_from({ const MASK: u8 = 31; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u32) as u8 }) } #[doc = "Bit 5 - Differential Mode Enable"] #[inline] pub fn diff(&self) -> DIFFR { DIFFR::_from({ const MASK: bool = true; const OFFSET: u8 = 5; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 6 - Interrupt Enable"] #[inline] pub fn aien(&self) -> AIENR { 
AIENR::_from({ const MASK: bool = true; const OFFSET: u8 = 6; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } #[doc = "Bit 7 - Conversion Complete Flag"] #[inline] pub fn coco(&self) -> COCOR { COCOR::_from({ const MASK: bool = true; const OFFSET: u8 = 7; ((self.bits >> OFFSET) & MASK as u32) != 0 }) } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 31 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bits 0:4 - Input channel select"] #[inline] pub fn adch(&mut self) -> _ADCHW { _ADCHW { w: self } } #[doc = "Bit 5 - Differential Mode Enable"] #[inline] pub fn diff(&mut self) -> _DIFFW { _DIFFW { w: self } } #[doc = "Bit 6 - Interrupt Enable"] #[inline] pub fn aien(&mut self) -> _AIENW { _AIENW { w: self } } }
31.964169
170
0.539556
b9a2677529896ebb923c7d8406240a8a1aa9e80c
1,699
use tomorrow_core::Result; pub trait Request {} pub trait Response {} pub trait Recuperator<Req, Res> where Req: Request, Res: Response { fn compute(&self, request: Req) -> Result<Res>; } #[cfg(test)] mod tests { use super::*; struct TestRequest { expected: bool } impl TestRequest { pub fn new(expected: bool) -> Self { TestRequest { expected: expected } } } impl Request for TestRequest {} struct TestResponse { actual: bool } impl TestResponse { pub fn new(actual: bool) -> Self { TestResponse { actual: actual } } } impl Response for TestResponse {} struct TestRecuperator {} impl TestRecuperator { pub fn new() -> Self { TestRecuperator{} } } impl Recuperator<TestRequest, TestResponse> for TestRecuperator { fn compute(&self, request: TestRequest) -> Result<TestResponse> { Ok(TestResponse::new(request.expected)) } } #[test] fn recuperator_should_return_ok_with_true() { let request = TestRequest::new(true); let recuperator = TestRecuperator::new(); let response = recuperator.compute(request); assert!(response.is_ok()); assert!(response.unwrap().actual); } #[test] fn recuperator_should_return_ok_with_false() { let request = TestRequest::new(false); let recuperator = TestRecuperator::new(); let response = recuperator.compute(request); assert!(response.is_ok()); assert!(!response.unwrap().actual); } }
22.355263
85
0.572101
e2ac532c3866df035e847e5d431d99619d0d6852
2,638
use crate::{Beatmap, ControlPoint, ControlPointIter}; pub(crate) struct SliderState<'p> { control_points: ControlPointIter<'p>, next: Option<ControlPoint>, pub(crate) beat_len: f32, pub(crate) speed_mult: f32, } impl<'p> SliderState<'p> { #[inline] pub(crate) fn new(map: &'p Beatmap) -> Self { let mut control_points = ControlPointIter::new(map); let (beat_len, speed_mult) = match control_points.next() { Some(ControlPoint::Timing { beat_len, .. }) => (beat_len, 1.0), Some(ControlPoint::Difficulty { speed_mult, .. }) => (1000.0, speed_mult), None => (1000.0, 1.0), }; Self { next: control_points.next(), control_points, beat_len, speed_mult, } } #[inline] pub(crate) fn update(&mut self, time: f32) { while let Some(next) = self.next.as_ref().filter(|n| time >= n.time()) { match next { ControlPoint::Timing { beat_len, .. } => { self.beat_len = *beat_len; self.speed_mult = 1.0; } ControlPoint::Difficulty { speed_mult, .. } => self.speed_mult = *speed_mult, } self.next = self.control_points.next(); } } } #[cfg(test)] mod test { use crate::{ parse::{DifficultyPoint, TimingPoint}, Beatmap, }; use super::SliderState; #[test] fn fruits_slider_state() { let map = Beatmap { timing_points: vec![ TimingPoint { time: 1.0, beat_len: 10.0, }, TimingPoint { time: 3.0, beat_len: 20.0, }, TimingPoint { time: 4.0, beat_len: 30.0, }, ], difficulty_points: vec![ DifficultyPoint { time: 2.0, speed_multiplier: 15.0, }, DifficultyPoint { time: 5.0, speed_multiplier: 45.0, }, ], ..Default::default() }; let mut state = SliderState::new(&map); state.update(2.0); assert_eq!(state.beat_len, 10.0); state.update(3.0); assert_eq!(state.beat_len, 20.0); assert_eq!(state.speed_mult, 1.0); state.update(5.0); assert_eq!(state.beat_len, 30.0); assert_eq!(state.speed_mult, 45.0); } }
26.918367
93
0.463988
d6f85d209e56f22ce1f812274a8de2922f3438f9
4,664
//! Compact representation of `U256` use codec::{Decode, Encode}; use primitive_types::U256; /// Compact representation of `U256` #[derive(Ord, PartialOrd, Debug, Eq, PartialEq, Clone, Copy, Default, Encode, Decode)] pub struct Compact(u32); impl From<u32> for Compact { fn from(u: u32) -> Self { Compact(u) } } impl From<Compact> for u32 { fn from(c: Compact) -> Self { c.0 } } impl From<U256> for Compact { fn from(u: U256) -> Self { Compact::from_u256(u) } } impl From<Compact> for U256 { fn from(c: Compact) -> Self { // ignore overflows and negative values c.to_u256().unwrap_or_else(|x| x) } } impl Compact { pub fn new(u: u32) -> Self { Compact(u) } pub fn max_value() -> Self { U256::max_value().into() } /// Computes the target [0, T] that a blockhash must land in to be valid /// Returns value in error, if there is an overflow or its negative value pub fn to_u256(self) -> Result<U256, U256> { let size = self.0 >> 24; let mut word = self.0 & 0x007f_ffff; let result = if size <= 3 { word >>= 8 * (3 - size as usize); word.into() } else { U256::from(word) << (8 * (size as usize - 3)) }; let is_negative = word != 0 && (self.0 & 0x0080_0000) != 0; let is_overflow = (word != 0 && size > 34) || (word > 0xff && size > 33) || (word > 0xffff && size > 32); if is_negative || is_overflow { Err(result) } else { Ok(result) } } pub fn from_u256(val: U256) -> Self { let mut size = (val.bits() + 7) / 8; let mut compact = if size <= 3 { (val.low_u64() << (8 * (3 - size))) as u32 } else { let bn = val >> (8 * (size - 3)); bn.low_u32() }; if (compact & 0x0080_0000) != 0 { compact >>= 8; size += 1; } assert_eq!((compact & !0x007f_ffff), 0); assert!(size < 256); Compact(compact | (size << 24) as u32) } pub fn to_f64(self) -> f64 { let mut shift = (self.0 >> 24) & 0xff; let mut diff = f64::from(0x0000_ffffu32) / f64::from(self.0 & 0x00ff_ffffu32); while shift < 29 { diff *= f64::from(256); shift += 1; } while shift > 29 { diff /= f64::from(256); shift -= 1; } diff } } #[cfg(test)] mod 
tests { use super::*; #[test] fn test_compact_to_u256() { assert_eq!(Compact::new(0x01003456).to_u256(), Ok(0.into())); assert_eq!(Compact::new(0x01123456).to_u256(), Ok(0x12.into())); assert_eq!(Compact::new(0x02008000).to_u256(), Ok(0x80.into())); assert_eq!(Compact::new(0x05009234).to_u256(), Ok(0x92340000u64.into())); // negative -0x12345600 assert!(Compact::new(0x04923456).to_u256().is_err()); assert_eq!(Compact::new(0x04123456).to_u256(), Ok(0x12345600u64.into())); } #[test] fn test_from_u256() { let test1 = U256::from(1000u64); assert_eq!(Compact::new(0x0203e800), Compact::from_u256(test1)); let test2 = U256::from(2).pow(U256::from(256 - 32)) - U256::from(1); assert_eq!(Compact::new(0x1d00ffff), Compact::from_u256(test2)); } #[test] fn test_compact_to_from_u256() { // TODO: it does not work both ways for small values... check why let compact = Compact::new(0x1d00ffff); let compact2 = Compact::from_u256(compact.to_u256().unwrap()); assert_eq!(compact, compact2); let compact = Compact::new(0x05009234); let compact2 = Compact::from_u256(compact.to_u256().unwrap()); assert_eq!(compact, compact2); } #[test] fn difficulty() { fn compare_f64(v1: f64, v2: f64) -> bool { (v1 - v2).abs() < 0.00001 } assert!(compare_f64(Compact::new(0x1b0404cb).to_f64(), 16307.42094)); // tests from original bitcoin client: // https://github.com/bitcoin/bitcoin/blob/1e8f88e071019907785b260477bd359bef6f9a8f/src/test/blockchain_tests.cpp assert!(compare_f64(Compact::new(0x1f111111).to_f64(), 0.000001)); assert!(compare_f64(Compact::new(0x1ef88f6f).to_f64(), 0.000016)); assert!(compare_f64(Compact::new(0x1df88f6f).to_f64(), 0.004023)); assert!(compare_f64(Compact::new(0x1cf88f6f).to_f64(), 1.029916)); assert!(compare_f64( Compact::new(0x12345678).to_f64(), 5913134931067755359633408.0 )); } }
29.333333
121
0.549957
09726186f52adea69219de94a1f5e31760a34def
2,705
use crate::tables::*; use crate::utils::bin::{Encode, Decode}; use crate::intel::{*, alu}; pub(super) fn asm_vmx(item: &mut Instruction, index: &mut usize) -> InstructionResult{ if ![0x78,0x79,0xc7].iter().any(|i| i == &item.opcode[1]){ item.size += 1; } return match (item.opcode[1], item.det.extended[item.size]){ (0x01, ext @ _) => { item.det.istr = match ext{ 0xc1 => "vmcall", 0xc2 => "vmlaunch", 0xc3 => "vmresume", 0xc4 => "vmxoff", _ => "" }.to_string(); item.size += 2; *index += 2; Ok(item.clone()) }, _ => { let modrm = ModRegRm{ byte: item.det.extended[item.size + 1].encode(), index: item.size + 1 }; let re: u8 = ([0,0,0,0,0,modrm.byte[2],modrm.byte[3],modrm.byte[4]] as Byte).decode(); item.modrm = Some(modrm); let (oop, s, x) = match (item.opcode[1], item.det.extended[item.size], re){ (0x38, e @ _, _) => { item.det.istr = match e{ 0x80 => "invept", 0x81 => "invvpid", _ => "" }.to_string(); item.det.x16 = false; (Some(0), Some(X16_32), Some(REG_MEM)) }, (op @ 0x78 | op @ 0x79, _, _) => { item.det.istr = match op{ 0x78 => "vmread", 0x79 => "vmwrite", _ => "" }.to_string(); (Some(0), Some(X16_32), Some(match op{ 0x78 => MEM_REG, 0x79 => REG_MEM, _ => 0 })) }, (0xc7, _, re @ 6 | re @ 7) => { item.det.istr = match (re, item.det.x16, item.prefixes.contains(&0xf3)){ (6, false, false) => "vmptrld", (6, true, false) => "vmclear", (6, false, true) => "vmxon", (7, _, _) => "vmptrst", (_, _, _) => "" }.to_string(); item.det.x16 = false; (Some(1), Some(X16_32), Some(MEM_REG)) }, _ => (None, None, None) }; if [oop,s,x].iter().any(|i| i.is_none()){ return Err(InstructionError::InstructionNotRecognized); } let oop: bool = match oop.unwrap(){ 0 => false, _ => true }; return match alu::asm_mdrm(item, index, Settings{ predefined: true, one_operand: oop, immediate: false, op_indx: 1, s: s, x: x }){ Ok(mut item @ _) => { asm_handle_special(&mut item) }, e @ Err(_) => e }; } }; } pub(self) fn asm_handle_special(item: &mut Instruction) -> InstructionResult{ match item.opcode[1]{ 
0x38 => { for x in ["dword","word","byte"].iter(){ if item.det.istr.contains(x){ item.det.istr = item.det.istr.replace(x, "oword"); break; } } }, 0xc7 => { for x in ["dword","word","byte"].iter(){ if item.det.istr.contains(x){ item.det.istr = item.det.istr.replace(x, "qword"); break; } } }, _ => () } Ok(item.clone()) }
24.590909
89
0.521627
8a7e599ebeb63bf21ec425e4bbf37e2c7f5281ad
22,108
use crate::cstore::{self, LoadedMacro}; use crate::encoder; use crate::link_args; use crate::native_libs; use crate::foreign_modules; use crate::schema; use rustc::ty::query::QueryConfig; use rustc::middle::cstore::{CrateStore, DepKind, EncodedMetadata, NativeLibraryKind}; use rustc::middle::exported_symbols::ExportedSymbol; use rustc::middle::stability::DeprecationEntry; use rustc::middle::dependency_format::Linkage; use rustc::hir::def; use rustc::hir; use rustc::session::{CrateDisambiguator, Session}; use rustc::ty::{self, TyCtxt}; use rustc::ty::query::Providers; use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE, CRATE_DEF_INDEX}; use rustc::hir::map::{DefKey, DefPath, DefPathHash}; use rustc::hir::map::definitions::DefPathTable; use rustc::util::nodemap::DefIdMap; use rustc_data_structures::svh::Svh; use smallvec::SmallVec; use std::any::Any; use rustc_data_structures::sync::Lrc; use std::sync::Arc; use syntax::ast; use syntax::attr; use syntax::source_map; use syntax::edition::Edition; use syntax::parse::source_file_to_stream; use syntax::parse::parser::emit_unclosed_delims; use syntax::symbol::Symbol; use syntax_pos::{Span, FileName}; use rustc_data_structures::bit_set::BitSet; macro_rules! provide { (<$lt:tt> $tcx:ident, $def_id:ident, $other:ident, $cdata:ident, $($name:ident => $compute:block)*) => { pub fn provide_extern<$lt>(providers: &mut Providers<$lt>) { // HACK(eddyb) `$lt: $lt` forces `$lt` to be early-bound, which // allows the associated type in the return type to be normalized. 
$(fn $name<$lt: $lt, T: IntoArgs>( $tcx: TyCtxt<$lt>, def_id_arg: T, ) -> <ty::queries::$name<$lt> as QueryConfig<$lt>>::Value { #[allow(unused_variables)] let ($def_id, $other) = def_id_arg.into_args(); assert!(!$def_id.is_local()); let def_path_hash = $tcx.def_path_hash(DefId { krate: $def_id.krate, index: CRATE_DEF_INDEX }); let dep_node = def_path_hash .to_dep_node(rustc::dep_graph::DepKind::CrateMetadata); // The DepNodeIndex of the DepNode::CrateMetadata should be // cached somewhere, so that we can use read_index(). $tcx.dep_graph.read(dep_node); let $cdata = $tcx.crate_data_as_rc_any($def_id.krate); let $cdata = $cdata.downcast_ref::<cstore::CrateMetadata>() .expect("CrateStore created data is not a CrateMetadata"); $compute })* *providers = Providers { $($name,)* ..*providers }; } } } // small trait to work around different signature queries all being defined via // the macro above. trait IntoArgs { fn into_args(self) -> (DefId, DefId); } impl IntoArgs for DefId { fn into_args(self) -> (DefId, DefId) { (self, self) } } impl IntoArgs for CrateNum { fn into_args(self) -> (DefId, DefId) { (self.as_def_id(), self.as_def_id()) } } impl IntoArgs for (CrateNum, DefId) { fn into_args(self) -> (DefId, DefId) { (self.0.as_def_id(), self.1) } } provide! 
{ <'tcx> tcx, def_id, other, cdata, type_of => { cdata.get_type(def_id.index, tcx) } generics_of => { tcx.arena.alloc(cdata.get_generics(def_id.index, tcx.sess)) } predicates_of => { tcx.arena.alloc(cdata.get_predicates(def_id.index, tcx)) } predicates_defined_on => { tcx.arena.alloc(cdata.get_predicates_defined_on(def_id.index, tcx)) } super_predicates_of => { tcx.arena.alloc(cdata.get_super_predicates(def_id.index, tcx)) } trait_def => { tcx.arena.alloc(cdata.get_trait_def(def_id.index, tcx.sess)) } adt_def => { cdata.get_adt_def(def_id.index, tcx) } adt_destructor => { let _ = cdata; tcx.calculate_dtor(def_id, &mut |_,_| Ok(())) } variances_of => { tcx.arena.alloc_from_iter(cdata.get_item_variances(def_id.index)) } associated_item_def_ids => { let mut result = SmallVec::<[_; 8]>::new(); cdata.each_child_of_item(def_id.index, |child| result.push(child.res.def_id()), tcx.sess); tcx.arena.alloc_slice(&result) } associated_item => { cdata.get_associated_item(def_id.index) } impl_trait_ref => { cdata.get_impl_trait(def_id.index, tcx) } impl_polarity => { cdata.get_impl_polarity(def_id.index) } coerce_unsized_info => { cdata.get_coerce_unsized_info(def_id.index).unwrap_or_else(|| { bug!("coerce_unsized_info: `{:?}` is missing its info", def_id); }) } optimized_mir => { tcx.arena.alloc(cdata.get_optimized_mir(tcx, def_id.index)) } promoted_mir => { tcx.arena.alloc(cdata.get_promoted_mir(tcx, def_id.index)) } mir_const_qualif => { (cdata.mir_const_qualif(def_id.index), tcx.arena.alloc(BitSet::new_empty(0))) } fn_sig => { cdata.fn_sig(def_id.index, tcx) } inherent_impls => { cdata.get_inherent_implementations_for_type(tcx, def_id.index) } is_const_fn_raw => { cdata.is_const_fn_raw(def_id.index) } asyncness => { cdata.asyncness(def_id.index) } is_foreign_item => { cdata.is_foreign_item(def_id.index) } static_mutability => { cdata.static_mutability(def_id.index) } def_kind => { cdata.def_kind(def_id.index) } def_span => { cdata.get_span(def_id.index, &tcx.sess) } 
lookup_stability => { cdata.get_stability(def_id.index).map(|s| tcx.intern_stability(s)) } lookup_deprecation_entry => { cdata.get_deprecation(def_id.index).map(DeprecationEntry::external) } item_attrs => { cdata.get_item_attrs(def_id.index, tcx.sess) } // FIXME(#38501) We've skipped a `read` on the `HirBody` of // a `fn` when encoding, so the dep-tracking wouldn't work. // This is only used by rustdoc anyway, which shouldn't have // incremental recompilation ever enabled. fn_arg_names => { cdata.get_fn_param_names(def_id.index) } rendered_const => { cdata.get_rendered_const(def_id.index) } impl_parent => { cdata.get_parent_impl(def_id.index) } trait_of_item => { cdata.get_trait_of_item(def_id.index) } const_is_rvalue_promotable_to_static => { cdata.const_is_rvalue_promotable_to_static(def_id.index) } is_mir_available => { cdata.is_item_mir_available(def_id.index) } dylib_dependency_formats => { cdata.get_dylib_dependency_formats(tcx) } is_panic_runtime => { cdata.root.panic_runtime } is_compiler_builtins => { cdata.root.compiler_builtins } has_global_allocator => { cdata.root.has_global_allocator } has_panic_handler => { cdata.root.has_panic_handler } is_sanitizer_runtime => { cdata.root.sanitizer_runtime } is_profiler_runtime => { cdata.root.profiler_runtime } panic_strategy => { cdata.root.panic_strategy } extern_crate => { let r = *cdata.extern_crate.lock(); r.map(|c| &*tcx.arena.alloc(c)) } is_no_builtins => { cdata.root.no_builtins } symbol_mangling_version => { cdata.root.symbol_mangling_version } impl_defaultness => { cdata.get_impl_defaultness(def_id.index) } reachable_non_generics => { let reachable_non_generics = tcx .exported_symbols(cdata.cnum) .iter() .filter_map(|&(exported_symbol, export_level)| { if let ExportedSymbol::NonGeneric(def_id) = exported_symbol { return Some((def_id, export_level)) } else { None } }) .collect(); tcx.arena.alloc(reachable_non_generics) } native_libraries => { Lrc::new(cdata.get_native_libraries(tcx.sess)) } 
foreign_modules => { cdata.get_foreign_modules(tcx) } plugin_registrar_fn => { cdata.root.plugin_registrar_fn.map(|index| { DefId { krate: def_id.krate, index } }) } proc_macro_decls_static => { cdata.root.proc_macro_decls_static.map(|index| { DefId { krate: def_id.krate, index } }) } crate_disambiguator => { cdata.root.disambiguator } crate_hash => { cdata.root.hash } original_crate_name => { cdata.root.name } extra_filename => { cdata.root.extra_filename.clone() } implementations_of_trait => { cdata.get_implementations_for_trait(tcx, Some(other)) } all_trait_implementations => { cdata.get_implementations_for_trait(tcx, None) } visibility => { cdata.get_visibility(def_id.index) } dep_kind => { let r = *cdata.dep_kind.lock(); r } crate_name => { cdata.name } item_children => { let mut result = SmallVec::<[_; 8]>::new(); cdata.each_child_of_item(def_id.index, |child| result.push(child), tcx.sess); tcx.arena.alloc_slice(&result) } defined_lib_features => { cdata.get_lib_features(tcx) } defined_lang_items => { cdata.get_lang_items(tcx) } diagnostic_items => { cdata.get_diagnostic_items(tcx) } missing_lang_items => { cdata.get_missing_lang_items(tcx) } missing_extern_crate_item => { let r = match *cdata.extern_crate.borrow() { Some(extern_crate) if !extern_crate.direct => true, _ => false, }; r } used_crate_source => { Lrc::new(cdata.source.clone()) } exported_symbols => { let mut syms = cdata.exported_symbols(tcx); // When linked into a dylib crates don't export their generic symbols, // so if that's happening then we can't load upstream monomorphizations // from this crate. let formats = tcx.dependency_formats(LOCAL_CRATE); let remove_generics = formats.iter().any(|(_ty, list)| { match list.get(def_id.krate.as_usize() - 1) { Some(Linkage::IncludedFromDylib) | Some(Linkage::Dynamic) => true, _ => false, } }); if remove_generics { syms.retain(|(sym, _threshold)| { match sym { ExportedSymbol::Generic(..) 
=> false, _ => return true, } }); } Arc::new(syms) } } pub fn provide(providers: &mut Providers<'_>) { // FIXME(#44234) - almost all of these queries have no sub-queries and // therefore no actual inputs, they're just reading tables calculated in // resolve! Does this work? Unsure! That's what the issue is about *providers = Providers { is_dllimport_foreign_item: |tcx, id| { tcx.native_library_kind(id) == Some(NativeLibraryKind::NativeUnknown) }, is_statically_included_foreign_item: |tcx, id| { match tcx.native_library_kind(id) { Some(NativeLibraryKind::NativeStatic) | Some(NativeLibraryKind::NativeStaticNobundle) => true, _ => false, } }, native_library_kind: |tcx, id| { tcx.native_libraries(id.krate) .iter() .filter(|lib| native_libs::relevant_lib(&tcx.sess, lib)) .find(|lib| { let fm_id = match lib.foreign_module { Some(id) => id, None => return false, }; tcx.foreign_modules(id.krate) .iter() .find(|m| m.def_id == fm_id) .expect("failed to find foreign module") .foreign_items .contains(&id) }) .map(|l| l.kind) }, native_libraries: |tcx, cnum| { assert_eq!(cnum, LOCAL_CRATE); Lrc::new(native_libs::collect(tcx)) }, foreign_modules: |tcx, cnum| { assert_eq!(cnum, LOCAL_CRATE); &tcx.arena.alloc(foreign_modules::collect(tcx))[..] }, link_args: |tcx, cnum| { assert_eq!(cnum, LOCAL_CRATE); Lrc::new(link_args::collect(tcx)) }, // Returns a map from a sufficiently visible external item (i.e., an // external item that is visible from at least one local module) to a // sufficiently visible parent (considering modules that re-export the // external item to be parents). visible_parent_map: |tcx, cnum| { use std::collections::vec_deque::VecDeque; use std::collections::hash_map::Entry; assert_eq!(cnum, LOCAL_CRATE); let mut visible_parent_map: DefIdMap<DefId> = Default::default(); // Issue 46112: We want the map to prefer the shortest // paths when reporting the path to an item. 
Therefore we // build up the map via a breadth-first search (BFS), // which naturally yields minimal-length paths. // // Note that it needs to be a BFS over the whole forest of // crates, not just each individual crate; otherwise you // only get paths that are locally minimal with respect to // whatever crate we happened to encounter first in this // traversal, but not globally minimal across all crates. let bfs_queue = &mut VecDeque::new(); // Preferring shortest paths alone does not guarantee a // deterministic result; so sort by crate num to avoid // hashtable iteration non-determinism. This only makes // things as deterministic as crate-nums assignment is, // which is to say, its not deterministic in general. But // we believe that libstd is consistently assigned crate // num 1, so it should be enough to resolve #46112. let mut crates: Vec<CrateNum> = (*tcx.crates()).to_owned(); crates.sort(); for &cnum in crates.iter() { // Ignore crates without a corresponding local `extern crate` item. if tcx.missing_extern_crate_item(cnum) { continue } bfs_queue.push_back(DefId { krate: cnum, index: CRATE_DEF_INDEX }); } // (restrict scope of mutable-borrow of `visible_parent_map`) { let visible_parent_map = &mut visible_parent_map; let mut add_child = |bfs_queue: &mut VecDeque<_>, child: &def::Export<hir::HirId>, parent: DefId| { if child.vis != ty::Visibility::Public { return; } if let Some(child) = child.res.opt_def_id() { match visible_parent_map.entry(child) { Entry::Occupied(mut entry) => { // If `child` is defined in crate `cnum`, ensure // that it is mapped to a parent in `cnum`. 
if child.krate == cnum && entry.get().krate != cnum { entry.insert(parent); } } Entry::Vacant(entry) => { entry.insert(parent); bfs_queue.push_back(child); } } } }; while let Some(def) = bfs_queue.pop_front() { for child in tcx.item_children(def).iter() { add_child(bfs_queue, child, def); } } } tcx.arena.alloc(visible_parent_map) }, dependency_formats: |tcx, cnum| { assert_eq!(cnum, LOCAL_CRATE); Lrc::new(crate::dependency_format::calculate(tcx)) }, ..*providers }; } impl cstore::CStore { pub fn export_macros_untracked(&self, cnum: CrateNum) { let data = self.get_crate_data(cnum); let mut dep_kind = data.dep_kind.lock(); if *dep_kind == DepKind::UnexportedMacrosOnly { *dep_kind = DepKind::MacrosOnly; } } pub fn dep_kind_untracked(&self, cnum: CrateNum) -> DepKind { let data = self.get_crate_data(cnum); let r = *data.dep_kind.lock(); r } pub fn crate_edition_untracked(&self, cnum: CrateNum) -> Edition { self.get_crate_data(cnum).root.edition } pub fn struct_field_names_untracked(&self, def: DefId) -> Vec<ast::Name> { self.get_crate_data(def.krate).get_struct_field_names(def.index) } pub fn ctor_kind_untracked(&self, def: DefId) -> def::CtorKind { self.get_crate_data(def.krate).get_ctor_kind(def.index) } pub fn item_attrs_untracked(&self, def: DefId, sess: &Session) -> Lrc<[ast::Attribute]> { self.get_crate_data(def.krate).get_item_attrs(def.index, sess) } pub fn item_children_untracked( &self, def_id: DefId, sess: &Session ) -> Vec<def::Export<hir::HirId>> { let mut result = vec![]; self.get_crate_data(def_id.krate) .each_child_of_item(def_id.index, |child| result.push(child), sess); result } pub fn load_macro_untracked(&self, id: DefId, sess: &Session) -> LoadedMacro { let data = self.get_crate_data(id.krate); if data.is_proc_macro_crate() { return LoadedMacro::ProcMacro(data.load_proc_macro(id.index, sess)); } let def = data.get_macro(id.index); let macro_full_name = data.def_path(id.index) .to_string_friendly(|_| data.imported_name); let source_name = 
FileName::Macros(macro_full_name); let source_file = sess.parse_sess.source_map().new_source_file(source_name, def.body); let local_span = Span::with_root_ctxt(source_file.start_pos, source_file.end_pos); let (body, mut errors) = source_file_to_stream(&sess.parse_sess, source_file, None); emit_unclosed_delims(&mut errors, &sess.diagnostic()); // Mark the attrs as used let attrs = data.get_item_attrs(id.index, sess); for attr in attrs.iter() { attr::mark_used(attr); } let name = data.def_key(id.index).disambiguated_data.data .get_opt_name().expect("no name in load_macro"); sess.imported_macro_spans.borrow_mut() .insert(local_span, (name.to_string(), data.get_span(id.index, sess))); LoadedMacro::MacroDef(ast::Item { // FIXME: cross-crate hygiene ident: ast::Ident::with_dummy_span(name.as_symbol()), id: ast::DUMMY_NODE_ID, span: local_span, attrs: attrs.iter().cloned().collect(), kind: ast::ItemKind::MacroDef(ast::MacroDef { tokens: body.into(), legacy: def.legacy, }), vis: source_map::respan(local_span.shrink_to_lo(), ast::VisibilityKind::Inherited), tokens: None, }) } pub fn associated_item_cloned_untracked(&self, def: DefId) -> ty::AssocItem { self.get_crate_data(def.krate).get_associated_item(def.index) } } impl CrateStore for cstore::CStore { fn crate_data_as_rc_any(&self, krate: CrateNum) -> Lrc<dyn Any> { self.get_crate_data(krate) } fn item_generics_cloned_untracked(&self, def: DefId, sess: &Session) -> ty::Generics { self.get_crate_data(def.krate).get_generics(def.index, sess) } fn crate_name_untracked(&self, cnum: CrateNum) -> Symbol { self.get_crate_data(cnum).name } fn crate_is_private_dep_untracked(&self, cnum: CrateNum) -> bool { self.get_crate_data(cnum).private_dep } fn crate_disambiguator_untracked(&self, cnum: CrateNum) -> CrateDisambiguator { self.get_crate_data(cnum).root.disambiguator } fn crate_hash_untracked(&self, cnum: CrateNum) -> Svh { self.get_crate_data(cnum).root.hash } /// Returns the `DefKey` for a given `DefId`. 
This indicates the /// parent `DefId` as well as some idea of what kind of data the /// `DefId` refers to. fn def_key(&self, def: DefId) -> DefKey { // Note: loading the def-key (or def-path) for a def-id is not // a *read* of its metadata. This is because the def-id is // really just an interned shorthand for a def-path, which is the // canonical name for an item. // // self.dep_graph.read(DepNode::MetaData(def)); self.get_crate_data(def.krate).def_key(def.index) } fn def_path(&self, def: DefId) -> DefPath { // See `Note` above in `def_key()` for why this read is // commented out: // // self.dep_graph.read(DepNode::MetaData(def)); self.get_crate_data(def.krate).def_path(def.index) } fn def_path_hash(&self, def: DefId) -> DefPathHash { self.get_crate_data(def.krate).def_path_hash(def.index) } fn def_path_table(&self, cnum: CrateNum) -> Lrc<DefPathTable> { self.get_crate_data(cnum).def_path_table.clone() } fn crates_untracked(&self) -> Vec<CrateNum> { let mut result = vec![]; self.iter_crate_data(|cnum, _| result.push(cnum)); result } fn extern_mod_stmt_cnum_untracked(&self, emod_id: ast::NodeId) -> Option<CrateNum> { self.do_extern_mod_stmt_cnum(emod_id) } fn postorder_cnums_untracked(&self) -> Vec<CrateNum> { self.do_postorder_cnums_untracked() } fn encode_metadata(&self, tcx: TyCtxt<'_>) -> EncodedMetadata { encoder::encode_metadata(tcx) } fn metadata_encoding_version(&self) -> &[u8] { schema::METADATA_HEADER } }
38.249135
95
0.593586
76b264b1ed9cb179ec8631517443f07d1f17c318
24,513
use crate::parser::hir::syntax_shape::{ expand_syntax, expression::expand_file_path, parse_single_node, BarePathShape, BarePatternShape, ExpandContext, UnitShape, UnitSyntax, }; use crate::parser::{ hir, hir::{Expression, RawNumber, TokensIterator}, parse::flag::{Flag, FlagKind}, DelimitedNode, Delimiter, FlatShape, TokenNode, Unit, UnspannedToken, }; use crate::prelude::*; use nu_source::Spanned; use std::ops::Deref; #[derive(Debug, Clone)] pub enum UnspannedAtomicToken<'tokens> { Eof { span: Span, }, Error { error: Spanned<ShellError>, }, Number { number: RawNumber, }, Size { number: RawNumber, unit: Spanned<Unit>, }, String { body: Span, }, ItVariable { name: Span, }, Variable { name: Span, }, ExternalCommand { command: Span, }, ExternalWord { text: Span, }, GlobPattern { pattern: Span, }, Word { text: Span, }, #[allow(unused)] Dot { text: Span, }, SquareDelimited { spans: (Span, Span), nodes: &'tokens Vec<TokenNode>, }, ShorthandFlag { name: Span, }, Operator { text: Span, }, Whitespace { text: Span, }, } impl<'tokens> UnspannedAtomicToken<'tokens> { pub fn into_atomic_token(self, span: impl Into<Span>) -> AtomicToken<'tokens> { AtomicToken { unspanned: self, span: span.into(), } } } impl<'tokens> ShellTypeName for UnspannedAtomicToken<'tokens> { fn type_name(&self) -> &'static str { match &self { UnspannedAtomicToken::Eof { .. } => "eof", UnspannedAtomicToken::Error { .. } => "error", UnspannedAtomicToken::Operator { .. } => "operator", UnspannedAtomicToken::ShorthandFlag { .. } => "shorthand flag", UnspannedAtomicToken::Whitespace { .. } => "whitespace", UnspannedAtomicToken::Dot { .. } => "dot", UnspannedAtomicToken::Number { .. } => "number", UnspannedAtomicToken::Size { .. } => "size", UnspannedAtomicToken::String { .. } => "string", UnspannedAtomicToken::ItVariable { .. } => "$it", UnspannedAtomicToken::Variable { .. } => "variable", UnspannedAtomicToken::ExternalCommand { .. } => "external command", UnspannedAtomicToken::ExternalWord { .. 
} => "external word", UnspannedAtomicToken::GlobPattern { .. } => "file pattern", UnspannedAtomicToken::Word { .. } => "word", UnspannedAtomicToken::SquareDelimited { .. } => "array literal", } } } #[derive(Debug, Clone)] pub struct AtomicToken<'tokens> { pub unspanned: UnspannedAtomicToken<'tokens>, pub span: Span, } impl<'tokens> Deref for AtomicToken<'tokens> { type Target = UnspannedAtomicToken<'tokens>; fn deref(&self) -> &UnspannedAtomicToken<'tokens> { &self.unspanned } } impl<'tokens> AtomicToken<'tokens> { pub fn into_hir( &self, context: &ExpandContext, expected: &'static str, ) -> Result<hir::Expression, ParseError> { Ok(match &self.unspanned { UnspannedAtomicToken::Eof { .. } => { return Err(ParseError::mismatch( expected, "eof atomic token".spanned(self.span), )) } UnspannedAtomicToken::Error { .. } => { return Err(ParseError::mismatch( expected, "eof atomic token".spanned(self.span), )) } UnspannedAtomicToken::Operator { .. } => { return Err(ParseError::mismatch( expected, "operator".spanned(self.span), )) } UnspannedAtomicToken::ShorthandFlag { .. } => { return Err(ParseError::mismatch( expected, "shorthand flag".spanned(self.span), )) } UnspannedAtomicToken::Whitespace { .. } => { return Err(ParseError::mismatch( expected, "whitespace".spanned(self.span), )) } UnspannedAtomicToken::Dot { .. 
} => { return Err(ParseError::mismatch(expected, "dot".spanned(self.span))) } UnspannedAtomicToken::Number { number } => { Expression::number(number.to_number(context.source), self.span) } UnspannedAtomicToken::Size { number, unit } => { Expression::size(number.to_number(context.source), **unit, self.span) } UnspannedAtomicToken::String { body } => Expression::string(*body, self.span), UnspannedAtomicToken::ItVariable { name } => Expression::it_variable(*name, self.span), UnspannedAtomicToken::Variable { name } => Expression::variable(*name, self.span), UnspannedAtomicToken::ExternalCommand { command } => { Expression::external_command(*command, self.span) } UnspannedAtomicToken::ExternalWord { text } => Expression::string(*text, self.span), UnspannedAtomicToken::GlobPattern { pattern } => Expression::pattern( expand_file_path(pattern.slice(context.source), context).to_string_lossy(), self.span, ), UnspannedAtomicToken::Word { text } => Expression::string(*text, *text), UnspannedAtomicToken::SquareDelimited { .. } => unimplemented!("into_hir"), }) } #[cfg(not(coloring_in_tokens))] pub fn spanned_type_name(&self) -> Spanned<&'static str> { match &self.unspanned { UnspannedAtomicToken::Eof { .. } => "eof", UnspannedAtomicToken::Error { .. } => "error", UnspannedAtomicToken::Operator { .. } => "operator", UnspannedAtomicToken::ShorthandFlag { .. } => "shorthand flag", UnspannedAtomicToken::Whitespace { .. } => "whitespace", UnspannedAtomicToken::Dot { .. } => "dot", UnspannedAtomicToken::Number { .. } => "number", UnspannedAtomicToken::Size { .. } => "size", UnspannedAtomicToken::String { .. } => "string", UnspannedAtomicToken::ItVariable { .. } => "$it", UnspannedAtomicToken::Variable { .. } => "variable", UnspannedAtomicToken::ExternalCommand { .. } => "external command", UnspannedAtomicToken::ExternalWord { .. } => "external word", UnspannedAtomicToken::GlobPattern { .. } => "file pattern", UnspannedAtomicToken::Word { .. 
} => "word", UnspannedAtomicToken::SquareDelimited { .. } => "array literal", } .spanned(self.span) } pub(crate) fn color_tokens(&self, shapes: &mut Vec<Spanned<FlatShape>>) { match &self.unspanned { UnspannedAtomicToken::Eof { .. } => {} UnspannedAtomicToken::Error { .. } => { return shapes.push(FlatShape::Error.spanned(self.span)) } UnspannedAtomicToken::Operator { .. } => { return shapes.push(FlatShape::Operator.spanned(self.span)); } UnspannedAtomicToken::ShorthandFlag { .. } => { return shapes.push(FlatShape::ShorthandFlag.spanned(self.span)); } UnspannedAtomicToken::Whitespace { .. } => { return shapes.push(FlatShape::Whitespace.spanned(self.span)); } UnspannedAtomicToken::Number { number: RawNumber::Decimal(_), } => { return shapes.push(FlatShape::Decimal.spanned(self.span)); } UnspannedAtomicToken::Number { number: RawNumber::Int(_), } => { return shapes.push(FlatShape::Int.spanned(self.span)); } UnspannedAtomicToken::Size { number, unit } => { return shapes.push( FlatShape::Size { number: number.span(), unit: unit.span, } .spanned(self.span), ); } UnspannedAtomicToken::String { .. } => { return shapes.push(FlatShape::String.spanned(self.span)) } UnspannedAtomicToken::ItVariable { .. } => { return shapes.push(FlatShape::ItVariable.spanned(self.span)) } UnspannedAtomicToken::Variable { .. } => { return shapes.push(FlatShape::Variable.spanned(self.span)) } UnspannedAtomicToken::ExternalCommand { .. } => { return shapes.push(FlatShape::ExternalCommand.spanned(self.span)); } UnspannedAtomicToken::ExternalWord { .. } => { return shapes.push(FlatShape::ExternalWord.spanned(self.span)) } UnspannedAtomicToken::GlobPattern { .. } => { return shapes.push(FlatShape::GlobPattern.spanned(self.span)) } UnspannedAtomicToken::Word { .. 
} => { return shapes.push(FlatShape::Word.spanned(self.span)) } _ => return shapes.push(FlatShape::Error.spanned(self.span)), } } } impl PrettyDebugWithSource for AtomicToken<'_> { fn pretty_debug(&self, source: &str) -> DebugDocBuilder { fn atom(value: DebugDocBuilder) -> DebugDocBuilder { b::delimit("(", b::kind("atom") + b::space() + value.group(), ")").group() } fn atom_kind(kind: impl std::fmt::Display, value: DebugDocBuilder) -> DebugDocBuilder { b::delimit( "(", (b::kind("atom") + b::delimit("[", b::kind(kind), "]")).group() + b::space() + value.group(), ")", ) .group() } atom(match &self.unspanned { UnspannedAtomicToken::Eof { .. } => b::description("eof"), UnspannedAtomicToken::Error { .. } => b::error("error"), UnspannedAtomicToken::Number { number } => number.pretty_debug(source), UnspannedAtomicToken::Size { number, unit } => { number.pretty_debug(source) + b::keyword(unit.span.slice(source)) } UnspannedAtomicToken::String { body } => b::primitive(body.slice(source)), UnspannedAtomicToken::ItVariable { .. } | UnspannedAtomicToken::Variable { .. } => { b::keyword(self.span.slice(source)) } UnspannedAtomicToken::ExternalCommand { .. } => b::primitive(self.span.slice(source)), UnspannedAtomicToken::ExternalWord { text } => { atom_kind("external word", b::primitive(text.slice(source))) } UnspannedAtomicToken::GlobPattern { pattern } => { atom_kind("pattern", b::primitive(pattern.slice(source))) } UnspannedAtomicToken::Word { text } => { atom_kind("word", b::primitive(text.slice(source))) } UnspannedAtomicToken::SquareDelimited { nodes, .. } => b::delimit( "[", b::intersperse_with_source(nodes.iter(), b::space(), source), "]", ), UnspannedAtomicToken::ShorthandFlag { name } => { atom_kind("shorthand flag", b::key(name.slice(source))) } UnspannedAtomicToken::Dot { .. 
} => atom(b::kind("dot")), UnspannedAtomicToken::Operator { text } => { atom_kind("operator", b::keyword(text.slice(source))) } UnspannedAtomicToken::Whitespace { text } => atom_kind( "whitespace", b::description(format!("{:?}", text.slice(source))), ), }) } } #[derive(Debug)] pub enum WhitespaceHandling { #[allow(unused)] AllowWhitespace, RejectWhitespace, } #[derive(Debug)] pub struct ExpansionRule { pub(crate) allow_external_command: bool, pub(crate) allow_external_word: bool, pub(crate) allow_operator: bool, pub(crate) allow_eof: bool, pub(crate) treat_size_as_word: bool, pub(crate) separate_members: bool, pub(crate) commit_errors: bool, pub(crate) whitespace: WhitespaceHandling, } impl ExpansionRule { pub fn new() -> ExpansionRule { ExpansionRule { allow_external_command: false, allow_external_word: false, allow_operator: false, allow_eof: false, treat_size_as_word: false, separate_members: false, commit_errors: false, whitespace: WhitespaceHandling::RejectWhitespace, } } /// The intent of permissive mode is to return an atomic token for every possible /// input token. This is important for error-correcting parsing, such as the /// syntax highlighter. 
pub fn permissive() -> ExpansionRule { ExpansionRule { allow_external_command: true, allow_external_word: true, allow_operator: true, allow_eof: true, separate_members: false, treat_size_as_word: false, commit_errors: true, whitespace: WhitespaceHandling::AllowWhitespace, } } #[allow(unused)] pub fn allow_external_command(mut self) -> ExpansionRule { self.allow_external_command = true; self } #[allow(unused)] pub fn allow_operator(mut self) -> ExpansionRule { self.allow_operator = true; self } #[allow(unused)] pub fn no_operator(mut self) -> ExpansionRule { self.allow_operator = false; self } #[allow(unused)] pub fn no_external_command(mut self) -> ExpansionRule { self.allow_external_command = false; self } #[allow(unused)] pub fn allow_external_word(mut self) -> ExpansionRule { self.allow_external_word = true; self } #[allow(unused)] pub fn no_external_word(mut self) -> ExpansionRule { self.allow_external_word = false; self } #[allow(unused)] pub fn treat_size_as_word(mut self) -> ExpansionRule { self.treat_size_as_word = true; self } #[allow(unused)] pub fn separate_members(mut self) -> ExpansionRule { self.separate_members = true; self } #[allow(unused)] pub fn no_separate_members(mut self) -> ExpansionRule { self.separate_members = false; self } #[allow(unused)] pub fn commit_errors(mut self) -> ExpansionRule { self.commit_errors = true; self } #[allow(unused)] pub fn allow_whitespace(mut self) -> ExpansionRule { self.whitespace = WhitespaceHandling::AllowWhitespace; self } #[allow(unused)] pub fn reject_whitespace(mut self) -> ExpansionRule { self.whitespace = WhitespaceHandling::RejectWhitespace; self } } pub fn expand_atom<'me, 'content>( token_nodes: &'me mut TokensIterator<'content>, expected: &'static str, context: &ExpandContext, rule: ExpansionRule, ) -> Result<AtomicToken<'content>, ParseError> { token_nodes.with_expand_tracer(|_, tracer| tracer.start("atom")); let result = expand_atom_inner(token_nodes, expected, context, rule); 
token_nodes.with_expand_tracer(|_, tracer| match &result { Ok(result) => { tracer.add_result(result.clone()); tracer.success(); } Err(err) => tracer.failed(err), }); result } /// If the caller of expand_atom throws away the returned atomic token returned, it /// must use a checkpoint to roll it back. fn expand_atom_inner<'me, 'content>( token_nodes: &'me mut TokensIterator<'content>, expected: &'static str, context: &ExpandContext, rule: ExpansionRule, ) -> Result<AtomicToken<'content>, ParseError> { if token_nodes.at_end() { match rule.allow_eof { true => { return Ok(UnspannedAtomicToken::Eof { span: Span::unknown(), } .into_atomic_token(Span::unknown())) } false => return Err(ParseError::unexpected_eof("anything", Span::unknown())), } } // First, we'll need to handle the situation where more than one token corresponds // to a single atomic token // If treat_size_as_word, don't try to parse the head of the token stream // as a size. match rule.treat_size_as_word { true => {} false => match expand_syntax(&UnitShape, token_nodes, context) { // If the head of the stream isn't a valid unit, we'll try to parse // it again next as a word Err(_) => {} // But if it was a valid unit, we're done here Ok(UnitSyntax { unit: (number, unit), span, }) => return Ok(UnspannedAtomicToken::Size { number, unit }.into_atomic_token(span)), }, } match rule.separate_members { false => {} true => { let mut next = token_nodes.peek_any(); match next.node { Some(token) if token.is_word() => { next.commit(); return Ok(UnspannedAtomicToken::Word { text: token.span() } .into_atomic_token(token.span())); } Some(token) if token.is_int() => { next.commit(); return Ok(UnspannedAtomicToken::Number { number: RawNumber::Int(token.span()), } .into_atomic_token(token.span())); } _ => {} } } } // Try to parse the head of the stream as a bare path. A bare path includes // words as well as `.`s, connected together without whitespace. 
match expand_syntax(&BarePathShape, token_nodes, context) { // If we didn't find a bare path Err(_) => {} Ok(span) => { let next = token_nodes.peek_any(); match next.node { Some(token) if token.is_pattern() => { // if the very next token is a pattern, we're looking at a glob, not a // word, and we should try to parse it as a glob next } _ => return Ok(UnspannedAtomicToken::Word { text: span }.into_atomic_token(span)), } } } // Try to parse the head of the stream as a pattern. A pattern includes // words, words with `*` as well as `.`s, connected together without whitespace. match expand_syntax(&BarePatternShape, token_nodes, context) { // If we didn't find a bare path Err(_) => {} Ok(span) => { return Ok(UnspannedAtomicToken::GlobPattern { pattern: span }.into_atomic_token(span)) } } // The next token corresponds to at most one atomic token // We need to `peek` because `parse_single_node` doesn't cover all of the // cases that `expand_atom` covers. We should probably collapse the two // if possible. let peeked = token_nodes.peek_any().not_eof(expected)?; match peeked.node { TokenNode::Token(_) => { // handle this next } TokenNode::Error(error) => { peeked.commit(); return Ok(UnspannedAtomicToken::Error { error: error.clone(), } .into_atomic_token(error.span)); } // [ ... 
] TokenNode::Delimited(Spanned { item: DelimitedNode { delimiter: Delimiter::Square, spans, children, }, span, }) => { peeked.commit(); let span = *span; return Ok(UnspannedAtomicToken::SquareDelimited { nodes: children, spans: *spans, } .into_atomic_token(span)); } TokenNode::Flag(Flag { kind: FlagKind::Shorthand, name, span, }) => { peeked.commit(); return Ok(UnspannedAtomicToken::ShorthandFlag { name: *name }.into_atomic_token(*span)); } TokenNode::Flag(Flag { kind: FlagKind::Longhand, name, span, }) => { peeked.commit(); return Ok(UnspannedAtomicToken::ShorthandFlag { name: *name }.into_atomic_token(*span)); } // If we see whitespace, process the whitespace according to the whitespace // handling rules TokenNode::Whitespace(span) => match rule.whitespace { // if whitespace is allowed, return a whitespace token WhitespaceHandling::AllowWhitespace => { peeked.commit(); return Ok( UnspannedAtomicToken::Whitespace { text: *span }.into_atomic_token(*span) ); } // if whitespace is disallowed, return an error WhitespaceHandling::RejectWhitespace => { return Err(ParseError::mismatch(expected, "whitespace".spanned(*span))) } }, other => { let span = peeked.node.span(); peeked.commit(); return Ok(UnspannedAtomicToken::Error { error: ShellError::type_error("token", other.type_name().spanned(span)) .spanned(span), } .into_atomic_token(span)); } } parse_single_node(token_nodes, expected, |token, token_span, err| { Ok(match token { // First, the error cases. 
Each error case corresponds to a expansion rule // flag that can be used to allow the case // rule.allow_operator UnspannedToken::Operator(_) if !rule.allow_operator => return Err(err.error()), // rule.allow_external_command UnspannedToken::ExternalCommand(_) if !rule.allow_external_command => { return Err(ParseError::mismatch( expected, token.type_name().spanned(token_span), )) } // rule.allow_external_word UnspannedToken::ExternalWord if !rule.allow_external_word => { return Err(ParseError::mismatch( expected, "external word".spanned(token_span), )) } UnspannedToken::Number(number) => { UnspannedAtomicToken::Number { number }.into_atomic_token(token_span) } UnspannedToken::Operator(_) => { UnspannedAtomicToken::Operator { text: token_span }.into_atomic_token(token_span) } UnspannedToken::String(body) => { UnspannedAtomicToken::String { body }.into_atomic_token(token_span) } UnspannedToken::Variable(name) if name.slice(context.source) == "it" => { UnspannedAtomicToken::ItVariable { name }.into_atomic_token(token_span) } UnspannedToken::Variable(name) => { UnspannedAtomicToken::Variable { name }.into_atomic_token(token_span) } UnspannedToken::ExternalCommand(command) => { UnspannedAtomicToken::ExternalCommand { command }.into_atomic_token(token_span) } UnspannedToken::ExternalWord => UnspannedAtomicToken::ExternalWord { text: token_span } .into_atomic_token(token_span), UnspannedToken::GlobPattern => UnspannedAtomicToken::GlobPattern { pattern: token_span, } .into_atomic_token(token_span), UnspannedToken::Bare => { UnspannedAtomicToken::Word { text: token_span }.into_atomic_token(token_span) } }) }) }
35.270504
100
0.542977
bbf43145bd030f53746ad28212cb7ff00027f004
228
#[cfg_attr(feature = "stable", proc_macro_hack::proc_macro_hack)] pub use include_flate_codegen::deflate_file; #[cfg_attr(feature = "stable", proc_macro_hack::proc_macro_hack)] pub use include_flate_codegen::deflate_utf8_file;
38
65
0.811404
eb2ab37e36223cbc4929f56627b8fa061197632b
190
mod prelude; mod input; mod interruptible; mod output; pub use input::*; pub use interruptible::*; pub use output::*; pub use prelude::IntoActionStream; pub use prelude::IntoOutputStream;
15.833333
34
0.752632
9c6c8ec4e21669cb7616d43d2b6d3ee25de74b32
5,802
#![cfg(not(feature = "no_module"))] use rhai::{module_resolvers, Engine, EvalAltResult, Module, Scope, INT}; #[test] fn test_module() { let mut module = Module::new(); module.set_var("answer", 42 as INT); assert!(module.contains_var("answer")); assert_eq!(module.get_var_value::<INT>("answer").unwrap(), 42); } #[test] fn test_module_sub_module() -> Result<(), Box<EvalAltResult>> { let mut module = Module::new(); let mut sub_module = Module::new(); let mut sub_module2 = Module::new(); sub_module2.set_var("answer", 41 as INT); let hash_inc = sub_module2.set_fn_1("inc", |x: INT| Ok(x + 1)); sub_module.set_sub_module("universe", sub_module2); module.set_sub_module("life", sub_module); assert!(module.contains_sub_module("life")); let m = module.get_sub_module("life").unwrap(); assert!(m.contains_sub_module("universe")); let m2 = m.get_sub_module("universe").unwrap(); assert!(m2.contains_var("answer")); assert!(m2.contains_fn(hash_inc)); assert_eq!(m2.get_var_value::<INT>("answer").unwrap(), 41); let engine = Engine::new(); let mut scope = Scope::new(); scope.push_module("question", module); assert_eq!( engine.eval_expression_with_scope::<INT>( &mut scope, "question::life::universe::answer + 1" )?, 42 ); assert_eq!( engine.eval_expression_with_scope::<INT>( &mut scope, "question::life::universe::inc(question::life::universe::answer)" )?, 42 ); Ok(()) } #[test] fn test_module_resolver() -> Result<(), Box<EvalAltResult>> { let mut resolver = module_resolvers::StaticModuleResolver::new(); let mut module = Module::new(); module.set_var("answer", 42 as INT); resolver.insert("hello".to_string(), module); let mut engine = Engine::new(); engine.set_module_resolver(Some(resolver)); assert_eq!( engine.eval::<INT>( r#" import "hello" as h1; import "hello" as h2; h2::answer "# )?, 42 ); engine.set_max_modules(5); assert!(matches!( *engine .eval::<INT>( r#" let x = 0; for x in range(0, 10) { import "hello" as h; x += h::answer; } x "# ) .expect_err("should error"), 
EvalAltResult::ErrorTooManyModules(_) )); #[cfg(not(feature = "no_function"))] assert!(matches!( *engine .eval::<INT>( r#" let x = 0; fn foo() { import "hello" as h; x += h::answer; } for x in range(0, 10) { foo(); } x "# ) .expect_err("should error"), EvalAltResult::ErrorInFunctionCall(fn_name, _, _) if fn_name == "foo" )); engine.set_max_modules(0); #[cfg(not(feature = "no_function"))] engine.eval::<()>( r#" fn foo() { import "hello" as h; } for x in range(0, 10) { foo(); } "#, )?; Ok(()) } #[test] #[cfg(not(feature = "no_function"))] fn test_module_from_ast() -> Result<(), Box<EvalAltResult>> { let mut engine = Engine::new(); let mut resolver = rhai::module_resolvers::StaticModuleResolver::new(); let mut sub_module = Module::new(); sub_module.set_var("foo", true); resolver.insert("another module".to_string(), sub_module); engine.set_module_resolver(Some(resolver)); let ast = engine.compile( r#" // Functions become module functions fn calc(x) { x + 1 } fn add_len(x, y) { x + len(y) } private fn hidden() { throw "you shouldn't see me!"; } // Imported modules become sub-modules import "another module" as extra; // Variables defined at global level become module variables const x = 123; let foo = 41; let hello; // Final variable values become constant module variable values foo = calc(foo); hello = "hello, " + foo + " worlds!"; export x as abc, foo, hello, extra as foobar; "#, )?; let module = Module::eval_ast_as_new(Scope::new(), &ast, &engine)?; let mut scope = Scope::new(); scope.push_module("testing", module); assert_eq!( engine.eval_expression_with_scope::<INT>(&mut scope, "testing::abc")?, 123 ); assert_eq!( engine.eval_expression_with_scope::<INT>(&mut scope, "testing::foo")?, 42 ); assert!(engine.eval_expression_with_scope::<bool>(&mut scope, "testing::foobar::foo")?); assert_eq!( engine.eval_expression_with_scope::<String>(&mut scope, "testing::hello")?, "hello, 42 worlds!" 
); assert_eq!( engine.eval_expression_with_scope::<INT>(&mut scope, "testing::calc(999)")?, 1000 ); assert_eq!( engine.eval_expression_with_scope::<INT>( &mut scope, "testing::add_len(testing::foo, testing::hello)" )?, 59 ); assert!(matches!( *engine .eval_expression_with_scope::<()>(&mut scope, "testing::hidden()") .expect_err("should error"), EvalAltResult::ErrorFunctionNotFound(fn_name, _) if fn_name == "hidden" )); Ok(()) }
25.559471
92
0.518097
9bca682739194e8a6c5915584538d511fd7e8e48
123
pub mod user; pub mod user_token; pub mod response; pub mod common; pub mod app_user; pub mod chat_room; pub mod messages;
15.375
19
0.772358
5679756e9e174469bbfc319d5bcad84c6afbdefa
18,260
// Copyright 2020 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Grin server implementation, glues the different parts of the system (mostly //! the peer-to-peer server, the blockchain and the transaction pool) and acts //! as a facade. use std::fs::File; use std::io::prelude::*; use std::path::Path; use std::sync::{mpsc, Arc}; use std::{convert::TryInto, fs}; use std::{ thread::{self, JoinHandle}, time::{self, Duration}, }; use fs2::FileExt; use walkdir::WalkDir; use crate::api; use crate::api::TLSConfig; use crate::chain::{self, SyncState, SyncStatus}; use crate::common::adapters::{ ChainToPoolAndNetAdapter, NetToChainAdapter, PoolToChainAdapter, PoolToNetAdapter, }; use crate::common::hooks::{init_chain_hooks, init_net_hooks}; use crate::common::stats::{ ChainStats, DiffBlock, DiffStats, PeerStats, ServerStateInfo, ServerStats, TxStats, }; use crate::common::types::{Error, ServerConfig, StratumServerConfig}; use crate::core::core::hash::Hashed; use crate::core::core::verifier_cache::LruVerifierCache; use crate::core::ser::ProtocolVersion; use crate::core::{consensus, genesis, global, pow}; use crate::grin::{dandelion_monitor, seed, sync}; use crate::mining::stratumserver; use crate::mining::test_miner::Miner; use crate::p2p; use crate::p2p::types::{Capabilities, PeerAddr}; use crate::pool; use crate::util::file::get_first_line; use crate::util::{RwLock, StopState}; use grin_util::logger::LogEntry; /// Arcified thread-safe TransactionPool 
with type parameters used by server components pub type ServerTxPool = Arc<RwLock<pool::TransactionPool<PoolToChainAdapter, PoolToNetAdapter, LruVerifierCache>>>; /// Arcified thread-safe LruVerifierCache pub type ServerVerifierCache = Arc<RwLock<LruVerifierCache>>; /// Grin server holding internal structures. pub struct Server { /// server config pub config: ServerConfig, /// handle to our network server pub p2p: Arc<p2p::Server>, /// data store access pub chain: Arc<chain::Chain>, /// in-memory transaction pool pub tx_pool: ServerTxPool, /// Shared cache for verification results when /// verifying rangeproof and kernel signatures. verifier_cache: ServerVerifierCache, /// Whether we're currently syncing pub sync_state: Arc<SyncState>, /// To be passed around to collect stats and info state_info: ServerStateInfo, /// Stop flag pub stop_state: Arc<StopState>, /// Maintain a lock_file so we do not run multiple Grin nodes from same dir. lock_file: Arc<File>, connect_thread: Option<JoinHandle<()>>, sync_thread: JoinHandle<()>, dandelion_thread: JoinHandle<()>, } impl Server { /// Instantiates and starts a new server. 
Optionally takes a callback /// for the server to send an ARC copy of itself, to allow another process /// to poll info about the server status pub fn start<F>( config: ServerConfig, logs_rx: Option<mpsc::Receiver<LogEntry>>, mut info_callback: F, ) -> Result<(), Error> where F: FnMut(Server, Option<mpsc::Receiver<LogEntry>>), { let mining_config = config.stratum_mining_config.clone(); let enable_test_miner = config.run_test_miner; let test_miner_wallet_url = config.test_miner_wallet_url.clone(); let serv = Server::new(config)?; if let Some(c) = mining_config { let enable_stratum_server = c.enable_stratum_server; if let Some(s) = enable_stratum_server { if s { { let mut stratum_stats = serv.state_info.stratum_stats.write(); stratum_stats.is_enabled = true; } serv.start_stratum_server(c); } } } if let Some(s) = enable_test_miner { if s { serv.start_test_miner(test_miner_wallet_url, serv.stop_state.clone()); } } info_callback(serv, logs_rx); Ok(()) } // Exclusive (advisory) lock_file to ensure we do not run multiple // instance of grin server from the same dir. // This uses fs2 and should be safe cross-platform unless somebody abuses the file itself. fn one_grin_at_a_time(config: &ServerConfig) -> Result<Arc<File>, Error> { let path = Path::new(&config.db_root); fs::create_dir_all(&path)?; let path = path.join("grin.lock"); let lock_file = fs::OpenOptions::new() .read(true) .write(true) .create(true) .open(&path)?; lock_file.try_lock_exclusive().map_err(|e| { let mut stderr = std::io::stderr(); writeln!( &mut stderr, "Failed to lock {:?} (grin server already running?)", path ) .expect("Could not write to stderr"); e })?; Ok(Arc::new(lock_file)) } /// Instantiates a new server associated with the provided future reactor. pub fn new(config: ServerConfig) -> Result<Server, Error> { // Obtain our lock_file or fail immediately with an error. let lock_file = Server::one_grin_at_a_time(&config)?; // Defaults to None (optional) in config file. 
// This translates to false here. let archive_mode = match config.archive_mode { None => false, Some(b) => b, }; let stop_state = Arc::new(StopState::new()); // Shared cache for verification results. // We cache rangeproof verification and kernel signature verification. let verifier_cache = Arc::new(RwLock::new(LruVerifierCache::new())); let pool_adapter = Arc::new(PoolToChainAdapter::new()); let pool_net_adapter = Arc::new(PoolToNetAdapter::new(config.dandelion_config.clone())); let tx_pool = Arc::new(RwLock::new(pool::TransactionPool::new( config.pool_config.clone(), pool_adapter.clone(), verifier_cache.clone(), pool_net_adapter.clone(), ))); let sync_state = Arc::new(SyncState::new()); let chain_adapter = Arc::new(ChainToPoolAndNetAdapter::new( tx_pool.clone(), init_chain_hooks(&config), )); let genesis = match config.chain_type { global::ChainTypes::AutomatedTesting => pow::mine_genesis_block().unwrap(), global::ChainTypes::UserTesting => pow::mine_genesis_block().unwrap(), global::ChainTypes::Testnet => genesis::genesis_test(), global::ChainTypes::Mainnet => genesis::genesis_main(), }; info!("Starting server, genesis block: {}", genesis.hash()); let shared_chain = Arc::new(chain::Chain::init( config.db_root.clone(), chain_adapter.clone(), genesis.clone(), pow::verify_size, verifier_cache.clone(), archive_mode, )?); pool_adapter.set_chain(shared_chain.clone()); let net_adapter = Arc::new(NetToChainAdapter::new( sync_state.clone(), shared_chain.clone(), tx_pool.clone(), verifier_cache.clone(), config.clone(), init_net_hooks(&config), )); // Use our default capabilities here. // We will advertize these to our peers during hand/shake. let capabilities = Capabilities::default(); debug!("Capabilities: {:?}", capabilities); let p2p_server = Arc::new(p2p::Server::new( &config.db_root, capabilities, config.p2p_config.clone(), net_adapter.clone(), genesis.hash(), stop_state.clone(), )?); // Initialize various adapters with our dynamic set of connected peers. 
chain_adapter.init(p2p_server.peers.clone()); pool_net_adapter.init(p2p_server.peers.clone()); net_adapter.init(p2p_server.peers.clone()); let mut connect_thread = None; if config.p2p_config.seeding_type != p2p::Seeding::Programmatic { let seeder = match config.p2p_config.seeding_type { p2p::Seeding::None => { warn!("No seed configured, will stay solo until connected to"); seed::predefined_seeds(vec![]) } p2p::Seeding::List => match &config.p2p_config.seeds { Some(seeds) => seed::predefined_seeds(seeds.peers.clone()), None => { return Err(Error::Configuration( "Seeds must be configured for seeding type List".to_owned(), )); } }, p2p::Seeding::DNSSeed => seed::default_dns_seeds(), _ => unreachable!(), }; let preferred_peers = match &config.p2p_config.peers_preferred { Some(addrs) => addrs.peers.clone(), None => vec![], }; connect_thread = Some(seed::connect_and_monitor( p2p_server.clone(), seeder, &preferred_peers, stop_state.clone(), )?); } // Defaults to None (optional) in config file. // This translates to false here so we do not skip by default. 
let skip_sync_wait = config.skip_sync_wait.unwrap_or(false); sync_state.update(SyncStatus::AwaitingPeers(!skip_sync_wait)); let sync_thread = sync::run_sync( sync_state.clone(), p2p_server.peers.clone(), shared_chain.clone(), stop_state.clone(), )?; let p2p_inner = p2p_server.clone(); let _ = thread::Builder::new() .name("p2p-server".to_string()) .spawn(move || { if let Err(e) = p2p_inner.listen() { error!("P2P server failed with erorr: {:?}", e); } })?; info!("Starting rest apis at: {}", &config.api_http_addr); let api_secret = get_first_line(config.api_secret_path.clone()); let foreign_api_secret = get_first_line(config.foreign_api_secret_path.clone()); let tls_conf = match config.tls_certificate_file.clone() { None => None, Some(file) => { let key = match config.tls_certificate_key.clone() { Some(k) => k, None => { let msg = "Private key for certificate is not set".to_string(); return Err(Error::ArgumentError(msg)); } }; Some(TLSConfig::new(file, key)) } }; // TODO fix API shutdown and join this thread api::node_apis( &config.api_http_addr, shared_chain.clone(), tx_pool.clone(), p2p_server.peers.clone(), sync_state.clone(), api_secret, foreign_api_secret, tls_conf, )?; info!("Starting dandelion monitor: {}", &config.api_http_addr); let dandelion_thread = dandelion_monitor::monitor_transactions( config.dandelion_config.clone(), tx_pool.clone(), pool_net_adapter, verifier_cache.clone(), stop_state.clone(), )?; warn!("Grin server started."); Ok(Server { config, p2p: p2p_server, chain: shared_chain, tx_pool, verifier_cache, sync_state, state_info: ServerStateInfo { ..Default::default() }, stop_state, lock_file, connect_thread, sync_thread, dandelion_thread, }) } /// Asks the server to connect to a peer at the provided network address. 
pub fn connect_peer(&self, addr: PeerAddr) -> Result<(), Error> { self.p2p.connect(addr)?; Ok(()) } /// Ping all peers, mostly useful for tests to have connected peers share /// their heights pub fn ping_peers(&self) -> Result<(), Error> { let head = self.chain.head()?; self.p2p.peers.check_all(head.total_difficulty, head.height); Ok(()) } /// Number of peers pub fn peer_count(&self) -> u32 { self.p2p .peers .iter() .connected() .count() .try_into() .unwrap() } /// Start a minimal "stratum" mining service on a separate thread pub fn start_stratum_server(&self, config: StratumServerConfig) { let proof_size = global::proofsize(); let sync_state = self.sync_state.clone(); let mut stratum_server = stratumserver::StratumServer::new( config, self.chain.clone(), self.tx_pool.clone(), self.verifier_cache.clone(), self.state_info.stratum_stats.clone(), ); let _ = thread::Builder::new() .name("stratum_server".to_string()) .spawn(move || { stratum_server.run_loop(proof_size, sync_state); }); } /// Start mining for blocks internally on a separate thread. Relies on /// internal miner, and should only be used for automated testing. 
Burns /// reward if wallet_listener_url is 'None' pub fn start_test_miner( &self, wallet_listener_url: Option<String>, stop_state: Arc<StopState>, ) { info!("start_test_miner - start",); let sync_state = self.sync_state.clone(); let config_wallet_url = match wallet_listener_url.clone() { Some(u) => u, None => String::from("http://127.0.0.1:13415"), }; let config = StratumServerConfig { attempt_time_per_block: 60, burn_reward: false, enable_stratum_server: None, stratum_server_addr: None, wallet_listener_url: config_wallet_url, minimum_share_difficulty: 1, }; let mut miner = Miner::new( config, self.chain.clone(), self.tx_pool.clone(), self.verifier_cache.clone(), stop_state, sync_state, ); miner.set_debug_output_id(format!("Port {}", self.config.p2p_config.port)); let _ = thread::Builder::new() .name("test_miner".to_string()) .spawn(move || miner.run_loop(wallet_listener_url)); } /// The chain head pub fn head(&self) -> Result<chain::Tip, Error> { self.chain.head().map_err(|e| e.into()) } /// The head of the block header chain pub fn header_head(&self) -> Result<chain::Tip, Error> { self.chain.header_head().map_err(|e| e.into()) } /// The p2p layer protocol version for this node. pub fn protocol_version() -> ProtocolVersion { ProtocolVersion::local() } /// Returns a set of stats about this server. This and the ServerStats /// structure /// can be updated over time to include any information needed by tests or /// other consumers pub fn get_server_stats(&self) -> Result<ServerStats, Error> { let stratum_stats = self.state_info.stratum_stats.read().clone(); // Fill out stats on our current difficulty calculation // TODO: check the overhead of calculating this again isn't too much // could return it from next_difficulty, but would rather keep consensus // code clean. This may be handy for testing but not really needed // for release let diff_stats = { let last_blocks: Vec<consensus::HeaderInfo> = global::difficulty_data_to_vector(self.chain.difficulty_iter()?) 
.into_iter() .collect(); let tip_height = self.head()?.height as i64; let mut height = tip_height as i64 - last_blocks.len() as i64 + 1; let diff_entries: Vec<DiffBlock> = last_blocks .windows(2) .map(|pair| { let prev = &pair[0]; let next = &pair[1]; height += 1; DiffBlock { block_height: height, block_hash: next.block_hash, difficulty: next.difficulty.to_num(), time: next.timestamp, duration: next.timestamp - prev.timestamp, secondary_scaling: next.secondary_scaling, is_secondary: next.is_secondary, } }) .collect(); let block_time_sum = diff_entries.iter().fold(0, |sum, t| sum + t.duration); let block_diff_sum = diff_entries.iter().fold(0, |sum, d| sum + d.difficulty); DiffStats { height: height as u64, last_blocks: diff_entries, average_block_time: block_time_sum / (consensus::DMA_WINDOW - 1), average_difficulty: block_diff_sum / (consensus::DMA_WINDOW - 1), window_size: consensus::DMA_WINDOW, } }; let peer_stats = self .p2p .peers .iter() .connected() .into_iter() .map(|p| PeerStats::from_peer(&p)) .collect(); // Updating TUI stats should not block any other processing so only attempt to // acquire various read locks with a timeout. 
let read_timeout = Duration::from_millis(500); let tx_stats = self.tx_pool.try_read_for(read_timeout).map(|pool| TxStats { tx_pool_size: pool.txpool.size(), tx_pool_kernels: pool.txpool.kernel_count(), stem_pool_size: pool.stempool.size(), stem_pool_kernels: pool.stempool.kernel_count(), }); let head = self.chain.head_header()?; let head_stats = ChainStats { latest_timestamp: head.timestamp, height: head.height, last_block_h: head.hash(), total_difficulty: head.total_difficulty(), }; let header_head = self.chain.header_head()?; let header = self.chain.get_block_header(&header_head.hash())?; let header_stats = ChainStats { latest_timestamp: header.timestamp, height: header.height, last_block_h: header.hash(), total_difficulty: header.total_difficulty(), }; let disk_usage_bytes = WalkDir::new(&self.config.db_root) .min_depth(1) .max_depth(3) .into_iter() .filter_map(|entry| entry.ok()) .filter_map(|entry| entry.metadata().ok()) .filter(|metadata| metadata.is_file()) .fold(0, |acc, m| acc + m.len()); let disk_usage_gb = format!("{:.*}", 3, (disk_usage_bytes as f64 / 1_000_000_000_f64)); Ok(ServerStats { peer_count: self.peer_count(), chain_stats: head_stats, header_stats: header_stats, sync_status: self.sync_state.status(), disk_usage_gb: disk_usage_gb, stratum_stats: stratum_stats, peer_stats: peer_stats, diff_stats: diff_stats, tx_stats: tx_stats, }) } /// Stop the server. 
pub fn stop(self) { { self.sync_state.update(SyncStatus::Shutdown); self.stop_state.stop(); if let Some(connect_thread) = self.connect_thread { match connect_thread.join() { Err(e) => error!("failed to join to connect_and_monitor thread: {:?}", e), Ok(_) => info!("connect_and_monitor thread stopped"), } } else { info!("No active connect_and_monitor thread") } match self.sync_thread.join() { Err(e) => error!("failed to join to sync thread: {:?}", e), Ok(_) => info!("sync thread stopped"), } match self.dandelion_thread.join() { Err(e) => error!("failed to join to dandelion_monitor thread: {:?}", e), Ok(_) => info!("dandelion_monitor thread stopped"), } } // this call is blocking and makes sure all peers stop, however // we can't be sure that we stopped a listener blocked on accept, so we don't join the p2p thread self.p2p.stop(); let _ = self.lock_file.unlock(); warn!("Shutdown complete"); } /// Pause the p2p server. pub fn pause(&self) { self.stop_state.pause(); thread::sleep(time::Duration::from_secs(1)); self.p2p.pause(); } /// Resume p2p server. /// TODO - We appear not to resume the p2p server (peer connections) here? pub fn resume(&self) { self.stop_state.resume(); } /// Stops the test miner without stopping the p2p layer pub fn stop_test_miner(&self, stop: Arc<StopState>) { stop.stop(); info!("stop_test_miner - stop",); } }
30.032895
99
0.689978
e2779258ebc2f4797be43a9963c10fa1377d2e07
4,310
// Copyright (c) 2021 Joone Hur <[email protected]> All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. extern crate glfw; use glfw::{Action, Context, Key}; use stretch::{style::*, geometry::Size, geometry::Rect}; use std::sync::mpsc::Receiver; use rust_animation::play::Play; use rust_animation::stage::Stage; use rust_animation::actor::Actor; use rust_animation::actor::LayoutMode; use rust_animation::actor::EasingFunction; fn main() { let mut glfw = glfw::init(glfw::FAIL_ON_ERRORS).unwrap(); glfw.window_hint(glfw::WindowHint::ContextVersion(3, 3)); glfw.window_hint(glfw::WindowHint::OpenGlProfile(glfw::OpenGlProfileHint::Core)); #[cfg(target_os = "macos")] glfw.window_hint(glfw::WindowHint::OpenGlForwardCompat(true)); let (mut window, events) = glfw.create_window(1920, 1080, "Flex UI demo", glfw::WindowMode::Windowed) .expect("Failed to create GLFW window."); window.set_key_polling(true); window.make_current(); window.set_framebuffer_size_polling(true); gl::load_with(|symbol| window.get_proc_address(symbol) as *const _); let mut play = Play::new("Flex UI test".to_string()); play.initialize(); let mut stage = Stage::new("stage".to_string(), 1920, 1080, LayoutMode::Flex, None); stage.set_style(Style { size: Size { width: Dimension::Points(1920.0), height: Dimension::Points(1080.0), }, justify_content: JustifyContent::Center, flex_direction: FlexDirection::Column, align_items: AlignItems::Center, margin: Rect { start: Dimension::Points(1.0), end: Dimension::Points(1.0), top: Dimension::Points(1.0), bottom: Dimension::Points(1.0), ..Default::default() }, ..Default::default() } ); stage.set_visible(true); let justify_content = vec![ JustifyContent::FlexStart, JustifyContent::FlexEnd, JustifyContent::Center, JustifyContent::SpaceBetween, JustifyContent::SpaceAround, JustifyContent::SpaceEvenly, ]; let width = 1500; let height = 108; for i in 0..6 { let actor_name = format!("actor_{}", i+1); let mut 
actor = Actor::new(actor_name.to_string(), width, height, None); actor.set_color(i as f32 / 6.0, i as f32 / 6.0, i as f32 / 6.0); actor.set_style(Style { size: Size { width: Dimension::Points(width as f32), height: Dimension::Points(height as f32), }, justify_content: justify_content[i], align_items: AlignItems::Center, margin: Rect { start: Dimension::Points(1.0), end: Dimension::Points(1.0), top: Dimension::Points(1.0), bottom: Dimension::Points(1.0), ..Default::default() }, padding: Rect { start: Dimension::Points(2.0), end: Dimension::Points(2.0), ..Default::default() }, ..Default::default() } ); for j in 0..10 { let mut sub_actor = Actor::new(format!("actor_{}_{}", i+1, j+1).to_string(), 100, 100, None); sub_actor.set_color(1.0, j as f32 / 10.0, j as f32 / 10.0); actor.add_sub_actor(sub_actor); } stage.add_actor(actor); } stage.set_needs_layout(); play.add_stage(stage); while !window.should_close() { // events process_events(&mut window, &events); play.render(); // glfw: swap buffers and poll IO events (keys pressed/released, mouse moved etc.) window.swap_buffers(); glfw.poll_events(); } } fn process_events(window: &mut glfw::Window, events: &Receiver<(f64, glfw::WindowEvent)>) { for (_, event) in glfw::flush_messages(events) { match event { glfw::WindowEvent::FramebufferSize(width, height) => { // make sure the viewport matches the new window dimensions; note that width and // height will be significantly larger than specified on retina displays. unsafe { gl::Viewport(0, 0, width, height) } } glfw::WindowEvent::Key(Key::Escape, _, Action::Press, _) => window.set_should_close(true), _ => {} } } }
33.153846
96
0.612993
3949152e84896eb40087840a8fbe2a6493d8df13
1,306
// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Various checks //! //! # Note //! //! This API is completely unstable and subject to change. #![crate_name = "rustc_passes"] #![crate_type = "dylib"] #![crate_type = "rlib"] #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", html_favicon_url = "https://doc.rust-lang.org/favicon.ico", html_root_url = "https://doc.rust-lang.org/nightly/")] #![deny(warnings)] #![feature(rustc_diagnostic_macros)] #[macro_use] extern crate rustc; extern crate rustc_const_eval; extern crate rustc_const_math; #[macro_use] extern crate log; #[macro_use] extern crate syntax; extern crate syntax_pos; extern crate rustc_errors as errors; pub mod diagnostics; pub mod ast_validation; pub mod consts; pub mod hir_stats; pub mod loops; pub mod mir_stats; pub mod no_asm; pub mod static_recursion;
27.208333
86
0.733538
ef91b2dacb10c2a5801fd034f80acd7b72a47aff
2,692
use yew::html::Scope; use yew::{html, AppHandle, Component, Context, Html}; pub enum Msg { SetOpposite(Scope<App>), SendToOpposite(String), SetTitle(String), } pub struct App { opposite: Option<Scope<App>>, selector: &'static str, title: String, } impl Component for App { type Message = Msg; type Properties = (); fn create(_ctx: &Context<Self>) -> Self { App { opposite: None, selector: "", title: "Nothing".to_owned(), } } fn update(&mut self, _ctx: &Context<Self>, msg: Self::Message) -> bool { match msg { Msg::SetOpposite(opposite) => { self.opposite = Some(opposite); false } Msg::SendToOpposite(title) => { self.opposite .as_mut() .unwrap() .send_message(Msg::SetTitle(title)); false } Msg::SetTitle(title) => { let send_msg = match title.as_ref() { "Ping" => Some(Msg::SetTitle("Pong".into())), "Pong" => Some(Msg::SetTitle("Pong Done".into())), "Pong Done" => Some(Msg::SetTitle("Ping Done".into())), _ => None, }; if let Some(send_msg) = send_msg { self.opposite.as_mut().unwrap().send_message(send_msg); } self.title = title; true } } } fn view(&self, ctx: &Context<Self>) -> Html { html! 
{ <div> <h3>{ format!("{} received <{}>", self.selector, self.title) }</h3> <button onclick={ctx.link().callback(|_| Msg::SendToOpposite("One".into()))}>{ "One" }</button> <button onclick={ctx.link().callback(|_| Msg::SendToOpposite("Two".into()))}>{ "Two" }</button> <button onclick={ctx.link().callback(|_| Msg::SendToOpposite("Three".into()))}>{ "Three" }</button> <button onclick={ctx.link().callback(|_| Msg::SendToOpposite("Ping".into()))}>{ "Ping" }</button> </div> } } } fn mount_app(selector: &'static str) -> AppHandle<App> { let document = gloo_utils::document(); let element = document.query_selector(selector).unwrap().unwrap(); yew::Renderer::<App>::with_root(element).render() } fn main() { let first_app = mount_app(".first-app"); let second_app = mount_app(".second-app"); first_app.send_message(Msg::SetOpposite(second_app.clone())); second_app.send_message(Msg::SetOpposite(first_app.clone())); }
31.670588
115
0.506686
185cadeeda464801a0c7171b48752c41fc68b682
9,512
//! Trace and batch implementations based on sorted ranges. //! //! The types and type aliases in this module start with either //! //! * `OrdVal`: Collections whose data have the form `(key, val)` where `key` is ordered. //! * `OrdKey`: Collections whose data have the form `key` where `key` is ordered. //! //! Although `OrdVal` is more general than `OrdKey`, the latter has a simpler representation //! and should consume fewer resources (computation and memory) when it applies. // use std::cmp::Ordering; use std::rc::Rc; // use ::Diff; // use lattice::Lattice; use trace::{Batch, BatchReader, Builder, Merger, Cursor, Trace, TraceReader}; use trace::description::Description; use trace::rc_blanket_impls::RcBatchCursor; // use trace::layers::MergeBuilder; use super::spine_fueled::Spine; use super::merge_batcher::MergeBatcher; use timely::progress::nested::product::Product; use timely::progress::timestamp::RootTimestamp; type Node = u32; /// struct GraphSpine<N> where N: Ord+Clone+'static { spine: Spine<Node, N, Product<RootTimestamp, ()>, isize, Rc<GraphBatch<N>>> } impl<N> TraceReader<Node, N, Product<RootTimestamp, ()>, isize> for GraphSpine<N> where N: Ord+Clone+'static, { type Batch = Rc<GraphBatch<N>>; type Cursor = RcBatchCursor<Node, N, Product<RootTimestamp, ()>, isize, GraphBatch<N>>; fn cursor_through(&mut self, upper: &[Product<RootTimestamp,()>]) -> Option<(Self::Cursor, <Self::Cursor as Cursor<Node, N, Product<RootTimestamp,()>, isize>>::Storage)> { let mut batch = Vec::new(); self.spine.map_batches(|b| batch.push(b.clone())); assert!(batch.len() <= 1); if upper == &[] { batch.pop().map(|b| (b.cursor(), b)) } else { None } } fn advance_by(&mut self, frontier: &[Product<RootTimestamp,()>]) { self.spine.advance_by(frontier) } fn advance_frontier(&mut self) -> &[Product<RootTimestamp,()>] { self.spine.advance_frontier() } fn distinguish_since(&mut self, frontier: &[Product<RootTimestamp,()>]) { self.spine.distinguish_since(frontier) } fn 
distinguish_frontier(&mut self) -> &[Product<RootTimestamp,()>] { &self.spine.distinguish_frontier() } fn map_batches<F: FnMut(&Self::Batch)>(&mut self, f: F) { self.spine.map_batches(f) } } // A trace implementation for any key type that can be borrowed from or converted into `Key`. // TODO: Almost all this implementation seems to be generic with respect to the trace and batch types. impl<N> Trace<Node, N, Product<RootTimestamp,()>, isize> for GraphSpine<N> where N: Ord+Clone+'static, { fn new() -> Self { GraphSpine { spine: Spine::<Node, N, Product<RootTimestamp, ()>, isize, Rc<GraphBatch<N>>>::new() } } // Ideally, this method acts as insertion of `batch`, even if we are not yet able to begin // merging the batch. This means it is a good time to perform amortized work proportional // to the size of batch. fn insert(&mut self, batch: Self::Batch) { self.spine.insert(batch) } fn close(&mut self) { self.spine.close() } } /// #[derive(Debug, Abomonation)] pub struct GraphBatch<N> { index: usize, peers: usize, keys: Vec<Node>, nodes: Vec<usize>, edges: Vec<N>, desc: Description<Product<RootTimestamp,()>>, } impl<N> BatchReader<Node, N, Product<RootTimestamp,()>, isize> for GraphBatch<N> where N: Ord+Clone+'static { type Cursor = GraphCursor; fn cursor(&self) -> Self::Cursor { GraphCursor { key: self.index as Node, key_pos: 0, val_pos: 0 } } fn len(&self) -> usize { self.edges.len() } fn description(&self) -> &Description<Product<RootTimestamp,()>> { &self.desc } } impl<N> Batch<Node, N, Product<RootTimestamp,()>, isize> for GraphBatch<N> where N: Ord+Clone+'static { type Batcher = MergeBatcher<Node, N, Product<RootTimestamp,()>, isize, Self>; type Builder = GraphBuilder<N>; type Merger = GraphMerger; fn begin_merge(&self, other: &Self) -> Self::Merger { GraphMerger::new(self, other) } } /// pub struct GraphMerger { } impl<N> Merger<Node, N, Product<RootTimestamp,()>, isize, GraphBatch<N>> for GraphMerger where N: Ord+Clone+'static { fn new(_batch1: &GraphBatch<N>, 
_batch2: &GraphBatch<N>) -> Self { panic!("Cannot merge GraphBatch; they are static"); } fn done(self) -> GraphBatch<N> { panic!("Cannot merge GraphBatch; they are static"); } fn work(&mut self, _source1: &GraphBatch<N>, _source2: &GraphBatch<N>, _frontier: &Option<Vec<Product<RootTimestamp,()>>>, _fuel: &mut usize) { panic!("Cannot merge GraphBatch; they are static"); } } /// A cursor for navigating a single layer. #[derive(Debug)] pub struct GraphCursor { key: Node, key_pos: usize, val_pos: usize, } impl<N> Cursor<Node, N, Product<RootTimestamp,()>, isize> for GraphCursor where N: Ord+Clone { type Storage = GraphBatch<N>; fn key<'a>(&self, storage: &'a Self::Storage) -> &'a Node { &storage.keys[self.key_pos] } fn val<'a>(&self, storage: &'a Self::Storage) -> &'a N { &storage.edges[self.val_pos] } fn map_times<L: FnMut(&Product<RootTimestamp,()>, isize)>(&mut self, _storage: &Self::Storage, mut logic: L) { logic(&Product::new(RootTimestamp, ()), 1); } fn key_valid(&self, storage: &Self::Storage) -> bool { (self.key_pos + 1) < storage.nodes.len() } fn val_valid(&self, storage: &Self::Storage) -> bool { self.val_pos < storage.nodes[self.key_pos + 1] } fn step_key(&mut self, storage: &Self::Storage){ if self.key_valid(storage) { self.key_pos += 1; self.key += storage.peers as Node; } } fn seek_key(&mut self, storage: &Self::Storage, key: &Node) { if self.key_valid(storage) { self.key_pos = (*key as usize) / storage.peers; if self.key_pos + 1 >= storage.nodes.len() { self.key_pos = storage.nodes.len() - 1; } self.val_pos = storage.nodes[self.key_pos]; self.key = (storage.peers * self.key_pos + storage.index) as Node; } } fn step_val(&mut self, storage: &Self::Storage) { if self.val_valid(storage) { self.val_pos += 1; } } fn seek_val(&mut self, storage: &Self::Storage, val: &N) { if self.val_valid(storage) { let lower = self.val_pos; let upper = storage.nodes[self.key_pos + 1]; self.val_pos += advance(&storage.edges[lower .. 
upper], |tuple| tuple < val); } } fn rewind_keys(&mut self, storage: &Self::Storage) { self.key_pos = 0; self.key = storage.index as Node; } fn rewind_vals(&mut self, storage: &Self::Storage) { if self.key_valid(storage) { self.val_pos = storage.nodes[self.key_pos]; } } } /// A builder for creating layers from unsorted update tuples. pub struct GraphBuilder<N: Ord> { index: usize, peers: usize, keys: Vec<Node>, nodes: Vec<usize>, edges: Vec<N>, } impl<N> Builder<Node, N, Product<RootTimestamp,()>, isize, GraphBatch<N>> for GraphBuilder<N> where N: Ord+Clone+'static { fn new() -> Self { Self::with_capacity(0) } fn with_capacity(cap: usize) -> Self { GraphBuilder { index: 0, peers: 1, keys: Vec::new(), nodes: Vec::new(), edges: Vec::with_capacity(cap), } } #[inline] fn push(&mut self, (key, val, _time, _diff): (Node, N, Product<RootTimestamp,()>, isize)) { while self.nodes.len() <= (key as usize) / self.peers { self.keys.push((self.peers * self.nodes.len() + self.index) as Node); self.nodes.push(self.edges.len()); } self.edges.push(val); } #[inline(never)] fn done(mut self, lower: &[Product<RootTimestamp,()>], upper: &[Product<RootTimestamp,()>], since: &[Product<RootTimestamp,()>]) -> GraphBatch<N> { println!("GraphBuilder::done(): {} nodes, {} edges.", self.nodes.len(), self.edges.len()); self.nodes.push(self.edges.len()); GraphBatch { index: self.index, peers: self.peers, keys: self.keys, nodes: self.nodes, edges: self.edges, desc: Description::new(lower, upper, since) } } } /// Reports the number of elements satisfing the predicate. /// /// This methods *relies strongly* on the assumption that the predicate /// stays false once it becomes false, a joint property of the predicate /// and the slice. This allows `advance` to use exponential search to /// count the number of elements in time logarithmic in the result. 
#[inline(never)] pub fn advance<T, F: Fn(&T)->bool>(slice: &[T], function: F) -> usize { // start with no advance let mut index = 0; if index < slice.len() && function(&slice[index]) { // advance in exponentially growing steps. let mut step = 1; while index + step < slice.len() && function(&slice[index + step]) { index += step; step = step << 1; } // advance in exponentially shrinking steps. step = step >> 1; while step > 0 { if index + step < slice.len() && function(&slice[index + step]) { index += step; } step = step >> 1; } index += 1; } index }
34.215827
175
0.60881
0995b6373aa0321828d9ae42136725561dfd2371
778
use crate::components::PathCacheComponent; use crate::indices::EntityId; use serde::{Deserialize, Serialize}; /// Update the path cache #[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct CachePathIntent { pub bot: EntityId, pub cache: PathCacheComponent, } /// Remove the top item from the path cache #[derive(Debug, Clone, Serialize, Deserialize)] pub struct MutPathCacheIntent { pub bot: EntityId, pub action: PathCacheIntentAction, } impl Default for MutPathCacheIntent { fn default() -> Self { MutPathCacheIntent { action: PathCacheIntentAction::Del, bot: EntityId::default(), } } } #[derive(Debug, Clone, Copy, Serialize, Deserialize)] pub enum PathCacheIntentAction { Pop, Del, }
24.3125
56
0.688946
50abb1d720a01fde61d9c85de19d81a8ef2b706a
42,817
use crate::error::Error; use habitat_butterfly::{member::{Health, Member, MemberList, Membership}, rumor::{election::{Election as ElectionRumor, ElectionStatus as ElectionStatusRumor, ElectionUpdate as ElectionUpdateRumor}, service::{Service as ServiceRumor, SysInfo}, service_config::ServiceConfig as ServiceConfigRumor, service_file::ServiceFile as ServiceFileRumor, ConstIdRumor as _, RumorStore}}; use habitat_common::outputln; use habitat_core::{self, package::PackageIdent, service::ServiceGroup}; use serde::{ser::SerializeStruct, Serialize, Serializer}; use std::{borrow::Cow, collections::{BTreeMap, HashMap, HashSet}, fmt, path::Path, result, str::FromStr}; use toml; static LOGKEY: &str = "CE"; pub type MemberId = String; #[derive(Debug, Serialize)] pub struct CensusRing { changed: bool, census_groups: HashMap<ServiceGroup, CensusGroup>, local_member_id: MemberId, last_service_counter: usize, last_election_counter: usize, last_election_update_counter: usize, last_membership_counter: usize, last_service_config_counter: usize, last_service_file_counter: usize, } impl CensusRing { /// Indicates whether the census has changed since the last time /// we looked at rumors. 
pub fn changed(&self) -> bool { self.changed } pub fn new<I>(local_member_id: I) -> Self where I: Into<MemberId> { CensusRing { changed: false, census_groups: HashMap::new(), local_member_id: local_member_id.into(), last_service_counter: 0, last_election_counter: 0, last_election_update_counter: 0, last_membership_counter: 0, last_service_config_counter: 0, last_service_file_counter: 0, } } /// # Locking (see locking.md) /// * `RumorStore::list` (write) /// * `MemberList::entries` (read) #[allow(clippy::too_many_arguments)] pub fn update_from_rumors_rsr_mlr(&mut self, cache_key_path: &Path, service_rumors: &RumorStore<ServiceRumor>, election_rumors: &RumorStore<ElectionRumor>, election_update_rumors: &RumorStore<ElectionUpdateRumor>, member_list: &MemberList, service_config_rumors: &RumorStore<ServiceConfigRumor>, service_file_rumors: &RumorStore<ServiceFileRumor>) { // If ANY new rumor, of any type, has been received, // reconstruct the entire census state to ensure consistency if (service_rumors.get_update_counter() > self.last_service_counter) || (member_list.get_update_counter() > self.last_membership_counter) || (election_rumors.get_update_counter() > self.last_election_counter) || (election_update_rumors.get_update_counter() > self.last_election_update_counter) || (service_config_rumors.get_update_counter() > self.last_service_config_counter) || (service_file_rumors.get_update_counter() > self.last_service_file_counter) { self.changed = true; self.populate_census_rsr_mlr(service_rumors, member_list); self.update_from_election_store_rsr(election_rumors); self.update_from_election_update_store_rsr(election_update_rumors); self.update_from_service_config_rsr(cache_key_path, service_config_rumors); self.update_from_service_files_rsr(cache_key_path, service_file_rumors); // Update our counters to reflect current state. 
self.last_membership_counter = member_list.get_update_counter(); self.last_service_counter = service_rumors.get_update_counter(); self.last_election_counter = election_rumors.get_update_counter(); self.last_election_update_counter = election_update_rumors.get_update_counter(); self.last_service_config_counter = service_config_rumors.get_update_counter(); self.last_service_file_counter = service_file_rumors.get_update_counter(); } else { self.changed = false; } } pub fn census_group_for(&self, sg: &ServiceGroup) -> Option<&CensusGroup> { self.census_groups.get(sg) } pub fn groups(&self) -> Vec<&CensusGroup> { self.census_groups.values().map(|cg| cg).collect() } /// Populates the census from `ServiceRumor`s and Butterfly-level /// membership lists. /// /// (Butterfly provides the health, the ServiceRumors provide the /// rest). /// /// # Locking (see locking.md) /// * `RumorStore::list` (read) /// * `MemberList::entries` (read) fn populate_census_rsr_mlr(&mut self, service_rumors: &RumorStore<ServiceRumor>, member_list: &MemberList) { // Populate our census; new groups are created here, as are // new members of those groups. // // NOTE: In the current implementation, these members have an // indeterminate health status until we process the contents // of `member_list`. In the future, it would be nice to // incorporate the member list into // `census_group.update_from_service_rumors`, where new census // members are created, so there would be no time that there // is an indeterminate health anywhere. 
for (service_group, rumors) in service_rumors.lock_rsr().iter() { if let Ok(sg) = service_group_from_str(service_group) { let local_member_id = Cow::from(&self.local_member_id); let census_group = self.census_groups .entry(sg.clone()) .or_insert_with(|| CensusGroup::new(sg, &local_member_id)); census_group.update_from_service_rumors(rumors); } } member_list.with_memberships_mlr(|Membership { member, health }| { for group in self.census_groups.values_mut() { if let Some(census_member) = group.find_member_mut(&member.id) { census_member.update_from_member(&member); census_member.update_from_health(health); } } Ok(()) }) .ok(); } /// # Locking (see locking.md) /// * `RumorStore::list` (read) fn update_from_election_store_rsr(&mut self, election_rumors: &RumorStore<ElectionRumor>) { for (service_group, rumors) in election_rumors.lock_rsr().iter() { let election = rumors.get(ElectionRumor::const_id()).unwrap(); if let Ok(sg) = service_group_from_str(service_group) { if let Some(census_group) = self.census_groups.get_mut(&sg) { census_group.update_from_election_rumor(election); } } } } /// # Locking (see locking.md) /// * `RumorStore::list` (read) fn update_from_election_update_store_rsr(&mut self, election_update_rumors: &RumorStore<ElectionUpdateRumor>) { for (service_group, rumors) in election_update_rumors.lock_rsr().iter() { if let Ok(sg) = service_group_from_str(service_group) { if let Some(census_group) = self.census_groups.get_mut(&sg) { let election = rumors.get(ElectionUpdateRumor::const_id()).unwrap(); census_group.update_from_election_update_rumor(election); } } } } /// # Locking (see locking.md) /// * `RumorStore::list` (read) fn update_from_service_config_rsr(&mut self, cache_key_path: &Path, service_config_rumors: &RumorStore<ServiceConfigRumor>) { for (service_group, rumors) in service_config_rumors.lock_rsr().iter() { if let Ok(sg) = service_group_from_str(service_group) { if let Some(service_config) = rumors.get(ServiceConfigRumor::const_id()) { if let 
Some(census_group) = self.census_groups.get_mut(&sg) { census_group.update_from_service_config_rumor(cache_key_path, service_config); } } } } } /// # Locking (see locking.md) /// * `RumorStore::list` (read) fn update_from_service_files_rsr(&mut self, cache_key_path: &Path, service_file_rumors: &RumorStore<ServiceFileRumor>) { for (service_group, rumors) in service_file_rumors.lock_rsr().iter() { if let Ok(sg) = service_group_from_str(service_group) { let local_member_id = Cow::from(&self.local_member_id); let census_group = self.census_groups .entry(sg.clone()) .or_insert_with(|| CensusGroup::new(sg, &local_member_id)); census_group.update_from_service_file_rumors(cache_key_path, rumors); } } } } /// This is a proxy struct to represent what information we're writing to the dat file, and /// therefore what information gets sent out via the HTTP API. Right now, we're just wrapping the /// actual CensusRing struct, but this will give us something we can refactor against without /// worrying about breaking the data returned to users. 
pub struct CensusRingProxy<'a>(&'a CensusRing); impl<'a> CensusRingProxy<'a> { pub fn new(c: &'a CensusRing) -> Self { CensusRingProxy(&c) } } impl<'a> Serialize for CensusRingProxy<'a> { fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error> where S: Serializer { let mut strukt = serializer.serialize_struct("census_ring", 9)?; strukt.serialize_field("changed", &self.0.changed)?; strukt.serialize_field("census_groups", &self.0.census_groups)?; strukt.serialize_field("local_member_id", &self.0.local_member_id)?; strukt.serialize_field("last_service_counter", &self.0.last_service_counter)?; strukt.serialize_field("last_election_counter", &self.0.last_election_counter)?; strukt.serialize_field("last_election_update_counter", &self.0.last_election_update_counter)?; strukt.serialize_field("last_membership_counter", &self.0.last_membership_counter)?; strukt.serialize_field("last_service_config_counter", &self.0.last_service_config_counter)?; strukt.serialize_field("last_service_file_counter", &self.0.last_service_file_counter)?; strukt.end() } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)] pub enum ElectionStatus { None, ElectionInProgress, ElectionNoQuorum, ElectionFinished, } impl Default for ElectionStatus { fn default() -> ElectionStatus { ElectionStatus::None } } impl fmt::Display for ElectionStatus { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let value = match *self { ElectionStatus::ElectionInProgress => "in-progress", ElectionStatus::ElectionNoQuorum => "no-quorum", ElectionStatus::ElectionFinished => "finished", ElectionStatus::None => "none", }; write!(f, "{}", value) } } impl FromStr for ElectionStatus { type Err = Error; fn from_str(value: &str) -> Result<Self, Self::Err> { match value.to_lowercase().as_ref() { "in-progress" => Ok(ElectionStatus::ElectionInProgress), "no-quorum" => Ok(ElectionStatus::ElectionNoQuorum), "finished" => Ok(ElectionStatus::ElectionFinished), "none" => Ok(ElectionStatus::None), _ => 
Err(Error::BadElectionStatus(value.to_string())), } } } impl From<ElectionStatusRumor> for ElectionStatus { fn from(val: ElectionStatusRumor) -> ElectionStatus { match val { ElectionStatusRumor::Running => ElectionStatus::ElectionInProgress, ElectionStatusRumor::NoQuorum => ElectionStatus::ElectionNoQuorum, ElectionStatusRumor::Finished => ElectionStatus::ElectionFinished, } } } #[derive(Debug, Default, Serialize)] pub struct ServiceFile { pub filename: String, pub incarnation: u64, pub body: Vec<u8>, } #[derive(Debug, Serialize)] pub struct ServiceConfig { pub incarnation: u64, pub value: toml::value::Table, } #[derive(Debug)] pub struct CensusGroup { pub service_group: ServiceGroup, pub election_status: ElectionStatus, pub update_election_status: ElectionStatus, pub leader_id: Option<MemberId>, pub service_config: Option<ServiceConfig>, local_member_id: MemberId, population: BTreeMap<MemberId, CensusMember>, update_leader_id: Option<MemberId>, changed_service_files: Vec<String>, service_files: HashMap<String, ServiceFile>, } impl CensusGroup { fn new(sg: ServiceGroup, local_member_id: &str) -> Self { CensusGroup { service_group: sg, election_status: ElectionStatus::None, update_election_status: ElectionStatus::None, local_member_id: local_member_id.to_string(), population: BTreeMap::new(), leader_id: None, update_leader_id: None, service_config: None, service_files: HashMap::new(), changed_service_files: Vec::new(), } } /// Returns the census member in the census ring for the running Supervisor. pub fn me(&self) -> Option<&CensusMember> { self.population.get(&self.local_member_id) } pub fn leader(&self) -> Option<&CensusMember> { match self.leader_id { Some(ref id) => self.population.get(id), None => None, } } pub fn update_leader(&self) -> Option<&CensusMember> { match self.update_leader_id { Some(ref id) => self.population.get(id), None => None, } } /// Returns a list of all members in the census ring. 
pub fn members(&self) -> impl Iterator<Item = &CensusMember> { self.population.values() } /// Same as `members`, but only returns members that are either /// alive or suspect, i.e., nothing that is confirmed dead or /// departed. These are the members that we'll reasonably be /// interacting with at runtime. pub fn active_members(&self) -> impl Iterator<Item = &CensusMember> { self.population .values() .filter(|cm| cm.alive() || cm.suspect()) } pub fn changed_service_files(&self) -> Vec<&ServiceFile> { self.changed_service_files .iter() .map(|f| &self.service_files[f]) .collect() } /// Return previous alive peer, the peer to your left in the ordered members list, or None if /// you have no alive peers. // XXX: Is me ever None or not Alive? // XXX: Should we include Suspect members too, or only strictly Alive ones? pub fn previous_peer(&self) -> Option<&CensusMember> { self.me() .and_then(|me| Self::previous_peer_impl(self.population.values(), me)) } fn previous_peer_impl<'a>(members: impl Iterator<Item = &'a CensusMember>, me: &CensusMember) -> Option<&'a CensusMember> { let mut alive_members = members.filter(|cm| cm.alive()); let mut previous = None; for member in alive_members.by_ref() { if member.member_id == me.member_id { return previous.or_else(|| alive_members.last()); } else { previous = Some(member); } } None } fn update_from_service_rumors(&mut self, rumors: &HashMap<String, ServiceRumor>) { for (member_id, service_rumor) in rumors.iter() { // Yeah - we are ourself - we're alive. 
let is_self = member_id == &self.local_member_id; let member = self.population .entry(member_id.to_string()) .or_insert_with(|| { // Note: this is where CensusMembers are created let mut new_member = CensusMember::default(); new_member.alive = is_self; new_member }); member.update_from_service_rumor(&self.service_group, service_rumor); } } fn update_from_election_rumor(&mut self, election: &ElectionRumor) { self.leader_id = None; for census_member in self.population.values_mut() { if census_member.update_from_election_rumor(election) { self.leader_id = Some(census_member.member_id.clone()); } } match election.status { ElectionStatusRumor::Running => { self.election_status = ElectionStatus::ElectionInProgress; } ElectionStatusRumor::NoQuorum => { self.election_status = ElectionStatus::ElectionNoQuorum; } ElectionStatusRumor::Finished => { self.election_status = ElectionStatus::ElectionFinished; } } } fn update_from_election_update_rumor(&mut self, election: &ElectionUpdateRumor) { self.update_leader_id = None; for census_member in self.population.values_mut() { if census_member.update_from_election_update_rumor(election) { self.update_leader_id = Some(census_member.member_id.clone()); } } match election.status { ElectionStatusRumor::Running => { self.update_election_status = ElectionStatus::ElectionInProgress; } ElectionStatusRumor::NoQuorum => { self.update_election_status = ElectionStatus::ElectionNoQuorum; } ElectionStatusRumor::Finished => { self.update_election_status = ElectionStatus::ElectionFinished; } } } fn update_from_service_config_rumor(&mut self, cache_key_path: &Path, service_config: &ServiceConfigRumor) { match service_config.config(cache_key_path) { Ok(config) => { if self.service_config.is_none() || service_config.incarnation > self.service_config.as_ref().unwrap().incarnation { self.service_config = Some(ServiceConfig { incarnation: service_config.incarnation, value: config, }); } } Err(err) => warn!("{}", err), } } fn 
update_from_service_file_rumors(&mut self, cache_key_path: &Path, service_file_rumors: &HashMap<String, ServiceFileRumor>) { self.changed_service_files.clear(); for (_m_id, service_file_rumor) in service_file_rumors.iter() { let filename = service_file_rumor.filename.to_string(); let file = self.service_files .entry(filename.clone()) .or_insert_with(ServiceFile::default); if service_file_rumor.incarnation > file.incarnation { match service_file_rumor.body(cache_key_path) { Ok(body) => { self.changed_service_files.push(filename.clone()); file.filename = filename.clone(); file.incarnation = service_file_rumor.incarnation; file.body = body; } Err(e) => { warn!("Cannot decrypt service file for {} {} {}: {}", self.service_group, service_file_rumor.filename, service_file_rumor.incarnation, e) } } } } } fn find_member_mut(&mut self, member_id: &str) -> Option<&mut CensusMember> { self.population.get_mut(member_id) } /// Determine what configuration keys the group as a whole /// exports. Returns a set of the top-level exported keys. /// /// This implementation is a righteous hack to cover the fact that /// there is not yet a centralized view of what a "group" actually /// exports! There has been some talk of having a "leader" role in /// all topologies, in which case we could just ask the leader /// what the group exports. Until that time, the best we can do is /// ask an active member what *they* export (if there is a leader, /// though, we'll just ask them). 
pub fn group_exports<'a>(&'a self) -> Result<HashSet<&'a String>, Error> { self.leader() .or_else(|| self.active_members().next()) .ok_or_else(|| Error::NoActiveMembers(self.service_group.clone())) .map(|m| m.cfg.keys().collect()) } } impl Serialize for CensusGroup { fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error> where S: Serializer { let mut strukt = serializer.serialize_struct("census_group", 10)?; strukt.serialize_field("service_group", &self.service_group)?; strukt.serialize_field("election_status", &self.election_status)?; strukt.serialize_field("update_election_status", &self.update_election_status)?; strukt.serialize_field("leader_id", &self.leader_id)?; strukt.serialize_field("service_config", &self.service_config)?; strukt.serialize_field("local_member_id", &self.local_member_id)?; let new_pop: BTreeMap<MemberId, CensusMemberProxy<'_>> = self.population .iter() .map(|(k, v)| (k.clone(), CensusMemberProxy::new(v))) .collect(); strukt.serialize_field("population", &new_pop)?; strukt.serialize_field("update_leader_id", &self.update_leader_id)?; strukt.serialize_field("changed_service_files", &self.changed_service_files)?; strukt.serialize_field("service_files", &self.service_files)?; strukt.end() } } // NOTE: This is exposed to users in templates. Any public member is // accessible to users, so change this interface with care. // // User-facing documentation is available at // https://www.habitat.sh/docs/reference/#template-data; update that // as required. 
#[derive(Clone, Debug, Default, Serialize)] pub struct CensusMember { pub member_id: MemberId, pub pkg: Option<PackageIdent>, pub application: Option<String>, pub environment: Option<String>, pub service: String, pub group: String, pub org: Option<String>, pub persistent: bool, pub leader: bool, pub follower: bool, pub update_leader: bool, pub update_follower: bool, pub election_is_running: bool, pub election_is_no_quorum: bool, pub election_is_finished: bool, pub update_election_is_running: bool, pub update_election_is_no_quorum: bool, pub update_election_is_finished: bool, pub sys: SysInfo, alive: bool, suspect: bool, confirmed: bool, departed: bool, // Maps must be represented last in a serializable struct for the current version of the toml // crate. Additionally, this deserialization method is required to correct any ordering issues // with the table being serialized - https://docs.rs/toml/0.4.0/toml/ser/fn.tables_last.html #[serde(serialize_with = "toml::ser::tables_last")] pub cfg: toml::value::Table, } impl CensusMember { fn update_from_service_rumor(&mut self, sg: &ServiceGroup, rumor: &ServiceRumor) { self.member_id = rumor.member_id.to_string(); self.service = sg.service().to_string(); self.group = sg.group().to_string(); if let Some(org) = sg.org() { self.org = Some(org.to_string()); } if let Some(appenv) = sg.application_environment() { self.application = Some(appenv.application().to_string()); self.environment = Some(appenv.environment().to_string()); } match PackageIdent::from_str(&rumor.pkg) { Ok(ident) => self.pkg = Some(ident), Err(err) => warn!("Received a bad package ident from gossip data, err={}", err), }; self.sys = rumor.sys.clone(); self.cfg = toml::from_slice(&rumor.cfg).unwrap_or_default(); } fn update_from_election_rumor(&mut self, election: &ElectionRumor) -> bool { self.election_is_running = election.status == ElectionStatusRumor::Running; self.election_is_no_quorum = election.status == ElectionStatusRumor::NoQuorum; 
self.election_is_finished = election.status == ElectionStatusRumor::Finished; if self.election_is_finished { if self.member_id == election.member_id { self.leader = true; self.follower = false; } else { self.leader = false; self.follower = true; } } self.leader } fn update_from_election_update_rumor(&mut self, election: &ElectionUpdateRumor) -> bool { self.update_election_is_running = election.status == ElectionStatusRumor::Running; self.update_election_is_no_quorum = election.status == ElectionStatusRumor::NoQuorum; self.update_election_is_finished = election.status == ElectionStatusRumor::Finished; if self.update_election_is_finished { if self.member_id == election.member_id { self.update_leader = true; self.update_follower = false; } else { self.update_leader = false; self.update_follower = true; } } self.update_leader } fn update_from_member(&mut self, member: &Member) { self.sys.gossip_ip = member.address.to_string(); self.sys.gossip_port = u32::from(member.gossip_port); self.persistent = true; } fn update_from_health(&mut self, health: Health) { self.alive = false; self.suspect = false; self.confirmed = false; self.departed = false; match health { Health::Alive => self.alive = true, Health::Suspect => self.suspect = true, Health::Confirmed => self.confirmed = true, Health::Departed => self.departed = true, } } /// Is this member currently considered to be alive or not? pub fn alive(&self) -> bool { self.alive } pub fn suspect(&self) -> bool { self.suspect } pub fn confirmed(&self) -> bool { self.confirmed } pub fn departed(&self) -> bool { self.departed } } /// This data structure just wraps the CensusMember and allows us to tweak the serialization logic. 
pub struct CensusMemberProxy<'a>(&'a CensusMember); impl<'a> CensusMemberProxy<'a> { pub fn new(c: &'a CensusMember) -> Self { CensusMemberProxy(&c) } } impl<'a> Serialize for CensusMemberProxy<'a> { fn serialize<S>(&self, serializer: S) -> result::Result<S::Ok, S::Error> where S: Serializer { let mut strukt = serializer.serialize_struct("census_member", 24)?; strukt.serialize_field("member_id", &self.0.member_id)?; strukt.serialize_field("pkg", &self.0.pkg)?; if let Some(ref p) = self.0.pkg { strukt.serialize_field("package", &p.to_string())?; } else { strukt.serialize_field("package", &None::<String>)?; } strukt.serialize_field("application", &self.0.application)?; strukt.serialize_field("environment", &self.0.environment)?; strukt.serialize_field("service", &self.0.service)?; strukt.serialize_field("group", &self.0.group)?; strukt.serialize_field("org", &self.0.org)?; strukt.serialize_field("persistent", &self.0.persistent)?; strukt.serialize_field("leader", &self.0.leader)?; strukt.serialize_field("follower", &self.0.follower)?; strukt.serialize_field("update_leader", &self.0.update_leader)?; strukt.serialize_field("update_follower", &self.0.update_follower)?; strukt.serialize_field("election_is_running", &self.0.election_is_running)?; strukt.serialize_field("election_is_no_quorum", &self.0.election_is_no_quorum)?; strukt.serialize_field("election_is_finished", &self.0.election_is_finished)?; strukt.serialize_field("update_election_is_running", &self.0.update_election_is_running)?; strukt.serialize_field("update_election_is_no_quorum", &self.0.update_election_is_no_quorum)?; strukt.serialize_field("update_election_is_finished", &self.0.update_election_is_finished)?; strukt.serialize_field("sys", &self.0.sys)?; strukt.serialize_field("alive", &self.0.alive)?; strukt.serialize_field("suspect", &self.0.suspect)?; strukt.serialize_field("confirmed", &self.0.confirmed)?; strukt.serialize_field("departed", &self.0.departed)?; strukt.serialize_field("cfg", 
&self.0.cfg)?; strukt.end() } } fn service_group_from_str(sg: &str) -> Result<ServiceGroup, habitat_core::Error> { ServiceGroup::from_str(sg).map_err(|e| { outputln!("Malformed service group; cannot populate \ configuration data. Aborting.: {}", e); e }) } #[cfg(test)] mod tests { use super::*; use crate::test_helpers::*; use habitat_butterfly::{member::{Health, MemberList}, rumor::{election::{self, Election as ElectionRumor, ElectionUpdate as ElectionUpdateRumor}, service::{Service as ServiceRumor, SysInfo}, service_config::ServiceConfig as ServiceConfigRumor, service_file::ServiceFile as ServiceFileRumor, RumorStore}}; use habitat_common::cli::FS_ROOT; use habitat_core::{fs::cache_key_path, package::ident::PackageIdent, service::ServiceGroup}; use serde_json; #[test] fn update_from_rumors() { let (ring, sg_one, sg_two) = test_census_ring(); let census_group_one = ring.census_group_for(&sg_one).unwrap(); assert!(census_group_one.me().is_none()); assert_eq!(census_group_one.leader().unwrap().member_id, "member-a"); assert!(census_group_one.update_leader().is_none()); let census_group_two = ring.census_group_for(&sg_two).unwrap(); assert_eq!(census_group_two.me().unwrap().member_id, "member-b".to_string()); assert_eq!(census_group_two.update_leader().unwrap().member_id, "member-b".to_string()); let mut members = census_group_two.members(); assert_eq!(members.next().unwrap().member_id, "member-a"); assert_eq!(members.next().unwrap().member_id, "member-b"); } #[test] fn census_ring_proxy_conforms_to_the_schema() { let (ring, ..) 
= test_census_ring(); let crp = CensusRingProxy::new(&ring); let json = serde_json::to_string(&crp).unwrap(); assert_valid(&json, "http_gateway_census_schema.json"); } fn test_census_ring() -> (CensusRing, ServiceGroup, ServiceGroup) { let mut sys_info = SysInfo::default(); sys_info.ip = "1.2.3.4".to_string(); sys_info.hostname = "hostname".to_string(); sys_info.gossip_ip = "0.0.0.0".to_string(); sys_info.gossip_port = 7777; sys_info.http_gateway_ip = "0.0.0.0".to_string(); sys_info.http_gateway_port = 9631; let pg_id = PackageIdent::new("starkandwayne", "shield", Some("0.10.4"), Some("20170419115548")); let sg_one = ServiceGroup::new(None, "shield", "one", None).unwrap(); let service_store: RumorStore<ServiceRumor> = RumorStore::default(); let service_one = ServiceRumor::new("member-a".to_string(), &pg_id, sg_one.clone(), sys_info.clone(), None); let sg_two = ServiceGroup::new(None, "shield", "two", None).unwrap(); let service_two = ServiceRumor::new("member-b".to_string(), &pg_id, sg_two.clone(), sys_info.clone(), None); let service_three = ServiceRumor::new("member-a".to_string(), &pg_id, sg_two.clone(), sys_info.clone(), None); service_store.insert_rsw(service_one); service_store.insert_rsw(service_two); service_store.insert_rsw(service_three); let election_store: RumorStore<ElectionRumor> = RumorStore::default(); let mut election = ElectionRumor::new("member-a", &sg_one, election::Term::default(), 10, true /* has_quorum */); election.finish(); election_store.insert_rsw(election); let election_update_store: RumorStore<ElectionUpdateRumor> = RumorStore::default(); let mut election_update = ElectionUpdateRumor::new("member-b", &sg_two, election::Term::default(), 10, true /* has_quorum */); election_update.finish(); election_update_store.insert_rsw(election_update); let member_list = MemberList::new(); let service_config_store: RumorStore<ServiceConfigRumor> = RumorStore::default(); let service_file_store: RumorStore<ServiceFileRumor> = RumorStore::default(); let 
mut ring = CensusRing::new("member-b".to_string()); ring.update_from_rumors_rsr_mlr(&cache_key_path(Some(&*FS_ROOT)), &service_store, &election_store, &election_update_store, &member_list, &service_config_store, &service_file_store); (ring, sg_one, sg_two) } /// Create a bare-minimum CensusMember with the given Health fn test_census_member(id: &str, health: Health) -> CensusMember { CensusMember { member_id: id.into(), pkg: None, application: None, environment: None, service: "test_service".to_string(), group: "default".to_string(), org: None, persistent: false, leader: false, follower: false, update_leader: false, update_follower: false, election_is_running: false, election_is_no_quorum: false, election_is_finished: false, update_election_is_running: false, update_election_is_no_quorum: false, update_election_is_finished: false, sys: SysInfo::default(), alive: health == Health::Alive, suspect: health == Health::Suspect, confirmed: health == Health::Confirmed, departed: health == Health::Departed, cfg: toml::value::Table::new(), } } #[test] fn active_members_leaves_only_active_members() { let population = vec![test_census_member("live-one", Health::Alive), test_census_member("suspect-one", Health::Suspect), test_census_member("confirmed-one", Health::Confirmed), test_census_member("departed-one", Health::Departed),]; let sg: ServiceGroup = "test-service.default".parse() .expect("This should be a valid service group"); let mut census_group = CensusGroup::new(sg, &"live-one".to_string()); for member in population { census_group.population .insert(member.member_id.clone(), member); } let mut active_members = census_group.active_members(); assert_eq!(active_members.next().unwrap().member_id, "live-one"); assert_eq!(active_members.next().unwrap().member_id, "suspect-one"); assert!(active_members.next().is_none()); } fn assert_eq_member_ids(cm: Option<&CensusMember>, id: Option<&str>) { assert_eq!(cm.map(|cm| cm.member_id.as_str()), id); } #[test] fn 
previous_peer_with_no_members() { let me = test_census_member("me", Health::Alive); let members = vec![]; assert_eq_member_ids(CensusGroup::previous_peer_impl(members.iter(), &me), None); } #[test] fn previous_peer_with_no_alive_members() { let me = test_census_member("me", Health::Alive); let members = vec![test_census_member("left_of_me", Health::Confirmed), me.clone(),]; assert_eq_member_ids(CensusGroup::previous_peer_impl(members.iter(), &me), None); } #[test] fn previous_peer_with_only_me() { let me = test_census_member("me", Health::Alive); let members = vec![me.clone()]; assert_eq_member_ids(CensusGroup::previous_peer_impl(members.iter(), &me), None); } #[test] fn previous_peer_simple() { let me = test_census_member("me", Health::Alive); let members = vec![test_census_member("left_of_me", Health::Alive), me.clone()]; assert_eq_member_ids(CensusGroup::previous_peer_impl(members.iter(), &me), Some("left_of_me")); } #[test] fn previous_peer_wraparound() { let me = test_census_member("me", Health::Alive); let members = vec![me.clone(), test_census_member("left_of_me_with_wrapping", Health::Alive),]; assert_eq_member_ids(CensusGroup::previous_peer_impl(members.iter(), &me), Some("left_of_me_with_wrapping")); } #[test] fn previous_peer_normal() { let me = test_census_member("me", Health::Alive); let members = vec![test_census_member("2_left_of_me", Health::Alive), test_census_member("left_of_me", Health::Alive), me.clone(), test_census_member("right_of_me", Health::Alive),]; assert_eq_member_ids(CensusGroup::previous_peer_impl(members.iter(), &me), Some("left_of_me")); } #[test] fn previous_peer_with_confirmed() { let me = test_census_member("me", Health::Alive); let members = vec![test_census_member("2_left_of_me", Health::Alive), test_census_member("left_of_me", Health::Confirmed), me.clone(), test_census_member("right_of_me", Health::Alive),]; assert_eq_member_ids(CensusGroup::previous_peer_impl(members.iter(), &me), Some("2_left_of_me")); } #[test] fn 
previous_peer_with_confirmed_and_wraparound() { let me = test_census_member("me", Health::Alive); let members = vec![test_census_member("left_of_me", Health::Confirmed), me.clone(), test_census_member("left_of_me_with_wrapping", Health::Alive), test_census_member("2_right_of_me", Health::Confirmed),]; assert_eq_member_ids(CensusGroup::previous_peer_impl(members.iter(), &me), Some("left_of_me_with_wrapping")); } }
43.735444
102
0.566527
0137080938fdd13637313c96a26c112ac319d576
40,921
use crate::expand::{self, AstFragment, Invocation}; use crate::module::DirectoryOwnership; use rustc_ast::ast::{self, Attribute, NodeId, PatKind}; use rustc_ast::mut_visit::{self, MutVisitor}; use rustc_ast::ptr::P; use rustc_ast::token; use rustc_ast::tokenstream::{self, TokenStream, TokenTree}; use rustc_ast::visit::{AssocCtxt, Visitor}; use rustc_attr::{self as attr, Deprecation, HasAttrs, Stability}; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::sync::{self, Lrc}; use rustc_errors::{DiagnosticBuilder, ErrorReported}; use rustc_parse::{self, parser, MACRO_ARGUMENTS}; use rustc_session::parse::ParseSess; use rustc_span::def_id::DefId; use rustc_span::edition::Edition; use rustc_span::hygiene::{AstPass, ExpnData, ExpnId, ExpnKind}; use rustc_span::source_map::SourceMap; use rustc_span::symbol::{kw, sym, Ident, Symbol}; use rustc_span::{FileName, MultiSpan, Span, DUMMY_SP}; use smallvec::{smallvec, SmallVec}; use std::default::Default; use std::iter; use std::path::PathBuf; use std::rc::Rc; crate use rustc_span::hygiene::MacroKind; #[derive(Debug, Clone)] pub enum Annotatable { Item(P<ast::Item>), TraitItem(P<ast::AssocItem>), ImplItem(P<ast::AssocItem>), ForeignItem(P<ast::ForeignItem>), Stmt(P<ast::Stmt>), Expr(P<ast::Expr>), Arm(ast::Arm), Field(ast::Field), FieldPat(ast::FieldPat), GenericParam(ast::GenericParam), Param(ast::Param), StructField(ast::StructField), Variant(ast::Variant), } impl HasAttrs for Annotatable { fn attrs(&self) -> &[Attribute] { match *self { Annotatable::Item(ref item) => &item.attrs, Annotatable::TraitItem(ref trait_item) => &trait_item.attrs, Annotatable::ImplItem(ref impl_item) => &impl_item.attrs, Annotatable::ForeignItem(ref foreign_item) => &foreign_item.attrs, Annotatable::Stmt(ref stmt) => stmt.attrs(), Annotatable::Expr(ref expr) => &expr.attrs, Annotatable::Arm(ref arm) => &arm.attrs, Annotatable::Field(ref field) => &field.attrs, Annotatable::FieldPat(ref fp) => &fp.attrs, Annotatable::GenericParam(ref 
gp) => &gp.attrs, Annotatable::Param(ref p) => &p.attrs, Annotatable::StructField(ref sf) => &sf.attrs, Annotatable::Variant(ref v) => &v.attrs(), } } fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) { match self { Annotatable::Item(item) => item.visit_attrs(f), Annotatable::TraitItem(trait_item) => trait_item.visit_attrs(f), Annotatable::ImplItem(impl_item) => impl_item.visit_attrs(f), Annotatable::ForeignItem(foreign_item) => foreign_item.visit_attrs(f), Annotatable::Stmt(stmt) => stmt.visit_attrs(f), Annotatable::Expr(expr) => expr.visit_attrs(f), Annotatable::Arm(arm) => arm.visit_attrs(f), Annotatable::Field(field) => field.visit_attrs(f), Annotatable::FieldPat(fp) => fp.visit_attrs(f), Annotatable::GenericParam(gp) => gp.visit_attrs(f), Annotatable::Param(p) => p.visit_attrs(f), Annotatable::StructField(sf) => sf.visit_attrs(f), Annotatable::Variant(v) => v.visit_attrs(f), } } } impl Annotatable { pub fn span(&self) -> Span { match *self { Annotatable::Item(ref item) => item.span, Annotatable::TraitItem(ref trait_item) => trait_item.span, Annotatable::ImplItem(ref impl_item) => impl_item.span, Annotatable::ForeignItem(ref foreign_item) => foreign_item.span, Annotatable::Stmt(ref stmt) => stmt.span, Annotatable::Expr(ref expr) => expr.span, Annotatable::Arm(ref arm) => arm.span, Annotatable::Field(ref field) => field.span, Annotatable::FieldPat(ref fp) => fp.pat.span, Annotatable::GenericParam(ref gp) => gp.ident.span, Annotatable::Param(ref p) => p.span, Annotatable::StructField(ref sf) => sf.span, Annotatable::Variant(ref v) => v.span, } } pub fn visit_with<'a, V: Visitor<'a>>(&'a self, visitor: &mut V) { match self { Annotatable::Item(item) => visitor.visit_item(item), Annotatable::TraitItem(item) => visitor.visit_assoc_item(item, AssocCtxt::Trait), Annotatable::ImplItem(item) => visitor.visit_assoc_item(item, AssocCtxt::Impl), Annotatable::ForeignItem(foreign_item) => visitor.visit_foreign_item(foreign_item), Annotatable::Stmt(stmt) => 
visitor.visit_stmt(stmt), Annotatable::Expr(expr) => visitor.visit_expr(expr), Annotatable::Arm(arm) => visitor.visit_arm(arm), Annotatable::Field(field) => visitor.visit_field(field), Annotatable::FieldPat(fp) => visitor.visit_field_pattern(fp), Annotatable::GenericParam(gp) => visitor.visit_generic_param(gp), Annotatable::Param(p) => visitor.visit_param(p), Annotatable::StructField(sf) => visitor.visit_struct_field(sf), Annotatable::Variant(v) => visitor.visit_variant(v), } } crate fn into_tokens(self) -> TokenStream { // `Annotatable` can be converted into tokens directly, but we // are packing it into a nonterminal as a piece of AST to make // the produced token stream look nicer in pretty-printed form. let nt = match self { Annotatable::Item(item) => token::NtItem(item), Annotatable::TraitItem(item) | Annotatable::ImplItem(item) => { token::NtItem(P(item.and_then(ast::AssocItem::into_item))) } Annotatable::ForeignItem(item) => { token::NtItem(P(item.and_then(ast::ForeignItem::into_item))) } Annotatable::Stmt(stmt) => token::NtStmt(stmt.into_inner()), Annotatable::Expr(expr) => token::NtExpr(expr), Annotatable::Arm(..) | Annotatable::Field(..) | Annotatable::FieldPat(..) | Annotatable::GenericParam(..) | Annotatable::Param(..) | Annotatable::StructField(..) | Annotatable::Variant(..) 
=> panic!("unexpected annotatable"), }; TokenTree::token(token::Interpolated(Lrc::new(nt)), DUMMY_SP).into() } pub fn expect_item(self) -> P<ast::Item> { match self { Annotatable::Item(i) => i, _ => panic!("expected Item"), } } pub fn map_item_or<F, G>(self, mut f: F, mut or: G) -> Annotatable where F: FnMut(P<ast::Item>) -> P<ast::Item>, G: FnMut(Annotatable) -> Annotatable, { match self { Annotatable::Item(i) => Annotatable::Item(f(i)), _ => or(self), } } pub fn expect_trait_item(self) -> P<ast::AssocItem> { match self { Annotatable::TraitItem(i) => i, _ => panic!("expected Item"), } } pub fn expect_impl_item(self) -> P<ast::AssocItem> { match self { Annotatable::ImplItem(i) => i, _ => panic!("expected Item"), } } pub fn expect_foreign_item(self) -> P<ast::ForeignItem> { match self { Annotatable::ForeignItem(i) => i, _ => panic!("expected foreign item"), } } pub fn expect_stmt(self) -> ast::Stmt { match self { Annotatable::Stmt(stmt) => stmt.into_inner(), _ => panic!("expected statement"), } } pub fn expect_expr(self) -> P<ast::Expr> { match self { Annotatable::Expr(expr) => expr, _ => panic!("expected expression"), } } pub fn expect_arm(self) -> ast::Arm { match self { Annotatable::Arm(arm) => arm, _ => panic!("expected match arm"), } } pub fn expect_field(self) -> ast::Field { match self { Annotatable::Field(field) => field, _ => panic!("expected field"), } } pub fn expect_field_pattern(self) -> ast::FieldPat { match self { Annotatable::FieldPat(fp) => fp, _ => panic!("expected field pattern"), } } pub fn expect_generic_param(self) -> ast::GenericParam { match self { Annotatable::GenericParam(gp) => gp, _ => panic!("expected generic parameter"), } } pub fn expect_param(self) -> ast::Param { match self { Annotatable::Param(param) => param, _ => panic!("expected parameter"), } } pub fn expect_struct_field(self) -> ast::StructField { match self { Annotatable::StructField(sf) => sf, _ => panic!("expected struct field"), } } pub fn expect_variant(self) -> 
ast::Variant { match self { Annotatable::Variant(v) => v, _ => panic!("expected variant"), } } pub fn derive_allowed(&self) -> bool { match *self { Annotatable::Item(ref item) => match item.kind { ast::ItemKind::Struct(..) | ast::ItemKind::Enum(..) | ast::ItemKind::Union(..) => { true } _ => false, }, _ => false, } } } /// Result of an expansion that may need to be retried. /// Consider using this for non-`MultiItemModifier` expanders as well. pub enum ExpandResult<T, U> { /// Expansion produced a result (possibly dummy). Ready(T), /// Expansion could not produce a result and needs to be retried. /// The string is an explanation that will be printed if we are stuck in an infinite retry loop. Retry(U, String), } // `meta_item` is the attribute, and `item` is the item being modified. pub trait MultiItemModifier { fn expand( &self, ecx: &mut ExtCtxt<'_>, span: Span, meta_item: &ast::MetaItem, item: Annotatable, ) -> ExpandResult<Vec<Annotatable>, Annotatable>; } impl<F> MultiItemModifier for F where F: Fn(&mut ExtCtxt<'_>, Span, &ast::MetaItem, Annotatable) -> Vec<Annotatable>, { fn expand( &self, ecx: &mut ExtCtxt<'_>, span: Span, meta_item: &ast::MetaItem, item: Annotatable, ) -> ExpandResult<Vec<Annotatable>, Annotatable> { ExpandResult::Ready(self(ecx, span, meta_item, item)) } } pub trait ProcMacro { fn expand<'cx>( &self, ecx: &'cx mut ExtCtxt<'_>, span: Span, ts: TokenStream, ) -> Result<TokenStream, ErrorReported>; } impl<F> ProcMacro for F where F: Fn(TokenStream) -> TokenStream, { fn expand<'cx>( &self, _ecx: &'cx mut ExtCtxt<'_>, _span: Span, ts: TokenStream, ) -> Result<TokenStream, ErrorReported> { // FIXME setup implicit context in TLS before calling self. 
Ok((*self)(ts)) } } pub trait AttrProcMacro { fn expand<'cx>( &self, ecx: &'cx mut ExtCtxt<'_>, span: Span, annotation: TokenStream, annotated: TokenStream, ) -> Result<TokenStream, ErrorReported>; } impl<F> AttrProcMacro for F where F: Fn(TokenStream, TokenStream) -> TokenStream, { fn expand<'cx>( &self, _ecx: &'cx mut ExtCtxt<'_>, _span: Span, annotation: TokenStream, annotated: TokenStream, ) -> Result<TokenStream, ErrorReported> { // FIXME setup implicit context in TLS before calling self. Ok((*self)(annotation, annotated)) } } /// Represents a thing that maps token trees to Macro Results pub trait TTMacroExpander { fn expand<'cx>( &self, ecx: &'cx mut ExtCtxt<'_>, span: Span, input: TokenStream, ) -> Box<dyn MacResult + 'cx>; } pub type MacroExpanderFn = for<'cx> fn(&'cx mut ExtCtxt<'_>, Span, TokenStream) -> Box<dyn MacResult + 'cx>; impl<F> TTMacroExpander for F where F: for<'cx> Fn(&'cx mut ExtCtxt<'_>, Span, TokenStream) -> Box<dyn MacResult + 'cx>, { fn expand<'cx>( &self, ecx: &'cx mut ExtCtxt<'_>, span: Span, mut input: TokenStream, ) -> Box<dyn MacResult + 'cx> { struct AvoidInterpolatedIdents; impl MutVisitor for AvoidInterpolatedIdents { fn visit_tt(&mut self, tt: &mut tokenstream::TokenTree) { if let tokenstream::TokenTree::Token(token) = tt { if let token::Interpolated(nt) = &token.kind { if let token::NtIdent(ident, is_raw) = **nt { *tt = tokenstream::TokenTree::token( token::Ident(ident.name, is_raw), ident.span, ); } } } mut_visit::noop_visit_tt(tt, self) } fn visit_mac(&mut self, mac: &mut ast::MacCall) { mut_visit::noop_visit_mac(mac, self) } } AvoidInterpolatedIdents.visit_tts(&mut input); (*self)(ecx, span, input) } } // Use a macro because forwarding to a simple function has type system issues macro_rules! make_stmts_default { ($me:expr) => { $me.make_expr().map(|e| { smallvec![ast::Stmt { id: ast::DUMMY_NODE_ID, span: e.span, kind: ast::StmtKind::Expr(e), }] }) }; } /// The result of a macro expansion. 
The return values of the various /// methods are spliced into the AST at the callsite of the macro. pub trait MacResult { /// Creates an expression. fn make_expr(self: Box<Self>) -> Option<P<ast::Expr>> { None } /// Creates zero or more items. fn make_items(self: Box<Self>) -> Option<SmallVec<[P<ast::Item>; 1]>> { None } /// Creates zero or more impl items. fn make_impl_items(self: Box<Self>) -> Option<SmallVec<[P<ast::AssocItem>; 1]>> { None } /// Creates zero or more trait items. fn make_trait_items(self: Box<Self>) -> Option<SmallVec<[P<ast::AssocItem>; 1]>> { None } /// Creates zero or more items in an `extern {}` block fn make_foreign_items(self: Box<Self>) -> Option<SmallVec<[P<ast::ForeignItem>; 1]>> { None } /// Creates a pattern. fn make_pat(self: Box<Self>) -> Option<P<ast::Pat>> { None } /// Creates zero or more statements. /// /// By default this attempts to create an expression statement, /// returning None if that fails. fn make_stmts(self: Box<Self>) -> Option<SmallVec<[ast::Stmt; 1]>> { make_stmts_default!(self) } fn make_ty(self: Box<Self>) -> Option<P<ast::Ty>> { None } fn make_arms(self: Box<Self>) -> Option<SmallVec<[ast::Arm; 1]>> { None } fn make_fields(self: Box<Self>) -> Option<SmallVec<[ast::Field; 1]>> { None } fn make_field_patterns(self: Box<Self>) -> Option<SmallVec<[ast::FieldPat; 1]>> { None } fn make_generic_params(self: Box<Self>) -> Option<SmallVec<[ast::GenericParam; 1]>> { None } fn make_params(self: Box<Self>) -> Option<SmallVec<[ast::Param; 1]>> { None } fn make_struct_fields(self: Box<Self>) -> Option<SmallVec<[ast::StructField; 1]>> { None } fn make_variants(self: Box<Self>) -> Option<SmallVec<[ast::Variant; 1]>> { None } } macro_rules! make_MacEager { ( $( $fld:ident: $t:ty, )* ) => { /// `MacResult` implementation for the common case where you've already /// built each form of AST that you might return. 
#[derive(Default)] pub struct MacEager { $( pub $fld: Option<$t>, )* } impl MacEager { $( pub fn $fld(v: $t) -> Box<dyn MacResult> { Box::new(MacEager { $fld: Some(v), ..Default::default() }) } )* } } } make_MacEager! { expr: P<ast::Expr>, pat: P<ast::Pat>, items: SmallVec<[P<ast::Item>; 1]>, impl_items: SmallVec<[P<ast::AssocItem>; 1]>, trait_items: SmallVec<[P<ast::AssocItem>; 1]>, foreign_items: SmallVec<[P<ast::ForeignItem>; 1]>, stmts: SmallVec<[ast::Stmt; 1]>, ty: P<ast::Ty>, } impl MacResult for MacEager { fn make_expr(self: Box<Self>) -> Option<P<ast::Expr>> { self.expr } fn make_items(self: Box<Self>) -> Option<SmallVec<[P<ast::Item>; 1]>> { self.items } fn make_impl_items(self: Box<Self>) -> Option<SmallVec<[P<ast::AssocItem>; 1]>> { self.impl_items } fn make_trait_items(self: Box<Self>) -> Option<SmallVec<[P<ast::AssocItem>; 1]>> { self.trait_items } fn make_foreign_items(self: Box<Self>) -> Option<SmallVec<[P<ast::ForeignItem>; 1]>> { self.foreign_items } fn make_stmts(self: Box<Self>) -> Option<SmallVec<[ast::Stmt; 1]>> { match self.stmts.as_ref().map_or(0, |s| s.len()) { 0 => make_stmts_default!(self), _ => self.stmts, } } fn make_pat(self: Box<Self>) -> Option<P<ast::Pat>> { if let Some(p) = self.pat { return Some(p); } if let Some(e) = self.expr { if let ast::ExprKind::Lit(_) = e.kind { return Some(P(ast::Pat { id: ast::DUMMY_NODE_ID, span: e.span, kind: PatKind::Lit(e), })); } } None } fn make_ty(self: Box<Self>) -> Option<P<ast::Ty>> { self.ty } } /// Fill-in macro expansion result, to allow compilation to continue /// after hitting errors. #[derive(Copy, Clone)] pub struct DummyResult { is_error: bool, span: Span, } impl DummyResult { /// Creates a default MacResult that can be anything. /// /// Use this as a return value after hitting any errors and /// calling `span_err`. pub fn any(span: Span) -> Box<dyn MacResult + 'static> { Box::new(DummyResult { is_error: true, span }) } /// Same as `any`, but must be a valid fragment, not error. 
pub fn any_valid(span: Span) -> Box<dyn MacResult + 'static> { Box::new(DummyResult { is_error: false, span }) } /// A plain dummy expression. pub fn raw_expr(sp: Span, is_error: bool) -> P<ast::Expr> { P(ast::Expr { id: ast::DUMMY_NODE_ID, kind: if is_error { ast::ExprKind::Err } else { ast::ExprKind::Tup(Vec::new()) }, span: sp, attrs: ast::AttrVec::new(), }) } /// A plain dummy pattern. pub fn raw_pat(sp: Span) -> ast::Pat { ast::Pat { id: ast::DUMMY_NODE_ID, kind: PatKind::Wild, span: sp } } /// A plain dummy type. pub fn raw_ty(sp: Span, is_error: bool) -> P<ast::Ty> { P(ast::Ty { id: ast::DUMMY_NODE_ID, kind: if is_error { ast::TyKind::Err } else { ast::TyKind::Tup(Vec::new()) }, span: sp, }) } } impl MacResult for DummyResult { fn make_expr(self: Box<DummyResult>) -> Option<P<ast::Expr>> { Some(DummyResult::raw_expr(self.span, self.is_error)) } fn make_pat(self: Box<DummyResult>) -> Option<P<ast::Pat>> { Some(P(DummyResult::raw_pat(self.span))) } fn make_items(self: Box<DummyResult>) -> Option<SmallVec<[P<ast::Item>; 1]>> { Some(SmallVec::new()) } fn make_impl_items(self: Box<DummyResult>) -> Option<SmallVec<[P<ast::AssocItem>; 1]>> { Some(SmallVec::new()) } fn make_trait_items(self: Box<DummyResult>) -> Option<SmallVec<[P<ast::AssocItem>; 1]>> { Some(SmallVec::new()) } fn make_foreign_items(self: Box<Self>) -> Option<SmallVec<[P<ast::ForeignItem>; 1]>> { Some(SmallVec::new()) } fn make_stmts(self: Box<DummyResult>) -> Option<SmallVec<[ast::Stmt; 1]>> { Some(smallvec![ast::Stmt { id: ast::DUMMY_NODE_ID, kind: ast::StmtKind::Expr(DummyResult::raw_expr(self.span, self.is_error)), span: self.span, }]) } fn make_ty(self: Box<DummyResult>) -> Option<P<ast::Ty>> { Some(DummyResult::raw_ty(self.span, self.is_error)) } fn make_arms(self: Box<DummyResult>) -> Option<SmallVec<[ast::Arm; 1]>> { Some(SmallVec::new()) } fn make_fields(self: Box<DummyResult>) -> Option<SmallVec<[ast::Field; 1]>> { Some(SmallVec::new()) } fn make_field_patterns(self: Box<DummyResult>) -> 
Option<SmallVec<[ast::FieldPat; 1]>> { Some(SmallVec::new()) } fn make_generic_params(self: Box<DummyResult>) -> Option<SmallVec<[ast::GenericParam; 1]>> { Some(SmallVec::new()) } fn make_params(self: Box<DummyResult>) -> Option<SmallVec<[ast::Param; 1]>> { Some(SmallVec::new()) } fn make_struct_fields(self: Box<DummyResult>) -> Option<SmallVec<[ast::StructField; 1]>> { Some(SmallVec::new()) } fn make_variants(self: Box<DummyResult>) -> Option<SmallVec<[ast::Variant; 1]>> { Some(SmallVec::new()) } } /// A syntax extension kind. pub enum SyntaxExtensionKind { /// A token-based function-like macro. Bang( /// An expander with signature TokenStream -> TokenStream. Box<dyn ProcMacro + sync::Sync + sync::Send>, ), /// An AST-based function-like macro. LegacyBang( /// An expander with signature TokenStream -> AST. Box<dyn TTMacroExpander + sync::Sync + sync::Send>, ), /// A token-based attribute macro. Attr( /// An expander with signature (TokenStream, TokenStream) -> TokenStream. /// The first TokenSteam is the attribute itself, the second is the annotated item. /// The produced TokenSteam replaces the input TokenSteam. Box<dyn AttrProcMacro + sync::Sync + sync::Send>, ), /// An AST-based attribute macro. LegacyAttr( /// An expander with signature (AST, AST) -> AST. /// The first AST fragment is the attribute itself, the second is the annotated item. /// The produced AST fragment replaces the input AST fragment. Box<dyn MultiItemModifier + sync::Sync + sync::Send>, ), /// A trivial attribute "macro" that does nothing, /// only keeps the attribute and marks it as inert, /// thus making it ineligible for further expansion. NonMacroAttr { /// Suppresses the `unused_attributes` lint for this attribute. mark_used: bool, }, /// A token-based derive macro. Derive( /// An expander with signature TokenStream -> TokenStream (not yet). /// The produced TokenSteam is appended to the input TokenSteam. 
Box<dyn MultiItemModifier + sync::Sync + sync::Send>, ), /// An AST-based derive macro. LegacyDerive( /// An expander with signature AST -> AST. /// The produced AST fragment is appended to the input AST fragment. Box<dyn MultiItemModifier + sync::Sync + sync::Send>, ), } /// A struct representing a macro definition in "lowered" form ready for expansion. pub struct SyntaxExtension { /// A syntax extension kind. pub kind: SyntaxExtensionKind, /// Span of the macro definition. pub span: Span, /// Whitelist of unstable features that are treated as stable inside this macro. pub allow_internal_unstable: Option<Lrc<[Symbol]>>, /// Suppresses the `unsafe_code` lint for code produced by this macro. pub allow_internal_unsafe: bool, /// Enables the macro helper hack (`ident!(...)` -> `$crate::ident!(...)`) for this macro. pub local_inner_macros: bool, /// The macro's stability info. pub stability: Option<Stability>, /// The macro's deprecation info. pub deprecation: Option<Deprecation>, /// Names of helper attributes registered by this macro. pub helper_attrs: Vec<Symbol>, /// Edition of the crate in which this macro is defined. pub edition: Edition, /// Built-in macros have a couple of special properties like availability /// in `#[no_implicit_prelude]` modules, so we have to keep this flag. pub is_builtin: bool, /// We have to identify macros providing a `Copy` impl early for compatibility reasons. pub is_derive_copy: bool, } impl SyntaxExtension { /// Returns which kind of macro calls this syntax extension. pub fn macro_kind(&self) -> MacroKind { match self.kind { SyntaxExtensionKind::Bang(..) | SyntaxExtensionKind::LegacyBang(..) => MacroKind::Bang, SyntaxExtensionKind::Attr(..) | SyntaxExtensionKind::LegacyAttr(..) | SyntaxExtensionKind::NonMacroAttr { .. } => MacroKind::Attr, SyntaxExtensionKind::Derive(..) | SyntaxExtensionKind::LegacyDerive(..) => { MacroKind::Derive } } } /// Constructs a syntax extension with default properties. 
pub fn default(kind: SyntaxExtensionKind, edition: Edition) -> SyntaxExtension { SyntaxExtension { span: DUMMY_SP, allow_internal_unstable: None, allow_internal_unsafe: false, local_inner_macros: false, stability: None, deprecation: None, helper_attrs: Vec::new(), edition, is_builtin: false, is_derive_copy: false, kind, } } /// Constructs a syntax extension with the given properties /// and other properties converted from attributes. pub fn new( sess: &ParseSess, kind: SyntaxExtensionKind, span: Span, helper_attrs: Vec<Symbol>, edition: Edition, name: Symbol, attrs: &[ast::Attribute], ) -> SyntaxExtension { let allow_internal_unstable = attr::allow_internal_unstable(&attrs, &sess.span_diagnostic) .map(|features| features.collect::<Vec<Symbol>>().into()); let mut local_inner_macros = false; if let Some(macro_export) = attr::find_by_name(attrs, sym::macro_export) { if let Some(l) = macro_export.meta_item_list() { local_inner_macros = attr::list_contains_name(&l, sym::local_inner_macros); } } let is_builtin = attr::contains_name(attrs, sym::rustc_builtin_macro); let (stability, const_stability) = attr::find_stability(&sess, attrs, span); if const_stability.is_some() { sess.span_diagnostic.span_err(span, "macros cannot have const stability attributes"); } SyntaxExtension { kind, span, allow_internal_unstable, allow_internal_unsafe: attr::contains_name(attrs, sym::allow_internal_unsafe), local_inner_macros, stability, deprecation: attr::find_deprecation(&sess, attrs, span), helper_attrs, edition, is_builtin, is_derive_copy: is_builtin && name == sym::Copy, } } pub fn dummy_bang(edition: Edition) -> SyntaxExtension { fn expander<'cx>( _: &'cx mut ExtCtxt<'_>, span: Span, _: TokenStream, ) -> Box<dyn MacResult + 'cx> { DummyResult::any(span) } SyntaxExtension::default(SyntaxExtensionKind::LegacyBang(Box::new(expander)), edition) } pub fn dummy_derive(edition: Edition) -> SyntaxExtension { fn expander( _: &mut ExtCtxt<'_>, _: Span, _: &ast::MetaItem, _: Annotatable, ) -> 
Vec<Annotatable> { Vec::new() } SyntaxExtension::default(SyntaxExtensionKind::Derive(Box::new(expander)), edition) } pub fn non_macro_attr(mark_used: bool, edition: Edition) -> SyntaxExtension { SyntaxExtension::default(SyntaxExtensionKind::NonMacroAttr { mark_used }, edition) } pub fn expn_data( &self, parent: ExpnId, call_site: Span, descr: Symbol, macro_def_id: Option<DefId>, ) -> ExpnData { ExpnData { kind: ExpnKind::Macro(self.macro_kind(), descr), parent, call_site, def_site: self.span, allow_internal_unstable: self.allow_internal_unstable.clone(), allow_internal_unsafe: self.allow_internal_unsafe, local_inner_macros: self.local_inner_macros, edition: self.edition, macro_def_id, } } } /// Result of resolving a macro invocation. pub enum InvocationRes { Single(Lrc<SyntaxExtension>), DeriveContainer(Vec<Lrc<SyntaxExtension>>), } /// Error type that denotes indeterminacy. pub struct Indeterminate; pub trait Resolver { fn next_node_id(&mut self) -> NodeId; fn resolve_dollar_crates(&mut self); fn visit_ast_fragment_with_placeholders(&mut self, expn_id: ExpnId, fragment: &AstFragment); fn register_builtin_macro(&mut self, ident: Ident, ext: SyntaxExtension); fn expansion_for_ast_pass( &mut self, call_site: Span, pass: AstPass, features: &[Symbol], parent_module_id: Option<NodeId>, ) -> ExpnId; fn resolve_imports(&mut self); fn resolve_macro_invocation( &mut self, invoc: &Invocation, eager_expansion_root: ExpnId, force: bool, ) -> Result<InvocationRes, Indeterminate>; fn check_unused_macros(&mut self); fn has_derive_copy(&self, expn_id: ExpnId) -> bool; fn add_derive_copy(&mut self, expn_id: ExpnId); fn cfg_accessible(&mut self, expn_id: ExpnId, path: &ast::Path) -> Result<bool, Indeterminate>; } #[derive(Clone)] pub struct ModuleData { pub mod_path: Vec<Ident>, pub directory: PathBuf, } #[derive(Clone)] pub struct ExpansionData { pub id: ExpnId, pub depth: usize, pub module: Rc<ModuleData>, pub directory_ownership: DirectoryOwnership, pub prior_type_ascription: 
Option<(Span, bool)>, } /// One of these is made during expansion and incrementally updated as we go; /// when a macro expansion occurs, the resulting nodes have the `backtrace() /// -> expn_data` of their expansion context stored into their span. pub struct ExtCtxt<'a> { pub parse_sess: &'a ParseSess, pub ecfg: expand::ExpansionConfig<'a>, pub reduced_recursion_limit: Option<usize>, pub root_path: PathBuf, pub resolver: &'a mut dyn Resolver, pub current_expansion: ExpansionData, pub expansions: FxHashMap<Span, Vec<String>>, /// Called directly after having parsed an external `mod foo;` in expansion. pub(super) extern_mod_loaded: Option<&'a dyn Fn(&ast::Crate)>, } impl<'a> ExtCtxt<'a> { pub fn new( parse_sess: &'a ParseSess, ecfg: expand::ExpansionConfig<'a>, resolver: &'a mut dyn Resolver, extern_mod_loaded: Option<&'a dyn Fn(&ast::Crate)>, ) -> ExtCtxt<'a> { ExtCtxt { parse_sess, ecfg, reduced_recursion_limit: None, resolver, extern_mod_loaded, root_path: PathBuf::new(), current_expansion: ExpansionData { id: ExpnId::root(), depth: 0, module: Rc::new(ModuleData { mod_path: Vec::new(), directory: PathBuf::new() }), directory_ownership: DirectoryOwnership::Owned { relative: None }, prior_type_ascription: None, }, expansions: FxHashMap::default(), } } /// Returns a `Folder` for deeply expanding all macros in an AST node. pub fn expander<'b>(&'b mut self) -> expand::MacroExpander<'b, 'a> { expand::MacroExpander::new(self, false) } /// Returns a `Folder` that deeply expands all macros and assigns all `NodeId`s in an AST node. /// Once `NodeId`s are assigned, the node may not be expanded, removed, or otherwise modified. 
pub fn monotonic_expander<'b>(&'b mut self) -> expand::MacroExpander<'b, 'a> { expand::MacroExpander::new(self, true) } pub fn new_parser_from_tts(&self, stream: TokenStream) -> parser::Parser<'a> { rustc_parse::stream_to_parser(self.parse_sess, stream, MACRO_ARGUMENTS) } pub fn source_map(&self) -> &'a SourceMap { self.parse_sess.source_map() } pub fn parse_sess(&self) -> &'a ParseSess { self.parse_sess } pub fn call_site(&self) -> Span { self.current_expansion.id.expn_data().call_site } /// Equivalent of `Span::def_site` from the proc macro API, /// except that the location is taken from the span passed as an argument. pub fn with_def_site_ctxt(&self, span: Span) -> Span { span.with_def_site_ctxt(self.current_expansion.id) } /// Equivalent of `Span::call_site` from the proc macro API, /// except that the location is taken from the span passed as an argument. pub fn with_call_site_ctxt(&self, span: Span) -> Span { span.with_call_site_ctxt(self.current_expansion.id) } /// Equivalent of `Span::mixed_site` from the proc macro API, /// except that the location is taken from the span passed as an argument. pub fn with_mixed_site_ctxt(&self, span: Span) -> Span { span.with_mixed_site_ctxt(self.current_expansion.id) } /// Returns span for the macro which originally caused the current expansion to happen. /// /// Stops backtracing at include! boundary. pub fn expansion_cause(&self) -> Option<Span> { self.current_expansion.id.expansion_cause() } pub fn struct_span_err<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> DiagnosticBuilder<'a> { self.parse_sess.span_diagnostic.struct_span_err(sp, msg) } /// Emit `msg` attached to `sp`, without immediately stopping /// compilation. /// /// Compilation will be stopped in the near future (at the end of /// the macro expansion phase). 
pub fn span_err<S: Into<MultiSpan>>(&self, sp: S, msg: &str) { self.parse_sess.span_diagnostic.span_err(sp, msg); } pub fn span_warn<S: Into<MultiSpan>>(&self, sp: S, msg: &str) { self.parse_sess.span_diagnostic.span_warn(sp, msg); } pub fn span_bug<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> ! { self.parse_sess.span_diagnostic.span_bug(sp, msg); } pub fn trace_macros_diag(&mut self) { for (sp, notes) in self.expansions.iter() { let mut db = self.parse_sess.span_diagnostic.span_note_diag(*sp, "trace_macro"); for note in notes { db.note(note); } db.emit(); } // Fixme: does this result in errors? self.expansions.clear(); } pub fn bug(&self, msg: &str) -> ! { self.parse_sess.span_diagnostic.bug(msg); } pub fn trace_macros(&self) -> bool { self.ecfg.trace_mac } pub fn set_trace_macros(&mut self, x: bool) { self.ecfg.trace_mac = x } pub fn ident_of(&self, st: &str, sp: Span) -> Ident { Ident::from_str_and_span(st, sp) } pub fn std_path(&self, components: &[Symbol]) -> Vec<Ident> { let def_site = self.with_def_site_ctxt(DUMMY_SP); iter::once(Ident::new(kw::DollarCrate, def_site)) .chain(components.iter().map(|&s| Ident::with_dummy_span(s))) .collect() } pub fn name_of(&self, st: &str) -> Symbol { Symbol::intern(st) } pub fn check_unused_macros(&mut self) { self.resolver.check_unused_macros(); } /// Resolves a path mentioned inside Rust code. /// /// This unifies the logic used for resolving `include_X!`, and `#[doc(include)]` file paths. /// /// Returns an absolute path to the file that `path` refers to. pub fn resolve_path( &self, path: impl Into<PathBuf>, span: Span, ) -> Result<PathBuf, DiagnosticBuilder<'a>> { let path = path.into(); // Relative paths are resolved relative to the file in which they are found // after macro expansion (that is, they are unhygienic). 
if !path.is_absolute() { let callsite = span.source_callsite(); let mut result = match self.source_map().span_to_unmapped_path(callsite) { FileName::Real(path) => path, FileName::DocTest(path, _) => path, other => { return Err(self.struct_span_err( span, &format!("cannot resolve relative path in non-file source `{}`", other), )); } }; result.pop(); result.push(path); Ok(result) } else { Ok(path) } } } /// Extracts a string literal from the macro expanded version of `expr`, /// emitting `err_msg` if `expr` is not a string literal. This does not stop /// compilation on error, merely emits a non-fatal error and returns `None`. pub fn expr_to_spanned_string<'a>( cx: &'a mut ExtCtxt<'_>, expr: P<ast::Expr>, err_msg: &str, ) -> Result<(Symbol, ast::StrStyle, Span), Option<DiagnosticBuilder<'a>>> { // Perform eager expansion on the expression. // We want to be able to handle e.g., `concat!("foo", "bar")`. let expr = cx.expander().fully_expand_fragment(AstFragment::Expr(expr)).make_expr(); Err(match expr.kind { ast::ExprKind::Lit(ref l) => match l.kind { ast::LitKind::Str(s, style) => return Ok((s, style, expr.span)), ast::LitKind::Err(_) => None, _ => Some(cx.struct_span_err(l.span, err_msg)), }, ast::ExprKind::Err => None, _ => Some(cx.struct_span_err(expr.span, err_msg)), }) } pub fn expr_to_string( cx: &mut ExtCtxt<'_>, expr: P<ast::Expr>, err_msg: &str, ) -> Option<(Symbol, ast::StrStyle)> { expr_to_spanned_string(cx, expr, err_msg) .map_err(|err| { err.map(|mut err| { err.emit(); }) }) .ok() .map(|(symbol, style, _)| (symbol, style)) } /// Non-fatally assert that `tts` is empty. Note that this function /// returns even when `tts` is non-empty, macros that *need* to stop /// compilation should call /// `cx.parse_sess.span_diagnostic.abort_if_errors()` (this should be /// done as rarely as possible). 
pub fn check_zero_tts(cx: &ExtCtxt<'_>, sp: Span, tts: TokenStream, name: &str) { if !tts.is_empty() { cx.span_err(sp, &format!("{} takes no arguments", name)); } } /// Parse an expression. On error, emit it, advancing to `Eof`, and return `None`. pub fn parse_expr(p: &mut parser::Parser<'_>) -> Option<P<ast::Expr>> { match p.parse_expr() { Ok(e) => return Some(e), Err(mut err) => err.emit(), } while p.token != token::Eof { p.bump(); } None } /// Interpreting `tts` as a comma-separated sequence of expressions, /// expect exactly one string literal, or emit an error and return `None`. pub fn get_single_str_from_tts( cx: &mut ExtCtxt<'_>, sp: Span, tts: TokenStream, name: &str, ) -> Option<String> { let mut p = cx.new_parser_from_tts(tts); if p.token == token::Eof { cx.span_err(sp, &format!("{} takes 1 argument", name)); return None; } let ret = parse_expr(&mut p)?; let _ = p.eat(&token::Comma); if p.token != token::Eof { cx.span_err(sp, &format!("{} takes 1 argument", name)); } expr_to_string(cx, ret, "argument must be a string literal").map(|(s, _)| s.to_string()) } /// Extracts comma-separated expressions from `tts`. /// On error, emit it, and return `None`. pub fn get_exprs_from_tts( cx: &mut ExtCtxt<'_>, sp: Span, tts: TokenStream, ) -> Option<Vec<P<ast::Expr>>> { let mut p = cx.new_parser_from_tts(tts); let mut es = Vec::new(); while p.token != token::Eof { let expr = parse_expr(&mut p)?; // Perform eager expansion on the expression. // We want to be able to handle e.g., `concat!("foo", "bar")`. let expr = cx.expander().fully_expand_fragment(AstFragment::Expr(expr)).make_expr(); es.push(expr); if p.eat(&token::Comma) { continue; } if p.token != token::Eof { cx.span_err(sp, "expected token: `,`"); return None; } } Some(es) }
33.404898
100
0.584663
7ab7bca3ab8237cd1f8ef494ed07e09e7a0e72fe
4,389
use crate::sys::externals::Function;
use crate::sys::store::{Store, StoreObject};
use crate::sys::RuntimeError;
use wasmer_types::Value;
pub use wasmer_types::{
    ExportType, ExternType, FunctionType, GlobalType, ImportType, MemoryType, Mutability,
    TableType, Type as ValType,
};
use wasmer_vm::VMFuncRef;

/// WebAssembly computations manipulate values of basic value types:
/// * Integers (32 or 64 bit width)
/// * Floating-point (32 or 64 bit width)
/// * Vectors (128 bits, with 32 or 64 bit lanes)
///
/// Spec: <https://webassembly.github.io/spec/core/exec/runtime.html#values>
pub type Val = Value<Function>;

impl StoreObject for Val {
    /// Returns `true` when this value may be used with `store`.
    ///
    /// Only a non-null `FuncRef` is actually tied to a store; every other
    /// variant (null funcrefs, extern refs, and plain numeric values) is
    /// considered store-agnostic and always returns `true`.
    fn comes_from_same_store(&self, store: &Store) -> bool {
        match self {
            // A null funcref belongs to no store in particular.
            Self::FuncRef(None) => true,
            Self::FuncRef(Some(f)) => Store::same(store, f.store()),
            // `ExternRef`s are not tied to specific stores
            Self::ExternRef(_) => true,
            // Numeric values carry no store affinity.
            Self::I32(_) | Self::I64(_) | Self::F32(_) | Self::F64(_) | Self::V128(_) => true,
        }
    }
}

impl From<Function> for Val {
    /// Wraps a [`Function`] as a non-null `FuncRef` value.
    fn from(val: Function) -> Self {
        Self::FuncRef(Some(val))
    }
}

/// It provides useful functions for converting back and forth
/// from [`Val`] into `FuncRef`.
pub trait ValFuncRef {
    /// Converts this value into a raw [`VMFuncRef`], failing when the value
    /// is not a funcref or comes from a different store.
    fn into_vm_funcref(self, store: &Store) -> Result<VMFuncRef, RuntimeError>;

    /// Rebuilds a [`Val`] from a raw [`VMFuncRef`] owned by `store`.
    fn from_vm_funcref(item: VMFuncRef, store: &Store) -> Self;

    /// Converts this value into an element suitable for storing in a VM table.
    fn into_table_reference(self, store: &Store) -> Result<wasmer_vm::TableElement, RuntimeError>;

    /// Rebuilds a [`Val`] from an element read out of a VM table.
    fn from_table_reference(item: wasmer_vm::TableElement, store: &Store) -> Self;
}

impl ValFuncRef for Val {
    fn into_vm_funcref(self, store: &Store) -> Result<VMFuncRef, RuntimeError> {
        if !self.comes_from_same_store(store) {
            return Err(RuntimeError::new("cross-`Store` values are not supported"));
        }
        Ok(match self {
            Self::FuncRef(None) => VMFuncRef::null(),
            Self::FuncRef(Some(f)) => f.vm_funcref(),
            // Only funcref variants can be lowered to a `VMFuncRef`.
            _ => return Err(RuntimeError::new("val is not func ref")),
        })
    }

    fn from_vm_funcref(func_ref: VMFuncRef, store: &Store) -> Self {
        if func_ref.is_null() {
            return Self::FuncRef(None);
        }
        // SAFETY(review): `func_ref` was checked non-null above; this assumes
        // a non-null `VMFuncRef` always points to a live
        // `VMCallerCheckedAnyfunc` owned by the VM — upheld by whoever
        // produced the funcref (TODO confirm against wasmer_vm's contract).
        let item: &wasmer_vm::VMCallerCheckedAnyfunc = unsafe {
            let anyfunc: *const wasmer_vm::VMCallerCheckedAnyfunc = *func_ref;
            &*anyfunc
        };
        // The anyfunc only stores a type index; resolve the full signature
        // through the engine. A missing signature is treated as a bug.
        let signature = store
            .engine()
            .lookup_signature(item.type_index)
            .expect("Signature not found in store");
        let export = wasmer_engine::ExportFunction {
            // TODO:
            // figure out if we ever need a value here: need testing with complicated import patterns
            metadata: None,
            vm_function: wasmer_vm::VMFunction {
                address: item.func_ptr,
                signature,
                // TODO: review this comment (unclear if it's still correct):
                // All functions in tables are already Static (as dynamic functions
                // are converted to use the trampolines with static signatures).
                kind: wasmer_vm::VMFunctionKind::Static,
                vmctx: item.vmctx,
                call_trampoline: None,
                instance_ref: None,
            },
        };
        let f = Function::from_vm_export(store, export);
        Self::FuncRef(Some(f))
    }

    fn into_table_reference(self, store: &Store) -> Result<wasmer_vm::TableElement, RuntimeError> {
        if !self.comes_from_same_store(store) {
            return Err(RuntimeError::new("cross-`Store` values are not supported"));
        }
        Ok(match self {
            // TODO(reftypes): review this clone
            Self::ExternRef(extern_ref) => wasmer_vm::TableElement::ExternRef(extern_ref),
            Self::FuncRef(None) => wasmer_vm::TableElement::FuncRef(VMFuncRef::null()),
            Self::FuncRef(Some(f)) => wasmer_vm::TableElement::FuncRef(f.vm_funcref()),
            // Numeric values cannot be stored in reference tables.
            _ => return Err(RuntimeError::new("val is not reference")),
        })
    }

    fn from_table_reference(item: wasmer_vm::TableElement, store: &Store) -> Self {
        match item {
            wasmer_vm::TableElement::FuncRef(f) => Self::from_vm_funcref(f, store),
            wasmer_vm::TableElement::ExternRef(extern_ref) => Self::ExternRef(extern_ref),
        }
    }
}
38.840708
101
0.609934
2298e752f4a1c02289b900b8624dd35701bc946c
2,607
//! Utilities to check against C signatures, when enabled.

/// Casts the given pointer to another type, similarly to calling `cast()` on it,
/// while verifying that the layout of the pointee stays the same after the cast.
macro_rules! checked_cast {
    ($ptr:ident) => {{
        let target_ptr = $ptr.cast();
        let target = crate::use_libc::Pad::new(core::ptr::read(target_ptr));
        // Uses the fact that the compiler checks for size equality,
        // when transmuting between types: `compare_size` fixes the target
        // type of the transmute to the cast-to type, so this only compiles
        // when source and target pointees have equal size.
        let size_check = core::mem::transmute(core::ptr::read($ptr));
        target.compare_size(size_check);
        // Same trick, but wrapping both sides in `Pad<_>` adds `align_of`
        // padding, so equal `Pad` sizes also imply equal alignment.
        let align_check = core::mem::transmute(crate::use_libc::Pad::new(core::ptr::read($ptr)));
        target.compare_alignment(align_check);
        target_ptr
    }};
}

/// Signature-checks an expression against the libc equivalent.
///
/// The `if false` arm means `$e` is type-checked but never executed, so at
/// the moment this is purely a compile-time signature check.
macro_rules! libc {
    ($e:expr) => {
        // TODO: Implement actually using libc. Right now this is just a
        // signature check.
        #[allow(unreachable_code)]
        if false {
            #[allow(unused_imports)]
            use crate::use_libc::*;
            // TODO: `dlopen` libc, `dlsym` the function, and call it...
            return $e;
        }
    };
}

/// Statically asserts that `$name` has the same size and alignment as the
/// corresponding `libc::$libc` type (only checked in test builds).
#[cfg(feature = "threads")]
macro_rules! libc_type {
    ($name:ident, $libc:ident) => {
        #[cfg(test)]
        static_assertions::const_assert_eq!(
            core::mem::size_of::<$name>(),
            core::mem::size_of::<libc::$libc>()
        );
        #[cfg(test)]
        static_assertions::const_assert_eq!(
            core::mem::align_of::<$name>(),
            core::mem::align_of::<libc::$libc>()
        );
    };
}

/// A struct that adds `align_of<T>` padding bytes to type `T`.
///
/// Based on the rules of C struct alignment:
/// * `align_of<Pad<T>> == max(align_of<T>, align_of<u8>) == align_of<T>`
/// * `size_of<Pad<T>> / align_of<Pad<T>> == ciel( (size_of<T> + size_of<u8>) / align_of<Pad<T>>)`
/// * `size_of<T> % align_of<T> == 0`
///
/// Therefore `size_of<Pad<T>> == size_of<T> + align_of<T>`.
#[repr(C)]
pub(crate) struct Pad<T> {
    field: T,
    // Forces at least one byte after `field`, which `#[repr(C)]` rounds up
    // to a full `align_of<T>` of padding.
    force_padding: u8,
}

impl<T> Pad<T> {
    // NOTE(review): marked `unsafe` presumably because it is only meant to
    // wrap values produced by `core::ptr::read` inside `checked_cast!` —
    // confirm the intended caller contract.
    pub unsafe fn new(v: T) -> Self {
        Pad {
            field: v,
            force_padding: 0,
        }
    }

    /// Used to check that `size_of<T> == size_of<U>` with `transmute`.
    pub fn compare_size(&self, _v: T) {}

    /// Used to check that `size_of<Pad<T>> == size_of<Pad<U>>` with `transmute`.
    ///
    /// Since `size_of<Pad<T>> == size_of<T> + align_of<T>`,
    /// if `size_of<T> == size_of<U>` then `align_of<T> == align_of<U>`.
    pub fn compare_alignment(&self, _pad: Pad<T>) {}
}
32.5875
98
0.566552
1efb5c6131b3acf992de4210187c29fc7cac1133
26,952
use super::cluster::SlotRange;
use super::utils::{has_flags, CmdParseError};
use crate::common::cluster::ClusterName;
use crate::common::config::ClusterConfig;
use crate::common::utils::extract_host_from_address;
use crate::protocol::{Array, BulkStr, Resp};
use std::collections::HashMap;
use std::convert::TryFrom;
use std::iter::Peekable;
use std::str;

// Evaluates a `Result` expression; on `Err` the enclosing function
// returns `CmdParseError`.
macro_rules! try_parse {
    ($expression:expr) => {{
        match $expression {
            Ok(v) => (v),
            Err(_) => return Err(CmdParseError {}),
        }
    }};
}

// Evaluates an `Option` expression; on `None` the enclosing function
// returns `CmdParseError`.
macro_rules! try_get {
    ($expression:expr) => {{
        match $expression {
            Some(v) => (v),
            None => return Err(CmdParseError {}),
        }
    }};
}

/// Flags parsed from the flag argument of the `UMCTL SETCLUSTER` command.
#[derive(Debug, Clone, PartialEq)]
pub struct ClusterMapFlags {
    // True when the comma-separated flag string contains "FORCE".
    pub force: bool,
}

impl ClusterMapFlags {
    /// Serializes the flags back to their wire representation.
    /// "NOFLAG" is the placeholder emitted when no flag is set.
    pub fn to_arg(&self) -> String {
        if self.force {
            "FORCE".to_string()
        } else {
            "NOFLAG".to_string()
        }
    }

    /// Parses a comma-separated flag string (e.g. "FORCE" or "NOFLAG").
    pub fn from_arg(flags_str: &str) -> Self {
        let force = has_flags(flags_str, ',', "FORCE");

        ClusterMapFlags { force }
    }
}

// Section markers inside the `UMCTL SETCLUSTER` argument list:
// tokens after `PEER` describe peer proxies, tokens after `CONFIG`
// describe per-cluster configuration.
const PEER_PREFIX: &str = "PEER";
const CONFIG_PREFIX: &str = "CONFIG";

/// Complete proxy-side metadata delivered by `UMCTL SETCLUSTER`:
/// an epoch, flags, the local slot map, the peer slot map, and the
/// per-cluster configuration map.
#[derive(Debug, Clone)]
pub struct ProxyClusterMeta {
    epoch: u64,
    flags: ClusterMapFlags,
    local: ProxyClusterMap,
    peer: ProxyClusterMap,
    clusters_config: ClusterConfigMap,
}

impl ProxyClusterMeta {
    /// Bundles already-parsed pieces into a `ProxyClusterMeta`.
    pub fn new(
        epoch: u64,
        flags: ClusterMapFlags,
        local: ProxyClusterMap,
        peer: ProxyClusterMap,
        clusters_config: ClusterConfigMap,
    ) -> Self {
        Self {
            epoch,
            flags,
            local,
            peer,
            clusters_config,
        }
    }

    /// Returns the metadata epoch.
    pub fn get_epoch(&self) -> u64 {
        self.epoch
    }

    /// Returns a copy of the parsed flags.
    pub fn get_flags(&self) -> ClusterMapFlags {
        self.flags.clone()
    }

    /// Returns the slot map for nodes served by this proxy.
    pub fn get_local(&self) -> &ProxyClusterMap {
        &self.local
    }

    /// Returns the slot map for nodes served by peer proxies.
    pub fn get_peer(&self) -> &ProxyClusterMap {
        &self.peer
    }

    /// Returns the per-cluster configuration map.
    pub fn get_configs(&self) -> &ClusterConfigMap {
        &self.clusters_config
    }

    /// Parses the metadata out of a RESP array representing a full
    /// `UMCTL SETCLUSTER` command.
    ///
    /// The outer `Result` is a hard parse failure; the inner `Result`
    /// reports that only the extended (CONFIG) section was invalid while
    /// the core metadata was accepted (see `parse`).
    pub fn from_resp<T: AsRef<[u8]>>(
        resp: &Resp<T>,
    ) -> Result<(Self, Result<(), ParseExtendedMetaError>), CmdParseError> {
        let arr = match resp {
            Resp::Arr(Array::Arr(ref arr)) => arr,
            _ => return Err(CmdParseError {}),
        };

        // Skip the "UMCTL SETCLUSTER"
        // Non-UTF8 or non-bulk-string elements are silently dropped here;
        // missing tokens then surface as CmdParseError in `parse`.
        let it = arr.iter().skip(2).flat_map(|resp| match resp {
            Resp::Bulk(BulkStr::Str(safe_str)) => match str::from_utf8(safe_str.as_ref()) {
                Ok(s) => Some(s.to_string()),
                _ => None,
            },
            _ => None,
        });
        let mut it = it.peekable();

        Self::parse(&mut it)
    }

    /// Parses `<epoch> <flags> <local map> [PEER <peer map>] [CONFIG <configs>]`
    /// from a token stream.
    ///
    /// A bad CONFIG section is tolerated (inner `Err(ParseExtendedMetaError)`)
    /// as long as both the local and peer maps are non-empty, so that config
    /// problems cannot break the core slot-map update.
    pub fn parse<It>(
        it: &mut Peekable<It>,
    ) -> Result<(Self, Result<(), ParseExtendedMetaError>), CmdParseError>
    where
        It: Iterator<Item = String>,
    {
        let epoch_str = try_get!(it.next());
        let epoch = try_parse!(epoch_str.parse::<u64>());

        let flags = ClusterMapFlags::from_arg(&try_get!(it.next()));

        let local = ProxyClusterMap::parse(it)?;
        let mut peer = ProxyClusterMap::new(HashMap::new());
        let mut clusters_config = ClusterConfigMap::default();

        let mut extended_meta_result = Ok(());

        while let Some(token) = it.next() {
            match token.to_uppercase().as_str() {
                PEER_PREFIX => peer = ProxyClusterMap::parse(it)?,
                CONFIG_PREFIX => match ClusterConfigMap::parse(it) {
                    Ok(c) => clusters_config = c,
                    Err(_) => {
                        // Only reject the whole command if the core metadata
                        // is also incomplete; otherwise keep going and report
                        // the config failure through the inner Result.
                        if local.get_map().is_empty() || peer.get_map().is_empty() {
                            return Err(CmdParseError {});
                        } else {
                            error!("invalid cluster config from UMCTL SETCLUSTER but the local and peer metadata are complete. Ignore this error to protect the core functionality.");
                            extended_meta_result = Err(ParseExtendedMetaError {})
                        }
                    }
                },
                _ => return Err(CmdParseError {}),
            }
        }

        Ok((
            Self {
                epoch,
                flags,
                local,
                peer,
                clusters_config,
            },
            extended_meta_result,
        ))
    }

    /// Serializes the metadata back to the `UMCTL SETCLUSTER` argument list.
    /// Empty PEER / CONFIG sections are omitted entirely.
    pub fn to_args(&self) -> Vec<String> {
        let mut args = vec![self.epoch.to_string(), self.flags.to_arg()];
        let local = self.local.cluster_map_to_args();
        let peer = self.peer.cluster_map_to_args();
        let config = self.clusters_config.to_args();
        args.extend_from_slice(&local);
        if !peer.is_empty() {
            args.push(PEER_PREFIX.to_string());
            args.extend_from_slice(&peer);
        }
        if !config.is_empty() {
            args.push(CONFIG_PREFIX.to_string());
            args.extend_from_slice(&config);
        }
        args
    }
}

/// Mapping of cluster name -> node address -> slot ranges.
#[derive(Debug, Clone)]
pub struct ProxyClusterMap {
    cluster_map: HashMap<ClusterName, HashMap<String, Vec<SlotRange>>>,
}

impl ProxyClusterMap {
    /// Wraps a prebuilt map.
    pub fn new(cluster_map: HashMap<ClusterName, HashMap<String, Vec<SlotRange>>>) -> Self {
        Self { cluster_map }
    }

    /// Returns the underlying map.
    pub fn get_map(&self) -> &HashMap<ClusterName, HashMap<String, Vec<SlotRange>>> {
        &self.cluster_map
    }

    /// Flattens the map into wire tokens:
    /// one `<cluster> <node> <slot range...>` group per slot range.
    /// NOTE: iteration order over the HashMaps is unspecified, so callers
    /// (e.g. the tests) sort before comparing.
    pub fn cluster_map_to_args(&self) -> Vec<String> {
        let mut args = vec![];
        for (cluster_name, node_map) in &self.cluster_map {
            for (node, slot_ranges) in node_map {
                for slot_range in slot_ranges {
                    args.push(cluster_name.to_string());
                    args.push(node.clone());
                    args.extend(slot_range.clone().into_strings());
                }
            }
        }
        args
    }

    /// Consumes `<cluster> <node> <slot range>` groups until the stream ends
    /// or a PEER/CONFIG section marker is peeked (the marker is left in place).
    fn parse<It>(it: &mut Peekable<It>) -> Result<Self, CmdParseError>
    where
        It: Iterator<Item = String>,
    {
        let mut cluster_map = HashMap::new();

        // To workaround lifetime problem.
        #[allow(clippy::while_let_loop)]
        loop {
            match it.peek() {
                Some(first_token) => {
                    let prefix = first_token.to_uppercase();
                    if prefix == PEER_PREFIX || prefix == CONFIG_PREFIX {
                        break;
                    }
                }
                None => break,
            }

            let (cluster_name, address, slot_range) = try_parse!(Self::parse_cluster(it));
            let cluster = cluster_map.entry(cluster_name).or_insert_with(HashMap::new);
            let slots = cluster.entry(address).or_insert_with(Vec::new);
            slots.push(slot_range);
        }

        Ok(Self { cluster_map })
    }

    /// Parses one `<cluster> <node address> <slot range>` group.
    fn parse_cluster<It>(
        it: &mut Peekable<It>,
    ) -> Result<(ClusterName, String, SlotRange), CmdParseError>
    where
        It: Iterator<Item = String>,
    {
        let cluster_name = try_get!(it.next());
        let cluster_name =
            ClusterName::try_from(cluster_name.as_str()).map_err(|_| CmdParseError {})?;
        let addr = try_get!(it.next());
        let slot_range = try_parse!(Self::parse_tagged_slot_range(it));
        Ok((cluster_name, addr, slot_range))
    }

    /// Parses one slot range (possibly tagged MIGRATING/IMPORTING with
    /// migration metadata — see `SlotRange::from_strings`).
    fn parse_tagged_slot_range<It>(it: &mut Peekable<It>) -> Result<SlotRange, CmdParseError>
    where
        It: Iterator<Item = String>,
    {
        SlotRange::from_strings(it).ok_or_else(|| CmdParseError {})
    }

    /// Returns true only if every local node address uses `announce_host`
    /// as its host part; logs and returns false otherwise.
    pub fn check_hosts(&self, announce_host: &str) -> bool {
        for (cluster_name, nodes) in self.cluster_map.iter() {
            for local_node_address in nodes.keys() {
                let host = match extract_host_from_address(local_node_address.as_str()) {
                    Some(host) => host,
                    None => {
                        error!("invalid local node address: {}", local_node_address);
                        return false;
                    }
                };
                if host != announce_host {
                    error!(
                        "not my announce host: {} {} != {}",
                        cluster_name, announce_host, local_node_address
                    );
                    return false;
                }
            }
        }
        true
    }
}

/// Per-cluster configuration overrides, keyed by cluster name.
#[derive(Debug, Clone)]
pub struct ClusterConfigMap {
    config_map: HashMap<ClusterName, ClusterConfig>,
}

impl Default for ClusterConfigMap {
    fn default() -> Self {
        Self {
            config_map: HashMap::new(),
        }
    }
}

impl ClusterConfigMap {
    /// Wraps a prebuilt config map.
    pub fn new(config_map: HashMap<ClusterName, ClusterConfig>) -> Self {
        Self { config_map }
    }

    /// Returns the cluster's config, or `ClusterConfig::default()` if the
    /// cluster has no explicit entry.
    pub fn get_or_default(&self, cluster_name: &ClusterName) -> ClusterConfig {
        self.config_map
            .get(cluster_name)
            .cloned()
            .unwrap_or_else(ClusterConfig::default)
    }

    /// Returns a clone of the cluster's config, if present.
    pub fn get(&self, cluster_name: &ClusterName) -> Option<ClusterConfig> {
        self.config_map.get(cluster_name).cloned()
    }

    /// Returns the underlying map.
    pub fn get_map(&self) -> &HashMap<ClusterName, ClusterConfig> {
        &self.config_map
    }

    /// Consumes `<cluster> <field> <value>` triples until the stream ends
    /// or a PEER/CONFIG marker is peeked. Unknown fields or bad values
    /// produce `CmdParseError`.
    fn parse<It>(it: &mut Peekable<It>) -> Result<Self, CmdParseError>
    where
        It: Iterator<Item = String>,
    {
        let mut config_map = HashMap::new();

        // To workaround lifetime problem.
        #[allow(clippy::while_let_loop)]
        loop {
            match it.peek() {
                Some(first_token) => {
                    let prefix = first_token.to_uppercase();
                    if prefix == PEER_PREFIX || prefix == CONFIG_PREFIX {
                        break;
                    }
                }
                None => break,
            }

            let (cluster_name, field, value) = try_parse!(Self::parse_config(it));
            let cluster_config = config_map
                .entry(cluster_name)
                .or_insert_with(ClusterConfig::default);
            if let Err(err) = cluster_config.set_field(&field, &value) {
                warn!("failed to set config field {:?}", err);
                return Err(CmdParseError {});
            }
        }

        Ok(Self { config_map })
    }

    /// Parses one `<cluster> <field> <value>` triple.
    fn parse_config<It>(it: &mut It) -> Result<(ClusterName, String, String), CmdParseError>
    where
        It: Iterator<Item = String>,
    {
        let cluster_name = try_get!(it.next());
        let cluster_name =
            ClusterName::try_from(cluster_name.as_str()).map_err(|_| CmdParseError {})?;
        let field = try_get!(it.next());
        let value = try_get!(it.next());
        Ok((cluster_name, field, value))
    }

    /// Flattens the config map into `<cluster> <field> <value>` wire tokens.
    /// NOTE: HashMap iteration order is unspecified; callers sort when
    /// comparing.
    pub fn to_args(&self) -> Vec<String> {
        let mut args = vec![];
        for (cluster_name, config) in &self.config_map {
            for (k, v) in config.to_str_map().into_iter() {
                args.push(cluster_name.to_string());
                args.push(k);
                args.push(v);
            }
        }
        args
    }
}

/// Marker error: the extended (CONFIG) section of `UMCTL SETCLUSTER`
/// failed to parse while the core slot-map metadata was accepted.
#[derive(Debug)]
pub struct ParseExtendedMetaError {}

#[cfg(test)]
mod tests {
    use super::super::config::CompressionStrategy;
    use super::*;

    #[test]
    fn test_single_cluster() {
        let args = vec!["cluster_name", "127.0.0.1:6379", "1", "0-1000"];
        let mut arguments = args.iter().map(|s| s.to_string()).peekable();
        let r = ProxyClusterMap::parse(&mut arguments);
        assert!(r.is_ok());
        let proxy_cluster_map = r.unwrap();
        assert_eq!(proxy_cluster_map.cluster_map.len(), 1);
        assert_eq!(proxy_cluster_map.cluster_map_to_args(), args);
    }

    #[test]
    fn test_multiple_slots() {
        let args = vec![
            "cluster_name",
            "127.0.0.1:6379",
            "1",
            "0-1000",
            "cluster_name",
            "127.0.0.1:6379",
            "1",
            "1001-2000",
        ];
        let mut arguments = args.iter().map(|s| s.to_string()).peekable();
        let r = ProxyClusterMap::parse(&mut arguments);
        assert!(r.is_ok());
        let proxy_cluster_map = r.unwrap();
        assert_eq!(proxy_cluster_map.cluster_map.len(), 1);

        let cluster_name = ClusterName::try_from("cluster_name").unwrap();

        assert_eq!(
            proxy_cluster_map
                .cluster_map
                .get(&cluster_name)
                .unwrap()
                .len(),
            1
        );
        assert_eq!(
            proxy_cluster_map
                .cluster_map
                .get(&cluster_name)
                .unwrap()
                .get("127.0.0.1:6379")
                .unwrap()
                .len(),
            2
        );
        assert_eq!(proxy_cluster_map.cluster_map_to_args(), args);
    }

    #[test]
    fn test_multiple_nodes() {
        let args = vec![
            "cluster_name",
            "127.0.0.1:7000",
            "1",
            "0-1000",
            "cluster_name",
            "127.0.0.1:7001",
            "1",
            "1001-2000",
        ];
        let mut arguments = args.iter().map(|s| s.to_string()).peekable();
        let proxy_cluster_map = ProxyClusterMap::parse(&mut arguments).unwrap();
        assert_eq!(proxy_cluster_map.cluster_map.len(), 1);

        let cluster_name = ClusterName::try_from("cluster_name").unwrap();

        assert_eq!(
            proxy_cluster_map
                .cluster_map
                .get(&cluster_name)
                .unwrap()
                .len(),
            2
        );
        assert_eq!(
            proxy_cluster_map
                .cluster_map
                .get(&cluster_name)
                .unwrap()
                .get("127.0.0.1:7000")
                .unwrap()
                .len(),
            1
        );
        assert_eq!(
            proxy_cluster_map
                .cluster_map
                .get(&cluster_name)
                .unwrap()
                .get("127.0.0.1:7001")
                .unwrap()
                .len(),
            1
        );
        // Serialization order is unspecified (HashMap), so compare sorted.
        let mut expected_args = args.clone();
        let mut actual_args = proxy_cluster_map.cluster_map_to_args();
        expected_args.sort();
        actual_args.sort();
        assert_eq!(actual_args, expected_args);
    }

    #[test]
    fn test_multiple_cluster() {
        let args = vec![
            "cluster_name",
            "127.0.0.1:7000",
            "1",
            "0-1000",
            "cluster_name",
            "127.0.0.1:7001",
            "1",
            "1001-2000",
            "another_cluster",
            "127.0.0.1:7002",
            "1",
            "0-2000",
        ];
        let mut arguments = args.iter().map(|s| s.to_string()).peekable();
        let r = ProxyClusterMap::parse(&mut arguments);
        assert!(r.is_ok());
        let proxy_cluster_map = r.unwrap();
        assert_eq!(proxy_cluster_map.cluster_map.len(), 2);

        let cluster_name = ClusterName::try_from("cluster_name").unwrap();

        assert_eq!(
            proxy_cluster_map
                .cluster_map
                .get(&cluster_name)
                .unwrap()
                .len(),
            2
        );
        assert_eq!(
            proxy_cluster_map
                .cluster_map
                .get(&cluster_name)
                .unwrap()
                .get("127.0.0.1:7000")
                .unwrap()
                .len(),
            1
        );
        assert_eq!(
            proxy_cluster_map
                .cluster_map
                .get(&cluster_name)
                .unwrap()
                .get("127.0.0.1:7001")
                .unwrap()
                .len(),
            1
        );

        let another_cluster = ClusterName::try_from("another_cluster").unwrap();

        assert_eq!(
            proxy_cluster_map
                .cluster_map
                .get(&another_cluster)
                .unwrap()
                .len(),
            1
        );
        assert_eq!(
            proxy_cluster_map
                .cluster_map
                .get(&another_cluster)
                .unwrap()
                .get("127.0.0.1:7002")
                .unwrap()
                .len(),
            1
        );
        // Serialization order is unspecified (HashMap), so compare sorted.
        let mut expected_args = args.clone();
        let mut actual_args = proxy_cluster_map.cluster_map_to_args();
        expected_args.sort();
        actual_args.sort();
        assert_eq!(actual_args, expected_args);
    }

    #[test]
    fn test_clusters_config() {
        let args = vec![
            "mycluster",
            "compression_strategy",
            "allow_all",
            "othercluster",
            "migration_max_blocking_time",
            "66699",
            "mycluster",
            "migration_max_migration_time",
            "666",
        ];
        let mut it = args.iter().map(|s| s.to_string()).peekable();
        let clusters_config = ClusterConfigMap::parse(&mut it).unwrap();
        assert_eq!(clusters_config.config_map.len(), 2);

        let mycluster = ClusterName::try_from("mycluster").unwrap();

        assert_eq!(
            clusters_config
                .config_map
                .get(&mycluster)
                .unwrap()
                .compression_strategy,
            CompressionStrategy::AllowAll
        );
        assert_eq!(
            clusters_config
                .config_map
                .get(&mycluster)
                .unwrap()
                .migration_config
                .max_migration_time,
            666
        );
        assert_eq!(
            clusters_config
                .config_map
                .get(&ClusterName::try_from("othercluster").unwrap())
                .unwrap()
                .migration_config
                .max_blocking_time,
            66699
        );

        // to_args() emits every field (defaults included), so the expected
        // list is the full config set for both clusters.
        let mut result_args = clusters_config.to_args();
        let mut full_args = vec![
            "mycluster",
            "compression_strategy",
            "allow_all",
            "mycluster",
            "migration_max_migration_time",
            "666",
            "mycluster",
            "migration_max_blocking_time",
            "10000",
            "mycluster",
            "migration_scan_interval",
            "500",
            "mycluster",
            "migration_scan_count",
            "16",
            "othercluster",
            "compression_strategy",
            "disabled",
            "othercluster",
            "migration_max_migration_time",
            "10800",
            "othercluster",
            "migration_max_blocking_time",
            "66699",
            "othercluster",
            "migration_scan_interval",
            "500",
            "othercluster",
            "migration_scan_count",
            "16",
        ];
        result_args.sort();
        full_args.sort();
        assert_eq!(result_args, full_args);
    }

    #[test]
    fn test_to_map() {
        // Includes IMPORTING/MIGRATING tagged ranges with migration metadata.
        let arguments = vec![
            "cluster_name",
            "127.0.0.1:7000",
            "1",
            "0-1000",
            "cluster_name",
            "127.0.0.1:7001",
            "IMPORTING",
            "1",
            "1001-2000",
            "233",
            "127.0.0.2:7001",
            "127.0.0.2:6001",
            "127.0.0.1:7001",
            "127.0.0.1:6002",
            "another_cluster",
            "127.0.0.1:7002",
            "MIGRATING",
            "1",
            "0-2000",
            "666",
            "127.0.0.2:7001",
            "127.0.0.2:6001",
            "127.0.0.1:7001",
            "127.0.0.1:6002",
        ];
        let mut it = arguments
            .clone()
            .into_iter()
            .map(|s| s.to_string())
            .peekable();
        let r = ProxyClusterMap::parse(&mut it);
        let proxy_cluster_map = r.unwrap();

        let cluster_map = ProxyClusterMap::new(proxy_cluster_map.cluster_map);
        let mut args = cluster_map.cluster_map_to_args();
        let mut cluster_args: Vec<String> = arguments.into_iter().map(|s| s.to_string()).collect();
        args.sort();
        cluster_args.sort();
        assert_eq!(args, cluster_args);
    }

    #[test]
    fn test_parse_proxy_cluster_meta() {
        let arguments = vec![
            "233",
            "FORCE",
            "cluster_name",
            "127.0.0.1:7000",
            "1",
            "0-1000",
            "cluster_name",
            "127.0.0.1:7001",
            "1",
            "1001-2000",
            "PEER",
            "cluster_name",
            "127.0.0.2:7001",
            "1",
            "2001-3000",
            "cluster_name",
            "127.0.0.2:7002",
            "1",
            "3001-4000",
            "CONFIG",
            "cluster_name",
            "compression_strategy",
            "set_get_only",
        ];
        let mut it = arguments
            .clone()
            .into_iter()
            .map(|s| s.to_string())
            .peekable();
        let (cluster_meta, extended_res) = ProxyClusterMeta::parse(&mut it).unwrap();
        assert!(extended_res.is_ok());
        assert_eq!(cluster_meta.epoch, 233);
        assert!(cluster_meta.flags.force);

        let local = cluster_meta.local.get_map();
        let peer = cluster_meta.peer.get_map();
        let config = cluster_meta.clusters_config.get_map();

        assert_eq!(local.len(), 1);

        let cluster_name = ClusterName::try_from("cluster_name").unwrap();

        assert_eq!(local.get(&cluster_name).unwrap().len(), 2);
        assert_eq!(
            local
                .get(&cluster_name)
                .unwrap()
                .get("127.0.0.1:7000")
                .unwrap()[0]
                .get_range_list()
                .get_ranges()[0]
                .start(),
            0
        );
        assert_eq!(
            local
                .get(&cluster_name)
                .unwrap()
                .get("127.0.0.1:7001")
                .unwrap()[0]
                .get_range_list()
                .get_ranges()[0]
                .start(),
            1001
        );

        assert_eq!(peer.len(), 1);
        assert_eq!(peer.get(&cluster_name).unwrap().len(), 2);
        assert_eq!(
            peer.get(&cluster_name)
                .unwrap()
                .get("127.0.0.2:7001")
                .unwrap()[0]
                .get_range_list()
                .get_ranges()[0]
                .start(),
            2001
        );
        assert_eq!(
            peer.get(&cluster_name)
                .unwrap()
                .get("127.0.0.2:7002")
                .unwrap()[0]
                .get_range_list()
                .get_ranges()[0]
                .start(),
            3001
        );

        assert_eq!(config.len(), 1);
        assert_eq!(
            config.get(&cluster_name).unwrap().compression_strategy,
            CompressionStrategy::SetGetOnly
        );

        // to_args() re-emits all config fields, so append the defaulted
        // fields to the expected list before the sorted comparison.
        let mut args = cluster_meta.to_args();
        let mut cluster_args: Vec<String> = arguments.into_iter().map(|s| s.to_string()).collect();
        let extended = vec![
            "cluster_name",
            "migration_max_migration_time",
            "10800",
            "cluster_name",
            "migration_max_blocking_time",
            "10000",
            "cluster_name",
            "migration_scan_interval",
            "500",
            "cluster_name",
            "migration_scan_count",
            "16",
        ]
        .into_iter()
        .map(|s| s.to_string());
        cluster_args.extend(extended);
        args.sort();
        cluster_args.sort();
        assert_eq!(args, cluster_args);
    }

    #[test]
    fn test_parse_proxy_cluster_meta_without_peer() {
        let arguments = vec![
            "233",
            "FORCE",
            "cluster_name",
            "127.0.0.1:7000",
            "1",
            "0-1000",
            "CONFIG",
            "cluster_name",
            "compression_strategy",
            "set_get_only",
        ];
        let mut it = arguments
            .clone()
            .into_iter()
            .map(|s| s.to_string())
            .peekable();
        let (cluster_meta, extended_res) = ProxyClusterMeta::parse(&mut it).unwrap();
        assert!(extended_res.is_ok());
        assert_eq!(cluster_meta.epoch, 233);
        assert!(cluster_meta.flags.force);
        assert_eq!(
            cluster_meta
                .get_configs()
                .get_or_default(&ClusterName::try_from("cluster_name").unwrap())
                .compression_strategy,
            CompressionStrategy::SetGetOnly
        );
    }

    #[test]
    fn test_missing_config_cluster() {
        let arguments = vec![
            "233",
            "FORCE",
            "cluster_name",
            "127.0.0.1:7000",
            "1",
            "0-1000",
            "PEER",
            "cluster_name",
            "127.0.0.2:7001",
            "1",
            "2001-3000",
            "CONFIG",
            // "cluster_name",  missing cluster_name
            "compression_strategy",
            "set_get_only",
        ];
        let mut it = arguments
            .clone()
            .into_iter()
            .map(|s| s.to_string())
            .peekable();
        // Core metadata is complete, so the bad CONFIG section only
        // surfaces through the inner error.
        let (cluster_meta, extended_res) = ProxyClusterMeta::parse(&mut it).unwrap();
        assert!(extended_res.is_err());
        assert_eq!(cluster_meta.epoch, 233);
        assert!(cluster_meta.flags.force);
    }

    #[test]
    fn test_invalid_config_field() {
        let arguments = vec![
            "233",
            "FORCE",
            "cluster_name",
            "127.0.0.1:7000",
            "1",
            "0-1000",
            "PEER",
            "cluster_name",
            "127.0.0.2:7001",
            "1",
            "2001-3000",
            "CONFIG",
            "cluster_name",
            "config_field_that_does_not_exist",
            "invalid_value",
        ];
        let mut it = arguments
            .clone()
            .into_iter()
            .map(|s| s.to_string())
            .peekable();
        let (cluster_meta, extended_res) = ProxyClusterMeta::parse(&mut it).unwrap();
        assert!(extended_res.is_err());
        assert_eq!(cluster_meta.epoch, 233);
        assert!(cluster_meta.flags.force);
    }

    #[test]
    fn test_incomplete_main_meta_with_config_err() {
        let arguments = vec![
            "233",
            "FORCE",
            "cluster_name",
            "127.0.0.1:7000",
            "1",
            "0-1000",
            "CONFIG",
            "cluster_name",
            "config_field_that_does_not_exist",
            "invalid_value",
        ];
        let mut it = arguments
            .clone()
            .into_iter()
            .map(|s| s.to_string())
            .peekable();
        // No PEER section -> peer map empty -> a bad CONFIG section is
        // a hard parse failure.
        assert!(ProxyClusterMeta::parse(&mut it).is_err());
    }
}
29.137297
182
0.483638